diff --git a/assembly/dependencies-apache-ignite-slim.xml b/assembly/dependencies-apache-ignite-slim.xml index 96b28f8b2e89d4..e98695c8a0c859 100644 --- a/assembly/dependencies-apache-ignite-slim.xml +++ b/assembly/dependencies-apache-ignite-slim.xml @@ -145,20 +145,17 @@ org.apache.ignite:ignite-aop org.apache.ignite:ignite-aws - org.apache.ignite:ignite-camel org.apache.ignite:ignite-cassandra-serializers org.apache.ignite:ignite-cassandra-store org.apache.ignite:ignite-cloud org.apache.ignite:ignite-direct-io org.apache.ignite:ignite-gce org.apache.ignite:ignite-jcl - org.apache.ignite:ignite-jms11 org.apache.ignite:ignite-mesos org.apache.ignite:ignite-ml org.apache.ignite:ignite-ml-h2o-model-parser org.apache.ignite:ignite-ml-spark-model-parser org.apache.ignite:ignite-ml-xgboost-model-parser - org.apache.ignite:ignite-mqtt org.apache.ignite:ignite-osgi org.apache.ignite:ignite-osgi-karaf org.apache.ignite:ignite-osgi-paxlogging @@ -167,7 +164,6 @@ org.apache.ignite:ignite-spark org.apache.ignite:ignite-spark-2.4 org.apache.ignite:ignite-ssh - org.apache.ignite:ignite-storm org.apache.ignite:ignite-web org.apache.ignite:ignite-yarn org.apache.ignite:ignite-zookeeper diff --git a/docs/README.adoc b/docs/README.adoc index 856b993f9f45cc..710f7847fc6785 100644 --- a/docs/README.adoc +++ b/docs/README.adoc @@ -68,6 +68,46 @@ $ docker run -v "$PWD:/srv/jekyll" -p 4000:4000 jekyll/jekyll:latest jekyll s Open `http://localhost:4000/docs[window=_blank]` in your browser. +=== Troubleshooting + +Below are some issues you might hit during an installation of the Jekyll environment or while building the tutorials. +Let us know if you come across a new issue and find a workaround. 
+ +==== MacOS: Issues with FFI library during Jekyll installation + +You should see an error trace similar to this: https://github.com/ffi/ffi/issues/653 + +Attempt to fix the problem by following this sequence of commands (typically it's the last command only): + +[source, text] +---- +brew reinstall libffi +export LDFLAGS="-L/usr/local/opt/libffi/lib" +export CPPFLAGS="-I/usr/local/opt/libffi/include" +export PKG_CONFIG_PATH="/usr/local/opt/libffi/lib/pkgconfig" +gem install --user-install bundler jekyll +---- + +==== MacOS: jekyll-asciidoc gem is not installed by default + +Try to follow this procedure to fix the issue. + +* Comment out the `rm -rf $tmp_dir` at the very end of the `build.sh` script, so that the temp folder is not deleted after the execution. +* Run `build.sh` (fails with `Could not find gem 'jekyll-asciidoc'...` error). +* Go to `tmp/web_site` folder. +* Run `bundle install`. +* Revert the `build.sh` script and run it again. + +==== MacOS: can't build project due to inability to load openssl + +You should see an error like this: + +`LoadError: dlopen(/Users/dmagda/.rbenv/versions/2.6.2/lib/ruby/2.6.0/x86_64-darwin18/digest/sha1.bundle, 9): Library not loaded: /usr/local/opt/openssl/lib/libssl.1.0.0.dylib + Referenced from: /Users/dmagda/.rbenv/versions/2.6.2/lib/ruby/2.6.0/x86_64-darwin18/digest/sha1.bundle` + +Try to upgrade Ruby, rbenv to the latest version (2.7.1) and then reinstall Jekyll. Use the official instructions: +https://jekyllrb.com/docs/installation/ + == How to Contribute If you want to contribute to the documentation, add or modify the relevant page in the `docs/_docs` directory. 
diff --git a/docs/_docs/clustering/network-configuration.adoc b/docs/_docs/clustering/network-configuration.adoc index d656b0c8040c88..8d47b60a853983 100644 --- a/docs/_docs/clustering/network-configuration.adoc +++ b/docs/_docs/clustering/network-configuration.adoc @@ -49,13 +49,26 @@ tab:C++[unsupported] The following table describes some most important properties of `TcpDiscoverySpi`. You can find the complete list of properties in the javadoc:org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi[] javadoc. +[CAUTION] +==== +You should initialize the `IgniteConfiguration.localHost` or `TcpDiscoverySpi.localAddress` parameter with the network +interface that will be used for inter-node communication. By default, a node binds to and listens on all available IP +addresses of an environment it's running on. It can prolong node failures detection if some of the node's addresses are +not reachable from other cluster nodes. +==== + [cols="1,2,1",opts="header"] |=== |Property | Description| Default Value -| `localAddress`| Local host IP address used for discovery. | By default, the node uses the first non-loopback address it finds. If there is no non-loopback address available, then `java.net.InetAddress.getLocalHost()` is used. +| `localAddress`| Local host IP address used for discovery. If set, overrides the `IgniteConfiguration.localHost` setting. | By default, a node binds to all available network addresses. If there is a non-loopback address available, then java.net.InetAddress.getLocalHost() is used. | `localPort` | The port that the node binds to. If set to a non-default value, other cluster nodes must know this port to be able to discover the node. | `47500` | `localPortRange`| If the `localPort` is busy, the node attempts to bind to the next port (incremented by 1) and continues this process until it finds a free port. The `localPortRange` property defines the number of ports the node will try (starting from `localPort`). 
| `100` +| `soLinger`| Specifies a linger-on-close timeout of TCP sockets used by Discovery SPI. See Java `Socket.setSoLinger` API +for details on how to adjust this setting. In Ignite, the timeout defaults to a non-negative value to prevent +link:https://bugs.openjdk.java.net/browse/JDK-8219658[potential deadlocks with SSL connections, window=_blank] but, +as a side effect, this can prolong the detection of cluster node failures. Alternatively, update your JRE version to the +one with the SSL issue fixed and adjust this setting accordingly. | `0` | `reconnectCount` | The number of times the node tries to (re)establish connection to another node. |`10` | `networkTimeout` | The maximum network timeout in milliseconds for network operations. |`5000` | `socketTimeout` | The socket operations timeout. This timeout is used to limit connection time and write-to-socket time. |`5000` @@ -110,7 +123,7 @@ You can find the list of all properties in the javadoc:org.apache.ignite.spi.com [cols="1,2,1",opts="header"] |=== |Property | Description| Default Value -| `localAddress` | The local address for the communication SPI to bind to. | +| `localAddress` | The local address for the communication SPI to bind to. If set, overrides the `IgniteConfiguration.localHost` setting. | | `localPort` | The local port that the node uses for communication. | `47100` diff --git a/docs/_docs/extensions-and-integrations/spring/spring-data.adoc b/docs/_docs/extensions-and-integrations/spring/spring-data.adoc index ece798bd92b7df..8216a591bde990 100644 --- a/docs/_docs/extensions-and-integrations/spring/spring-data.adoc +++ b/docs/_docs/extensions-and-integrations/spring/spring-data.adoc @@ -32,12 +32,18 @@ tab:pom.xml[] ---- org.apache.ignite - ignite-spring-data + ignite-spring-data_2.2 {ignite.version} ---- -- +[NOTE] +==== +If your Spring Data version is earlier than Spring Data 2.2 then set `ignite-spring-data_2.0` +or `ignite-spring-data` as an `artifactId` in the pom.xml configuration. 
+==== + == Apache Ignite Repository Apache Ignite introduces a special `IgniteRepository` interface that extends default `CrudRepository`. This interface @@ -220,7 +226,7 @@ System.out.println("\n>>> Top Person with surname 'Smith': " + == Example -The complete example is available on link:{githubUrl}/examples/src/main/java/org/apache/ignite/examples/springdata[GitHub, window=_blank]. +The complete example is available on link:https://github.com/apache/ignite-extensions/tree/master/modules/spring-data-2.0-ext/examples/main[GitHub, window=_blank]. == Tutorial diff --git a/docs/_docs/extensions-and-integrations/streaming/camel-streamer.adoc b/docs/_docs/extensions-and-integrations/streaming/camel-streamer.adoc index 8734d8133e95dc..a42129383c68df 100644 --- a/docs/_docs/extensions-and-integrations/streaming/camel-streamer.adoc +++ b/docs/_docs/extensions-and-integrations/streaming/camel-streamer.adoc @@ -43,7 +43,7 @@ to interact with Ignite Caches, Compute, Events, Messaging, etc. from within a C == Maven Dependency -To make use of the `ignite-camel` streamer, you need to add the following dependency: +To make use of the `ignite-camel-ext` streamer, you need to add the following dependency: [tabs] -- @@ -52,8 +52,8 @@ tab:pom.xml[] ---- org.apache.ignite - ignite-camel - ${ignite.version} + ignite-camel-ext + ${ignite-camel-ext.version} ---- -- diff --git a/docs/_docs/extensions-and-integrations/streaming/flink-streamer.adoc b/docs/_docs/extensions-and-integrations/streaming/flink-streamer.adoc index f25ed05dce9b27..92ab398167d670 100644 --- a/docs/_docs/extensions-and-integrations/streaming/flink-streamer.adoc +++ b/docs/_docs/extensions-and-integrations/streaming/flink-streamer.adoc @@ -21,7 +21,7 @@ Starting data transfer to Ignite cache can be done with the following steps. . 
Import Ignite Flink Sink Module in Maven Project If you are using Maven to manage dependencies of your project, you can add Flink module -dependency like this (replace `${ignite.version}` with actual Ignite version you are +dependency like this (replace `${ignite-flink-ext.version}` with actual Ignite Flink Extension version you are interested in): + [tabs] @@ -38,8 +38,8 @@ tab:pom.xml[] ... org.apache.ignite - ignite-flink - ${ignite.version} + ignite-flink-ext + ${ignite-flink-ext.version} ... diff --git a/docs/_docs/extensions-and-integrations/streaming/flume-sink.adoc b/docs/_docs/extensions-and-integrations/streaming/flume-sink.adoc index 97a741df8add71..3697c7cf08e182 100644 --- a/docs/_docs/extensions-and-integrations/streaming/flume-sink.adoc +++ b/docs/_docs/extensions-and-integrations/streaming/flume-sink.adoc @@ -41,7 +41,7 @@ plugins.d/ `-- libext |-- cache-api-1.0.0.jar |-- ignite-core-x.x.x.jar - |-- ignite-flume-x.x.x.jar <-- IgniteSink + |-- ignite-flume-ext.x.x.x.jar <-- IgniteSink |-- ignite-spring-x.x.x.jar |-- spring-aop-4.1.0.RELEASE.jar |-- spring-beans-4.1.0.RELEASE.jar diff --git a/docs/_docs/extensions-and-integrations/streaming/jms-streamer.adoc b/docs/_docs/extensions-and-integrations/streaming/jms-streamer.adoc index 5c7c883ec6e692..b3f9be9864bb74 100644 --- a/docs/_docs/extensions-and-integrations/streaming/jms-streamer.adoc +++ b/docs/_docs/extensions-and-integrations/streaming/jms-streamer.adoc @@ -116,8 +116,8 @@ tab:pom.xml[] ---- org.apache.ignite - ignite-jms11 - ${ignite.version} + ignite-jms11-ext + ${ignite-jms11-ext.version} ---- -- diff --git a/docs/_docs/extensions-and-integrations/streaming/kafka-streamer.adoc b/docs/_docs/extensions-and-integrations/streaming/kafka-streamer.adoc index f00946a78d62e8..a45fa4d792f8f2 100644 --- a/docs/_docs/extensions-and-integrations/streaming/kafka-streamer.adoc +++ b/docs/_docs/extensions-and-integrations/streaming/kafka-streamer.adoc @@ -153,7 +153,7 @@ 
http://node1:8080/ignite?cmd=size&cacheName=cache1 == Streaming data with Ignite Kafka Streamer Module If you are using Maven to manage dependencies of your project, first of all you will have to add Kafka Streamer module -dependency like this (replace `${ignite.version}` with actual Ignite version you are interested in): +dependency like this (replace `${ignite-kafka-ext.version}` with actual Ignite Kafka Extension version you are interested in): [tabs] -- @@ -169,8 +169,8 @@ tab:pom.xml[] ... org.apache.ignite - ignite-kafka - ${ignite.version} + ignite-kafka-ext + ${ignite-kafka-ext.version} ... diff --git a/docs/_docs/extensions-and-integrations/streaming/mqtt-streamer.adoc b/docs/_docs/extensions-and-integrations/streaming/mqtt-streamer.adoc index f7ec04cf8c3060..1339c97fce2685 100644 --- a/docs/_docs/extensions-and-integrations/streaming/mqtt-streamer.adoc +++ b/docs/_docs/extensions-and-integrations/streaming/mqtt-streamer.adoc @@ -73,4 +73,4 @@ streamer.start(); ---- -- -Refer to the Javadocs of the `ignite-mqtt` module for more info on the available options. +Refer to the Javadocs of the `ignite-mqtt-ext` module for more info on the available options. diff --git a/docs/_docs/extensions-and-integrations/streaming/rocketmq-streamer.adoc b/docs/_docs/extensions-and-integrations/streaming/rocketmq-streamer.adoc index 4f7dcbb9e5854b..a302ca722c4b22 100644 --- a/docs/_docs/extensions-and-integrations/streaming/rocketmq-streamer.adoc +++ b/docs/_docs/extensions-and-integrations/streaming/rocketmq-streamer.adoc @@ -20,7 +20,7 @@ to Ignite. To use Ignite RocketMQ Streamer module . Import it to your Maven project. 
If you are using Maven to manage dependencies of your project, you can add an Ignite -RocketMQ module dependency like this (replace `${ignite.version}` with actual Ignite version you are interested in): +RocketMQ module dependency like this (replace `${ignite-rocketmq-ext.version}` with actual Ignite RocketMQ Extension version you are interested in): + [tabs] -- @@ -36,8 +36,8 @@ tab:pom.xml[] ... org.apache.ignite - ignite-rocketmq - ${ignite.version} + ignite-rocketmq-ext + ${ignite-rocketmq-ext.version} ... diff --git a/docs/_docs/extensions-and-integrations/streaming/storm-streamer.adoc b/docs/_docs/extensions-and-integrations/streaming/storm-streamer.adoc index e6871b79807753..887712e1058fd1 100644 --- a/docs/_docs/extensions-and-integrations/streaming/storm-streamer.adoc +++ b/docs/_docs/extensions-and-integrations/streaming/storm-streamer.adoc @@ -19,7 +19,7 @@ Apache Ignite Storm Streamer module provides streaming via http://storm.apache.o Starting data transfer to Ignite can be done with the following steps. . Import Ignite Storm Streamer Module In Maven Project. If you are using Maven to manage dependencies of your project, -you can add Storm module dependency like this (replace `${ignite.version}` with actual Ignite version you are interested in): +you can add Storm module dependency like this (replace `${ignite-storm-ext.version}` with actual Ignite Storm Extension version you are interested in): + [tabs] -- @@ -35,8 +35,8 @@ tab:pom.xml[] ... org.apache.ignite - ignite-storm - ${ignite.version} + ignite-storm-ext + ${ignite-storm-ext.version} ... 
diff --git a/docs/_docs/extensions-and-integrations/streaming/twitter-streamer.adoc b/docs/_docs/extensions-and-integrations/streaming/twitter-streamer.adoc index 8c6e65737f6037..4f47c60f250632 100644 --- a/docs/_docs/extensions-and-integrations/streaming/twitter-streamer.adoc +++ b/docs/_docs/extensions-and-integrations/streaming/twitter-streamer.adoc @@ -18,7 +18,7 @@ Ignite Twitter Streamer module consumes tweets from Twitter and feeds the transf To stream data from Twitter into Ignite, you need to: -. Import Ignite Twitter Module with Maven and replace `${ignite.version}` with the actual Ignite version you are interested in. +. Import Ignite Twitter Module with Maven and replace `${ignite-twitter-ext.version}` with the actual Ignite Twitter Extension version you are interested in. + [tabs] -- @@ -27,8 +27,8 @@ tab:pom.xml[] ---- org.apache.ignite - ignite-twitter - ${ignite.version} + ignite-twitter-ext + ${ignite-twitter-ext.version} ---- -- diff --git a/docs/_docs/extensions-and-integrations/streaming/zeromq-streamer.adoc b/docs/_docs/extensions-and-integrations/streaming/zeromq-streamer.adoc index 9432624be1cfa4..918c0e827f94ea 100644 --- a/docs/_docs/extensions-and-integrations/streaming/zeromq-streamer.adoc +++ b/docs/_docs/extensions-and-integrations/streaming/zeromq-streamer.adoc @@ -29,8 +29,8 @@ tab:pom.xml[] ... org.apache.ignite - ignite-zeromq - ${ignite.version} + ignite-zeromq-ext + ${ignite-zeromq-ext.version} ... 
diff --git a/docs/_docs/machine-learning/binary-classification/decision-trees.adoc b/docs/_docs/machine-learning/binary-classification/decision-trees.adoc index 57ab7bf21b1d14..bc9ff058402ced 100644 --- a/docs/_docs/machine-learning/binary-classification/decision-trees.adoc +++ b/docs/_docs/machine-learning/binary-classification/decision-trees.adoc @@ -39,12 +39,12 @@ The model works this way - the split process stops when either the algorithm has == Model -The Model in a decision tree classification is represented by the class `DecisionTreeNode`. We can make a prediction for a given vector of features in the following way: +The Model in a decision tree classification is represented by the class `DecisionTreeModel`. We can make a prediction for a given vector of features in the following way: [source, java] ---- -DecisionTreeNode mdl = ...; +DecisionTreeModel mdl = ...; double prediction = mdl.apply(observation); ---- @@ -68,7 +68,7 @@ DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTraine ); // Train model. 
-DecisionTreeNode mdl = trainer.fit(ignite, dataCache, vectorizer); +DecisionTreeModel mdl = trainer.fit(ignite, dataCache, vectorizer); ---- diff --git a/docs/_docs/machine-learning/importing-model/model-import-from-apache-spark.adoc b/docs/_docs/machine-learning/importing-model/model-import-from-apache-spark.adoc index 92992f87200ca1..065cb78d35bcbc 100644 --- a/docs/_docs/machine-learning/importing-model/model-import-from-apache-spark.adoc +++ b/docs/_docs/machine-learning/importing-model/model-import-from-apache-spark.adoc @@ -71,7 +71,7 @@ To load in Ignite ML you should use SparkModelParser class via method parse() ca [source, java] ---- -DecisionTreeNode mdl = (DecisionTreeNode)SparkModelParser.parse( +DecisionTreeModel mdl = (DecisionTreeModel)SparkModelParser.parse( SPARK_MDL_PATH, SupportedSparkModels.DECISION_TREE ); diff --git a/docs/_docs/machine-learning/model-selection/cross-validation.adoc b/docs/_docs/machine-learning/model-selection/cross-validation.adoc index 8e64c68e67f4ec..39e00f1a5c6f31 100644 --- a/docs/_docs/machine-learning/model-selection/cross-validation.adoc +++ b/docs/_docs/machine-learning/model-selection/cross-validation.adoc @@ -27,7 +27,7 @@ Let’s imagine that we have a trainer, a training set and we want to make cross DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(4, 0); // Create cross-validation instance -CrossValidation scoreCalculator +CrossValidation scoreCalculator = new CrossValidation<>(); // Set up the cross-validation process @@ -67,7 +67,7 @@ Pipeline pipeline // Create cross-validation instance -CrossValidation scoreCalculator +CrossValidation scoreCalculator = new CrossValidation<>(); // Set up the cross-validation process diff --git a/docs/_docs/machine-learning/model-selection/pipeline-api.adoc b/docs/_docs/machine-learning/model-selection/pipeline-api.adoc index 7f0cb93e3bcba9..9b2798c25865d7 100644 --- a/docs/_docs/machine-learning/model-selection/pipeline-api.adoc +++ 
b/docs/_docs/machine-learning/model-selection/pipeline-api.adoc @@ -64,7 +64,7 @@ Preprocessor normalizationPreprocessor = new NormalizationTrain DecisionTreeClassificationTrainer trainerCV = new DecisionTreeClassificationTrainer(); -CrossValidation scoreCalculator = new CrossValidation<>(); +CrossValidation scoreCalculator = new CrossValidation<>(); ParamGrid paramGrid = new ParamGrid() .addHyperParam("maxDeep", trainerCV::withMaxDeep, new Double[] {1.0, 2.0, 3.0, 4.0, 5.0, 10.0}) @@ -101,7 +101,7 @@ Pipeline pipeline = new Pipeline()) .addTrainer(trainer); -CrossValidation scoreCalculator = new CrossValidation<>(); +CrossValidation scoreCalculator = new CrossValidation<>(); ParamGrid paramGrid = new ParamGrid() .addHyperParam("maxDeep", trainer::withMaxDeep, new Double[] {1.0, 2.0, 3.0, 4.0, 5.0, 10.0}) diff --git a/docs/_docs/machine-learning/regression/decision-trees-regression.adoc b/docs/_docs/machine-learning/regression/decision-trees-regression.adoc index 48f9d5cc289e22..2abbaa8dc71ddc 100644 --- a/docs/_docs/machine-learning/regression/decision-trees-regression.adoc +++ b/docs/_docs/machine-learning/regression/decision-trees-regression.adoc @@ -39,12 +39,12 @@ The model works this way - the split process stops when either the algorithm has == Model -The Model in a decision tree classification is represented by the class `DecisionTreeNode`. We can make a prediction for a given vector of features in the following way: +The Model in a decision tree classification is represented by the class `DecisionTreeModel`. We can make a prediction for a given vector of features in the following way: [source, java] ---- -DecisionTreeNode mdl = ...; +DecisionTreeModel mdl = ...; double prediction = mdl.apply(observation); ---- @@ -67,7 +67,7 @@ DecisionTreeRegressionTrainer trainer = new DecisionTreeRegressionTrainer( ); // Train model. 
-DecisionTreeNode mdl = trainer.fit(ignite, dataCache, vectorizer); +DecisionTreeModel mdl = trainer.fit(ignite, dataCache, vectorizer); ---- == Examples diff --git a/docs/_docs/monitoring-metrics/system-views.adoc b/docs/_docs/monitoring-metrics/system-views.adoc index 92b713ff1ff118..1d400c6681800c 100644 --- a/docs/_docs/monitoring-metrics/system-views.adoc +++ b/docs/_docs/monitoring-metrics/system-views.adoc @@ -32,7 +32,7 @@ See the link:SQL/schemas[Understanding Schemas] page for the information on how == Querying System Views -To query the system views using the link:sqlline[SQLLine] tool, connect to the SYS schema as follows: +To query the system views using the link:tools/sqlline[SQLLine] tool, connect to the SYS schema as follows: [source, shell] ---- diff --git a/docs/_docs/quick-start/sql.adoc b/docs/_docs/quick-start/sql.adoc index 7d1c3dfe829344..c1d1eed2e13f5a 100644 --- a/docs/_docs/quick-start/sql.adoc +++ b/docs/_docs/quick-start/sql.adoc @@ -126,4 +126,4 @@ Easy! From here, you may want to: * Read more about using Ignite and link:SQL/sql-introduction[SQL] -* Read more about using link:sqlline[sqlline] +* Read more about using link:tools/sqlline[sqlline] diff --git a/docs/_docs/security/ssl-tls.adoc b/docs/_docs/security/ssl-tls.adoc index bf5a90e2bfd1d8..b56b2094d586cf 100644 --- a/docs/_docs/security/ssl-tls.adoc +++ b/docs/_docs/security/ssl-tls.adoc @@ -32,6 +32,14 @@ To enable SSL/TLS for cluster nodes, configure an `SSLContext` factory in the no You can use the `org.apache.ignite.ssl.SslContextFactory`, which is the default factory that uses a configurable keystore to initialize the SSL context. //You can also implement your own `SSLContext` factory. +[CAUTION] +==== +Ensure that your version of the JVM addresses +link:https://bugs.openjdk.java.net/browse/JDK-8219658[the following issue, window=_blank] that can cause deadlocks +in SSL connections. 
If your JVM is affected but can't be updated, then set +the link:clustering/network-configuration[`TcpDiscoverySpi.soLinger`] parameter to a non-negative value. +==== + Below is an example of `SslContextFactory` configuration: [tabs] diff --git a/docs/_docs/sql-reference/operational-commands.adoc b/docs/_docs/sql-reference/operational-commands.adoc index f5dea2254ce6d6..be7223f6bb51fe 100644 --- a/docs/_docs/sql-reference/operational-commands.adoc +++ b/docs/_docs/sql-reference/operational-commands.adoc @@ -115,7 +115,7 @@ While streaming mode allows you to load data much faster than other data loading 2. Due to streaming mode's asynchronous nature, you cannot know update counts for every statement executed; all JDBC/ODBC commands returning update counts will return 0. === Example -As an example, you can use the sample world.sql file that is shipped with the latest Ignite distribution. It can be found in the `{IGNITE_HOME}/examples/sql/` directory. You can use the `run` command from link:sqlline[SQLLine, window=_blank], as shown below: +As an example, you can use the sample world.sql file that is shipped with the latest Ignite distribution. It can be found in the `{IGNITE_HOME}/examples/sql/` directory. 
You can use the `run` command from link:tools/sqlline[SQLLine, window=_blank], as shown below: [source,shell] ---- diff --git a/docs/_docs/thin-client-comparison.csv b/docs/_docs/thin-client-comparison.csv index ee2fe80ab1c5cb..232518383cda93 100644 --- a/docs/_docs/thin-client-comparison.csv +++ b/docs/_docs/thin-client-comparison.csv @@ -7,9 +7,10 @@ Async Operations,No,{yes},No,{yes},{yes},{yes} SSL/TLS,{yes},{yes},{yes},{yes},{yes},{yes} Authentication,{yes},{yes},{yes},{yes},{yes},{yes} Partition Awareness,{yes},{yes},{yes},{yes},{yes},No -Failover,{yes},No,{yes},{yes},{yes},{yes} -Transactions,{yes},No,No,No,No,No +Failover,{yes},{yes},{yes},{yes},{yes},{yes} +Transactions,{yes},{yes},No,No,No,No Cluster API,{yes},{yes},No,No,No,No -Cluster discovery,No,{yes},No,No,No,No Compute,{yes},{yes},No,No,No,No -Service invocation,{yes},No,No,No,No,No \ No newline at end of file +Service invocation,{yes},{yes},No,No,No,No +Server Discovery,No,{yes},No,No,No,No +Server Discovery in Kubernetes,{yes},No,No,No,No,No \ No newline at end of file diff --git a/docs/_docs/thin-clients/getting-started-with-thin-clients.adoc b/docs/_docs/thin-clients/getting-started-with-thin-clients.adoc index 7860bf49491de0..5e0c37ccd26603 100644 --- a/docs/_docs/thin-clients/getting-started-with-thin-clients.adoc +++ b/docs/_docs/thin-clients/getting-started-with-thin-clients.adoc @@ -46,7 +46,7 @@ include::thin-client-comparison.csv[] === Client Connection Failover -All thin clients (except for the .NET thin client) support a connection failover mechanism, whereby the client automatically switches to an available node in case of the current node or connection failure. +All thin clients support a connection failover mechanism, whereby the client automatically switches to an available node in case of the current node or connection failure. For this mechanism to work, you need to provide a list of node addresses you want to use for failover purposes in the client configuration. 
Refer to the specific client documentation for more details. diff --git a/examples/pom.xml b/examples/pom.xml index 388a78ad029b52..25a5b87852a8b3 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -106,6 +106,12 @@ org.apache.ignite ignite-ml ${project.version} + + + com.fasterxml.jackson.core + * + + @@ -248,6 +254,16 @@ net.alchim31.maven scala-maven-plugin + + org.apache.maven.plugins + maven-checkstyle-plugin + + + ${spark.folder} + ${spark.test.folder} + + + @@ -279,6 +295,21 @@ ${project.version} + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + ${lgpl.folder} + ${lgpl.test.folder} + + + + + spark-2.4 @@ -321,6 +352,16 @@ net.alchim31.maven scala-maven-plugin + + org.apache.maven.plugins + maven-checkstyle-plugin + + + ${spark.folder} + ${spark.test.folder} + + + diff --git a/examples/src/main/java-lgpl/org/apache/ignite/examples/misc/schedule/ComputeScheduleExample.java b/examples/src/main/java-lgpl/org/apache/ignite/examples/misc/schedule/ComputeScheduleExample.java index d480309b84bfb4..c58de3bc4b10d1 100644 --- a/examples/src/main/java-lgpl/org/apache/ignite/examples/misc/schedule/ComputeScheduleExample.java +++ b/examples/src/main/java-lgpl/org/apache/ignite/examples/misc/schedule/ComputeScheduleExample.java @@ -29,6 +29,9 @@ * Demonstrates a cron-based {@link Runnable} execution scheduling. * Test runnable object broadcasts a phrase to all cluster nodes every minute * three times with initial scheduling delay equal to five seconds. + * This example uses an Ignite extension to Cron syntax, + * which can be used to specify an initial delay in seconds and a number of runs. + * https://apacheignite.readme.io/docs/cron-based-scheduling#syntax-extension *

* Remote nodes should always be started with special configuration file which * enables P2P class loading: {@code 'ignite.{sh|bat} examples/config/example-ignite.xml'}. @@ -68,12 +71,19 @@ public static void main(String[] args) throws IgniteException { return invocations; } }, - "{9, 5, 3} * * * * *" // Cron expression. + // Callable object broadcasts a phrase to all cluster nodes every minute + // three times with initial scheduling delay equal to five seconds. + // https://apacheignite.readme.io/docs/cron-based-scheduling#syntax-extension + "{5, 3} * * * * *" // Cron expression. ); while (!fut.isDone()) System.out.println(">>> Invocation #: " + fut.get()); + // In case the Cron expression is invalid, SchedulerFuture will be immediately completed with an error, + // that provides additional details. + fut.get(); + System.out.println(); System.out.println(">>> Schedule future is done and has been unscheduled."); System.out.println(">>> Check all nodes for hello message output."); diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/clustering/KMeansClusterizationExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/clustering/KMeansClusterizationExample.java index beee4f6a721e53..3127418f9653fd 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/clustering/KMeansClusterizationExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/clustering/KMeansClusterizationExample.java @@ -73,8 +73,8 @@ public static void main(String[] args) throws IOException { ); System.out.println(">>> KMeans centroids"); - Tracer.showAscii(mdl.getCenters()[0]); - Tracer.showAscii(mdl.getCenters()[1]); + Tracer.showAscii(mdl.centers()[0]); + Tracer.showAscii(mdl.centers()[1]); System.out.println(">>>"); System.out.println(">>> --------------------------------------------"); diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/ANNClassificationExportImportExample.java 
b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/ANNClassificationExportImportExample.java new file mode 100644 index 00000000000000..618e4c6cdaf463 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/ANNClassificationExportImportExample.java @@ -0,0 +1,339 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.UUID; +import javax.cache.Cache; +import org.apache.commons.math3.util.Precision; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.cache.query.QueryCursor; +import org.apache.ignite.cache.query.ScanQuery; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DoubleArrayVectorizer; +import org.apache.ignite.ml.knn.NNClassificationModel; +import org.apache.ignite.ml.knn.ann.ANNClassificationModel; +import org.apache.ignite.ml.knn.ann.ANNClassificationTrainer; +import org.apache.ignite.ml.math.distances.EuclideanDistance; +import org.apache.ignite.ml.math.distances.ManhattanDistance; +import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector; + +/** + * Run ANN multi-class classification trainer ({@link ANNClassificationTrainer}) over distributed dataset. + *

+ * Code in this example launches Ignite grid and fills the cache with test data points (based on the + * Iris dataset).

+ *

+ * After that it trains the model based on the specified data using + * kNN algorithm.

+ *

+ * Finally, this example loops over the test set of data points, applies the trained model to predict what cluster does + * this point belong to, and compares prediction to expected outcome (ground truth).

+ *

+ * You can change the test data used in this example and re-run it to explore this algorithm further.

+ */ +public class ANNClassificationExportImportExample { + /** + * Run example. + */ + public static void main(String[] args) throws IOException { + System.out.println(); + System.out.println(">>> ANN multi-class classification algorithm over cached dataset usage example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println(">>> Ignite grid started."); + + IgniteCache dataCache = null; + Path jsonMdlPath = null; + try { + dataCache = getTestCache(ignite); + + ANNClassificationTrainer trainer = new ANNClassificationTrainer() + .withDistance(new ManhattanDistance()) + .withK(50) + .withMaxIterations(1000) + .withEpsilon(1e-2); + + ANNClassificationModel mdl = (ANNClassificationModel) trainer.fit( + ignite, + dataCache, + new DoubleArrayVectorizer().labeled(Vectorizer.LabelCoordinate.FIRST) + ).withK(5) + .withDistanceMeasure(new EuclideanDistance()) + .withWeighted(true); + + System.out.println("\n>>> Exported ANN model: " + mdl.toString(true)); + + double accuracy = evaluateModel(dataCache, mdl); + + System.out.println("\n>>> Accuracy for exported ANN model:" + accuracy); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + ANNClassificationModel modelImportedFromJSON = ANNClassificationModel.fromJSON(jsonMdlPath); + + System.out.println("\n>>> Imported ANN model: " + modelImportedFromJSON.toString(true)); + + accuracy = evaluateModel(dataCache, modelImportedFromJSON); + + System.out.println("\n>>> Accuracy for imported ANN model:" + accuracy); + + System.out.println(">>> ANN multi-class classification algorithm over cached dataset usage example completed."); + } + finally { + if (dataCache != null) + dataCache.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } + + private static double evaluateModel(IgniteCache dataCache, NNClassificationModel knnMdl) { + int amountOfErrors = 0; + 
int totalAmount = 0; + + double accuracy; + try (QueryCursor> observations = dataCache.query(new ScanQuery<>())) { + System.out.println(">>> ---------------------------------"); + System.out.println(">>> | Prediction\t| Ground Truth\t|"); + System.out.println(">>> ---------------------------------"); + + for (Cache.Entry observation : observations) { + double[] val = observation.getValue(); + double[] inputs = Arrays.copyOfRange(val, 1, val.length); + double groundTruth = val[0]; + + double prediction = knnMdl.predict(new DenseVector(inputs)); + + totalAmount++; + if (!Precision.equals(groundTruth, prediction, Precision.EPSILON)) + amountOfErrors++; + + System.out.printf(">>> | %.4f\t\t| %.4f\t\t|\n", prediction, groundTruth); + } + + System.out.println(">>> ---------------------------------"); + + accuracy = 1 - amountOfErrors / (double) totalAmount; + + } + return accuracy; + } + + /** + * Fills cache with data and returns it. + * + * @param ignite Ignite instance. + * @return Filled Ignite Cache. + */ + private static IgniteCache getTestCache(Ignite ignite) { + CacheConfiguration cacheConfiguration = new CacheConfiguration<>(); + cacheConfiguration.setName("TEST_" + UUID.randomUUID()); + cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10)); + + IgniteCache cache = ignite.createCache(cacheConfiguration); + + for (int k = 0; k < 10; k++) { // multiplies the Iris dataset k times. + for (int i = 0; i < data.length; i++) + cache.put(k * 10000 + i, mutate(data[i], k)); + } + + return cache; + } + + /** + * Tiny changing of data depending on k parameter. + * + * @param datum The vector data. + * @param k The passed parameter. + * @return The changed vector data. + */ + private static double[] mutate(double[] datum, int k) { + for (int i = 0; i < datum.length; i++) + datum[i] += k / 100000; + return datum; + } + + /** + * The Iris dataset. 
+ */ + private static final double[][] data = { + {1, 5.1, 3.5, 1.4, 0.2}, + {1, 4.9, 3, 1.4, 0.2}, + {1, 4.7, 3.2, 1.3, 0.2}, + {1, 4.6, 3.1, 1.5, 0.2}, + {1, 5, 3.6, 1.4, 0.2}, + {1, 5.4, 3.9, 1.7, 0.4}, + {1, 4.6, 3.4, 1.4, 0.3}, + {1, 5, 3.4, 1.5, 0.2}, + {1, 4.4, 2.9, 1.4, 0.2}, + {1, 4.9, 3.1, 1.5, 0.1}, + {1, 5.4, 3.7, 1.5, 0.2}, + {1, 4.8, 3.4, 1.6, 0.2}, + {1, 4.8, 3, 1.4, 0.1}, + {1, 4.3, 3, 1.1, 0.1}, + {1, 5.8, 4, 1.2, 0.2}, + {1, 5.7, 4.4, 1.5, 0.4}, + {1, 5.4, 3.9, 1.3, 0.4}, + {1, 5.1, 3.5, 1.4, 0.3}, + {1, 5.7, 3.8, 1.7, 0.3}, + {1, 5.1, 3.8, 1.5, 0.3}, + {1, 5.4, 3.4, 1.7, 0.2}, + {1, 5.1, 3.7, 1.5, 0.4}, + {1, 4.6, 3.6, 1, 0.2}, + {1, 5.1, 3.3, 1.7, 0.5}, + {1, 4.8, 3.4, 1.9, 0.2}, + {1, 5, 3, 1.6, 0.2}, + {1, 5, 3.4, 1.6, 0.4}, + {1, 5.2, 3.5, 1.5, 0.2}, + {1, 5.2, 3.4, 1.4, 0.2}, + {1, 4.7, 3.2, 1.6, 0.2}, + {1, 4.8, 3.1, 1.6, 0.2}, + {1, 5.4, 3.4, 1.5, 0.4}, + {1, 5.2, 4.1, 1.5, 0.1}, + {1, 5.5, 4.2, 1.4, 0.2}, + {1, 4.9, 3.1, 1.5, 0.1}, + {1, 5, 3.2, 1.2, 0.2}, + {1, 5.5, 3.5, 1.3, 0.2}, + {1, 4.9, 3.1, 1.5, 0.1}, + {1, 4.4, 3, 1.3, 0.2}, + {1, 5.1, 3.4, 1.5, 0.2}, + {1, 5, 3.5, 1.3, 0.3}, + {1, 4.5, 2.3, 1.3, 0.3}, + {1, 4.4, 3.2, 1.3, 0.2}, + {1, 5, 3.5, 1.6, 0.6}, + {1, 5.1, 3.8, 1.9, 0.4}, + {1, 4.8, 3, 1.4, 0.3}, + {1, 5.1, 3.8, 1.6, 0.2}, + {1, 4.6, 3.2, 1.4, 0.2}, + {1, 5.3, 3.7, 1.5, 0.2}, + {1, 5, 3.3, 1.4, 0.2}, + {2, 7, 3.2, 4.7, 1.4}, + {2, 6.4, 3.2, 4.5, 1.5}, + {2, 6.9, 3.1, 4.9, 1.5}, + {2, 5.5, 2.3, 4, 1.3}, + {2, 6.5, 2.8, 4.6, 1.5}, + {2, 5.7, 2.8, 4.5, 1.3}, + {2, 6.3, 3.3, 4.7, 1.6}, + {2, 4.9, 2.4, 3.3, 1}, + {2, 6.6, 2.9, 4.6, 1.3}, + {2, 5.2, 2.7, 3.9, 1.4}, + {2, 5, 2, 3.5, 1}, + {2, 5.9, 3, 4.2, 1.5}, + {2, 6, 2.2, 4, 1}, + {2, 6.1, 2.9, 4.7, 1.4}, + {2, 5.6, 2.9, 3.6, 1.3}, + {2, 6.7, 3.1, 4.4, 1.4}, + {2, 5.6, 3, 4.5, 1.5}, + {2, 5.8, 2.7, 4.1, 1}, + {2, 6.2, 2.2, 4.5, 1.5}, + {2, 5.6, 2.5, 3.9, 1.1}, + {2, 5.9, 3.2, 4.8, 1.8}, + {2, 6.1, 2.8, 4, 1.3}, + {2, 6.3, 2.5, 4.9, 1.5}, + {2, 6.1, 2.8, 4.7, 1.2}, + {2, 6.4, 
2.9, 4.3, 1.3}, + {2, 6.6, 3, 4.4, 1.4}, + {2, 6.8, 2.8, 4.8, 1.4}, + {2, 6.7, 3, 5, 1.7}, + {2, 6, 2.9, 4.5, 1.5}, + {2, 5.7, 2.6, 3.5, 1}, + {2, 5.5, 2.4, 3.8, 1.1}, + {2, 5.5, 2.4, 3.7, 1}, + {2, 5.8, 2.7, 3.9, 1.2}, + {2, 6, 2.7, 5.1, 1.6}, + {2, 5.4, 3, 4.5, 1.5}, + {2, 6, 3.4, 4.5, 1.6}, + {2, 6.7, 3.1, 4.7, 1.5}, + {2, 6.3, 2.3, 4.4, 1.3}, + {2, 5.6, 3, 4.1, 1.3}, + {2, 5.5, 2.5, 4, 1.3}, + {2, 5.5, 2.6, 4.4, 1.2}, + {2, 6.1, 3, 4.6, 1.4}, + {2, 5.8, 2.6, 4, 1.2}, + {2, 5, 2.3, 3.3, 1}, + {2, 5.6, 2.7, 4.2, 1.3}, + {2, 5.7, 3, 4.2, 1.2}, + {2, 5.7, 2.9, 4.2, 1.3}, + {2, 6.2, 2.9, 4.3, 1.3}, + {2, 5.1, 2.5, 3, 1.1}, + {2, 5.7, 2.8, 4.1, 1.3}, + {3, 6.3, 3.3, 6, 2.5}, + {3, 5.8, 2.7, 5.1, 1.9}, + {3, 7.1, 3, 5.9, 2.1}, + {3, 6.3, 2.9, 5.6, 1.8}, + {3, 6.5, 3, 5.8, 2.2}, + {3, 7.6, 3, 6.6, 2.1}, + {3, 4.9, 2.5, 4.5, 1.7}, + {3, 7.3, 2.9, 6.3, 1.8}, + {3, 6.7, 2.5, 5.8, 1.8}, + {3, 7.2, 3.6, 6.1, 2.5}, + {3, 6.5, 3.2, 5.1, 2}, + {3, 6.4, 2.7, 5.3, 1.9}, + {3, 6.8, 3, 5.5, 2.1}, + {3, 5.7, 2.5, 5, 2}, + {3, 5.8, 2.8, 5.1, 2.4}, + {3, 6.4, 3.2, 5.3, 2.3}, + {3, 6.5, 3, 5.5, 1.8}, + {3, 7.7, 3.8, 6.7, 2.2}, + {3, 7.7, 2.6, 6.9, 2.3}, + {3, 6, 2.2, 5, 1.5}, + {3, 6.9, 3.2, 5.7, 2.3}, + {3, 5.6, 2.8, 4.9, 2}, + {3, 7.7, 2.8, 6.7, 2}, + {3, 6.3, 2.7, 4.9, 1.8}, + {3, 6.7, 3.3, 5.7, 2.1}, + {3, 7.2, 3.2, 6, 1.8}, + {3, 6.2, 2.8, 4.8, 1.8}, + {3, 6.1, 3, 4.9, 1.8}, + {3, 6.4, 2.8, 5.6, 2.1}, + {3, 7.2, 3, 5.8, 1.6}, + {3, 7.4, 2.8, 6.1, 1.9}, + {3, 7.9, 3.8, 6.4, 2}, + {3, 6.4, 2.8, 5.6, 2.2}, + {3, 6.3, 2.8, 5.1, 1.5}, + {3, 6.1, 2.6, 5.6, 1.4}, + {3, 7.7, 3, 6.1, 2.3}, + {3, 6.3, 3.4, 5.6, 2.4}, + {3, 6.4, 3.1, 5.5, 1.8}, + {3, 6, 3, 4.8, 1.8}, + {3, 6.9, 3.1, 5.4, 2.1}, + {3, 6.7, 3.1, 5.6, 2.4}, + {3, 6.9, 3.1, 5.1, 2.3}, + {3, 5.8, 2.7, 5.1, 1.9}, + {3, 6.8, 3.2, 5.9, 2.3}, + {3, 6.7, 3.3, 5.7, 2.5}, + {3, 6.7, 3, 5.2, 2.3}, + {3, 6.3, 2.5, 5, 1.9}, + {3, 6.5, 3, 5.2, 2}, + {3, 6.2, 3.4, 5.4, 2.3}, + {3, 5.9, 3, 5.1, 1.8} + }; +} diff --git 
a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/CompoundNaiveBayesExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/CompoundNaiveBayesExportImportExample.java new file mode 100644 index 00000000000000..7d05f5e82129c2 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/CompoundNaiveBayesExportImportExample.java @@ -0,0 +1,129 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.examples.ml.util.MLSandboxDatasets; +import org.apache.ignite.examples.ml.util.SandboxMLCache; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DummyVectorizer; +import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.naivebayes.compound.CompoundNaiveBayesModel; +import org.apache.ignite.ml.naivebayes.compound.CompoundNaiveBayesTrainer; +import org.apache.ignite.ml.naivebayes.discrete.DiscreteNaiveBayesTrainer; +import org.apache.ignite.ml.naivebayes.gaussian.GaussianNaiveBayesTrainer; +import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; +import org.apache.ignite.ml.selection.scoring.metric.MetricName; + +import static java.util.Arrays.asList; + +/** + * Run naive Compound Bayes classification model based on + * naive Bayes classifier algorithm ({@link GaussianNaiveBayesTrainer}) and Discrete naive Bayes + * classifier algorithm ({@link DiscreteNaiveBayesTrainer}) over distributed cache. + *

+ * Code in this example launches Ignite grid and fills the cache with test data points. + *

+ * After that it trains the naive Bayes classification model based on the specified data.

+ *

+ * Finally, this example loops over the test set of data points, applies the trained model to predict the target value, + * compares prediction to expected outcome (ground truth), and builds + * confusion matrix.

+ *

+ * You can change the test data used in this example and re-run it to explore this algorithm further.

+ */ +public class CompoundNaiveBayesExportImportExample { + /** Run example. */ + public static void main(String[] args) throws IOException { + System.out.println(); + System.out.println(">>> Compound Naive Bayes classification model over partitioned dataset usage example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println(">>> Ignite grid started."); + + IgniteCache dataCache = null; + Path jsonMdlPath = null; + try { + dataCache = new SandboxMLCache(ignite) + .fillCacheWith(MLSandboxDatasets.MIXED_DATASET); + + double[] priorProbabilities = new double[]{.5, .5}; + double[][] thresholds = new double[][]{{.5}, {.5}, {.5}, {.5}, {.5}}; + + System.out.println("\n>>> Create new naive Bayes classification trainer object."); + CompoundNaiveBayesTrainer trainer = new CompoundNaiveBayesTrainer() + .withPriorProbabilities(priorProbabilities) + .withGaussianNaiveBayesTrainer(new GaussianNaiveBayesTrainer()) + .withGaussianFeatureIdsToSkip(asList(3, 4, 5, 6, 7)) + .withDiscreteNaiveBayesTrainer(new DiscreteNaiveBayesTrainer() + .setBucketThresholds(thresholds)) + .withDiscreteFeatureIdsToSkip(asList(0, 1, 2)); + System.out.println("\n>>> Perform the training to get the model."); + + Vectorizer vectorizer = new DummyVectorizer() + .labeled(Vectorizer.LabelCoordinate.FIRST); + + CompoundNaiveBayesModel mdl = trainer.fit(ignite, dataCache, vectorizer); + + System.out.println("\n>>> Exported Compound Naive Bayes model: " + mdl.toString(true)); + + double accuracy = Evaluator.evaluate( + dataCache, + mdl, + vectorizer, + MetricName.ACCURACY + ); + + System.out.println("\n>>> Accuracy for exported Compound Naive Bayes model:" + accuracy); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + CompoundNaiveBayesModel modelImportedFromJSON = CompoundNaiveBayesModel.fromJSON(jsonMdlPath); + + System.out.println("\n>>> Imported Compound Naive Bayes model: " + 
modelImportedFromJSON.toString(true)); + + accuracy = Evaluator.evaluate( + dataCache, + modelImportedFromJSON, + vectorizer, + MetricName.ACCURACY + ); + + System.out.println("\n>>> Accuracy for imported Compound Naive Bayes model:" + accuracy); + + System.out.println("\n>>> Compound Naive Bayes model over partitioned dataset usage example completed."); + } + finally { + if (dataCache != null) + dataCache.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/DecisionTreeClassificationExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/DecisionTreeClassificationExportImportExample.java new file mode 100644 index 00000000000000..e7ad7ca71e676e --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/DecisionTreeClassificationExportImportExample.java @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Random; +import org.apache.commons.math3.util.Precision; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.ml.dataset.feature.extractor.impl.LabeledDummyVectorizer; +import org.apache.ignite.ml.math.primitives.vector.VectorUtils; +import org.apache.ignite.ml.structures.LabeledVector; +import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; +import org.apache.ignite.ml.tree.DecisionTreeModel; + +/** + * Example of using distributed {@link DecisionTreeClassificationTrainer}. + *

+ * Code in this example launches Ignite grid and fills the cache with pseudo random training data points.

+ *

+ * After that it creates classification trainer and uses it to train the model on the training set.

+ *

+ * Finally, this example loops over the pseudo randomly generated test set of data points, applies the trained model, + * and compares prediction to expected outcome.

+ *

+ * You can change the test data used in this example and re-run it to explore this algorithm further.

+ */ +public class DecisionTreeClassificationExportImportExample { + /** + * Executes example. + * + * @param args Command line arguments, none required. + */ + public static void main(String[] args) throws IOException { + System.out.println(">>> Decision tree classification trainer example started."); + + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println("\n>>> Ignite grid started."); + + // Create cache with training data. + CacheConfiguration> trainingSetCfg = new CacheConfiguration<>(); + trainingSetCfg.setName("TRAINING_SET"); + trainingSetCfg.setAffinity(new RendezvousAffinityFunction(false, 10)); + + IgniteCache> trainingSet = null; + Path jsonMdlPath = null; + try { + trainingSet = ignite.createCache(trainingSetCfg); + + Random rnd = new Random(0); + + // Fill training data. + for (int i = 0; i < 1000; i++) + trainingSet.put(i, generatePoint(rnd)); + + // Create classification trainer. + DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(4, 0); + + // Train decision tree model. 
+ LabeledDummyVectorizer vectorizer = new LabeledDummyVectorizer<>(); + DecisionTreeModel mdl = trainer.fit( + ignite, + trainingSet, + vectorizer + ); + + System.out.println("\n>>> Exported Decision tree classification model: " + mdl); + + int correctPredictions = evaluateModel(rnd, mdl); + + System.out.println("\n>>> Accuracy for exported Decision tree classification model: " + correctPredictions / 10.0 + "%"); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + DecisionTreeModel modelImportedFromJSON = DecisionTreeModel.fromJSON(jsonMdlPath); + + System.out.println("\n>>> Imported Decision tree classification model: " + modelImportedFromJSON); + + correctPredictions = evaluateModel(rnd, modelImportedFromJSON); + + System.out.println("\n>>> Accuracy for imported Decision tree classification model: " + correctPredictions / 10.0 + "%"); + + System.out.println("\n>>> Decision tree classification trainer example completed."); + } + finally { + if (trainingSet != null) + trainingSet.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } + + private static int evaluateModel(Random rnd, DecisionTreeModel mdl) { + // Calculate score. + int correctPredictions = 0; + for (int i = 0; i < 1000; i++) { + LabeledVector pnt = generatePoint(rnd); + + double prediction = mdl.predict(pnt.features()); + double lbl = pnt.label(); + + if (i % 50 == 1) + System.out.printf(">>> test #: %d\t\t predicted: %.4f\t\tlabel: %.4f\n", i, prediction, lbl); + + if (Precision.equals(prediction, lbl, Precision.EPSILON)) + correctPredictions++; + } + return correctPredictions; + } + + /** + * Generate point with {@code x} in (-0.5, 0.5) and {@code y} in the same interval. If {@code x * y > 0} then label + * is 1, otherwise 0. + * + * @param rnd Random. + * @return Point with label. 
+ */ + private static LabeledVector generatePoint(Random rnd) { + + double x = rnd.nextDouble() - 0.5; + double y = rnd.nextDouble() - 0.5; + + return new LabeledVector<>(VectorUtils.of(x, y), x * y > 0 ? 1. : 0.); + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/DecisionTreeRegressionExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/DecisionTreeRegressionExportImportExample.java new file mode 100644 index 00000000000000..9857ba9edab557 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/DecisionTreeRegressionExportImportExample.java @@ -0,0 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.ml.dataset.feature.extractor.impl.LabeledDummyVectorizer; +import org.apache.ignite.ml.math.primitives.vector.VectorUtils; +import org.apache.ignite.ml.structures.LabeledVector; +import org.apache.ignite.ml.tree.DecisionTreeModel; +import org.apache.ignite.ml.tree.DecisionTreeRegressionTrainer; + +/** + * Example of using distributed {@link DecisionTreeRegressionTrainer}. + *

+ * Code in this example launches Ignite grid and fills the cache with generated test data points ({@code sin(x)} on + * interval {@code [0, 10)}).

+ *

+ * After that it creates classification trainer and uses it to train the model on the training set.

+ *

+ * Finally, this example loops over the test data points, applies the trained model, and compares prediction to expected + * outcome (ground truth).

+ *

+ * You can change the test data used in this example and re-run it to explore this algorithm further.

+ */ +public class DecisionTreeRegressionExportImportExample { + /** + * Executes example. + * + * @param args Command line arguments, none required. + */ + public static void main(String... args) throws IOException { + System.out.println(">>> Decision tree regression trainer example started."); + + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println("\n>>> Ignite grid started."); + + // Create cache with training data. + CacheConfiguration> trainingSetCfg = new CacheConfiguration<>(); + trainingSetCfg.setName("TRAINING_SET"); + trainingSetCfg.setAffinity(new RendezvousAffinityFunction(false, 10)); + + IgniteCache> trainingSet = null; + Path jsonMdlPath = null; + try { + trainingSet = ignite.createCache(trainingSetCfg); + + // Fill training data. + generatePoints(trainingSet); + + // Create regression trainer. + DecisionTreeRegressionTrainer trainer = new DecisionTreeRegressionTrainer(10, 0); + + // Train decision tree model. + DecisionTreeModel mdl = trainer.fit(ignite, trainingSet, new LabeledDummyVectorizer<>()); + + System.out.println("\n>>> Exported Decision tree regression model: " + mdl); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + DecisionTreeModel modelImportedFromJSON = DecisionTreeModel.fromJSON(jsonMdlPath); + + System.out.println("\n>>> Imported Decision tree regression model: " + modelImportedFromJSON); + + System.out.println(">>> ---------------------------------"); + System.out.println(">>> | Prediction\t| Ground Truth\t|"); + System.out.println(">>> ---------------------------------"); + + // Calculate score. 
+ for (int x = 0; x < 10; x++) { + double predicted = mdl.predict(VectorUtils.of(x)); + + System.out.printf(">>> | %.4f\t\t| %.4f\t\t|\n", predicted, Math.sin(x)); + } + + System.out.println(">>> ---------------------------------"); + + System.out.println("\n>>> Decision tree regression trainer example completed."); + } + finally { + if (trainingSet != null) + trainingSet.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } + + /** + * Generates {@code sin(x)} on interval {@code [0, 10)} and loads into the specified cache. + */ + private static void generatePoints(IgniteCache> trainingSet) { + for (int i = 0; i < 1000; i++) { + double x = i / 100.0; + double y = Math.sin(x); + + trainingSet.put(i, new LabeledVector<>(VectorUtils.of(x), y)); + } + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/DiscreteNaiveBayesExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/DiscreteNaiveBayesExportImportExample.java new file mode 100644 index 00000000000000..c4d44c45684792 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/DiscreteNaiveBayesExportImportExample.java @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.examples.ml.util.MLSandboxDatasets; +import org.apache.ignite.examples.ml.util.SandboxMLCache; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DummyVectorizer; +import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.naivebayes.discrete.DiscreteNaiveBayesModel; +import org.apache.ignite.ml.naivebayes.discrete.DiscreteNaiveBayesTrainer; +import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; +import org.apache.ignite.ml.selection.scoring.metric.MetricName; + +/** + * Run naive Bayes classification model based on + * naive Bayes classifier algorithm ({@link DiscreteNaiveBayesTrainer}) over distributed cache. + *

+ * Code in this example launches Ignite grid and fills the cache with test data points. + *

+ *

+ * After that it trains the Discrete naive Bayes classification model based on the specified data.

+ *

+ * Finally, this example loops over the test set of data points, applies the trained model to predict the target value, + * compares prediction to expected outcome (ground truth), and builds + * confusion matrix.

+ *

+ * You can change the test data used in this example and re-run it to explore this algorithm further.

+ */ +public class DiscreteNaiveBayesExportImportExample { + /** + * Run example. + */ + public static void main(String[] args) throws IOException { + System.out.println(">>> Discrete naive Bayes classification model over partitioned dataset usage example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println(">>> Ignite grid started."); + + IgniteCache dataCache = null; + Path jsonMdlPath = null; + try { + dataCache = new SandboxMLCache(ignite).fillCacheWith(MLSandboxDatasets.ENGLISH_VS_SCOTTISH); + + double[][] thresholds = new double[][] {{.5}, {.5}, {.5}, {.5}, {.5}}; + System.out.println(">>> Create new Discrete naive Bayes classification trainer object."); + DiscreteNaiveBayesTrainer trainer = new DiscreteNaiveBayesTrainer() + .setBucketThresholds(thresholds); + + System.out.println("\n>>> Perform the training to get the model."); + Vectorizer vectorizer = new DummyVectorizer() + .labeled(Vectorizer.LabelCoordinate.FIRST); + + DiscreteNaiveBayesModel mdl = trainer.fit(ignite, dataCache, vectorizer); + System.out.println("\n>>> Exported Discrete Naive Bayes model: " + mdl.toString(true)); + + double accuracy = Evaluator.evaluate( + dataCache, + mdl, + vectorizer, + MetricName.ACCURACY + ); + + System.out.println("\n>>> Accuracy for exported Discrete Naive Bayes model:" + accuracy); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + DiscreteNaiveBayesModel modelImportedFromJSON = DiscreteNaiveBayesModel.fromJSON(jsonMdlPath); + + System.out.println("\n>>> Imported Discrete Naive Bayes model: " + modelImportedFromJSON.toString(true)); + + accuracy = Evaluator.evaluate( + dataCache, + modelImportedFromJSON, + vectorizer, + MetricName.ACCURACY + ); + + System.out.println("\n>>> Accuracy for imported Discrete Naive Bayes model:" + accuracy); + + System.out.println("\n>>> Discrete Naive bayes model over partitioned dataset usage example completed."); + } 
+ finally { + if (dataCache != null) + dataCache.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } + +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/GDBOnTreesClassificationExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/GDBOnTreesClassificationExportImportExample.java new file mode 100644 index 00000000000000..9aa8f228f717aa --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/GDBOnTreesClassificationExportImportExample.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.ml.composition.boosting.GDBModel; +import org.apache.ignite.ml.composition.boosting.GDBTrainer; +import org.apache.ignite.ml.composition.boosting.convergence.mean.MeanAbsValueConvergenceCheckerFactory; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DoubleArrayVectorizer; +import org.apache.ignite.ml.math.functions.IgniteFunction; +import org.apache.ignite.ml.math.primitives.vector.VectorUtils; +import org.apache.ignite.ml.tree.boosting.GDBBinaryClassifierOnTreesTrainer; +import org.jetbrains.annotations.NotNull; + +/** + * Example represents a solution for the task of classification learning based on Gradient Boosting on trees + * implementation. It shows an initialization of {@link GDBBinaryClassifierOnTreesTrainer}, initialization of Ignite + * Cache, learning step and comparing of predicted and real values. + *

+ * In this example dataset is created automatically by meander function {@code f(x) = [sin(x) > 0]}.

+ */ +public class GDBOnTreesClassificationExportImportExample { + /** + * Run example. + * + * @param args Command line arguments, none required. + */ + public static void main(String[] args) throws IOException { + System.out.println(); + System.out.println(">>> GDB classification trainer example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println("\n>>> Ignite grid started."); + + // Create cache with training data. + CacheConfiguration trainingSetCfg = createCacheConfiguration(); + IgniteCache trainingSet = null; + Path jsonMdlPath = null; + try { + trainingSet = fillTrainingData(ignite, trainingSetCfg); + + // Create classification trainer. + GDBTrainer trainer = new GDBBinaryClassifierOnTreesTrainer(1.0, 300, 2, 0.) + .withCheckConvergenceStgyFactory(new MeanAbsValueConvergenceCheckerFactory(0.1)); + + // Train decision tree model. + GDBModel mdl = trainer.fit( + ignite, + trainingSet, + new DoubleArrayVectorizer().labeled(Vectorizer.LabelCoordinate.LAST) + ); + + System.out.println("\n>>> Exported GDB classification model: " + mdl.toString(true)); + + predictOnGeneratedData(mdl); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + IgniteFunction lbMapper = lb -> lb > 0.5 ? 
1.0 : 0.0; + GDBModel modelImportedFromJSON = GDBModel.fromJSON(jsonMdlPath).withLblMapping(lbMapper); + + System.out.println("\n>>> Imported GDB classification model: " + modelImportedFromJSON.toString(true)); + + predictOnGeneratedData(modelImportedFromJSON); + + System.out.println(">>> GDB classification trainer example completed."); + } + finally { + if (trainingSet != null) + trainingSet.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } + + private static void predictOnGeneratedData(GDBModel mdl) { + System.out.println(">>> ---------------------------------"); + System.out.println(">>> | Prediction\t| Valid answer\t|"); + System.out.println(">>> ---------------------------------"); + + // Calculate score. + for (int x = -5; x < 5; x++) { + double predicted = mdl.predict(VectorUtils.of(x)); + + System.out.printf(">>> | %.4f\t\t| %.4f\t\t|\n", predicted, Math.sin(x) < 0 ? 0.0 : 1.0); + } + + System.out.println(">>> ---------------------------------"); + System.out.println(">>> Count of trees = " + mdl.getModels().size()); + System.out.println(">>> ---------------------------------"); + } + + /** + * Create cache configuration. + */ + @NotNull private static CacheConfiguration createCacheConfiguration() { + CacheConfiguration trainingSetCfg = new CacheConfiguration<>(); + trainingSetCfg.setName("TRAINING_SET"); + trainingSetCfg.setAffinity(new RendezvousAffinityFunction(false, 10)); + return trainingSetCfg; + } + + /** + * Fill meander-like training data. + * + * @param ignite Ignite instance. + * @param trainingSetCfg Training set config. + */ + @NotNull private static IgniteCache fillTrainingData(Ignite ignite, + CacheConfiguration trainingSetCfg) { + IgniteCache trainingSet = ignite.getOrCreateCache(trainingSetCfg); + for (int i = -50; i <= 50; i++) { + double x = ((double)i) / 10.0; + double y = Math.sin(x) < 0 ? 
0.0 : 1.0; + trainingSet.put(i, new double[] {x, y}); + } + return trainingSet; + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/GDBOnTreesRegressionExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/GDBOnTreesRegressionExportImportExample.java new file mode 100644 index 00000000000000..14233e316e4a3d --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/GDBOnTreesRegressionExportImportExample.java @@ -0,0 +1,145 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.ml.composition.boosting.GDBModel; +import org.apache.ignite.ml.composition.boosting.GDBTrainer; +import org.apache.ignite.ml.composition.boosting.convergence.mean.MeanAbsValueConvergenceCheckerFactory; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DoubleArrayVectorizer; +import org.apache.ignite.ml.math.functions.IgniteFunction; +import org.apache.ignite.ml.math.primitives.vector.VectorUtils; +import org.apache.ignite.ml.tree.boosting.GDBRegressionOnTreesTrainer; +import org.jetbrains.annotations.NotNull; + +/** + * Example represents a solution for the task of regression learning based on Gradient Boosting on trees implementation. + * It shows an initialization of {@link GDBRegressionOnTreesTrainer}, initialization of Ignite Cache, learning step and + * comparing of predicted and real values. + *

+ * In this example the dataset is created automatically by the parabolic function {@code f(x) = x^2}.

+ */ +public class GDBOnTreesRegressionExportImportExample { + /** + * Run example. + * + * @param args Command line arguments, none required. + */ + public static void main(String[] args) throws IOException { + System.out.println(); + System.out.println(">>> GDB regression trainer example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println(">>> Ignite grid started."); + + // Create cache with training data. + CacheConfiguration trainingSetCfg = createCacheConfiguration(); + IgniteCache trainingSet = null; + Path jsonMdlPath = null; + try { + trainingSet = fillTrainingData(ignite, trainingSetCfg); + + // Create regression trainer. + GDBTrainer trainer = new GDBRegressionOnTreesTrainer(1.0, 2000, 1, 0.) + .withCheckConvergenceStgyFactory(new MeanAbsValueConvergenceCheckerFactory(0.001)); + + // Train decision tree model. + GDBModel mdl = trainer.fit( + ignite, + trainingSet, + new DoubleArrayVectorizer().labeled(Vectorizer.LabelCoordinate.LAST) + ); + + System.out.println("\n>>> Exported GDB regression model: " + mdl.toString(true)); + + predictOnGeneratedData(mdl); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + IgniteFunction lbMapper = lb -> lb; + GDBModel modelImportedFromJSON = GDBModel.fromJSON(jsonMdlPath).withLblMapping(lbMapper); + + System.out.println("\n>>> Imported GDB regression model: " + modelImportedFromJSON.toString(true)); + + predictOnGeneratedData(modelImportedFromJSON); + + System.out.println(">>> GDB regression trainer example completed."); + } + finally { + if (trainingSet != null) + trainingSet.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } + + private static void predictOnGeneratedData(GDBModel mdl) { + System.out.println(">>> ---------------------------------"); + System.out.println(">>> | Prediction\t| Valid answer \t|"); + 
System.out.println(">>> ---------------------------------"); + + // Calculate score. + for (int x = -5; x < 5; x++) { + double predicted = mdl.predict(VectorUtils.of(x)); + + System.out.printf(">>> | %.4f\t\t| %.4f\t\t|\n", predicted, Math.pow(x, 2)); + } + + System.out.println(">>> ---------------------------------"); + } + + /** + * Create cache configuration. + */ + @NotNull private static CacheConfiguration createCacheConfiguration() { + CacheConfiguration trainingSetCfg = new CacheConfiguration<>(); + trainingSetCfg.setName("TRAINING_SET"); + trainingSetCfg.setAffinity(new RendezvousAffinityFunction(false, 10)); + return trainingSetCfg; + } + + /** + * Fill parabolic training data. + * + * @param ignite Ignite instance. + * @param trainingSetCfg Training set config. + */ + @NotNull private static IgniteCache fillTrainingData(Ignite ignite, + CacheConfiguration trainingSetCfg) { + IgniteCache trainingSet = ignite.getOrCreateCache(trainingSetCfg); + for (int i = -50; i <= 50; i++) { + double x = ((double)i) / 10.0; + double y = Math.pow(x, 2); + trainingSet.put(i, new double[] {x, y}); + } + return trainingSet; + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/GaussianNaiveBayesExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/GaussianNaiveBayesExportImportExample.java new file mode 100644 index 00000000000000..b6fb9c9fd20979 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/GaussianNaiveBayesExportImportExample.java @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.examples.ml.util.MLSandboxDatasets; +import org.apache.ignite.examples.ml.util.SandboxMLCache; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DummyVectorizer; +import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.naivebayes.gaussian.GaussianNaiveBayesModel; +import org.apache.ignite.ml.naivebayes.gaussian.GaussianNaiveBayesTrainer; +import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; +import org.apache.ignite.ml.selection.scoring.metric.MetricName; + +/** + * Run naive Bayes classification model based on naive + * Bayes classifier algorithm ({@link GaussianNaiveBayesTrainer}) over distributed cache. + *

+ * Code in this example launches Ignite grid and fills the cache with test data points (based on the + * Iris dataset).

+ *

+ * After that it trains the naive Bayes classification model based on the specified data.

+ *

+ * Finally, this example loops over the test set of data points, applies the trained model to predict the target value, + * compares prediction to expected outcome (ground truth), and builds + * confusion matrix.

+ *

+ * You can change the test data used in this example and re-run it to explore this algorithm further.

+ */ +public class GaussianNaiveBayesExportImportExample { + /** + * Run example. + */ + public static void main(String[] args) throws IOException { + System.out.println(); + System.out.println(">>> Naive Bayes classification model over partitioned dataset usage example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println(">>> Ignite grid started."); + + IgniteCache dataCache = null; + Path jsonMdlPath = null; + try { + dataCache = new SandboxMLCache(ignite).fillCacheWith(MLSandboxDatasets.TWO_CLASSED_IRIS); + + System.out.println(">>> Create new Gaussian Naive Bayes classification trainer object."); + GaussianNaiveBayesTrainer trainer = new GaussianNaiveBayesTrainer(); + + System.out.println("\n>>> Perform the training to get the model."); + + Vectorizer vectorizer = new DummyVectorizer() + .labeled(Vectorizer.LabelCoordinate.FIRST); + + GaussianNaiveBayesModel mdl = trainer.fit(ignite, dataCache, vectorizer); + System.out.println("\n>>> Exported Gaussian Naive Bayes model: " + mdl.toString(true)); + + double accuracy = Evaluator.evaluate( + dataCache, + mdl, + vectorizer, + MetricName.ACCURACY + ); + + System.out.println("\n>>> Accuracy for exported Gaussian Naive Bayes model:" + accuracy); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + GaussianNaiveBayesModel modelImportedFromJSON = GaussianNaiveBayesModel.fromJSON(jsonMdlPath); + + System.out.println("\n>>> Imported Gaussian Naive Bayes model: " + modelImportedFromJSON.toString(true)); + + accuracy = Evaluator.evaluate( + dataCache, + modelImportedFromJSON, + vectorizer, + MetricName.ACCURACY + ); + + System.out.println("\n>>> Accuracy for imported Gaussian Naive Bayes model:" + accuracy); + + System.out.println("\n>>> Gaussian Naive bayes model over partitioned dataset usage example completed."); + } + finally { + if (dataCache != null) + dataCache.destroy(); + if (jsonMdlPath != null) + 
Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } + +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/KMeansClusterizationExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/KMeansClusterizationExportImportExample.java new file mode 100644 index 00000000000000..ec5e6899f7eab9 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/KMeansClusterizationExportImportExample.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.examples.ml.util.MLSandboxDatasets; +import org.apache.ignite.examples.ml.util.SandboxMLCache; +import org.apache.ignite.ml.clustering.kmeans.KMeansModel; +import org.apache.ignite.ml.clustering.kmeans.KMeansTrainer; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DummyVectorizer; +import org.apache.ignite.ml.math.distances.WeightedMinkowskiDistance; +import org.apache.ignite.ml.math.primitives.vector.Vector; + +/** + * Run KMeans clustering algorithm ({@link KMeansTrainer}) over distributed dataset. + *

+ * Code in this example launches Ignite grid and fills the cache with test data points (based on the + * Iris dataset).

+ *

+ * After that it trains the model based on the specified data using + * KMeans algorithm.

+ *

+ * Finally, this example loops over the test set of data points, applies the trained model to predict what cluster does + * this point belong to, and compares prediction to expected outcome (ground truth).

+ *

+ * You can change the test data used in this example and re-run it to explore this algorithm further.

+ */ +public class KMeansClusterizationExportImportExample { + /** + * Run example. + */ + public static void main(String[] args) throws IOException { + System.out.println(); + System.out.println(">>> KMeans clustering algorithm over cached dataset usage example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println(">>> Ignite grid started."); + + IgniteCache dataCache = null; + Path jsonMdlPath = null; + try { + dataCache = new SandboxMLCache(ignite).fillCacheWith(MLSandboxDatasets.TWO_CLASSED_IRIS); + + Vectorizer vectorizer = new DummyVectorizer().labeled(Vectorizer.LabelCoordinate.FIRST); + + KMeansTrainer trainer = new KMeansTrainer() + .withDistance(new WeightedMinkowskiDistance(2, new double[] {5.9360, 2.7700, 4.2600, 1.3260})); + //.withDistance(new MinkowskiDistance(2)); + + KMeansModel mdl = trainer.fit( + ignite, + dataCache, + vectorizer + ); + + System.out.println("\n>>> Exported KMeans model: " + mdl); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + KMeansModel modelImportedFromJSON = KMeansModel.fromJSON(jsonMdlPath); + + System.out.println("\n>>> Imported KMeans model: " + modelImportedFromJSON); + + System.out.println("\n>>> KMeans clustering algorithm over cached dataset usage example completed."); + } + finally { + if (dataCache != null) + dataCache.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/LinearRegressionExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/LinearRegressionExportImportExample.java new file mode 100644 index 00000000000000..723784bb999c77 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/LinearRegressionExportImportExample.java @@ -0,0 +1,116 @@ +/* + * 
Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.examples.ml.util.MLSandboxDatasets; +import org.apache.ignite.examples.ml.util.SandboxMLCache; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DummyVectorizer; +import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.regressions.linear.LinearRegressionLSQRTrainer; +import org.apache.ignite.ml.regressions.linear.LinearRegressionModel; +import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; +import org.apache.ignite.ml.selection.scoring.metric.MetricName; + +/** + * Run linear regression model based on LSQR algorithm + * ({@link LinearRegressionLSQRTrainer}) over cached dataset. + *

+ * Code in this example launches Ignite grid and fills the cache with simple test data.

+ *

+ * After that it trains the linear regression model based on the specified data.

+ *

+ * Finally, this example loops over the test set of data points, applies the trained model to predict the target value + * and compares prediction to expected outcome (ground truth).

+ *

+ * You can change the test data used in this example and re-run it to explore this algorithm further.

+ */
+public class LinearRegressionExportImportExample {
+    /**
+     * Run example.
+     */
+    public static void main(String[] args) throws IOException {
+        System.out.println();
+        System.out.println(">>> Linear regression model over cache based dataset usage example started.");
+        // Start ignite grid.
+        try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
+            System.out.println(">>> Ignite grid started.");
+
+            IgniteCache<Integer, Vector> dataCache = null;
+            Path jsonMdlPath = null;
+            try {
+                dataCache = new SandboxMLCache(ignite).fillCacheWith(MLSandboxDatasets.MORTALITY_DATA);
+
+                System.out.println("\n>>> Create new linear regression trainer object.");
+                LinearRegressionLSQRTrainer trainer = new LinearRegressionLSQRTrainer();
+
+                System.out.println("\n>>> Perform the training to get the model.");
+
+                LinearRegressionModel mdl = trainer.fit(
+                    ignite,
+                    dataCache,
+                    new DummyVectorizer<Integer>().labeled(Vectorizer.LabelCoordinate.FIRST)
+                );
+
+                System.out.println("\n>>> Exported LinearRegression model: " + mdl);
+
+                double rmse = Evaluator.evaluate(
+                    dataCache,
+                    mdl,
+                    new DummyVectorizer<Integer>().labeled(Vectorizer.LabelCoordinate.FIRST),
+                    MetricName.RMSE
+                );
+
+                System.out.println("\n>>> RMSE for exported LinearRegression model: " + rmse);
+
+                jsonMdlPath = Files.createTempFile(null, null);
+                mdl.toJSON(jsonMdlPath);
+
+                LinearRegressionModel modelImportedFromJSON = LinearRegressionModel.fromJSON(jsonMdlPath);
+
+                System.out.println("\n>>> Imported LinearRegression model: " + modelImportedFromJSON);
+
+                rmse = Evaluator.evaluate(
+                    dataCache,
+                    modelImportedFromJSON,
+                    new DummyVectorizer<Integer>().labeled(Vectorizer.LabelCoordinate.FIRST),
+                    MetricName.RMSE
+                );
+
+                System.out.println("\n>>> RMSE for imported LinearRegression model: " + rmse);
+
+                System.out.println("\n>>> Linear regression model over cache based dataset usage example completed.");
+            }
+            finally {
+                if (dataCache != null)
+                    dataCache.destroy();
+                if (jsonMdlPath != null)
+                    Files.deleteIfExists(jsonMdlPath);
+            }
+        }
+        finally {
+            
System.out.flush(); + } + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/LogisticRegressionExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/LogisticRegressionExportImportExample.java new file mode 100644 index 00000000000000..6491f7edd5a3fd --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/LogisticRegressionExportImportExample.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.examples.ml.util.MLSandboxDatasets; +import org.apache.ignite.examples.ml.util.SandboxMLCache; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DummyVectorizer; +import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.nn.UpdatesStrategy; +import org.apache.ignite.ml.optimization.updatecalculators.SimpleGDParameterUpdate; +import org.apache.ignite.ml.optimization.updatecalculators.SimpleGDUpdateCalculator; +import org.apache.ignite.ml.regressions.logistic.LogisticRegressionModel; +import org.apache.ignite.ml.regressions.logistic.LogisticRegressionSGDTrainer; +import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; +import org.apache.ignite.ml.selection.scoring.metric.MetricName; + +/** + * Run logistic regression model based on + * stochastic gradient descent algorithm ({@link LogisticRegressionSGDTrainer}) over distributed cache. + *

+ * Code in this example launches Ignite grid and fills the cache with test data points (based on the + * Iris dataset).

+ *

+ * After that it trains the logistic regression model based on the specified data.

+ *

+ * Finally, this example loops over the test set of data points, applies the trained model to predict the target value, + * compares prediction to expected outcome (ground truth), and builds + * confusion matrix.

+ *

+ * You can change the test data used in this example and re-run it to explore this algorithm further.

+ */ +public class LogisticRegressionExportImportExample { + /** + * Run example. + */ + public static void main(String[] args) throws IOException { + System.out.println(); + System.out.println(">>> Logistic regression model over partitioned dataset usage example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println("\n>>> Ignite grid started."); + + IgniteCache dataCache = null; + Path jsonMdlPath = null; + try { + dataCache = new SandboxMLCache(ignite).fillCacheWith(MLSandboxDatasets.TWO_CLASSED_IRIS); + + System.out.println("\n>>> Create new logistic regression trainer object."); + LogisticRegressionSGDTrainer trainer = new LogisticRegressionSGDTrainer() + .withUpdatesStgy(new UpdatesStrategy<>( + new SimpleGDUpdateCalculator(0.2), + SimpleGDParameterUpdate.SUM_LOCAL, + SimpleGDParameterUpdate.AVG + )) + .withMaxIterations(100000) + .withLocIterations(100) + .withBatchSize(10) + .withSeed(123L); + + System.out.println("\n>>> Perform the training to get the model."); + Vectorizer vectorizer = new DummyVectorizer() + .labeled(Vectorizer.LabelCoordinate.FIRST); + + LogisticRegressionModel mdl = trainer.fit(ignite, dataCache, vectorizer); + + System.out.println("\n>>> Exported logistic regression model: " + mdl); + + double accuracy = Evaluator.evaluate(dataCache, + mdl, vectorizer, MetricName.ACCURACY + ); + + System.out.println("\n>>> Accuracy for exported logistic regression model " + accuracy); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + LogisticRegressionModel modelImportedFromJSON = LogisticRegressionModel.fromJSON(jsonMdlPath); + + System.out.println("\n>>> Imported logistic regression model: " + modelImportedFromJSON); + + accuracy = Evaluator.evaluate(dataCache, + modelImportedFromJSON, vectorizer, MetricName.ACCURACY + ); + + System.out.println("\n>>> Accuracy for imported logistic regression model " + accuracy); + + 
System.out.println("\n>>> Logistic regression model over partitioned dataset usage example completed."); + } + finally { + if (dataCache != null) + dataCache.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/RandomForestClassificationExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/RandomForestClassificationExportImportExample.java new file mode 100644 index 00000000000000..6bb368f56f6b28 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/RandomForestClassificationExportImportExample.java @@ -0,0 +1,144 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import javax.cache.Cache; +import org.apache.commons.math3.util.Precision; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.query.QueryCursor; +import org.apache.ignite.cache.query.ScanQuery; +import org.apache.ignite.examples.ml.util.MLSandboxDatasets; +import org.apache.ignite.examples.ml.util.SandboxMLCache; +import org.apache.ignite.ml.dataset.feature.FeatureMeta; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DummyVectorizer; +import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.tree.randomforest.RandomForestClassifierTrainer; +import org.apache.ignite.ml.tree.randomforest.RandomForestModel; +import org.apache.ignite.ml.tree.randomforest.data.FeaturesCountSelectionStrategies; + +/** + * Example represents a solution for the task of wine classification based on a + * Random Forest implementation for + * multi-classification. + *

+ * Code in this example launches Ignite grid and fills the cache with test data points (based on the + * Wine recognition dataset).

+ *

+ * After that it initializes the {@link RandomForestClassifierTrainer} with thread pool for multi-thread learning and
+ * trains the model based on the specified data using random forest classification algorithm.

+ *

+ * Finally, this example loops over the test set of data points, compares prediction of the trained model to the + * expected outcome (ground truth), and evaluates accuracy of the model.

+ *

+ * You can change the test data used in this example and re-run it to explore this algorithm further.

+ */ +public class RandomForestClassificationExportImportExample { + /** + * Run example. + */ + public static void main(String[] args) throws IOException { + System.out.println(); + System.out.println(">>> Random Forest multi-class classification algorithm over cached dataset usage example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println("\n>>> Ignite grid started."); + + IgniteCache dataCache = null; + Path jsonMdlPath = null; + try { + dataCache = new SandboxMLCache(ignite).fillCacheWith(MLSandboxDatasets.WINE_RECOGNITION); + + AtomicInteger idx = new AtomicInteger(0); + RandomForestClassifierTrainer classifier = new RandomForestClassifierTrainer( + IntStream.range(0, dataCache.get(1).size() - 1).mapToObj( + x -> new FeatureMeta("", idx.getAndIncrement(), false)).collect(Collectors.toList()) + ).withAmountOfTrees(101) + .withFeaturesCountSelectionStrgy(FeaturesCountSelectionStrategies.ONE_THIRD) + .withMaxDepth(4) + .withMinImpurityDelta(0.) 
+                    .withSubSampleSize(0.3)
+                    .withSeed(0);
+
+                System.out.println(">>> Configured trainer: " + classifier.getClass().getSimpleName());
+
+                Vectorizer<Integer, Vector, Integer, Double> vectorizer = new DummyVectorizer<Integer>()
+                    .labeled(Vectorizer.LabelCoordinate.FIRST);
+                RandomForestModel mdl = classifier.fit(ignite, dataCache, vectorizer);
+
+                System.out.println(">>> Exported Random Forest classification model: " + mdl.toString(true));
+
+                double accuracy = evaluateModel(dataCache, mdl);
+
+                System.out.println("\n>>> Accuracy for exported Random Forest classification model " + accuracy);
+
+                jsonMdlPath = Files.createTempFile(null, null);
+                mdl.toJSON(jsonMdlPath);
+
+                RandomForestModel modelImportedFromJSON = RandomForestModel.fromJSON(jsonMdlPath);
+
+                System.out.println("\n>>> Imported Random Forest classification model: " + modelImportedFromJSON);
+
+                accuracy = evaluateModel(dataCache, modelImportedFromJSON);
+
+                System.out.println("\n>>> Accuracy for imported Random Forest classification model " + accuracy);
+
+                System.out.println("\n>>> Random Forest multi-class classification algorithm over cached dataset usage example completed.");
+
+            }
+            finally {
+                if (dataCache != null)
+                    dataCache.destroy();
+                if (jsonMdlPath != null)
+                    Files.deleteIfExists(jsonMdlPath);
+            }
+        }
+        finally {
+            System.out.flush();
+        }
+    }
+
+    private static double evaluateModel(IgniteCache<Integer, Vector> dataCache, RandomForestModel randomForestMdl) {
+        int amountOfErrors = 0;
+        int totalAmount = 0;
+
+        try (QueryCursor<Cache.Entry<Integer, Vector>> observations = dataCache.query(new ScanQuery<>())) {
+            for (Cache.Entry<Integer, Vector> observation : observations) {
+                Vector val = observation.getValue();
+                Vector inputs = val.copyOfRange(1, val.size());
+                double groundTruth = val.get(0);
+
+                double prediction = randomForestMdl.predict(inputs);
+
+                totalAmount++;
+                if (!Precision.equals(groundTruth, prediction, Precision.EPSILON))
+                    amountOfErrors++;
+            }
+        }
+
+        return 1 - amountOfErrors / (double) totalAmount;
+    }
+}
diff --git 
a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/RandomForestRegressionExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/RandomForestRegressionExportImportExample.java new file mode 100644 index 00000000000000..4d7d4ad738fb8b --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/RandomForestRegressionExportImportExample.java @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import javax.cache.Cache; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.query.QueryCursor; +import org.apache.ignite.cache.query.ScanQuery; +import org.apache.ignite.examples.ml.util.MLSandboxDatasets; +import org.apache.ignite.examples.ml.util.SandboxMLCache; +import org.apache.ignite.ml.dataset.feature.FeatureMeta; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DummyVectorizer; +import org.apache.ignite.ml.environment.LearningEnvironmentBuilder; +import org.apache.ignite.ml.environment.logging.ConsoleLogger; +import org.apache.ignite.ml.environment.parallelism.ParallelismStrategy; +import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.tree.randomforest.RandomForestModel; +import org.apache.ignite.ml.tree.randomforest.RandomForestRegressionTrainer; +import org.apache.ignite.ml.tree.randomforest.data.FeaturesCountSelectionStrategies; + +/** + * Example represents a solution for the task of price predictions for houses in Boston based on a + * Random Forest implementation for regression. + *

+ * Code in this example launches Ignite grid and fills the cache with test data points (based on the + * Boston Housing dataset).

+ *

+ * After that it initializes the {@link RandomForestRegressionTrainer} and trains the model based on the specified data + * using random forest regression algorithm.

+ *

+ * Finally, this example loops over the test set of data points, compares prediction of the trained model to the + * expected outcome (ground truth), and evaluates model quality in terms of Mean Absolute + * Error (MAE).

+ *

+ * You can change the test data used in this example and re-run it to explore this algorithm further.

+ */ +public class RandomForestRegressionExportImportExample { + /** + * Run example. + */ + public static void main(String[] args) throws IOException { + System.out.println(); + System.out.println(">>> Random Forest regression algorithm over cached dataset usage example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println("\n>>> Ignite grid started."); + + IgniteCache dataCache = null; + Path jsonMdlPath = null; + try { + dataCache = new SandboxMLCache(ignite).fillCacheWith(MLSandboxDatasets.BOSTON_HOUSE_PRICES); + + AtomicInteger idx = new AtomicInteger(0); + RandomForestRegressionTrainer trainer = new RandomForestRegressionTrainer( + IntStream.range(0, dataCache.get(1).size() - 1).mapToObj( + x -> new FeatureMeta("", idx.getAndIncrement(), false)).collect(Collectors.toList()) + ).withAmountOfTrees(101) + .withFeaturesCountSelectionStrgy(FeaturesCountSelectionStrategies.ONE_THIRD) + .withMaxDepth(4) + .withMinImpurityDelta(0.) 
+ .withSubSampleSize(0.3) + .withSeed(0); + + trainer.withEnvironmentBuilder(LearningEnvironmentBuilder.defaultBuilder() + .withParallelismStrategyTypeDependency(ParallelismStrategy.ON_DEFAULT_POOL) + .withLoggingFactoryDependency(ConsoleLogger.Factory.LOW) + ); + + System.out.println("\n>>> Configured trainer: " + trainer.getClass().getSimpleName()); + + Vectorizer vectorizer = new DummyVectorizer() + .labeled(Vectorizer.LabelCoordinate.FIRST); + RandomForestModel mdl = trainer.fit(ignite, dataCache, vectorizer); + + System.out.println("\n>>> Exported Random Forest regression model: " + mdl.toString(true)); + + double mae = evaluateModel(dataCache, mdl); + + System.out.println("\n>>> Mean absolute error (MAE) for exported Random Forest regression model " + mae); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + RandomForestModel modelImportedFromJSON = RandomForestModel.fromJSON(jsonMdlPath); + + System.out.println("\n>>> Exported Random Forest regression model: " + modelImportedFromJSON.toString(true)); + + mae = evaluateModel(dataCache, modelImportedFromJSON); + + System.out.println("\n>>> Mean absolute error (MAE) for exported Random Forest regression model " + mae); + + System.out.println("\n>>> Random Forest regression algorithm over cached dataset usage example completed."); + } + finally { + if (dataCache != null) + dataCache.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } + + private static double evaluateModel(IgniteCache dataCache, RandomForestModel randomForestMdl) { + double mae = 0.0; + int totalAmount = 0; + + try (QueryCursor> observations = dataCache.query(new ScanQuery<>())) { + for (Cache.Entry observation : observations) { + Vector val = observation.getValue(); + Vector inputs = val.copyOfRange(1, val.size()); + double groundTruth = val.get(0); + + double prediction = randomForestMdl.predict(inputs); + + mae += Math.abs(prediction - 
groundTruth); + + totalAmount++; + } + + mae /= totalAmount; + } + return mae; + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/SVMExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/SVMExportImportExample.java new file mode 100644 index 00000000000000..24262901b88950 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/SVMExportImportExample.java @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.examples.ml.util.MLSandboxDatasets; +import org.apache.ignite.examples.ml.util.SandboxMLCache; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DummyVectorizer; +import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; +import org.apache.ignite.ml.selection.scoring.metric.MetricName; +import org.apache.ignite.ml.svm.SVMLinearClassificationModel; +import org.apache.ignite.ml.svm.SVMLinearClassificationTrainer; + +/** + * Run SVM binary-class classification model ({@link SVMLinearClassificationModel}) over distributed dataset. + *

+ * Code in this example launches Ignite grid and fills the cache with test data points (based on the + * Iris dataset).

+ *

+ * After that it trains the model based on the specified data using the SVM linear classification algorithm.

+ *

+ * Finally, this example loops over the test set of data points, applies the trained model to predict the class + * each point belongs to, compares predictions to the expected outcomes (ground truth), and evaluates the + * accuracy of the model.

+ *

+ * You can change the test data used in this example and re-run it to explore this algorithm further.

+ */ +public class SVMExportImportExample { + /** + * Run example. + */ + public static void main(String[] args) throws IOException { + System.out.println(); + System.out.println(">>> SVM Binary classification model over cached dataset usage example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println("\n>>> Ignite grid started."); + + IgniteCache dataCache = null; + Path jsonMdlPath = null; + try { + dataCache = new SandboxMLCache(ignite).fillCacheWith(MLSandboxDatasets.TWO_CLASSED_IRIS); + + SVMLinearClassificationTrainer trainer = new SVMLinearClassificationTrainer(); + + Vectorizer vectorizer = new DummyVectorizer() + .labeled(Vectorizer.LabelCoordinate.FIRST); + + SVMLinearClassificationModel mdl = trainer.fit(ignite, dataCache, vectorizer); + + System.out.println("\n>>> Exported SVM model: " + mdl); + + double accuracy = Evaluator.evaluate( + dataCache, + mdl, + vectorizer, + MetricName.ACCURACY + ); + + System.out.println("\n>>> Accuracy for exported SVM model: " + accuracy); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + SVMLinearClassificationModel modelImportedFromJSON = SVMLinearClassificationModel.fromJSON(jsonMdlPath); + + System.out.println("\n>>> Imported SVM model: " + modelImportedFromJSON); + + accuracy = Evaluator.evaluate( + dataCache, + modelImportedFromJSON, + vectorizer, + MetricName.ACCURACY + ); + + System.out.println("\n>>> Accuracy for imported SVM model: " + accuracy); + + System.out.println("\n>>> SVM Binary classification model over cache based dataset usage example completed."); + } + finally { + if (dataCache != null) + dataCache.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/DecisionTreeFromSparkExample.java 
b/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/DecisionTreeFromSparkExample.java index 3340ed9d33e0a8..d03bb966f6a7d5 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/DecisionTreeFromSparkExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/DecisionTreeFromSparkExample.java @@ -34,7 +34,7 @@ import org.apache.ignite.ml.selection.scoring.metric.classification.Accuracy; import org.apache.ignite.ml.sparkmodelparser.SparkModelParser; import org.apache.ignite.ml.sparkmodelparser.SupportedSparkModels; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Run Decision Tree model loaded from snappy.parquet file. The snappy.parquet file was generated by Spark MLLib @@ -69,7 +69,7 @@ public static void main(String[] args) throws FileNotFoundException { final Vectorizer vectorizer = new DummyVectorizer(0, 5, 6, 4).labeled(1); - DecisionTreeNode mdl = (DecisionTreeNode)SparkModelParser.parse( + DecisionTreeModel mdl = (DecisionTreeModel)SparkModelParser.parse( SPARK_MDL_PATH, SupportedSparkModels.DECISION_TREE, env diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/DecisionTreeRegressionFromSparkExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/DecisionTreeRegressionFromSparkExample.java index 9c36198b2cf389..5fd446140f38af 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/DecisionTreeRegressionFromSparkExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/DecisionTreeRegressionFromSparkExample.java @@ -35,7 +35,7 @@ import org.apache.ignite.ml.sparkmodelparser.SparkModelParser; import org.apache.ignite.ml.sparkmodelparser.SupportedSparkModels; import org.apache.ignite.ml.structures.LabeledVector; -import 
org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Run Decision tree regression model loaded from snappy.parquet file. The snappy.parquet file was generated by Spark @@ -69,7 +69,7 @@ public static void main(String[] args) throws FileNotFoundException { final Vectorizer vectorizer = new DummyVectorizer(0, 1, 5, 6).labeled(4); - DecisionTreeNode mdl = (DecisionTreeNode)SparkModelParser.parse( + DecisionTreeModel mdl = (DecisionTreeModel)SparkModelParser.parse( SPARK_MDL_PATH, SupportedSparkModels.DECISION_TREE_REGRESSION, env diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/EncoderExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/EncoderExample.java index c24091c253b392..233cb13b9135b6 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/EncoderExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/EncoderExample.java @@ -31,7 +31,7 @@ import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; import org.apache.ignite.ml.selection.scoring.metric.classification.Accuracy; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Example that shows how to use String Encoder preprocessor to encode features presented as a strings. @@ -73,7 +73,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(5, 0); // Train decision tree model. 
- DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( ignite, dataCache, encoderPreprocessor diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/EncoderExampleWithNormalization.java b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/EncoderExampleWithNormalization.java index d9482a5123477a..7270b03e4016f1 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/EncoderExampleWithNormalization.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/EncoderExampleWithNormalization.java @@ -32,7 +32,7 @@ import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; import org.apache.ignite.ml.selection.scoring.metric.classification.Accuracy; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Example that shows how to combine together two preprocessors: String Encoder preprocessor to encode features presented as a strings @@ -80,7 +80,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(5, 0); // Train decision tree model. 
- DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( ignite, dataCache, normalizer diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/LabelEncoderExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/LabelEncoderExample.java index d97c49c78411a4..3547d7e20106c4 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/LabelEncoderExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/LabelEncoderExample.java @@ -31,7 +31,7 @@ import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; import org.apache.ignite.ml.selection.scoring.metric.classification.Accuracy; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Example that shows how to use Label Encoder preprocessor to encode labels presented as a strings. @@ -79,7 +79,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(5, 0); // Train decision tree model. 
- DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( ignite, dataCache, lbEncoderPreprocessor diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/BostonHousePricesPredictionExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/BostonHousePricesPredictionExample.java index 511eb0501c1817..c572d81038741a 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/BostonHousePricesPredictionExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/BostonHousePricesPredictionExample.java @@ -105,7 +105,7 @@ public static void main(String[] args) throws IOException { private static String toString(LinearRegressionModel mdl) { BiFunction formatter = (idx, val) -> String.format("%.2f*f%d", val, idx); - Vector weights = mdl.getWeights(); + Vector weights = mdl.weights(); StringBuilder sb = new StringBuilder(formatter.apply(0, weights.get(0))); for (int fid = 1; fid < weights.size(); fid++) { @@ -114,7 +114,7 @@ private static String toString(LinearRegressionModel mdl) { .append(formatter.apply(fid, Math.abs(w))); } - double intercept = mdl.getIntercept(); + double intercept = mdl.intercept(); sb.append(" ").append(intercept > 0 ? 
"+" : "-").append(" ") .append(String.format("%.2f", Math.abs(intercept))); return sb.toString(); diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/selection/cv/CrossValidationExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/selection/cv/CrossValidationExample.java index e6a4461ca64e1d..93dc0513ebf801 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/selection/cv/CrossValidationExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/selection/cv/CrossValidationExample.java @@ -30,7 +30,7 @@ import org.apache.ignite.ml.selection.scoring.metric.MetricName; import org.apache.ignite.ml.structures.LabeledVector; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Run decision tree classification with @@ -75,7 +75,7 @@ public static void main(String... args) { LabeledDummyVectorizer vectorizer = new LabeledDummyVectorizer<>(); - CrossValidation> scoreCalculator + CrossValidation> scoreCalculator = new CrossValidation<>(); double[] accuracyScores = scoreCalculator diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/sql/DecisionTreeClassificationTrainerSQLInferenceExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/sql/DecisionTreeClassificationTrainerSQLInferenceExample.java index 543e211f06b3f1..68058b75b9eb32 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/sql/DecisionTreeClassificationTrainerSQLInferenceExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/sql/DecisionTreeClassificationTrainerSQLInferenceExample.java @@ -30,7 +30,7 @@ import org.apache.ignite.ml.sql.SQLFunctions; import org.apache.ignite.ml.sql.SqlDatasetBuilder; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; import static 
org.apache.ignite.examples.ml.sql.DecisionTreeClassificationTrainerSQLTableExample.loadTitanicDatasets; @@ -101,7 +101,7 @@ public static void main(String[] args) throws IOException { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(4, 0); System.out.println(">>> Perform training..."); - DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( new SqlDatasetBuilder(ignite, "SQL_PUBLIC_TITANIC_TRAIN"), new BinaryObjectVectorizer<>("pclass", "age", "sibsp", "parch", "fare") .withFeature("sex", BinaryObjectVectorizer.Mapping.create().map("male", 1.0).defaultValue(0.0)) diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/sql/DecisionTreeClassificationTrainerSQLTableExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/sql/DecisionTreeClassificationTrainerSQLTableExample.java index 083608ee23e674..d05d1a91d814f7 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/sql/DecisionTreeClassificationTrainerSQLTableExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/sql/DecisionTreeClassificationTrainerSQLTableExample.java @@ -34,7 +34,7 @@ import org.apache.ignite.ml.math.primitives.vector.VectorUtils; import org.apache.ignite.ml.sql.SqlDatasetBuilder; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Example of using distributed {@link DecisionTreeClassificationTrainer} on a data stored in SQL table. 
@@ -101,7 +101,7 @@ public static void main(String[] args) throws IgniteCheckedException, IOExceptio DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(4, 0); System.out.println(">>> Perform training..."); - DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( new SqlDatasetBuilder(ignite, "SQL_PUBLIC_TITANIC_TRAIN"), new BinaryObjectVectorizer<>("pclass", "age", "sibsp", "parch", "fare") .withFeature("sex", BinaryObjectVectorizer.Mapping.create().map("male", 1.0).defaultValue(0.0)) diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeClassificationTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeClassificationTrainerExample.java index 600f4a595e0a4e..b1cf23e2de2dd2 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeClassificationTrainerExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeClassificationTrainerExample.java @@ -28,7 +28,7 @@ import org.apache.ignite.ml.math.primitives.vector.VectorUtils; import org.apache.ignite.ml.structures.LabeledVector; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Example of using distributed {@link DecisionTreeClassificationTrainer}. @@ -75,7 +75,7 @@ public static void main(String... args) { // Train decision tree model. 
LabeledDummyVectorizer vectorizer = new LabeledDummyVectorizer<>(); - DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( ignite, trainingSet, vectorizer diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeRegressionTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeRegressionTrainerExample.java index 1a1977124879b8..5cfb828c8bd870 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeRegressionTrainerExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeRegressionTrainerExample.java @@ -25,7 +25,7 @@ import org.apache.ignite.ml.dataset.feature.extractor.impl.LabeledDummyVectorizer; import org.apache.ignite.ml.math.primitives.vector.VectorUtils; import org.apache.ignite.ml.structures.LabeledVector; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; import org.apache.ignite.ml.tree.DecisionTreeRegressionTrainer; /** @@ -70,7 +70,7 @@ public static void main(String... args) { DecisionTreeRegressionTrainer trainer = new DecisionTreeRegressionTrainer(10, 0); // Train decision tree model. 
- DecisionTreeNode mdl = trainer.fit(ignite, trainingSet, new LabeledDummyVectorizer<>()); + DecisionTreeModel mdl = trainer.fit(ignite, trainingSet, new LabeledDummyVectorizer<>()); System.out.println(">>> Decision tree regression model: " + mdl); diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesClassificationTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesClassificationTrainerExample.java index a2eaf47d46636d..7e6c5d3f723d67 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesClassificationTrainerExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesClassificationTrainerExample.java @@ -22,12 +22,12 @@ import org.apache.ignite.Ignition; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; -import org.apache.ignite.ml.composition.ModelsComposition; +import org.apache.ignite.ml.composition.boosting.GDBModel; +import org.apache.ignite.ml.composition.boosting.GDBTrainer; import org.apache.ignite.ml.composition.boosting.convergence.mean.MeanAbsValueConvergenceCheckerFactory; import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; import org.apache.ignite.ml.dataset.feature.extractor.impl.DoubleArrayVectorizer; import org.apache.ignite.ml.math.primitives.vector.VectorUtils; -import org.apache.ignite.ml.trainers.DatasetTrainer; import org.apache.ignite.ml.tree.boosting.GDBBinaryClassifierOnTreesTrainer; import org.jetbrains.annotations.NotNull; @@ -58,11 +58,11 @@ public static void main(String... args) { trainingSet = fillTrainingData(ignite, trainingSetCfg); // Create classification trainer. - DatasetTrainer trainer = new GDBBinaryClassifierOnTreesTrainer(1.0, 300, 2, 0.) + GDBTrainer trainer = new GDBBinaryClassifierOnTreesTrainer(1.0, 300, 2, 0.) 
.withCheckConvergenceStgyFactory(new MeanAbsValueConvergenceCheckerFactory(0.1)); // Train decision tree model. - ModelsComposition mdl = trainer.fit( + GDBModel mdl = trainer.fit( ignite, trainingSet, new DoubleArrayVectorizer().labeled(Vectorizer.LabelCoordinate.LAST) diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesRegressionTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesRegressionTrainerExample.java index 09dd708b0bb891..a6ea135aa8b5db 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesRegressionTrainerExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesRegressionTrainerExample.java @@ -22,14 +22,12 @@ import org.apache.ignite.Ignition; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; -import org.apache.ignite.ml.composition.ModelsComposition; +import org.apache.ignite.ml.composition.boosting.GDBModel; +import org.apache.ignite.ml.composition.boosting.GDBTrainer; import org.apache.ignite.ml.composition.boosting.convergence.mean.MeanAbsValueConvergenceCheckerFactory; import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; import org.apache.ignite.ml.dataset.feature.extractor.impl.DoubleArrayVectorizer; -import org.apache.ignite.ml.inference.Model; -import org.apache.ignite.ml.math.primitives.vector.Vector; import org.apache.ignite.ml.math.primitives.vector.VectorUtils; -import org.apache.ignite.ml.trainers.DatasetTrainer; import org.apache.ignite.ml.tree.boosting.GDBRegressionOnTreesTrainer; import org.jetbrains.annotations.NotNull; @@ -60,11 +58,11 @@ public static void main(String... args) { trainingSet = fillTrainingData(ignite, trainingSetCfg); // Create regression trainer. - DatasetTrainer trainer = new GDBRegressionOnTreesTrainer(1.0, 2000, 1, 0.) 
+ GDBTrainer trainer = new GDBRegressionOnTreesTrainer(1.0, 2000, 1, 0.) .withCheckConvergenceStgyFactory(new MeanAbsValueConvergenceCheckerFactory(0.001)); // Train decision tree model. - Model mdl = trainer.fit( + GDBModel mdl = trainer.fit( ignite, trainingSet, new DoubleArrayVectorizer().labeled(Vectorizer.LabelCoordinate.LAST) diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_11_Boosting.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_11_Boosting.java index b9006f536505f8..b8e1d00f690dfa 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_11_Boosting.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_11_Boosting.java @@ -21,7 +21,8 @@ import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.Ignition; -import org.apache.ignite.ml.composition.ModelsComposition; +import org.apache.ignite.ml.composition.boosting.GDBModel; +import org.apache.ignite.ml.composition.boosting.GDBTrainer; import org.apache.ignite.ml.composition.boosting.convergence.median.MedianOfMedianConvergenceCheckerFactory; import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; import org.apache.ignite.ml.dataset.feature.extractor.impl.DummyVectorizer; @@ -36,7 +37,6 @@ import org.apache.ignite.ml.selection.scoring.metric.MetricName; import org.apache.ignite.ml.selection.split.TrainTestDatasetSplitter; import org.apache.ignite.ml.selection.split.TrainTestSplit; -import org.apache.ignite.ml.trainers.DatasetTrainer; import org.apache.ignite.ml.tree.boosting.GDBBinaryClassifierOnTreesTrainer; /** @@ -102,11 +102,11 @@ public static void main(String[] args) { ); // Create classification trainer. - DatasetTrainer trainer = new GDBBinaryClassifierOnTreesTrainer(0.5, 500, 4, 0.) + GDBTrainer trainer = new GDBBinaryClassifierOnTreesTrainer(0.5, 500, 4, 0.) 
.withCheckConvergenceStgyFactory(new MedianOfMedianConvergenceCheckerFactory(0.1)); // Train decision tree model. - ModelsComposition mdl = trainer.fit( + GDBModel mdl = trainer.fit( ignite, dataCache, split.getTrainFilter(), diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_1_Read_and_Learn.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_1_Read_and_Learn.java index b6df5d68a637a9..97ccb5835a721a 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_1_Read_and_Learn.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_1_Read_and_Learn.java @@ -27,7 +27,7 @@ import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; import org.apache.ignite.ml.selection.scoring.metric.classification.Accuracy; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Usage of {@link DecisionTreeClassificationTrainer} to predict death in the disaster. 
@@ -56,7 +56,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(5, 0); - DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( ignite, dataCache, vectorizer diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_2_Imputing.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_2_Imputing.java index 094a966e350864..a020dbea669f36 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_2_Imputing.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_2_Imputing.java @@ -29,7 +29,7 @@ import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; import org.apache.ignite.ml.selection.scoring.metric.classification.Accuracy; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Usage of {@link ImputerTrainer} to fill missed data ({@code Double.NaN}) values in the chosen columns. @@ -66,7 +66,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(5, 0); // Train decision tree model. 
- DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( ignite, dataCache, vectorizer diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial.java index 68b05a46d8fd5a..c97ee387310e2b 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial.java @@ -31,7 +31,7 @@ import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; import org.apache.ignite.ml.selection.scoring.metric.classification.Accuracy; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Let's add two categorial features "sex", "embarked" to predict more precisely than in {@link Step_1_Read_and_Learn}. @@ -80,7 +80,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(5, 0); // Train decision tree model. 
- DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( ignite, dataCache, imputingPreprocessor diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial_with_One_Hot_Encoder.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial_with_One_Hot_Encoder.java index 206d2dcaa06b5b..1355979d228faa 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial_with_One_Hot_Encoder.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial_with_One_Hot_Encoder.java @@ -31,7 +31,7 @@ import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; import org.apache.ignite.ml.selection.scoring.metric.classification.Accuracy; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Let's add two categorial features "sex", "embarked" to predict more precisely than in {@link @@ -83,7 +83,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(5, 0); // Train decision tree model. 
- DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( ignite, dataCache, imputingPreprocessor diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_4_Add_age_fare.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_4_Add_age_fare.java index 1d85a14ac74c22..f4763a1f2b66b4 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_4_Add_age_fare.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_4_Add_age_fare.java @@ -31,7 +31,7 @@ import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; import org.apache.ignite.ml.selection.scoring.metric.classification.Accuracy; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Add yet two numerical features "age", "fare" to improve our model over {@link Step_3_Categorial}. @@ -79,7 +79,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(5, 0); // Train decision tree model. 
- DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( ignite, dataCache, imputingPreprocessor diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_5_Scaling.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_5_Scaling.java index dfb6de0c7d5434..05d0137e05b2eb 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_5_Scaling.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_5_Scaling.java @@ -33,7 +33,7 @@ import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; import org.apache.ignite.ml.selection.scoring.metric.classification.Accuracy; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * {@link MinMaxScalerTrainer} and {@link NormalizationTrainer} are used in this example due to different values @@ -97,7 +97,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(5, 0); // Train decision tree model. 
- DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( ignite, dataCache, normalizationPreprocessor diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_7_Split_train_test.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_7_Split_train_test.java index e104c510b348a4..a60a8bac9812bf 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_7_Split_train_test.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_7_Split_train_test.java @@ -35,7 +35,7 @@ import org.apache.ignite.ml.selection.split.TrainTestDatasetSplitter; import org.apache.ignite.ml.selection.split.TrainTestSplit; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * The highest accuracy in the previous example ({@link Step_6_KNN}) is the result of @@ -103,7 +103,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(5, 0); // Train decision tree model. 
- DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( ignite, dataCache, split.getTrainFilter(), diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV.java index 0da797d06b55f2..20f4a7287b7bd2 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV.java @@ -38,7 +38,7 @@ import org.apache.ignite.ml.selection.split.TrainTestDatasetSplitter; import org.apache.ignite.ml.selection.split.TrainTestSplit; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * To choose the best hyper-parameters the cross-validation will be used in this example. @@ -126,7 +126,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(maxDeep, 0); - CrossValidation scoreCalculator + CrossValidation scoreCalculator = new CrossValidation<>(); double[] scores = scoreCalculator @@ -167,7 +167,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(bestMaxDeep, 0); // Train decision tree model. 
- DecisionTreeNode bestMdl = trainer.fit( + DecisionTreeModel bestMdl = trainer.fit( ignite, dataCache, split.getTrainFilter(), diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV_with_Param_Grid.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV_with_Param_Grid.java index 5b6271414541e9..963e1b7fed1b0e 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV_with_Param_Grid.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV_with_Param_Grid.java @@ -40,7 +40,7 @@ import org.apache.ignite.ml.selection.split.TrainTestDatasetSplitter; import org.apache.ignite.ml.selection.split.TrainTestSplit; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * To choose the best hyper-parameters the cross-validation with {@link ParamGrid} will be used in this example. @@ -119,7 +119,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainerCV = new DecisionTreeClassificationTrainer(); - CrossValidation scoreCalculator + CrossValidation scoreCalculator = new CrossValidation<>(); ParamGrid paramGrid = new ParamGrid() @@ -156,7 +156,7 @@ public static void main(String[] args) { -> System.out.println("Score " + Arrays.toString(score) + " for hyper params " + hyperParams)); // Train decision tree model. 
- DecisionTreeNode bestMdl = trainer.fit( + DecisionTreeModel bestMdl = trainer.fit( ignite, dataCache, split.getTrainFilter(), diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV_with_Param_Grid_and_pipeline.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV_with_Param_Grid_and_pipeline.java index 6be849624382ae..1aa2d576946cf5 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV_with_Param_Grid_and_pipeline.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV_with_Param_Grid_and_pipeline.java @@ -36,7 +36,7 @@ import org.apache.ignite.ml.selection.split.TrainTestDatasetSplitter; import org.apache.ignite.ml.selection.split.TrainTestSplit; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * To choose the best hyper-parameters the cross-validation with {@link ParamGrid} will be used in this example. @@ -91,7 +91,7 @@ public static void main(String[] args) { // Tune hyper-parameters with K-fold Cross-Validation on the split training set. 
- CrossValidation scoreCalculator + CrossValidation scoreCalculator = new CrossValidation<>(); ParamGrid paramGrid = new ParamGrid() diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_13_RandomSearch.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_13_RandomSearch.java index d7e2f27aad9009..c489fc962bba77 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_13_RandomSearch.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_13_RandomSearch.java @@ -42,7 +42,7 @@ import org.apache.ignite.ml.selection.split.TrainTestDatasetSplitter; import org.apache.ignite.ml.selection.split.TrainTestSplit; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * To choose the best hyper-parameters the cross-validation with {@link ParamGrid} will be used in this example. @@ -123,7 +123,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainerCV = new DecisionTreeClassificationTrainer(); - CrossValidation scoreCalculator + CrossValidation scoreCalculator = new CrossValidation<>(); ParamGrid paramGrid = new ParamGrid() @@ -166,7 +166,7 @@ public static void main(String[] args) { -> System.out.println("Score " + Arrays.toString(score) + " for hyper params " + hyperParams)); // Train decision tree model. 
- DecisionTreeNode bestMdl = trainer.fit( + DecisionTreeModel bestMdl = trainer.fit( ignite, dataCache, split.getTrainFilter(), diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_14_Parallel_Brute_Force_Search.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_14_Parallel_Brute_Force_Search.java index 017f123674494c..b63bf9643be632 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_14_Parallel_Brute_Force_Search.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_14_Parallel_Brute_Force_Search.java @@ -45,7 +45,7 @@ import org.apache.ignite.ml.selection.split.TrainTestDatasetSplitter; import org.apache.ignite.ml.selection.split.TrainTestSplit; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * To choose the best hyper-parameters the cross-validation with {@link ParamGrid} will be used in this example. @@ -126,7 +126,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainerCV = new DecisionTreeClassificationTrainer(); - CrossValidation scoreCalculator + CrossValidation scoreCalculator = new CrossValidation<>(); ParamGrid paramGrid = new ParamGrid() @@ -168,7 +168,7 @@ public static void main(String[] args) { -> System.out.println("Score " + Arrays.toString(score) + " for hyper params " + hyperParams)); // Train decision tree model. 
- DecisionTreeNode bestMdl = trainer.fit( + DecisionTreeModel bestMdl = trainer.fit( ignite, dataCache, split.getTrainFilter(), diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_15_Parallel_Random_Search.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_15_Parallel_Random_Search.java index 3a3e9e8cdddfb5..ac6c1eb3c988a9 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_15_Parallel_Random_Search.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_15_Parallel_Random_Search.java @@ -45,7 +45,7 @@ import org.apache.ignite.ml.selection.split.TrainTestDatasetSplitter; import org.apache.ignite.ml.selection.split.TrainTestSplit; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * To choose the best hyper-parameters the cross-validation with {@link ParamGrid} will be used in this example. @@ -125,7 +125,7 @@ public static void main(String[] args) { // Tune hyper-parameters with K-fold Cross-Validation on the split training set. DecisionTreeClassificationTrainer trainerCV = new DecisionTreeClassificationTrainer(); - CrossValidation scoreCalculator + CrossValidation scoreCalculator = new CrossValidation<>(); ParamGrid paramGrid = new ParamGrid() @@ -171,7 +171,7 @@ public static void main(String[] args) { -> System.out.println("Score " + Arrays.toString(score) + " for hyper params " + hyperParams)); // Train decision tree model. 
- DecisionTreeNode bestMdl = trainer.fit( + DecisionTreeModel bestMdl = trainer.fit( ignite, dataCache, split.getTrainFilter(), diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_16_Genetic_Programming_Search.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_16_Genetic_Programming_Search.java index bee51e4b1e97f9..408eb48289c212 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_16_Genetic_Programming_Search.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_16_Genetic_Programming_Search.java @@ -42,7 +42,7 @@ import org.apache.ignite.ml.selection.split.TrainTestDatasetSplitter; import org.apache.ignite.ml.selection.split.TrainTestSplit; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * To choose the best hyper-parameters the cross-validation with {@link ParamGrid} will be used in this example. @@ -123,7 +123,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainerCV = new DecisionTreeClassificationTrainer(); - CrossValidation scoreCalculator + CrossValidation scoreCalculator = new CrossValidation<>(); ParamGrid paramGrid = new ParamGrid() @@ -162,7 +162,7 @@ public static void main(String[] args) { -> System.out.println("Score " + Arrays.toString(score) + " for hyper params " + hyperParams)); // Train decision tree model. 
- DecisionTreeNode bestMdl = trainer.fit( + DecisionTreeModel bestMdl = trainer.fit( ignite, dataCache, split.getTrainFilter(), diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_17_Parallel_Genetic_Programming_Search.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_17_Parallel_Genetic_Programming_Search.java index 34a8158dec6305..a9d39bd3092199 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_17_Parallel_Genetic_Programming_Search.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_17_Parallel_Genetic_Programming_Search.java @@ -45,7 +45,7 @@ import org.apache.ignite.ml.selection.split.TrainTestDatasetSplitter; import org.apache.ignite.ml.selection.split.TrainTestSplit; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * To choose the best hyper-parameters the cross-validation with {@link ParamGrid} will be used in this example. @@ -126,7 +126,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainerCV = new DecisionTreeClassificationTrainer(); - CrossValidation scoreCalculator + CrossValidation scoreCalculator = new CrossValidation<>(); ParamGrid paramGrid = new ParamGrid() @@ -168,7 +168,7 @@ public static void main(String[] args) { -> System.out.println("Score " + Arrays.toString(score) + " for hyper params " + hyperParams)); // Train decision tree model. 
- DecisionTreeNode bestMdl = trainer.fit( + DecisionTreeModel bestMdl = trainer.fit( ignite, dataCache, split.getTrainFilter(), diff --git a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/thin/JmhThinClientAbstractBenchmark.java b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/thin/JmhThinClientAbstractBenchmark.java new file mode 100644 index 00000000000000..6b6dc53dffd545 --- /dev/null +++ b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/thin/JmhThinClientAbstractBenchmark.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.benchmarks.jmh.thin; + +import java.util.stream.IntStream; + +import org.apache.ignite.Ignite; +import org.apache.ignite.Ignition; +import org.apache.ignite.client.ClientCache; +import org.apache.ignite.client.IgniteClient; +import org.apache.ignite.configuration.ClientConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.benchmarks.jmh.JmhAbstractBenchmark; +import org.apache.ignite.internal.util.typedef.internal.A; +import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.TearDown; + +/** + * Base class for thin client benchmarks. + */ +@State(Scope.Benchmark) +public abstract class JmhThinClientAbstractBenchmark extends JmhAbstractBenchmark { + /** Property: nodes count. */ + protected static final String PROP_DATA_NODES = "ignite.jmh.thin.dataNodes"; + + /** Default amount of nodes. */ + protected static final int DFLT_DATA_NODES = 4; + + /** Items count. */ + protected static final int CNT = 1000; + + /** Cache value. */ + protected static final byte[] PAYLOAD = new byte[1000]; + + /** IP finder shared across nodes. */ + private static final TcpDiscoveryVmIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true); + + /** Default cache name. */ + private static final String DEFAULT_CACHE_NAME = "default"; + + /** Target node. */ + protected Ignite node; + + /** Target cache. */ + protected ClientCache cache; + + /** Thin client. */ + protected IgniteClient client; + + /** + * Setup routine. Child classes must invoke this method first. 
+ * + */ + @Setup + public void setup() { + System.out.println(); + System.out.println("--------------------"); + System.out.println("IGNITE BENCHMARK INFO: "); + System.out.println("\tdata nodes: " + intProperty(PROP_DATA_NODES, DFLT_DATA_NODES)); + System.out.println("--------------------"); + System.out.println(); + + int nodesCnt = intProperty(PROP_DATA_NODES, DFLT_DATA_NODES); + + A.ensure(nodesCnt >= 1, "nodesCnt >= 1"); + + node = Ignition.start(configuration("node0")); + + for (int i = 1; i < nodesCnt; i++) + Ignition.start(configuration("node" + i)); + + String[] addrs = IntStream + .range(10800, 10800 + nodesCnt) + .mapToObj(p -> "127.0.0.1:" + p) + .toArray(String[]::new); + + ClientConfiguration cfg = new ClientConfiguration() + .setAddresses(addrs) + .setPartitionAwarenessEnabled(true); + + client = Ignition.startClient(cfg); + + cache = client.getOrCreateCache(DEFAULT_CACHE_NAME); + + System.out.println("Loading test data..."); + + for (int i = 0; i < CNT; i++) + cache.put(i, PAYLOAD); + + System.out.println("Test data loaded: " + CNT); + } + + /** + * Tear down routine. + * + */ + @TearDown + public void tearDown() throws Exception { + client.close(); + Ignition.stopAll(true); + } + + /** + * Create Ignite configuration. + * + * @param igniteInstanceName Ignite instance name. + * @return Configuration. 
+ */ + protected IgniteConfiguration configuration(String igniteInstanceName) { + + return new IgniteConfiguration() + .setIgniteInstanceName(igniteInstanceName) + .setLocalHost("127.0.0.1") + .setDiscoverySpi(new TcpDiscoverySpi().setIpFinder(IP_FINDER)); + } +} diff --git a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/thin/JmhThinClientCacheBenchmark.java b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/thin/JmhThinClientCacheBenchmark.java new file mode 100644 index 00000000000000..88e6a87171d9c6 --- /dev/null +++ b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/thin/JmhThinClientCacheBenchmark.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.benchmarks.jmh.thin; + +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; + +import org.apache.ignite.internal.benchmarks.jmh.runner.JmhIdeBenchmarkRunner; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.Mode; + +/** + * Thin client cache benchmark. 
+ * + * Results on i7-9700K, Ubuntu 20.04.1, JDK 1.8.0_275: + * Benchmark Mode Cnt Score Error Units + * JmhThinClientCacheBenchmark.get thrpt 10 92501.557 ± 1380.384 ops/s + * JmhThinClientCacheBenchmark.put thrpt 10 82907.446 ± 7572.537 ops/s + * + * JmhThinClientCacheBenchmark.get avgt 10 41.505 ± 1.018 us/op + * JmhThinClientCacheBenchmark.put avgt 10 44.623 ± 0.779 us/op + */ +public class JmhThinClientCacheBenchmark extends JmhThinClientAbstractBenchmark { + /** + * Cache put benchmark. + */ + @Benchmark + public void put() { + int key = ThreadLocalRandom.current().nextInt(CNT); + + cache.put(key, PAYLOAD); + } + + /** + * Cache get benchmark. + */ + @Benchmark + public Object get() { + int key = ThreadLocalRandom.current().nextInt(CNT); + + return cache.get(key); + } + + /** + * Run benchmarks. + * + * @param args Arguments. + * @throws Exception If failed. + */ + public static void main(String[] args) throws Exception { + JmhIdeBenchmarkRunner runner = JmhIdeBenchmarkRunner.create() + .forks(1) + .threads(4) + .benchmarks(JmhThinClientCacheBenchmark.class.getSimpleName()) + .jvmArguments("-Xms4g", "-Xmx4g"); + + runner + .benchmarkModes(Mode.Throughput) + .run(); + + runner + .benchmarkModes(Mode.AverageTime) + .outputTimeUnit(TimeUnit.MICROSECONDS) + .run(); + } +} diff --git a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java index 7a35430b36ea5a..af843cbbd5f74d 100644 --- a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java +++ b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java @@ -103,6 +103,11 @@ private static class FakeReuseList implements ReuseList { return pageId == null ? 
0L : pageId; } + /** {@inheritDoc} */ + @Override public long initRecycledPage(long pageId, byte flag, PageIO initIO) throws IgniteCheckedException { + return pageId; + } + /** {@inheritDoc} */ @Override public long recycledPagesCount() throws IgniteCheckedException { return deque.size(); @@ -186,6 +191,7 @@ protected static class TestTree extends BPlusTree { reuseList, new IOVersions<>(new LongInnerIO()), new IOVersions<>(new LongLeafIO()), + PageIdAllocator.FLAG_IDX, null, null ); diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/client/rest/GridProbeCommandTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/client/rest/GridProbeCommandTest.java new file mode 100644 index 00000000000000..af09544db0d57a --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/internal/client/rest/GridProbeCommandTest.java @@ -0,0 +1,224 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.client.rest; + +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.HttpURLConnection; +import java.net.URL; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.ignite.configuration.ConnectorConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.processors.rest.GridRestCommand; +import org.apache.ignite.internal.processors.rest.GridRestResponse; +import org.apache.ignite.internal.processors.rest.handlers.GridRestCommandHandler; +import org.apache.ignite.internal.processors.rest.handlers.probe.GridProbeCommandHandler; +import org.apache.ignite.internal.processors.rest.request.GridRestCacheRequest; +import org.apache.ignite.plugin.AbstractTestPluginProvider; +import org.apache.ignite.plugin.PluginProvider; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; + +/** + * Test whether REST probe command works correctly when kernal has started and vice versa. + */ +public class GridProbeCommandTest extends GridCommonAbstractTest { + /** */ + private static final int JETTY_PORT = 8080; + + /** */ + private CountDownLatch triggerRestCmdLatch = new CountDownLatch(1); + + /** */ + private CountDownLatch triggerPluginStartLatch = new CountDownLatch(1); + + /** */ + public static Map executeProbeRestRequest() throws IOException { + HttpURLConnection conn = (HttpURLConnection)(new URL("http://localhost:" + JETTY_PORT + "/ignite?cmd=probe").openConnection()); + conn.connect(); + + boolean isHTTP_OK = conn.getResponseCode() == HttpURLConnection.HTTP_OK; + + Map restResponse = null; + + try (InputStreamReader streamReader = new InputStreamReader(isHTTP_OK ? 
conn.getInputStream() : conn.getErrorStream())) { + + ObjectMapper objMapper = new ObjectMapper(); + restResponse = objMapper.readValue(streamReader, + new TypeReference>() { + }); + + log.info("probe command response is: " + restResponse); + + } + catch (Exception e) { + log.error("error executing probe rest command", e); + } + return restResponse; + + } + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + cfg.setConnectorConfiguration(new ConnectorConfiguration()); + + if (igniteInstanceName.equals("regular")) + return cfg; + else if (igniteInstanceName.equals("delayedStart")) { + PluginProvider delayedStartPluginProvider = new DelayedStartPluginProvider(triggerPluginStartLatch, triggerRestCmdLatch); + + cfg.setPluginProviders(new PluginProvider[] {delayedStartPluginProvider}); + } + + return cfg; + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + super.afterTest(); + + stopAllGrids(false); + } + + /** + * Test for the REST probe command + * + * @throws Exception If failed. + */ + @Test + public void testRestProbeCommand() throws Exception { + startGrid("regular"); + + GridRestCommandHandler hnd = new GridProbeCommandHandler((grid("regular")).context()); + + GridRestCacheRequest req = new GridRestCacheRequest(); + req.command(GridRestCommand.PROBE); + + IgniteInternalFuture resp = hnd.handleAsync(req); + resp.get(); + + assertEquals(GridRestResponse.STATUS_SUCCESS, resp.result().getSuccessStatus()); + assertEquals("grid has started", resp.result().getResponse()); + + } + + /** + *

Test rest cmd=probe command given a non-fully started kernal.

+ *

1. start the grid on a separate thread w/a plugin that will keep it waiting, at a point after rest http + * processor is ready, until signaled to proceed.

+ *

2. when the grid.start() has reached the plugin init method (rest http processor has started now), issue a + * rest command against the non-fully started kernal.

+ *

3. validate that the probe cmd has returned the appropriate erroneous code and message.

+ *

4. stop the grid.

+ * + * @throws Exception If failed. + */ + @Test + public void testRestProbeCommandGridNotStarted() throws Exception { + new Thread(new Runnable() { + @Override public void run() { + try { + startGrid("delayedStart"); + } + catch (Exception e) { + log.error("error when starting delatedStart grid", e); + } + } + }).start(); + + Map probeRestCommandResponse; + + log.info("awaiting plugin handler latch"); + triggerPluginStartLatch.await(); + log.info("starting rest command url call"); + try { + probeRestCommandResponse = executeProbeRestRequest(); + log.info("finished rest command url call"); + } + finally { + triggerRestCmdLatch.countDown(); //make sure the grid shuts down + } + + assertTrue(probeRestCommandResponse.get("error").equals("grid has not started")); + assertEquals(GridRestResponse.SERVICE_UNAVAILABLE, probeRestCommandResponse.get("successStatus")); + } + + /** + *

Start a regular grid, issue a cmd=probe rest command, and validate restponse + * + * @throws Exception If failed. + */ + @Test + public void testRestProbeCommandGridStarted() throws Exception { + startGrid("regular"); + + Map probeRestCommandResponse; + + probeRestCommandResponse = executeProbeRestRequest(); + + assertTrue(probeRestCommandResponse.get("response").equals("grid has started")); + assertEquals(0, probeRestCommandResponse.get("successStatus")); + } + + /** + * This plugin awaits until it is given the signal to process -- thereby allowing an http request against a non + * fully started kernal. + */ + public static class DelayedStartPluginProvider extends AbstractTestPluginProvider { + /** */ + private CountDownLatch triggerRestCmd; + + /** */ + private CountDownLatch triggerPluginStart; + + /** */ + public DelayedStartPluginProvider(CountDownLatch triggerPluginStartLatch, + CountDownLatch triggerRestCmdLatch) { + this.triggerPluginStart = triggerPluginStartLatch; + this.triggerRestCmd = triggerRestCmdLatch; + } + + /** {@inheritDoc} */ + @Override public String name() { + return "DelayedStartPlugin"; + } + + /** {@inheritDoc} */ + @Override public void onIgniteStart() { + super.onIgniteStart(); + + triggerPluginStart.countDown(); + + log.info("awaiting rest command latch ..."); + + try { + triggerRestCmd.await(); + } + catch (InterruptedException e) { + log.error("error in custom plugin", e); + } + + log.info("finished awaiting rest command latch."); + } + } +} diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/client/suite/IgniteClientTestSuite.java b/modules/clients/src/test/java/org/apache/ignite/internal/client/suite/IgniteClientTestSuite.java index 8a3936b17abd14..7408f4e3e40e1c 100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/client/suite/IgniteClientTestSuite.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/client/suite/IgniteClientTestSuite.java @@ -42,6 +42,7 @@ import 
org.apache.ignite.internal.client.integration.ClientTcpSslMultiNodeSelfTest; import org.apache.ignite.internal.client.integration.ClientTcpSslSelfTest; import org.apache.ignite.internal.client.integration.ClientTcpUnreachableMultiNodeSelfTest; +import org.apache.ignite.internal.client.rest.GridProbeCommandTest; import org.apache.ignite.internal.client.router.ClientFailedInitSelfTest; import org.apache.ignite.internal.client.router.RouterFactorySelfTest; import org.apache.ignite.internal.client.router.TcpRouterMultiNodeSelfTest; @@ -134,6 +135,9 @@ ClientTcpUnreachableMultiNodeSelfTest.class, ClientPreferDirectSelfTest.class, + //Test REST probe cmd + GridProbeCommandTest.class, + // Test client with many nodes and in multithreaded scenarios ClientTcpMultiThreadedSelfTest.class, ClientTcpSslMultiThreadedSelfTest.class, diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java index 610d9008de37bf..4b2242e57006b1 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java @@ -21,6 +21,7 @@ import java.sql.Connection; import java.sql.DriverManager; import java.sql.ResultSet; +import java.sql.SQLException; import java.sql.Statement; import org.apache.ignite.IgniteCache; import org.apache.ignite.cache.affinity.AffinityKey; @@ -28,6 +29,7 @@ import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.testframework.GridTestUtils; import org.junit.Test; import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; @@ -255,6 +257,38 @@ public void testCalculatedValue() throws Exception { assert cnt == 3; } + /** + * @throws Exception If failed. 
+ */ + @Test + public void testWrongArgumentType() throws Exception { + try (ResultSet rs = stmt.executeQuery("select * from \"org\".Organization where name = '2'")) { + assertFalse(rs.next()); + } + + // Check non-indexed field. + GridTestUtils.assertThrowsWithCause(() -> { + try (ResultSet rs = stmt.executeQuery("select * from \"org\".Organization where name = 2")) { + assertFalse(rs.next()); + } + + return null; + }, SQLException.class); + + // Check indexed field. + try (ResultSet rs = stmt.executeQuery("select * from \"pers\".Person where name = '2'")) { + assertFalse(rs.next()); + } + + GridTestUtils.assertThrowsWithCause(() -> { + try (ResultSet rs = stmt.executeQuery("select * from \"pers\".Person where name = 2")) { + assertFalse(rs.next()); + } + + return null; + }, SQLException.class); + } + /** * Person. */ @@ -264,7 +298,7 @@ private static class Person implements Serializable { private final int id; /** Name. */ - @QuerySqlField(index = false) + @QuerySqlField(index = true) private final String name; /** Age. 
*/ diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java index c9ee62a75acf17..5975eb0e4e7660 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java @@ -720,6 +720,8 @@ public void testGetAllColumns() throws Exception { "SYS.CACHE_GROUPS.REBALANCE_DELAY.null.19", "SYS.CACHE_GROUPS.REBALANCE_ORDER.null.10", "SYS.CACHE_GROUPS.BACKUPS.null.10", + "SYS.INDEXES.CACHE_GROUP_ID.null.10", + "SYS.INDEXES.CACHE_GROUP_NAME.null.2147483647", "SYS.INDEXES.CACHE_ID.null.10", "SYS.INDEXES.CACHE_NAME.null.2147483647", "SYS.INDEXES.SCHEMA_NAME.null.2147483647", @@ -836,6 +838,8 @@ public void testGetAllColumns() throws Exception { "SYS.NODE_METRICS.RECEIVED_MESSAGES_COUNT.null.10", "SYS.NODE_METRICS.RECEIVED_BYTES_COUNT.null.19", "SYS.NODE_METRICS.OUTBOUND_MESSAGES_QUEUE.null.10", + "SYS.TABLES.CACHE_GROUP_ID.null.10", + "SYS.TABLES.CACHE_GROUP_NAME.null.2147483647", "SYS.TABLES.CACHE_ID.null.10", "SYS.TABLES.CACHE_NAME.null.2147483647", "SYS.TABLES.SCHEMA_NAME.null.2147483647", @@ -920,7 +924,7 @@ public void testGetAllColumns() throws Exception { "SYS.TRANSACTIONS.TOP_VER.null.2147483647", "SYS.TRANSACTIONS.KEYS_COUNT.null.10", "SYS.TRANSACTIONS.CACHE_IDS.null.2147483647", - "SYS.SCHEMAS.NAME.null.2147483647", + "SYS.SCHEMAS.SCHEMA_NAME.null.2147483647", "SYS.SCHEMAS.PREDEFINED.null.1", "SYS.VIEWS.NAME.null.2147483647", "SYS.VIEWS.DESCRIPTION.null.2147483647", diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/AbstractClientCompatibilityTest.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/AbstractClientCompatibilityTest.java new file mode 100644 index 00000000000000..f1ed37b818c16a --- /dev/null +++ 
b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/AbstractClientCompatibilityTest.java @@ -0,0 +1,247 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.compatibility.clients; + +import java.util.Arrays; +import java.util.Collection; +import java.util.stream.Collectors; +import org.apache.ignite.Ignite; +import org.apache.ignite.compatibility.testframework.junits.Dependency; +import org.apache.ignite.compatibility.testframework.junits.IgniteCompatibilityAbstractTest; +import org.apache.ignite.compatibility.testframework.junits.IgniteCompatibilityNodeRunner; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteVersionUtils; +import org.apache.ignite.internal.util.GridJavaProcess; +import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteInClosure; +import org.apache.ignite.lang.IgniteProductVersion; +import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.apache.ignite.testframework.GridTestUtils; +import 
org.apache.ignite.testframework.junits.multijvm.IgniteProcessProxy; +import org.jetbrains.annotations.NotNull; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +/** + * Tests that current client version can connect to the server with specified version and + * specified client version can connect to the current server version. + */ +@RunWith(Parameterized.class) +public abstract class AbstractClientCompatibilityTest extends IgniteCompatibilityAbstractTest { + /** Version 2.5.0. */ + protected static final IgniteProductVersion VER_2_5_0 = IgniteProductVersion.fromString("2.5.0"); + + /** Version 2.7.0. */ + protected static final IgniteProductVersion VER_2_7_0 = IgniteProductVersion.fromString("2.7.0"); + + /** Version 2.8.0. */ + protected static final IgniteProductVersion VER_2_8_0 = IgniteProductVersion.fromString("2.8.0"); + + /** Version 2.9.0. */ + protected static final IgniteProductVersion VER_2_9_0 = IgniteProductVersion.fromString("2.9.0"); + + /** Ignite versions to test. Note: Only released versions or current version should be included to this list. */ + protected static final String[] TESTED_IGNITE_VERSIONS = new String[] { + "2.4.0", + "2.5.0", + "2.6.0", + "2.7.0", + "2.7.5", + "2.7.6", + "2.8.0", + "2.8.1", + "2.9.0", + IgniteVersionUtils.VER_STR + }; + + /** Parameters. */ + @Parameterized.Parameters(name = "Version {0}") + public static Iterable versions() { + return Arrays.stream(TESTED_IGNITE_VERSIONS) + .map(v -> new Object[] {v}) + .collect(Collectors.toList()); + } + + /** Old Ignite version. 
*/ + @Parameterized.Parameter + public String verFormatted; + + /** */ + protected IgniteProductVersion ver; + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + ver = IgniteProductVersion.fromString(verFormatted); + } + + /** {@inheritDoc} */ + @Override protected @NotNull Collection getDependencies(String igniteVer) { + Collection dependencies = super.getDependencies(igniteVer); + + dependencies.add(new Dependency("indexing", "ignite-indexing", false)); + + // Add corresponding H2 version. + if (ver.compareTo(VER_2_7_0) < 0) + dependencies.add(new Dependency("h2", "com.h2database", "h2", "1.4.195", false)); + + return dependencies; + } + + /** + * @throws Exception If failed. + */ + @Test + public void testOldClientToCurrentServer() throws Exception { + try (Ignite ignite = startGrid(0)) { + initNode(ignite); + + if (verFormatted.equals(IgniteVersionUtils.VER_STR)) + testClient(verFormatted); + else { + String fileName = IgniteCompatibilityNodeRunner.storeToFile((IgniteInClosure)this::testClient); + + GridJavaProcess proc = GridJavaProcess.exec( + RemoteClientRunner.class.getName(), + IgniteVersionUtils.VER_STR + ' ' + fileName, + log, + log::info, + null, + null, + getProcessProxyJvmArgs(verFormatted), + null + ); + + try { + GridTestUtils.waitForCondition(() -> !proc.getProcess().isAlive(), 5_000L); + + assertEquals(0, proc.getProcess().exitValue()); + } + finally { + if (proc.getProcess().isAlive()) + proc.kill(); + } + } + } + } + + /** + * @throws Exception If failed. 
+ */ + @Test + public void testCurrentClientToOldServer() throws Exception { + IgniteProcessProxy proxy = null; + + try { + if (verFormatted.equals(IgniteVersionUtils.VER_STR)) { + Ignite ignite = startGrid(0); + + initNode(ignite); + } + else { + Ignite ignite = startGrid(1, verFormatted, this::processRemoteConfiguration, this::initNode); + + proxy = IgniteProcessProxy.ignite(ignite.name()); + } + + testClient(verFormatted); + } + finally { + stopAllGrids(); + + if (proxy != null) { + Process proc = proxy.getProcess().getProcess(); + + // We should wait until process exits, or it can affect next tests. + assertTrue(GridTestUtils.waitForCondition(() -> !proc.isAlive(), 5_000L)); + } + } + } + + /** + * Method to initiate server node (node can be local or remote). + * + * @param ignite Ignite. + */ + protected void initNode(Ignite ignite) { + // No-op. + } + + /** + * Method to change remote server node configuration. + * + * @param cfg Ignite configuraion. + */ + protected void processRemoteConfiguration(IgniteConfiguration cfg) { + cfg.setLocalHost("127.0.0.1"); + cfg.setDiscoverySpi(new TcpDiscoverySpi().setIpFinder(new TcpDiscoveryVmIpFinder(true))); + } + + /** + * Method to test client operations (client can be local or remote). + * + * @param clientVer Client version. + * @param serverVer Server version. + */ + protected abstract void testClient(IgniteProductVersion clientVer, IgniteProductVersion serverVer) + throws Exception; + + /** + * @param serverVer Server version. + */ + private void testClient(String serverVer) { + try { + IgniteProductVersion clientVer = IgniteVersionUtils.VER; + + X.println(">>> Started client test [clientVer=" + clientVer + ", serverVer=" + serverVer + ']'); + + testClient(clientVer, IgniteProductVersion.fromString(serverVer)); + } + catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * Runner class to test client operations from remote JVM process with old Ignite version + * as dependencies in class path. 
+ */ + public static class RemoteClientRunner { + /** */ + public static void main(String[] args) throws Exception { + X.println(GridJavaProcess.PID_MSG_PREFIX + U.jvmPid()); + X.println("Start client connection with Ignite version: " + IgniteVersionUtils.VER); + + if (args.length < 2) + throw new IllegalArgumentException("At least 2 arguments expected: [version] [path/to/closure/file]"); + + String ver = args[0]; + String fileName = args[1]; + + IgniteInClosure clo = IgniteCompatibilityNodeRunner.readClosureFromFileAndDelete(fileName); + + clo.apply(ver); + + X.println("Success"); + } + } +} diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/JavaThinCompatibilityTest.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/JavaThinCompatibilityTest.java new file mode 100644 index 00000000000000..0b870bf7e0dc97 --- /dev/null +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/JavaThinCompatibilityTest.java @@ -0,0 +1,415 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.compatibility.clients; + +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import javax.cache.Cache; +import javax.cache.expiry.CreatedExpiryPolicy; +import javax.cache.expiry.Duration; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteBinary; +import org.apache.ignite.IgniteException; +import org.apache.ignite.Ignition; +import org.apache.ignite.binary.BinaryObject; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.QueryEntity; +import org.apache.ignite.cache.query.ScanQuery; +import org.apache.ignite.client.ClientCache; +import org.apache.ignite.client.ClientCacheConfiguration; +import org.apache.ignite.client.ClientTransaction; +import org.apache.ignite.client.IgniteClient; +import org.apache.ignite.client.Person; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.compute.ComputeJob; +import org.apache.ignite.compute.ComputeJobResult; +import org.apache.ignite.compute.ComputeTaskAdapter; +import org.apache.ignite.configuration.ClientConfiguration; +import org.apache.ignite.configuration.ClientConnectorConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.configuration.ThinClientConfiguration; +import org.apache.ignite.internal.processors.platform.cache.expiry.PlatformExpiryPolicy; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.lang.IgniteProductVersion; +import org.apache.ignite.services.Service; +import org.apache.ignite.services.ServiceContext; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; +import org.junit.Assume; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +/** + * Tests java thin client compatibility. 
This test only checks that thin client can perform basic operations with + * different client and server versions. Whole API not checked, corner cases not checked. + */ +@RunWith(Parameterized.class) +public class JavaThinCompatibilityTest extends AbstractClientCompatibilityTest { + /** Thin client endpoint. */ + private static final String ADDR = "127.0.0.1:10800"; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + return super.getConfiguration(igniteInstanceName).setClientConnectorConfiguration( + new ClientConnectorConfiguration().setThinClientConfiguration( + new ThinClientConfiguration().setMaxActiveComputeTasksPerConnection(1) + ) + ); + } + + /** {@inheritDoc} */ + @Override protected void initNode(Ignite ignite) { + ignite.services().deployNodeSingleton("test_service", new EchoService()); + + super.initNode(ignite); + } + + /** {@inheritDoc} */ + @Override protected void processRemoteConfiguration(IgniteConfiguration cfg) { + super.processRemoteConfiguration(cfg); + + if (ver.compareTo(VER_2_9_0) >= 0) { + cfg.setClientConnectorConfiguration(new ClientConnectorConfiguration() + .setThinClientConfiguration(new ThinClientConfiguration() + .setMaxActiveComputeTasksPerConnection(1))); + } + } + + /** {@inheritDoc} */ + @Override public void testOldClientToCurrentServer() throws Exception { + Assume.assumeTrue("Java thin client exists only from 2.5.0 release", ver.compareTo(VER_2_5_0) >= 0); + + super.testOldClientToCurrentServer(); + } + + /** */ + private void testCacheConfiguration( + boolean checkFieldsPrecessionAndScale, + boolean checkExpiryPlc + ) throws Exception { + X.println(">>>> Testing cache configuration"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR))) { + String cacheName = "testCacheConfiguration"; + + ClientCacheConfiguration ccfg = new ClientCacheConfiguration(); + ccfg.setName(cacheName); + ccfg.setBackups(3); + 
ccfg.setGroupName("cache"); + ccfg.setCacheMode(CacheMode.PARTITIONED); + + QueryEntity qryEntity = new QueryEntity(int.class.getName(), "Entity") + .setTableName("ENTITY") + .setFields(new LinkedHashMap<>( + F.asMap("id", Integer.class.getName(), "rate", Double.class.getName()))); + + if (checkFieldsPrecessionAndScale) { + qryEntity.setFieldsPrecision(F.asMap("rate", 5)); + qryEntity.setFieldsScale(F.asMap("rate", 2)); + } + + ccfg.setQueryEntities(qryEntity); + + if (checkExpiryPlc) + ccfg.setExpiryPolicy(new PlatformExpiryPolicy(10, 20, 30)); + + client.createCache(ccfg); + + ClientCacheConfiguration ccfg1 = client.cache(cacheName).getConfiguration(); + + assertEquals(ccfg.getName(), ccfg1.getName()); + assertEquals(ccfg.getBackups(), ccfg1.getBackups()); + assertEquals(ccfg.getGroupName(), ccfg1.getGroupName()); + assertEquals(ccfg.getCacheMode(), ccfg1.getCacheMode()); + assertEquals(ccfg.getQueryEntities().length, ccfg1.getQueryEntities().length); + assertEquals(ccfg.getQueryEntities()[0].getTableName(), ccfg1.getQueryEntities()[0].getTableName()); + assertEquals(ccfg.getQueryEntities()[0].getFields(), ccfg1.getQueryEntities()[0].getFields()); + + if (checkFieldsPrecessionAndScale) { + assertEquals(ccfg.getQueryEntities()[0].getFieldsPrecision(), + ccfg1.getQueryEntities()[0].getFieldsPrecision()); + assertEquals(ccfg.getQueryEntities()[0].getFieldsScale(), ccfg1.getQueryEntities()[0].getFieldsScale()); + } + + if (checkExpiryPlc) { + assertEquals(ccfg.getExpiryPolicy().getExpiryForCreation(), + ccfg1.getExpiryPolicy().getExpiryForCreation()); + assertEquals(ccfg.getExpiryPolicy().getExpiryForAccess(), ccfg1.getExpiryPolicy().getExpiryForAccess()); + assertEquals(ccfg.getExpiryPolicy().getExpiryForUpdate(), ccfg1.getExpiryPolicy().getExpiryForUpdate()); + } + } + } + + /** */ + private void testCacheApi() throws Exception { + X.println(">>>> Testing cache API"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR))) 
{ + ClientCache cache = client.getOrCreateCache("testCacheApi"); + + cache.put(1, 1); + + assertEquals(1, cache.get(1)); + + Person person = new Person(2, "name"); + + cache.put(2, person); + + assertEquals(person, cache.get(2)); + } + } + + /** */ + private void testAuthentication() throws Exception { + X.println(">>>> Testing authentication"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR) + .setUserName("user").setUserPassword("password"))) { + assertNotNull(client); + } + + } + + /** */ + private void testTransactions() throws Exception { + X.println(">>>> Testing transactions"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR))) { + ClientCache cache = client.getOrCreateCache(new ClientCacheConfiguration() + .setName("testTransactions") + .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL) + ); + + try (ClientTransaction tx = client.transactions().txStart()) { + cache.put(1, 1); + cache.put(2, 2); + + tx.commit(); + } + + assertEquals(1, cache.get(1)); + assertEquals(2, cache.get(2)); + } + } + + /** */ + private void testBinary() throws Exception { + X.println(">>>> Testing binary"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR))) { + IgniteBinary binary = client.binary(); + + BinaryObject val = binary.builder("Person") + .setField("id", 1, int.class) + .setField("name", "Joe", String.class) + .build(); + + ClientCache cache = client.getOrCreateCache("testBinary").withKeepBinary(); + + cache.put(0, val); + + BinaryObject cachedVal = cache.get(0); + + assertEquals(val, cachedVal); + } + } + + /** */ + private void testQueries() throws Exception { + X.println(">>>> Testing queries"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR))) { + ClientCache cache = client.getOrCreateCache("testQueries"); + + cache.put(1, 1); + + List> res = cache.query(new 
ScanQuery<>()).getAll(); + + assertEquals(1, res.size()); + assertEquals(1, res.get(0).getKey()); + assertEquals(1, res.get(0).getValue()); + } + } + + /** */ + private void testExpiryPolicy() throws Exception { + X.println(">>>> Testing expiry policy"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR))) { + ClientCache cache = client.getOrCreateCache("testExpiryPolicy"); + cache = cache.withExpirePolicy(new CreatedExpiryPolicy(new Duration(TimeUnit.MILLISECONDS, 1))); + + cache.put(1, 1); + + doSleep(10); + + assertFalse(cache.containsKey(1)); + } + } + + /** */ + private void testUserAttributes() throws Exception { + X.println(">>>> Testing user attributes"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR) + .setUserAttributes(F.asMap("attr", "val")))) { + assertNotNull(client); + } + } + + /** */ + private void testClusterAPI() throws Exception { + X.println(">>>> Testing cluster API"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR))) { + assertTrue(client.cluster().state().active()); + } + } + + /** */ + private void testClusterGroups() throws Exception { + X.println(">>>> Testing cluster groups"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR))) { + assertEquals(1, client.cluster().forServers().nodes().size()); + } + } + + /** */ + private void testCompute() throws Exception { + X.println(">>>> Testing compute"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR))) { + assertEquals((Integer)1, client.compute().execute(EchoTask.class.getName(), 1)); + } + } + + /** */ + private void testServices() throws Exception { + X.println(">>>> Testing services"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR))) { + assertEquals(1, client.services().serviceProxy("test_service", 
EchoServiceInterface.class) + .echo(1)); + } + } + + /** {@inheritDoc} */ + @Override protected void testClient(IgniteProductVersion clientVer, IgniteProductVersion serverVer) throws Exception { + IgniteProductVersion minVer = clientVer.compareTo(serverVer) < 0 ? clientVer : serverVer; + + testCacheConfiguration( + minVer.compareTo(VER_2_7_0) >= 0, + minVer.compareTo(VER_2_8_0) >= 0 + ); + + testCacheApi(); + + testBinary(); + + testQueries(); + + if (minVer.compareTo(VER_2_5_0) >= 0) + testAuthentication(); + + if (minVer.compareTo(VER_2_8_0) >= 0) { + testTransactions(); + testExpiryPolicy(); + } + + if (clientVer.compareTo(VER_2_9_0) >= 0 && serverVer.compareTo(VER_2_8_0) >= 0) + testClusterAPI(); + + if (minVer.compareTo(VER_2_9_0) >= 0) { + testUserAttributes(); + testClusterGroups(); + testCompute(); + testServices(); + } + } + + /** */ + public static interface EchoServiceInterface { + /** */ + public int echo(int val); + } + + /** */ + public static class EchoService implements Service, EchoServiceInterface { + /** {@inheritDoc} */ + @Override public void cancel(ServiceContext ctx) { + // No-op. + } + + /** {@inheritDoc} */ + @Override public void init(ServiceContext ctx) { + // No-op. + } + + /** {@inheritDoc} */ + @Override public void execute(ServiceContext ctx) { + // No-op. + } + + /** {@inheritDoc} */ + @Override public int echo(int val) { + return val; + } + } + + /** */ + public static class EchoJob implements ComputeJob { + /** Value. */ + private final Integer val; + + /** + * @param val Value. + */ + public EchoJob(Integer val) { + this.val = val; + } + + /** {@inheritDoc} */ + @Override public void cancel() { + // No-op. 
+ } + + /** {@inheritDoc} */ + @Override public Object execute() throws IgniteException { + return val; + } + } + + /** */ + public static class EchoTask extends ComputeTaskAdapter { + /** {@inheritDoc} */ + @Override public @NotNull Map map(List subgrid, + @Nullable Integer arg) throws IgniteException { + return F.asMap(new EchoJob(arg), subgrid.get(0)); + } + + /** {@inheritDoc} */ + @Nullable @Override public Integer reduce(List results) throws IgniteException { + return results.get(0).getData(); + } + } +} diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/JdbcThinCompatibilityTest.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/JdbcThinCompatibilityTest.java new file mode 100644 index 00000000000000..857df9f8a91e73 --- /dev/null +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/JdbcThinCompatibilityTest.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.compatibility.clients; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.Statement; +import org.apache.ignite.Ignite; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.lang.IgniteProductVersion; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +/** + * Tests JDBC thin compatibility. + */ +@RunWith(Parameterized.class) +public class JdbcThinCompatibilityTest extends AbstractClientCompatibilityTest { + /** Table name. */ + private static final String TABLE_NAME = "test_table"; + + /** URL. */ + private static final String URL = "jdbc:ignite:thin://127.0.0.1"; + + /** Rows count. */ + private static final int ROWS_CNT = 10; + + /** Execute sql. */ + private static void executeSql(IgniteEx igniteEx, String sql) { + igniteEx.context().query().querySqlFields(new SqlFieldsQuery(sql), false).getAll(); + } + + /** {@inheritDoc} */ + @Override protected void initNode(Ignite ignite) { + IgniteEx igniteEx = (IgniteEx)ignite; + + executeSql(igniteEx, "CREATE TABLE " + TABLE_NAME + " (id int primary key, name varchar)"); + + for (int i = 0; i < ROWS_CNT; i++) + executeSql(igniteEx, "INSERT INTO " + TABLE_NAME + " (id, name) VALUES(" + i + ", 'name" + i + "')"); + } + + /** {@inheritDoc} */ + @Override protected void testClient(IgniteProductVersion clientVer, IgniteProductVersion serverVer) throws Exception { + try (Connection conn = DriverManager.getConnection(URL); Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("SELECT id, name FROM " + TABLE_NAME + " ORDER BY id"); + + assertNotNull(rs); + + int cnt = 0; + + while (rs.next()) { + int id = rs.getInt("id"); + String name = rs.getString("name"); + + assertEquals(cnt, id); + assertEquals("name" + cnt, name); + + cnt++; + } + + assertEquals(ROWS_CNT, cnt); + } + } +} diff --git 
a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/jdbc/package-info.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/package-info.java similarity index 87% rename from modules/compatibility/src/test/java/org/apache/ignite/compatibility/jdbc/package-info.java rename to modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/package-info.java index 50d961b28f5fd8..08c36dcb06cc8d 100644 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/jdbc/package-info.java +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/package-info.java @@ -16,7 +16,7 @@ */ /** - * Contains compatibility tests related to JDBC. + * Contains compatibility tests related to different clients. */ -package org.apache.ignite.compatibility.jdbc; +package org.apache.ignite.compatibility.clients; diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/jdbc/JdbcThinCompatibilityTest.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/jdbc/JdbcThinCompatibilityTest.java deleted file mode 100644 index 50254e6e6ea21e..00000000000000 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/jdbc/JdbcThinCompatibilityTest.java +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ignite.compatibility.jdbc; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.ResultSet; -import java.sql.Statement; -import java.util.Arrays; -import java.util.Collection; -import org.apache.ignite.Ignite; -import org.apache.ignite.cache.query.SqlFieldsQuery; -import org.apache.ignite.compatibility.testframework.junits.Dependency; -import org.apache.ignite.compatibility.testframework.junits.IgniteCompatibilityAbstractTest; -import org.apache.ignite.internal.IgniteEx; -import org.apache.ignite.internal.IgniteVersionUtils; -import org.apache.ignite.internal.util.GridJavaProcess; -import org.apache.ignite.internal.util.typedef.X; -import org.apache.ignite.internal.util.typedef.internal.U; -import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; -import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; -import org.apache.ignite.testframework.GridTestUtils; -import org.apache.ignite.testframework.junits.multijvm.IgniteProcessProxy; -import org.jetbrains.annotations.NotNull; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -/** - * Tests that current client version can connect to the server with specified version and - * specified client version can connect to the current server version. - */ -@RunWith(Parameterized.class) -public class JdbcThinCompatibilityTest extends IgniteCompatibilityAbstractTest { - /** Table name. */ - private static final String TABLE_NAME = "test_table"; - - /** URL. 
*/ - private static final String URL = "jdbc:ignite:thin://127.0.0.1"; - - /** Rows count. */ - private static final int ROWS_CNT = 10; - - /** Parameters. */ - @Parameterized.Parameters(name = "Version {0}") - public static Iterable versions() { - return Arrays.asList( - new String[] {"2.7.0"}, - new String[] {"2.7.5"}, - new String[] {"2.7.6"}, - new String[] {"2.8.0"}, - new String[] {"2.8.1"} - ); - } - - /** Old Ignite version. */ - @Parameterized.Parameter - public String ver; - - /** {@inheritDoc} */ - @Override protected @NotNull Collection getDependencies(String igniteVer) { - Collection dependencies = super.getDependencies(igniteVer); - - dependencies.add(new Dependency("indexing", "ignite-indexing", false)); - - return dependencies; - } - - /** - * @throws Exception If failed. - */ - @Test - public void testOldClientToCurrentServer() throws Exception { - try (Ignite ignite = startGrid(0)) { - initTable(ignite); - - GridJavaProcess proc = GridJavaProcess.exec( - JdbcThinQueryRunner.class.getName(), - null, - log, - log::info, - null, - null, - getProcessProxyJvmArgs(ver), - null - ); - - try { - GridTestUtils.waitForCondition(() -> !proc.getProcess().isAlive(), 5_000L); - - assertEquals(0, proc.getProcess().exitValue()); - } - finally { - if (proc.getProcess().isAlive()) - proc.kill(); - } - } - } - - /** - * @throws Exception If failed. - */ - @Test - public void testCurrentClientToOldServer() throws Exception { - IgniteProcessProxy proxy = null; - - try { - Ignite ignite = startGrid(1, ver, - cfg -> cfg - .setLocalHost("127.0.0.1") - .setDiscoverySpi(new TcpDiscoverySpi().setIpFinder(new TcpDiscoveryVmIpFinder(true))), - JdbcThinCompatibilityTest::initTable); - - proxy = IgniteProcessProxy.ignite(ignite.name()); - - testJdbcQuery(); - } - finally { - stopAllGrids(); - - if (proxy != null) { - Process proc = proxy.getProcess().getProcess(); - - // We should wait until process exits, or it can affect next tests. 
- GridTestUtils.waitForCondition(() -> !proc.isAlive(), 5_000L); - } - } - } - - /** Execute sql. */ - private static void executeSql(IgniteEx igniteEx, String sql) { - igniteEx.context().query().querySqlFields(new SqlFieldsQuery(sql), false).getAll(); - } - - /** */ - private static void initTable(Ignite ignite) { - IgniteEx igniteEx = (IgniteEx)ignite; - - executeSql(igniteEx, "CREATE TABLE " + TABLE_NAME + " (id int primary key, name varchar)"); - - for (int i = 0; i < ROWS_CNT; i++) - executeSql(igniteEx, "INSERT INTO " + TABLE_NAME + " (id, name) VALUES(" + i + ", 'name" + i + "')"); - } - - /** */ - private static void testJdbcQuery() throws Exception { - try (Connection conn = DriverManager.getConnection(URL); Statement stmt = conn.createStatement()) { - ResultSet rs = stmt.executeQuery("SELECT id, name FROM " + TABLE_NAME + " ORDER BY id"); - - assertNotNull(rs); - - int cnt = 0; - - while (rs.next()) { - int id = rs.getInt("id"); - String name = rs.getString("name"); - - assertEquals(cnt, id); - assertEquals("name" + cnt, name); - - cnt++; - } - - assertEquals(ROWS_CNT, cnt); - } - } - - /** - * Runner class to test query from remote JVM process with old Ignite version as dependencies in class path. 
- */ - public static class JdbcThinQueryRunner { - /** */ - public static void main(String[] args) throws Exception { - X.println(GridJavaProcess.PID_MSG_PREFIX + U.jvmPid()); - X.println("Start JDBC connection with Ignite version: " + IgniteVersionUtils.VER); - - testJdbcQuery(); - - X.println("Success"); - } - } -} diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/junits/IgniteCompatibilityAbstractTest.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/junits/IgniteCompatibilityAbstractTest.java index e3f693af1c5616..71340d6793fda0 100644 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/junits/IgniteCompatibilityAbstractTest.java +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/junits/IgniteCompatibilityAbstractTest.java @@ -45,6 +45,7 @@ /** * Super class for all compatibility tests. */ +@SuppressWarnings("TransientFieldInNonSerializableClass") public abstract class IgniteCompatibilityAbstractTest extends GridCommonAbstractTest { /** */ private static final ClassLoader CLASS_LOADER = IgniteCompatibilityAbstractTest.class.getClassLoader(); @@ -56,10 +57,10 @@ public abstract class IgniteCompatibilityAbstractTest extends GridCommonAbstract protected static final int NODE_JOIN_TIMEOUT = 30_000; /** Local JVM Ignite node. */ - protected Ignite locJvmInstance = null; + protected transient Ignite locJvmInstance = null; /** Remote JVM Ignite instance. 
*/ - protected Ignite rmJvmInstance = null; + protected transient Ignite rmJvmInstance = null; /** {@inheritDoc} */ @Override protected boolean isMultiJvm() { diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testsuites/IgniteCompatibilityBasicTestSuite.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testsuites/IgniteCompatibilityBasicTestSuite.java index 4da7401aba74b8..45821689cb9980 100644 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testsuites/IgniteCompatibilityBasicTestSuite.java +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testsuites/IgniteCompatibilityBasicTestSuite.java @@ -18,7 +18,8 @@ package org.apache.ignite.compatibility.testsuites; import org.apache.ignite.compatibility.cache.LocalCacheTest; -import org.apache.ignite.compatibility.jdbc.JdbcThinCompatibilityTest; +import org.apache.ignite.compatibility.clients.JavaThinCompatibilityTest; +import org.apache.ignite.compatibility.clients.JdbcThinCompatibilityTest; import org.apache.ignite.compatibility.persistence.FoldersReuseCompatibilityTest; import org.apache.ignite.compatibility.persistence.MetaStorageCompatibilityTest; import org.apache.ignite.compatibility.persistence.MigratingToWalV2SerializerWithCompactionTest; @@ -38,7 +39,8 @@ MetaStorageCompatibilityTest.class, LocalCacheTest.class, MoveBinaryMetadataCompatibility.class, - JdbcThinCompatibilityTest.class + JdbcThinCompatibilityTest.class, + JavaThinCompatibilityTest.class }) public class IgniteCompatibilityBasicTestSuite { } diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/AbstractCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/AbstractCommand.java new file mode 100644 index 00000000000000..504961ed8e78d8 --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/AbstractCommand.java @@ -0,0 +1,35 @@ +/* + * 
Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline; + +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClientConfiguration; + +/** + * Abstract class for control.sh commands, that support verbose mode. + */ +public abstract class AbstractCommand implements Command { + /** Use verbose mode or not. */ + protected boolean verbose; + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger log, boolean verbose) throws Exception { + this.verbose = verbose; + return execute(clientCfg, log); + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ActivateCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ActivateCommand.java index d1fb112ebf634e..d2d12ca88fcef0 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ActivateCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ActivateCommand.java @@ -32,7 +32,7 @@ * @deprecated Use {@link ClusterStateChangeCommand} instead. 
*/ @Deprecated -public class ActivateCommand implements Command { +public class ActivateCommand extends AbstractCommand { /** {@inheritDoc} */ @Override public void printUsage(Logger logger) { Command.usage(logger, "Activate cluster (deprecated. Use " + SET_STATE.toString() + " instead):", ACTIVATE); diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/BaselineCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/BaselineCommand.java index b377b711382d88..969542bd2879a5 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/BaselineCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/BaselineCommand.java @@ -18,12 +18,17 @@ package org.apache.ignite.internal.commandline; import java.util.ArrayList; +import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.UUID; +import java.util.function.Function; import java.util.logging.Logger; +import java.util.stream.Collectors; +import java.util.stream.Stream; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; import org.apache.ignite.internal.client.GridClientNode; @@ -37,8 +42,10 @@ import org.apache.ignite.internal.visor.baseline.VisorBaselineTask; import org.apache.ignite.internal.visor.baseline.VisorBaselineTaskArg; import org.apache.ignite.internal.visor.baseline.VisorBaselineTaskResult; +import org.apache.ignite.internal.visor.util.VisorTaskUtils; import static java.lang.Boolean.TRUE; +import static java.util.Collections.singletonMap; import static org.apache.ignite.internal.commandline.CommandHandler.DELIM; import static org.apache.ignite.internal.commandline.CommandList.BASELINE; import static org.apache.ignite.internal.commandline.CommandLogger.DOUBLE_INDENT; @@ -50,7 +57,7 @@ /** * 
Commands associated with baseline functionality. */ -public class BaselineCommand implements Command { +public class BaselineCommand extends AbstractCommand { /** Arguments. */ private BaselineArguments baselineArgs; @@ -58,7 +65,8 @@ public class BaselineCommand implements Command { @Override public void printUsage(Logger logger) { final String constistIds = "consistentId1[,consistentId2,....,consistentIdN]"; - Command.usage(logger, "Print cluster baseline topology:", BASELINE); + Command.usage(logger, "Print cluster baseline topology:", BASELINE, + singletonMap("verbose", "Show the full list of node ips."), optional("--verbose")); Command.usage(logger, "Add nodes into baseline topology:", BASELINE, BaselineSubcommands.ADD.text(), constistIds, optional(CMD_AUTO_CONFIRMATION)); Command.usage(logger, "Remove nodes from baseline topology:", BASELINE, BaselineSubcommands.REMOVE.text(), @@ -166,13 +174,38 @@ else if (res.getRemainingTimeToBaselineAdjust() < 0) Map srvs = res.getServers(); // if task runs on a node with VisorBaselineNode of old version (V1) we'll get order=null for all nodes. 
+ Function extractFormattedAddrs = node -> { + Stream sortedByIpHosts = + Optional.ofNullable(node) + .map(addrs -> node.getAddrs()) + .orElse(Collections.emptyList()) + .stream() + .sorted(Comparator + .comparing(resolvedAddr -> new VisorTaskUtils.SortableAddress(resolvedAddr.address()))) + .map(resolvedAddr -> { + if (!resolvedAddr.hostname().equals(resolvedAddr.address())) + return resolvedAddr.hostname() + "/" + resolvedAddr.address(); + else + return resolvedAddr.address(); + }); + if (verbose) { + String hosts = String.join(",", sortedByIpHosts.collect(Collectors.toList())); + + if (!hosts.isEmpty()) + return ", Addresses=" + hosts; + else + return ""; + } else + return sortedByIpHosts.findFirst().map(ip -> ", Address=" + ip).orElse(""); + }; String crdStr = srvs.values().stream() // check for not null .filter(node -> node.getOrder() != null) .min(Comparator.comparing(VisorBaselineNode::getOrder)) // format - .map(crd -> " (Coordinator: ConsistentId=" + crd.getConsistentId() + ", Order=" + crd.getOrder() + ")") + .map(crd -> " (Coordinator: ConsistentId=" + crd.getConsistentId() + extractFormattedAddrs.apply(crd) + + ", Order=" + crd.getOrder() + ")") .orElse(""); logger.info("Current topology version: " + res.getTopologyVersion() + crdStr); @@ -190,7 +223,8 @@ else if (res.getRemainingTimeToBaselineAdjust() < 0) String order = srvNode != null ? 
", Order=" + srvNode.getOrder() : ""; - logger.info(DOUBLE_INDENT + "ConsistentId=" + node.getConsistentId() + state + order); + logger.info(DOUBLE_INDENT + "ConsistentId=" + node.getConsistentId() + + extractFormattedAddrs.apply(srvNode) + state + order); } logger.info(DELIM); diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ClusterChangeTagCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ClusterChangeTagCommand.java index 58e5f05a6d3dd8..f5b8c10218dac8 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ClusterChangeTagCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ClusterChangeTagCommand.java @@ -35,7 +35,7 @@ /** * Command to access cluster ID and tag functionality. */ -public class ClusterChangeTagCommand implements Command { +public class ClusterChangeTagCommand extends AbstractCommand { /** */ private static final String ERR_NO_NEW_TAG_PROVIDED = "Please provide new tag."; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ClusterStateChangeCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ClusterStateChangeCommand.java index 33580881f1e5ec..0e92cca2cb90f9 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ClusterStateChangeCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ClusterStateChangeCommand.java @@ -35,7 +35,7 @@ /** * Command to change cluster state. */ -public class ClusterStateChangeCommand implements Command { +public class ClusterStateChangeCommand extends AbstractCommand { /** Flag of forced cluster deactivation. 
*/ static final String FORCE_COMMAND = "--force"; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/Command.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/Command.java index 12f85a0e072474..fe667b0033f56d 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/Command.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/Command.java @@ -186,6 +186,22 @@ public static String extendToLen(String s, int targetLen) { */ public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception; + /** + * Actual command execution with verbose mode if needed. + * Implement it if your command supports verbose mode. + * + * @see Command#execute(GridClientConfiguration, Logger) + * + * @param clientCfg Thin client configuration if connection to cluster is necessary. + * @param logger Logger to use. + * @param verbose Use verbose mode or not + * @return Result of operation (mostly usable for tests). + * @throws Exception If error occur. + */ + default Object execute(GridClientConfiguration clientCfg, Logger logger, boolean verbose) throws Exception { + return execute(clientCfg, logger); + } + /** * Prepares confirmation for the command. 
* diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java index fbfc14a9483245..14c8799ecda704 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java @@ -46,6 +46,7 @@ import org.apache.ignite.internal.client.ssl.GridSslBasicContextFactory; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.internal.util.typedef.internal.SB; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.logger.java.JavaLoggerFileHandler; import org.apache.ignite.logger.java.JavaLoggerFormatter; @@ -266,10 +267,10 @@ public int execute(List rawArgs) { } logger.info("Command [" + commandName + "] started"); - logger.info("Arguments: " + String.join(" ", rawArgs)); + logger.info("Arguments: " + argumentsToString(rawArgs)); logger.info(DELIM); - lastOperationRes = command.execute(clientCfg, logger); + lastOperationRes = command.execute(clientCfg, logger, args.verbose()); break; } @@ -454,6 +455,36 @@ private boolean isConnectionClosedSilentlyException(Throwable e) { return false; } + /** + * Joins user's arguments and hides sensitive information. + * + * @param rawArgs Arguments which user has provided. + * @return String which could be shown in console and printed to log. + */ + private String argumentsToString(List rawArgs) { + boolean hide = false; + + SB sb = new SB(); + + for (int i = 0; i < rawArgs.size(); i++) { + if (hide) { + sb.a("***** "); + + hide = false; + + continue; + } + + String arg = rawArgs.get(i); + + sb.a(arg).a(' '); + + hide = CommonArgParser.isSensitiveArgument(arg); + } + + return sb.toString(); + } + + /** + * Does one of three things: *

    diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandList.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandList.java index 2daaf86b9e8a1c..f00a4c0606067b 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandList.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandList.java @@ -19,7 +19,7 @@ import org.apache.ignite.internal.commandline.cache.CacheCommands; import org.apache.ignite.internal.commandline.diagnostic.DiagnosticCommand; -import org.apache.ignite.internal.commandline.encryption.EncryptionCommand; +import org.apache.ignite.internal.commandline.encryption.EncryptionCommands; import org.apache.ignite.internal.commandline.meta.MetadataCommand; import org.apache.ignite.internal.commandline.metric.MetricCommand; import org.apache.ignite.internal.commandline.property.PropertyCommand; @@ -59,7 +59,7 @@ public enum CommandList { DIAGNOSTIC("--diagnostic", new DiagnosticCommand()), /** Encryption features command. */ - ENCRYPTION("--encryption", new EncryptionCommand()), + ENCRYPTION("--encryption", new EncryptionCommands()), /** Kill command. */ KILL("--kill", new KillCommand()), @@ -89,7 +89,10 @@ public enum CommandList { SYSTEM_VIEW("--system-view", new SystemViewCommand()), /** Command for printing metric values. */ - METRIC("--metric", new MetricCommand()); + METRIC("--metric", new MetricCommand()), + + /** */ + PERSISTENCE("--persistence", new PersistenceCommand()); /** Private values copy so there's no need in cloning it every time. 
*/ private static final CommandList[] VALUES = CommandList.values(); diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommonArgParser.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommonArgParser.java index 52e95f44af5e1c..e27179d7191f97 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommonArgParser.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommonArgParser.java @@ -103,6 +103,9 @@ public class CommonArgParser { /** List of optional auxiliary commands. */ private static final Set AUX_COMMANDS = new HashSet<>(); + /** Set of sensitive arguments. */ + private static final Set SENSITIVE_ARGUMENTS = new HashSet<>(); + static { AUX_COMMANDS.add(CMD_HOST); AUX_COMMANDS.add(CMD_PORT); @@ -127,6 +130,18 @@ public class CommonArgParser { AUX_COMMANDS.add(CMD_TRUSTSTORE); AUX_COMMANDS.add(CMD_TRUSTSTORE_PASSWORD); AUX_COMMANDS.add(CMD_TRUSTSTORE_TYPE); + + SENSITIVE_ARGUMENTS.add(CMD_PASSWORD); + SENSITIVE_ARGUMENTS.add(CMD_KEYSTORE_PASSWORD); + SENSITIVE_ARGUMENTS.add(CMD_TRUSTSTORE_PASSWORD); + } + + /** + * @param arg To check. + * @return True if the provided argument is among the sensitive ones and should not be displayed. + */ + public static boolean isSensitiveArgument(String arg) { + return SENSITIVE_ARGUMENTS.contains(arg); } /** diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/DeactivateCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/DeactivateCommand.java index 10d0fe56235d31..750001662ce60c 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/DeactivateCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/DeactivateCommand.java @@ -33,7 +33,7 @@ * @deprecated Use {@link ClusterStateChangeCommand} instead. 
*/ @Deprecated -public class DeactivateCommand implements Command { +public class DeactivateCommand extends AbstractCommand { /** Cluster name. */ private String clusterName; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/PersistenceCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/PersistenceCommand.java new file mode 100644 index 00000000000000..d41269a2a0fc78 --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/PersistenceCommand.java @@ -0,0 +1,290 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.commandline; + +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.UUID; +import java.util.logging.Logger; + +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.client.GridClientNode; +import org.apache.ignite.internal.commandline.argument.CommandArgUtils; +import org.apache.ignite.internal.commandline.persistence.CleanAndBackupSubcommandArg; +import org.apache.ignite.internal.commandline.persistence.PersistenceArguments; +import org.apache.ignite.internal.commandline.persistence.PersistenceSubcommands; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.visor.persistence.PersistenceCleanAndBackupSettings; +import org.apache.ignite.internal.visor.persistence.PersistenceCleanAndBackupType; +import org.apache.ignite.internal.visor.persistence.PersistenceTask; +import org.apache.ignite.internal.visor.persistence.PersistenceTaskArg; +import org.apache.ignite.internal.visor.persistence.PersistenceTaskResult; +import org.apache.ignite.lang.IgniteBiTuple; + +import static org.apache.ignite.internal.commandline.Command.usage; +import static org.apache.ignite.internal.commandline.CommandList.PERSISTENCE; +import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; +import static org.apache.ignite.internal.commandline.persistence.CleanAndBackupSubcommandArg.ALL; +import static org.apache.ignite.internal.commandline.persistence.CleanAndBackupSubcommandArg.CACHES; +import static org.apache.ignite.internal.commandline.persistence.CleanAndBackupSubcommandArg.CORRUPTED; +import static org.apache.ignite.internal.commandline.persistence.PersistenceSubcommands.BACKUP; +import static 
org.apache.ignite.internal.commandline.persistence.PersistenceSubcommands.CLEAN; +import static org.apache.ignite.internal.commandline.persistence.PersistenceSubcommands.INFO; +import static org.apache.ignite.internal.commandline.persistence.PersistenceSubcommands.of; + +/** */ +public class PersistenceCommand implements Command { + /** */ + private PersistenceArguments cleaningArgs; + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + Optional firstNodeOpt = client.compute().nodes().stream().findFirst(); + + if (firstNodeOpt.isPresent()) { + UUID uuid = firstNodeOpt.get().nodeId(); + + PersistenceTaskResult res = executeTaskByNameOnNode(client, + PersistenceTask.class.getName(), + convertArguments(cleaningArgs), + uuid, + clientCfg + ); + + printResult(res, logger); + } + else + logger.warning("No nodes found in topology, command won't be executed."); + } + catch (Throwable t) { + logger.severe("Failed to execute persistence command='" + cleaningArgs.subcommand().text() + "'"); + logger.severe(CommandLogger.errorMessage(t)); + + throw t; + } + + return null; + } + + /** + * Prints result of command execution: information about caches or result of clean/backup command. + * + * @param res {@link PersistenceTaskResult} object with results of command execution. + * @param logger {@link Logger} to print output to. 
+ */ + private void printResult(PersistenceTaskResult res, Logger logger) { + if (!res.inMaintenanceMode()) { + logger.warning("Persistence command can be sent only to node in Maintenance Mode."); + + return; + } + //info command + else if (res.cachesInfo() != null) { + logger.info("Persistent caches found on node:"); + + //sort results so corrupted caches occur in the list at the top + res.cachesInfo().entrySet().stream().sorted((ci0, ci1) -> { + IgniteBiTuple t0 = ci0.getValue(); + IgniteBiTuple t1 = ci1.getValue(); + + boolean corrupted0 = t0.get1() || t0.get2(); + boolean corrupted1 = t1.get1() || t1.get2(); + + if (corrupted0 && corrupted1) + return 0; + else if (!corrupted0 && !corrupted1) + return 0; + else if (corrupted0 && !corrupted1) + return -1; + else + return 1; + }).forEach( + e -> { + IgniteBiTuple t = e.getValue(); + + String status; + + if (!t.get1()) + status = "corrupted - WAL disabled globally."; + else if (!t.get2()) + status = "corrupted - WAL disabled locally."; + else + status = "no corruption."; + + logger.info(INDENT + "cache name: " + e.getKey() + ". Status: " + status); + } + ); + } + //clean command + else if (cleaningArgs != null && cleaningArgs.subcommand() == CLEAN) { + logger.info("Maintenance task is " + (!res.maintenanceTaskCompleted() ? 
"not " : "") + "fixed."); + + List cleanedCaches = res.handledCaches(); + + if (cleanedCaches != null && !cleanedCaches.isEmpty()) { + String cacheDirNames = String.join(", ", cleanedCaches); + + logger.info("Cache directories were cleaned: [" + cacheDirNames + ']'); + } + + List failedToHandleCaches = res.failedCaches(); + + if (failedToHandleCaches != null && !failedToHandleCaches.isEmpty()) { + String failedToHandleCachesStr = String.join(", ", failedToHandleCaches); + + logger.info("Failed to clean following directories: [" + failedToHandleCachesStr + ']'); + } + } + // backup command + else { + List backupCompletedCaches = res.handledCaches(); + + if (backupCompletedCaches != null && !backupCompletedCaches.isEmpty()) { + String cacheDirNames = String.join(", ", backupCompletedCaches); + + logger.info("Cache data files was backed up to the following directories in node's work directory: [" + + cacheDirNames + ']'); + } + + List backupFailedCaches = res.failedCaches(); + + if (backupFailedCaches != null && !backupFailedCaches.isEmpty()) { + String backupFailedCachesStr = String.join(", ", backupFailedCaches); + + logger.info("Failed to backup the following directories in node's work directory: [" + + backupFailedCachesStr + ']'); + } + } + } + + /** {@inheritDoc} */ + @Override public PersistenceArguments arg() { + return cleaningArgs; + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger logger) { + final String cacheNames = "cache1,cache2,cache3"; + + usage(logger, "Print information about potentially corrupted caches on local node:", + PERSISTENCE); + usage(logger, "The same information is printed when info subcommand is passed:", PERSISTENCE, + INFO.text()); + + usage(logger, "Clean directories of caches with corrupted data files:", PERSISTENCE, CLEAN.text(), + CORRUPTED.argName()); + usage(logger, "Clean directories of all caches:", PERSISTENCE, CLEAN.text(), + ALL.argName()); + usage(logger, "Clean directories of only given caches:", 
PERSISTENCE, CLEAN.text(), + CACHES.argName(), cacheNames); + + usage(logger, "Backup data files of corrupted caches only:", PERSISTENCE, BACKUP.text(), + CORRUPTED.argName()); + usage(logger, "Backup data files of all caches:", PERSISTENCE, BACKUP.text(), ALL.argName()); + usage(logger, "Backup data files of only given caches:", PERSISTENCE, BACKUP.text(), + CACHES.argName(), cacheNames); + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + if (!argIter.hasNextSubArg()) { + cleaningArgs = new PersistenceArguments.Builder(INFO).build(); + + return; + } + + PersistenceSubcommands cmd = of(argIter.nextArg("Expected persistence maintenance action")); + + if (cmd == null) + throw new IllegalArgumentException("Expected correct persistence maintenance action"); + + PersistenceArguments.Builder bldr = new PersistenceArguments.Builder(cmd); + + switch (cmd) { + case BACKUP: + case CLEAN: + CleanAndBackupSubcommandArg cleanAndBackupSubcommandArg = CommandArgUtils.of( + argIter.nextArg("Expected one of subcommand arguments"), CleanAndBackupSubcommandArg.class + ); + + if (cleanAndBackupSubcommandArg == null) + throw new IllegalArgumentException("Expected one of subcommand arguments"); + + bldr.withCleanAndBackupSubcommandArg(cleanAndBackupSubcommandArg); + + if (cleanAndBackupSubcommandArg == ALL || cleanAndBackupSubcommandArg == CORRUPTED) + break; + + if (cleanAndBackupSubcommandArg == CACHES) { + Set caches = argIter.nextStringSet("list of cache names"); + + if (F.isEmpty(caches)) + throw new IllegalArgumentException("Empty list of cache names"); + + bldr.withCacheNames(new ArrayList<>(caches)); + } + + break; + } + + cleaningArgs = bldr.build(); + } + + /** {@inheritDoc} */ + @Override public String name() { + return PERSISTENCE.toCommandName(); + } + + /** */ + private PersistenceTaskArg convertArguments(PersistenceArguments args) { + PersistenceCleanAndBackupSettings cleanSettings = convertCleanAndBackupSettings(args); + + 
PersistenceTaskArg taskArgs = new PersistenceTaskArg(args.subcommand().operation(), cleanSettings); + + return taskArgs; + } + + /** */ + private PersistenceCleanAndBackupSettings convertCleanAndBackupSettings(PersistenceArguments args) { + if (args.subcommand() == INFO) + return null; + + PersistenceCleanAndBackupType type; + + switch (args.cleanArg()) { + case ALL: + type = PersistenceCleanAndBackupType.ALL; + + break; + case CORRUPTED: + type = PersistenceCleanAndBackupType.CORRUPTED; + + break; + + default: + type = PersistenceCleanAndBackupType.CACHES; + } + + return new PersistenceCleanAndBackupSettings(type, args.cachesList()); + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ShutdownPolicyCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ShutdownPolicyCommand.java index 3830bdb713ba79..4ef6833a00375d 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ShutdownPolicyCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ShutdownPolicyCommand.java @@ -34,7 +34,7 @@ /** * Command for change or display policy for shutdown. */ -public class ShutdownPolicyCommand implements Command { +public class ShutdownPolicyCommand extends AbstractCommand { /** Arguments. */ private ShutdownPolicyArgument shutdownPolicyArgument; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/StateCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/StateCommand.java index 7084f2fe4af334..f19637ba2a2c50 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/StateCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/StateCommand.java @@ -29,7 +29,7 @@ /** * Command to print cluster state. 
*/ -public class StateCommand implements Command { +public class StateCommand extends AbstractCommand { /** {@inheritDoc} */ @Override public void printUsage(Logger logger) { Command.usage(logger, "Print current cluster state:", STATE); diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/TracingConfigurationCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/TracingConfigurationCommand.java index d8a9673ea57eba..a9d16543f8d04f 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/TracingConfigurationCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/TracingConfigurationCommand.java @@ -50,7 +50,7 @@ /** * Commands associated with tracing configuration functionality. */ -public class TracingConfigurationCommand implements Command { +public class TracingConfigurationCommand extends AbstractCommand { /** Arguments. */ private TracingConfigurationArguments args; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/TxCommands.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/TxCommands.java index fbe89f22ee1fde..e266f592f28c7a 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/TxCommands.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/TxCommands.java @@ -61,7 +61,7 @@ /** * Transaction commands. 
*/ -public class TxCommands implements Command { +public class TxCommands extends AbstractCommand { /** Arguments */ private VisorTxTaskArg args; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/WalCommands.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/WalCommands.java index 2919470db4a1b2..783318cebb3cb8 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/WalCommands.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/WalCommands.java @@ -44,7 +44,7 @@ /** * Wal commands. */ -public class WalCommands implements Command> { +public class WalCommands extends AbstractCommand> { /** */ static final String WAL_PRINT = "print"; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/WarmUpCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/WarmUpCommand.java index 2219c5abf446af..3f03e2c4bdcede 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/WarmUpCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/WarmUpCommand.java @@ -31,7 +31,7 @@ /** * Command for interacting with warm-up. 
*/ -public class WarmUpCommand implements Command { +public class WarmUpCommand extends AbstractCommand { /** {@inheritDoc} */ @Override public void printUsage(Logger logger) { Command.usage(logger, "Stop warm-up:", WARM_UP, WarmUpCommandArg.STOP.argName()); diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheCommands.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheCommands.java index 681c238eb55d77..778280da11ae8b 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheCommands.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheCommands.java @@ -21,6 +21,7 @@ import java.util.Map; import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -42,7 +43,7 @@ /** * High-level "cache" command implementation. */ -public class CacheCommands implements Command { +public class CacheCommands extends AbstractCommand { /** Empty group name. 
*/ public static final String EMPTY_GROUP_NAME = "no_group"; @@ -75,7 +76,7 @@ public class CacheCommands implements Command { if (command == null) throw new IllegalStateException("Unknown command " + subcommand); - return command.execute(clientCfg, logger); + return command.execute(clientCfg, logger, verbose); } /** {@inheritDoc} */ diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheContention.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheContention.java index 6f7062057e090d..35e658498b4169 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheContention.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheContention.java @@ -21,6 +21,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -39,7 +40,7 @@ /** * Cache contention detection subcommand. 
*/ -public class CacheContention implements Command { +public class CacheContention extends AbstractCommand { /** {@inheritDoc} */ @Override public void printUsage(Logger logger) { String description = "Show the keys that are point of contention for multiple transactions."; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheDistribution.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheDistribution.java index 4b5f3b9813fa6f..1def122d1ea34c 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheDistribution.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheDistribution.java @@ -23,6 +23,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandHandler; @@ -46,7 +47,7 @@ /** * Would collect and print info about how data is spread between nodes and partitions. 
*/ -public class CacheDistribution implements Command { +public class CacheDistribution extends AbstractCommand { /** {@inheritDoc} */ @Override public void printUsage(Logger logger) { String CACHES = "cacheName1,...,cacheNameN"; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesForceRebuild.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesForceRebuild.java index ace4baebcd0d00..c1415ce11c616b 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesForceRebuild.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesForceRebuild.java @@ -25,6 +25,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.TaskExecutor; @@ -50,7 +51,7 @@ /** * Cache subcommand that triggers indexes force rebuild. */ -public class CacheIndexesForceRebuild implements Command { +public class CacheIndexesForceRebuild extends AbstractCommand { /** Command parsed arguments. 
*/ private Arguments args; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesList.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesList.java index b31ea5a71e70ab..11d317662441d0 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesList.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesList.java @@ -27,6 +27,7 @@ import java.util.regex.PatternSyntaxException; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.TaskExecutor; @@ -47,7 +48,7 @@ /** * Cache subcommand that allows to show indexes. */ -public class CacheIndexesList implements Command { +public class CacheIndexesList extends AbstractCommand { /** Command parsed arguments. 
*/ private Arguments args; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesRebuildStatus.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesRebuildStatus.java index 0ec662e9f966ff..4bf4115237ac7d 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesRebuildStatus.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesRebuildStatus.java @@ -23,6 +23,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.TaskExecutor; @@ -41,7 +42,7 @@ /** * Cache subcommand that allows to show caches that have */ -public class CacheIndexesRebuildStatus implements Command { +public class CacheIndexesRebuildStatus extends AbstractCommand { /** Command parsed arguments. 
*/ private Arguments args; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheValidateIndexes.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheValidateIndexes.java index d467b82208de33..fb2ffae5d70e90 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheValidateIndexes.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheValidateIndexes.java @@ -26,6 +26,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -62,7 +63,7 @@ /** * Validate indexes command. */ -public class CacheValidateIndexes implements Command { +public class CacheValidateIndexes extends AbstractCommand { /** {@inheritDoc} */ @Override public void printUsage(Logger logger) { String CACHES = "cacheName1,...,cacheNameN"; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheViewer.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheViewer.java index 859a815df9bf0f..95171c9c999446 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheViewer.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheViewer.java @@ -26,6 +26,7 @@ import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; import org.apache.ignite.internal.client.GridClientException; +import org.apache.ignite.internal.commandline.AbstractCommand; import 
org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.OutputFormat; @@ -68,7 +69,7 @@ /** * Command to show caches on cluster. */ -public class CacheViewer implements Command { +public class CacheViewer extends AbstractCommand { /** {@inheritDoc} */ @Override public void printUsage(Logger logger) { String description = "Show information about caches, groups or sequences that match a regular expression. " + diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CheckIndexInlineSizes.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CheckIndexInlineSizes.java index 7821d07a56b582..457f189afb0547 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CheckIndexInlineSizes.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CheckIndexInlineSizes.java @@ -30,6 +30,7 @@ import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; import org.apache.ignite.internal.client.GridClientNode; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.cache.check_indexes_inline_size.CheckIndexInlineSizesResult; import org.apache.ignite.internal.commandline.cache.check_indexes_inline_size.CheckIndexInlineSizesTask; @@ -44,7 +45,7 @@ /** * Command for check secondary indexes inline size on the different nodes. */ -public class CheckIndexInlineSizes implements Command { +public class CheckIndexInlineSizes extends AbstractCommand { /** Success message. 
*/ public static final String INDEXES_INLINE_SIZE_ARE_THE_SAME = "All secondary indexes have the same effective inline size on all cluster nodes."; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/FindAndDeleteGarbage.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/FindAndDeleteGarbage.java index 7fa625b7f019b1..b2c97a3615fe5a 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/FindAndDeleteGarbage.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/FindAndDeleteGarbage.java @@ -24,6 +24,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -44,7 +45,7 @@ /** * Command to find and delete garbage which could left after destroying caches in shared group. 
*/ -public class FindAndDeleteGarbage implements Command { +public class FindAndDeleteGarbage extends AbstractCommand { /** {@inheritDoc} */ @Override public void printUsage(Logger logger) { String GROUPS = "groupName1,...,groupNameN"; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/IdleVerify.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/IdleVerify.java index 7f4ecb05d2c0d4..29b3447cb1a07a 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/IdleVerify.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/IdleVerify.java @@ -32,6 +32,7 @@ import org.apache.ignite.internal.client.GridClientConfiguration; import org.apache.ignite.internal.client.GridClientException; import org.apache.ignite.internal.client.GridClientNode; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.argument.CommandArgUtils; @@ -65,7 +66,7 @@ /** * */ -public class IdleVerify implements Command { +public class IdleVerify extends AbstractCommand { /** {@inheritDoc} */ @Override public void printUsage(Logger logger) { String CACHES = "cacheName1,...,cacheNameN"; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/ResetLostPartitions.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/ResetLostPartitions.java index dc35436196b286..34fb57a81ba99d 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/ResetLostPartitions.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/ResetLostPartitions.java @@ -21,6 +21,7 @@ import java.util.logging.Logger; import 
org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.cache.reset_lost_partitions.CacheResetLostPartitionsTask; @@ -34,7 +35,7 @@ /** * Command for reseting lost partition state. */ -public class ResetLostPartitions implements Command> { +public class ResetLostPartitions extends AbstractCommand> { /** {@inheritDoc} */ @Override public void printUsage(Logger logger) { String CACHES = "cacheName1,...,cacheNameN"; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/diagnostic/DiagnosticCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/diagnostic/DiagnosticCommand.java index b4ca6e4606c3bd..c0e59a32f2358b 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/diagnostic/DiagnosticCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/diagnostic/DiagnosticCommand.java @@ -20,6 +20,7 @@ import java.util.Arrays; import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; @@ -34,7 +35,7 @@ /** * */ -public class DiagnosticCommand implements Command { +public class DiagnosticCommand extends AbstractCommand { /** */ private DiagnosticSubCommand subcommand; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/diagnostic/PageLocksCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/diagnostic/PageLocksCommand.java index 
5d91227762939a..18d3d5db6eddc5 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/diagnostic/PageLocksCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/diagnostic/PageLocksCommand.java @@ -26,6 +26,7 @@ import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -52,7 +53,7 @@ /** * */ -public class PageLocksCommand implements Command { +public class PageLocksCommand extends AbstractCommand { /** */ private Arguments arguments; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupEncryptionCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupEncryptionCommand.java new file mode 100644 index 00000000000000..d4c09b896f96fc --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupEncryptionCommand.java @@ -0,0 +1,264 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.encryption; + +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.logging.Logger; +import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.CommandList; +import org.apache.ignite.internal.commandline.CommandLogger; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.visor.encryption.VisorCacheGroupEncryptionTaskArg; +import org.apache.ignite.internal.visor.encryption.VisorCacheGroupEncryptionTaskResult; +import org.apache.ignite.internal.visor.encryption.VisorEncryptionKeyIdsTask; +import org.apache.ignite.internal.visor.encryption.VisorReencryptionResumeTask; +import org.apache.ignite.internal.visor.encryption.VisorReencryptionStatusTask; +import org.apache.ignite.internal.visor.encryption.VisorReencryptionSuspendTask; + +import static org.apache.ignite.internal.commandline.CommandList.ENCRYPTION; +import static org.apache.ignite.internal.commandline.CommandLogger.DOUBLE_INDENT; +import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; +import static org.apache.ignite.internal.commandline.TaskExecutor.BROADCAST_UUID; +import static 
org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.CACHE_GROUP_KEY_IDS; + +/** + * Base cache group encryption multinode subcommand. + * + * @param Command result type. + */ +public abstract class CacheGroupEncryptionCommand extends AbstractCommand { + /** Cache group reencryption task argument. */ + private VisorCacheGroupEncryptionTaskArg taskArg; + + /** {@inheritDoc} */ + @Override public VisorCacheGroupEncryptionTaskArg arg() { + return taskArg; + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + String grpName = argIter.nextArg("Cache group name is expected."); + + if (argIter.hasNextSubArg()) + throw new IllegalArgumentException("Unexpected command argument: " + argIter.peekNextArg()); + + taskArg = new VisorCacheGroupEncryptionTaskArg(grpName); + } + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + VisorCacheGroupEncryptionTaskResult res = executeTaskByNameOnNode( + client, + visorTaskName(), + taskArg, + BROADCAST_UUID, + clientCfg + ); + + printResults(res, taskArg.groupName(), log); + + return res; + } + catch (Throwable e) { + log.severe("Failed to perform operation."); + log.severe(CommandLogger.errorMessage(e)); + + throw e; + } + } + + /** + * @param res Response. + * @param grpName Cache group name. + * @param log Logger. 
+ */ + protected void printResults(VisorCacheGroupEncryptionTaskResult res, String grpName, Logger log) { + Map exceptions = res.exceptions(); + + for (Map.Entry entry : exceptions.entrySet()) { + log.info(INDENT + "Node " + entry.getKey() + ":"); + + log.info(String.format("%sfailed to execute command for the cache group \"%s\": %s.", + DOUBLE_INDENT, grpName, entry.getValue().getMessage())); + } + + Map results = res.results(); + + for (Map.Entry entry : results.entrySet()) { + log.info(INDENT + "Node " + entry.getKey() + ":"); + + printNodeResult(entry.getValue(), grpName, log); + } + } + + /** + * @param res Response. + * @param grpName Cache group name. + * @param log Logger. + */ + protected abstract void printNodeResult(T res, String grpName, Logger log); + + /** + * @return Visor task name. + */ + protected abstract String visorTaskName(); + + /** Subcommand to Display re-encryption status of the cache group. */ + protected static class ReencryptionStatus extends CacheGroupEncryptionCommand { + /** {@inheritDoc} */ + @Override protected void printNodeResult(Long bytesLeft, String grpName, Logger log) { + if (bytesLeft == -1) + log.info(DOUBLE_INDENT + "re-encryption completed or not required"); + else if (bytesLeft == 0) + log.info(DOUBLE_INDENT + "re-encryption will be completed after the next checkpoint"); + else + log.info(String.format("%s%d KB of data left for re-encryption", DOUBLE_INDENT, bytesLeft / 1024)); + } + + /** {@inheritDoc} */ + @Override protected String visorTaskName() { + return VisorReencryptionStatusTask.class.getName(); + } + + /** {@inheritDoc} */ + @Override public String name() { + return EncryptionSubcommands.REENCRYPTION_STATUS.text().toUpperCase(); + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + Command.usage(log, "Display re-encryption status of the cache group:", CommandList.ENCRYPTION, + EncryptionSubcommands.REENCRYPTION_STATUS.toString(), "cacheGroupName"); + } + } + + /** Subcommand to view 
current encryption key IDs of the cache group. */ + protected static class CacheKeyIds extends CacheGroupEncryptionCommand> { + /** {@inheritDoc} */ + @Override protected void printResults( + VisorCacheGroupEncryptionTaskResult> res, + String grpName, + Logger log + ) { + log.info("Encryption key identifiers for cache: " + grpName); + + super.printResults(res, grpName, log); + } + + /** {@inheritDoc} */ + @Override protected void printNodeResult(List keyIds, String grpName, Logger log) { + if (F.isEmpty(keyIds)) { + log.info(DOUBLE_INDENT + "---"); + + return; + } + + for (int i = 0; i < keyIds.size(); i++) + log.info(DOUBLE_INDENT + keyIds.get(i) + (i == 0 ? " (active)" : "")); + } + + /** {@inheritDoc} */ + @Override protected String visorTaskName() { + return VisorEncryptionKeyIdsTask.class.getName(); + } + + /** {@inheritDoc} */ + @Override public String name() { + return CACHE_GROUP_KEY_IDS.text().toUpperCase(); + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + Command.usage(log, "View encryption key identifiers of the cache group:", ENCRYPTION, + CACHE_GROUP_KEY_IDS.toString(), "cacheGroupName"); + } + } + + /** Subcommand to suspend re-encryption of the cache group. 
*/ + protected static class SuspendReencryption extends CacheGroupEncryptionCommand { + /** {@inheritDoc} */ + @Override protected String visorTaskName() { + return VisorReencryptionSuspendTask.class.getName(); + } + + /** {@inheritDoc} */ + @Override public String name() { + return EncryptionSubcommands.REENCRYPTION_SUSPEND.text().toUpperCase(); + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + Command.usage(log, "Suspend re-encryption of the cache group:", CommandList.ENCRYPTION, + EncryptionSubcommands.REENCRYPTION_SUSPEND.toString(), "cacheGroupName"); + } + + /** {@inheritDoc} */ + @Override protected void printNodeResult(Boolean success, String grpName, Logger log) { + log.info(String.format("%sre-encryption of the cache group \"%s\" has %sbeen suspended.", + DOUBLE_INDENT, grpName, (success ? "" : "already "))); + } + + /** {@inheritDoc} */ + @Override protected void printResults( + VisorCacheGroupEncryptionTaskResult res, + String grpName, + Logger log + ) { + super.printResults(res, grpName, log); + + log.info(""); + log.info("Note: the re-encryption suspend status is not persisted, re-encryption will be started " + + "automatically after the node is restarted."); + log.info(""); + } + } + + /** Subcommand to resume re-encryption of the cache group. 
*/ + protected static class ResumeReencryption extends CacheGroupEncryptionCommand { + /** {@inheritDoc} */ + @Override protected String visorTaskName() { + return VisorReencryptionResumeTask.class.getName(); + } + + /** {@inheritDoc} */ + @Override public String name() { + return EncryptionSubcommands.REENCRYPTION_RESUME.text().toUpperCase(); + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + Command.usage(log, "Resume re-encryption of the cache group:", CommandList.ENCRYPTION, + EncryptionSubcommands.REENCRYPTION_RESUME.toString(), "cacheGroupName"); + } + + /** {@inheritDoc} */ + @Override protected void printNodeResult(Boolean success, String grpName, Logger log) { + log.info(String.format("%sre-encryption of the cache group \"%s\" has %sbeen resumed.", + DOUBLE_INDENT, grpName, (success ? "" : "already "))); + } + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeCacheGroupKeyCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeCacheGroupKeyCommand.java new file mode 100644 index 00000000000000..8518e5d6290195 --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeCacheGroupKeyCommand.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.encryption; + +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.CommandLogger; +import org.apache.ignite.internal.visor.encryption.VisorCacheGroupEncryptionTaskArg; +import org.apache.ignite.internal.visor.encryption.VisorChangeCacheGroupKeyTask; + +import static org.apache.ignite.internal.commandline.CommandList.ENCRYPTION; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.CHANGE_CACHE_GROUP_KEY; + +/** + * Change cache group key encryption subcommand. + */ +public class ChangeCacheGroupKeyCommand extends AbstractCommand { + /** Change cache group key task argument. 
*/ + private VisorCacheGroupEncryptionTaskArg taskArg; + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + executeTaskByNameOnNode( + client, + VisorChangeCacheGroupKeyTask.class.getName(), + taskArg, + null, + clientCfg + ); + + log.info("The encryption key has been changed for the cache group \"" + taskArg.groupName() + "\"."); + + return null; + } + catch (Throwable e) { + log.severe("Failed to perform operation."); + log.severe(CommandLogger.errorMessage(e)); + + throw e; + } + } + + /** {@inheritDoc} */ + @Override public String confirmationPrompt() { + return "Warning: the command will change the encryption key of the cache group. Joining a node during " + + "the key change process is prohibited and will be rejected."; + } + + /** {@inheritDoc} */ + @Override public VisorCacheGroupEncryptionTaskArg arg() { + return taskArg; + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + String argCacheGrpName = argIter.nextArg("Cache group name is expected."); + + taskArg = new VisorCacheGroupEncryptionTaskArg(argCacheGrpName); + + if (argIter.hasNextSubArg()) + throw new IllegalArgumentException("Unexpected command argument: " + argIter.peekNextArg()); + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + Command.usage(log, "Change the encryption key of the cache group:", ENCRYPTION, + CHANGE_CACHE_GROUP_KEY.toString(), "cacheGroupName"); + } + + /** {@inheritDoc} */ + @Override public String name() { + return CHANGE_CACHE_GROUP_KEY.text().toUpperCase(); + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeMasterKeyCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeMasterKeyCommand.java new file mode 100644 index 00000000000000..a48dc4bcbb4d36 ---
/dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeMasterKeyCommand.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.encryption; + +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.CommandLogger; +import org.apache.ignite.internal.visor.encryption.VisorChangeMasterKeyTask; + +import static org.apache.ignite.internal.commandline.CommandList.ENCRYPTION; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.CHANGE_MASTER_KEY; + +/** + * Change master key encryption subcommand. + */ +public class ChangeMasterKeyCommand extends AbstractCommand { + /** New master key name. 
*/ + private String argMasterKeyName; + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + String resMsg = executeTaskByNameOnNode( + client, + VisorChangeMasterKeyTask.class.getName(), + argMasterKeyName, + null, + clientCfg + ); + + log.info(resMsg); + + return resMsg; + } + catch (Throwable e) { + log.severe("Failed to perform operation."); + log.severe(CommandLogger.errorMessage(e)); + + throw e; + } + } + + /** {@inheritDoc} */ + @Override public String confirmationPrompt() { + return "Warning: the command will change the master key. Cache start and node join during the key change " + + "process is prohibited and will be rejected."; + } + + /** {@inheritDoc} */ + @Override public String arg() { + return argMasterKeyName; + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + argMasterKeyName = argIter.nextArg("Expected master key name."); + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + Command.usage(log, "Change the master key:", ENCRYPTION, CHANGE_MASTER_KEY.toString(), "newMasterKeyName"); + } + + /** {@inheritDoc} */ + @Override public String name() { + return CHANGE_MASTER_KEY.text().toUpperCase(); + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommand.java deleted file mode 100644 index eae804979c2c7b..00000000000000 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommand.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ignite.internal.commandline.encryption; - -import java.util.logging.Logger; -import org.apache.ignite.internal.client.GridClient; -import org.apache.ignite.internal.client.GridClientConfiguration; -import org.apache.ignite.internal.commandline.Command; -import org.apache.ignite.internal.commandline.CommandArgIterator; -import org.apache.ignite.internal.commandline.CommandLogger; -import org.apache.ignite.internal.visor.encryption.VisorChangeMasterKeyTask; -import org.apache.ignite.internal.visor.encryption.VisorGetMasterKeyNameTask; - -import static org.apache.ignite.internal.commandline.CommandList.ENCRYPTION; -import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; -import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommand.CHANGE_MASTER_KEY; -import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommand.GET_MASTER_KEY_NAME; -import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommand.of; - -/** - * Commands assosiated with encryption features. - * - * @see EncryptionSubcommand - */ -public class EncryptionCommand implements Command { - /** Subcommand. */ - EncryptionSubcommand cmd; - - /** The task name. 
*/ - String taskName; - - /** The task arguments. */ - Object taskArgs; - - /** {@inheritDoc} */ - @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { - try (GridClient client = Command.startClient(clientCfg)) { - String res = executeTaskByNameOnNode( - client, - taskName, - taskArgs, - null, - clientCfg - ); - - logger.info(res); - - return res; - } - catch (Throwable e) { - logger.severe("Failed to perform operation."); - logger.severe(CommandLogger.errorMessage(e)); - - throw e; - } - } - - /** {@inheritDoc} */ - @Override public String confirmationPrompt() { - if (CHANGE_MASTER_KEY == cmd) { - return "Warning: the command will change the master key. Cache start and node join during the key change " + - "process is prohibited and will be rejected."; - } - - return null; - } - - /** {@inheritDoc} */ - @Override public void parseArguments(CommandArgIterator argIter) { - EncryptionSubcommand cmd = of(argIter.nextArg("Expected encryption action.")); - - if (cmd == null) - throw new IllegalArgumentException("Expected correct encryption action."); - - switch (cmd) { - case GET_MASTER_KEY_NAME: - taskName = VisorGetMasterKeyNameTask.class.getName(); - - taskArgs = null; - - break; - - case CHANGE_MASTER_KEY: - String masterKeyName = argIter.nextArg("Expected master key name."); - - taskName = VisorChangeMasterKeyTask.class.getName(); - - taskArgs = masterKeyName; - - break; - - default: - throw new IllegalArgumentException("Unknown encryption subcommand: " + cmd); - } - - this.cmd = cmd; - } - - /** {@inheritDoc} */ - @Override public Object arg() { - return taskArgs; - } - - /** {@inheritDoc} */ - @Override public void printUsage(Logger logger) { - Command.usage(logger, "Print the current master key name:", ENCRYPTION, GET_MASTER_KEY_NAME.toString()); - Command.usage(logger, "Change the master key:", ENCRYPTION, CHANGE_MASTER_KEY.toString(), "newMasterKeyName"); - } - - /** {@inheritDoc} */ - @Override public String 
name() { - return ENCRYPTION.toCommandName(); - } -} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommands.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommands.java new file mode 100644 index 00000000000000..fbae770bdc2edb --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommands.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.encryption; + +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.CommandList; + +/** + * Commands related to encryption functions. + * + * @see EncryptionSubcommands + */ +public class EncryptionCommands extends AbstractCommand { + /** Subcommand. 
*/ + private EncryptionSubcommands cmd; + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { + return cmd.subcommand().execute(clientCfg, logger); + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + EncryptionSubcommands cmd = EncryptionSubcommands.of(argIter.nextArg("Expected encryption action.")); + + if (cmd == null) + throw new IllegalArgumentException("Expected correct encryption action."); + + cmd.subcommand().parseArguments(argIter); + + if (argIter.hasNextSubArg()) + throw new IllegalArgumentException("Unexpected argument of --encryption subcommand: " + argIter.peekNextArg()); + + this.cmd = cmd; + } + + /** {@inheritDoc} */ + @Override public EncryptionSubcommands arg() { + return cmd; + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger logger) { + for (EncryptionSubcommands cmd : EncryptionSubcommands.values()) + cmd.subcommand().printUsage(logger); + } + + /** {@inheritDoc} */ + @Override public String name() { + return CommandList.ENCRYPTION.toCommandName(); + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommand.java deleted file mode 100644 index 3c47c024ea137b..00000000000000 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommand.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ignite.internal.commandline.encryption; - -import org.jetbrains.annotations.Nullable; - -/** - * Set of encryption subcommands. - * - * @see EncryptionCommand - */ -public enum EncryptionSubcommand { - /** Subcommand to get the current master key name. */ - GET_MASTER_KEY_NAME("get_master_key_name"), - - /** Subcommand to change the master key. */ - CHANGE_MASTER_KEY("change_master_key"); - - /** Subcommand name. */ - private final String name; - - /** @param name Encryption subcommand name. */ - EncryptionSubcommand(String name) { - this.name = name; - } - - /** - * @param text Command text (case insensitive). - * @return Command for the text. {@code Null} if there is no such command. 
- */ - @Nullable public static EncryptionSubcommand of(String text) { - for (EncryptionSubcommand cmd : EncryptionSubcommand.values()) { - if (cmd.name.equalsIgnoreCase(text)) - return cmd; - } - - return null; - } - - /** {@inheritDoc} */ - @Override public String toString() { - return name; - } -} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommands.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommands.java new file mode 100644 index 00000000000000..c8d09419e39ea0 --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommands.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.encryption; + +import org.apache.ignite.internal.commandline.Command; +import org.jetbrains.annotations.Nullable; + +/** + * Set of encryption subcommands. + * + * @see EncryptionCommands + */ +public enum EncryptionSubcommands { + /** Subcommand to get the current master key name. 
*/ + GET_MASTER_KEY_NAME("get_master_key_name", new GetMasterKeyNameCommand()), + + /** Subcommand to change the master key. */ + CHANGE_MASTER_KEY("change_master_key", new ChangeMasterKeyCommand()), + + /** Subcommand to change the current encryption key for specified cache group. */ + CHANGE_CACHE_GROUP_KEY("change_cache_key", new ChangeCacheGroupKeyCommand()), + + /** Subcommand to view current encryption key IDs of the cache group. */ + CACHE_GROUP_KEY_IDS("cache_key_ids", new CacheGroupEncryptionCommand.CacheKeyIds()), + + /** Subcommand to display re-encryption status of the cache group. */ + REENCRYPTION_STATUS("reencryption_status", new CacheGroupEncryptionCommand.ReencryptionStatus()), + + /** Subcommand to suspend re-encryption of the cache group. */ + REENCRYPTION_SUSPEND("suspend_reencryption", new CacheGroupEncryptionCommand.SuspendReencryption()), + + /** Subcommand to resume re-encryption of the cache group. */ + REENCRYPTION_RESUME("resume_reencryption", new CacheGroupEncryptionCommand.ResumeReencryption()), + + /** Subcommand to view/change cache group re-encryption rate limit. */ + REENCRYPTION_RATE("reencryption_rate_limit", new ReencryptionRateCommand()); + + /** Subcommand name. */ + private final String name; + + /** Command. */ + private final Command cmd; + + /** + * @param name Encryption subcommand name. + * @param cmd Command implementation. + */ + EncryptionSubcommands(String name, Command cmd) { + this.name = name; + this.cmd = cmd; + } + + /** + * @return Name. + */ + public String text() { + return name; + } + + /** + * @return Cache subcommand implementation. + */ + public Command subcommand() { + return cmd; + } + + /** + * @param text Command text (case insensitive). + * @return Command for the text. {@code Null} if there is no such command. 
+ */ + @Nullable public static EncryptionSubcommands of(String text) { + for (EncryptionSubcommands cmd : values()) { + if (cmd.name.equalsIgnoreCase(text)) + return cmd; + } + + return null; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return name; + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GetMasterKeyNameCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GetMasterKeyNameCommand.java new file mode 100644 index 00000000000000..02bb8ed9750eff --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GetMasterKeyNameCommand.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.commandline.encryption; + +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandLogger; +import org.apache.ignite.internal.visor.encryption.VisorGetMasterKeyNameTask; + +import static org.apache.ignite.internal.commandline.CommandList.ENCRYPTION; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.GET_MASTER_KEY_NAME; + +/** + * Get master key name encryption subcommand. + */ +public class GetMasterKeyNameCommand extends AbstractCommand { + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + String masterKeyName = executeTaskByNameOnNode( + client, + VisorGetMasterKeyNameTask.class.getName(), + null, + null, + clientCfg + ); + + log.info(masterKeyName); + + return masterKeyName; + } + catch (Throwable e) { + log.severe("Failed to perform operation."); + log.severe(CommandLogger.errorMessage(e)); + + throw e; + } + } + + /** {@inheritDoc} */ + @Override public Void arg() { + return null; + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + Command.usage(log, "Print the current master key name:", ENCRYPTION, GET_MASTER_KEY_NAME.toString()); + } + + /** {@inheritDoc} */ + @Override public String name() { + return GET_MASTER_KEY_NAME.text().toUpperCase(); + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java 
b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java new file mode 100644 index 00000000000000..7eb9f793049840 --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.commandline.encryption; + +import java.util.Map; +import java.util.UUID; +import java.util.logging.Logger; +import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.CommandLogger; +import org.apache.ignite.internal.visor.encryption.VisorCacheGroupEncryptionTaskResult; +import org.apache.ignite.internal.visor.encryption.VisorReencryptionRateTask; +import org.apache.ignite.internal.visor.encryption.VisorReencryptionRateTaskArg; + +import static java.util.Collections.singletonMap; +import static org.apache.ignite.internal.commandline.CommandList.ENCRYPTION; +import static org.apache.ignite.internal.commandline.CommandLogger.DOUBLE_INDENT; +import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; +import static org.apache.ignite.internal.commandline.CommandLogger.optional; +import static org.apache.ignite.internal.commandline.TaskExecutor.BROADCAST_UUID; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_RATE; + +/** + * View/change cache group re-encryption rate limit subcommand. + */ +public class ReencryptionRateCommand extends AbstractCommand { + /** Re-encryption rate task argument. 
*/ + private VisorReencryptionRateTaskArg taskArg; + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + VisorCacheGroupEncryptionTaskResult res = executeTaskByNameOnNode( + client, + VisorReencryptionRateTask.class.getName(), + taskArg, + BROADCAST_UUID, + clientCfg + ); + + Map exceptions = res.exceptions(); + + for (Map.Entry entry : exceptions.entrySet()) { + log.info(INDENT + "Node " + entry.getKey() + ":"); + log.info(DOUBLE_INDENT + + "failed to get/set re-encryption rate limit: " + entry.getValue().getMessage()); + } + + Map results = res.results(); + boolean read = taskArg.rate() == null; + + for (Map.Entry entry : results.entrySet()) { + log.info(INDENT + "Node " + entry.getKey() + ":"); + + double rateLimit = read ? entry.getValue() : taskArg.rate(); + + if (rateLimit == 0) + log.info(DOUBLE_INDENT + "re-encryption rate is not limited."); + else { + log.info(String.format("%sre-encryption rate %s limited to %.2f MB/s.", + DOUBLE_INDENT, (read ? "is" : "has been"), rateLimit)); + } + } + + if (read) + return null; + + log.info(""); + log.info("Note: the changed value of the re-encryption rate limit is not persisted. 
" + + "When the node is restarted, the value will be set from the configuration."); + log.info(""); + + return null; + } + catch (Throwable e) { + log.severe("Failed to perform operation."); + log.severe(CommandLogger.errorMessage(e)); + + throw e; + } + } + + /** {@inheritDoc} */ + @Override public VisorReencryptionRateTaskArg arg() { + return taskArg; + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + Double rateLimit = null; + + while (argIter.hasNextSubArg()) { + String rateLimitArg = argIter.nextArg("Expected decimal value for re-encryption rate."); + + try { + rateLimit = Double.parseDouble(rateLimitArg); + } + catch (NumberFormatException e) { + throw new IllegalArgumentException("Failed to parse command argument. Decimal value expected.", e); + } + } + + taskArg = new VisorReencryptionRateTaskArg(rateLimit); + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + Command.usage(log, "View/change re-encryption rate limit:", ENCRYPTION, + singletonMap("new_limit", "Decimal value to change re-encryption rate limit (MB/s)."), + REENCRYPTION_RATE.toString(), optional("new_limit")); + } + + /** {@inheritDoc} */ + @Override public String name() { + return REENCRYPTION_RATE.text().toUpperCase(); + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/MetadataCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/MetadataCommand.java index 180d5ec9fb2beb..7eeab1f5bf487e 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/MetadataCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/MetadataCommand.java @@ -19,6 +19,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import 
org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.meta.subcommands.MetadataRemoveCommand; @@ -37,7 +38,7 @@ /** * */ -public class MetadataCommand implements Command { +public class MetadataCommand extends AbstractCommand { /** * */ diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/subcommands/MetadataAbstractSubCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/subcommands/MetadataAbstractSubCommand.java index dded0fdb41243b..8111a023861d83 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/subcommands/MetadataAbstractSubCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/subcommands/MetadataAbstractSubCommand.java @@ -26,6 +26,7 @@ import org.apache.ignite.internal.client.GridClientConfiguration; import org.apache.ignite.internal.client.GridClientDisconnectedException; import org.apache.ignite.internal.client.GridClientNode; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -37,7 +38,7 @@ public abstract class MetadataAbstractSubCommand< MetadataArgsDto extends IgniteDataTransferObject, MetadataResultDto extends IgniteDataTransferObject> - implements Command { + extends AbstractCommand { /** Filesystem. 
*/ protected static final FileSystem FS = FileSystems.getDefault(); diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/subcommands/MetadataHelpCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/subcommands/MetadataHelpCommand.java index 35db8f254468d1..da93f4f5fbbce7 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/subcommands/MetadataHelpCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/subcommands/MetadataHelpCommand.java @@ -19,12 +19,12 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClientConfiguration; -import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.meta.MetadataCommand; import org.apache.ignite.internal.commandline.meta.MetadataSubCommandsList; /** */ -public class MetadataHelpCommand implements Command { +public class MetadataHelpCommand extends AbstractCommand { /** {@inheritDoc} */ @Override public void printUsage(Logger log) { throw new UnsupportedOperationException("printUsage"); diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/metric/MetricCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/metric/MetricCommand.java index a409111f2e406c..861a8b9d196f4e 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/metric/MetricCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/metric/MetricCommand.java @@ -26,6 +26,7 @@ import java.util.stream.Collectors; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import 
org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -42,7 +43,7 @@ import static org.apache.ignite.internal.visor.systemview.VisorSystemViewTask.SimpleType.STRING; /** Represents command for metric values printing. */ -public class MetricCommand implements Command { +public class MetricCommand extends AbstractCommand { /** * Argument for the metric values obtainig task. * @see VisorMetricTask diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/persistence/CleanAndBackupSubcommandArg.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/persistence/CleanAndBackupSubcommandArg.java new file mode 100644 index 00000000000000..08a0336172d8b2 --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/persistence/CleanAndBackupSubcommandArg.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.commandline.persistence; + +import org.apache.ignite.internal.commandline.argument.CommandArg; + +/** + * {@link PersistenceSubcommands#CLEAN} subcommand arguments. + */ +public enum CleanAndBackupSubcommandArg implements CommandArg { + /** Clean all caches data files. */ + ALL("all"), + /** Clean corrupted caches data files. */ + CORRUPTED("corrupted"), + /** Clean only specified caches data files. */ + CACHES("caches"); + + /** */ + private final String name; + + /** */ + CleanAndBackupSubcommandArg(String name) { + this.name = name; + } + + /** {@inheritDoc} */ + @Override public String argName() { + return name; + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/persistence/PersistenceArguments.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/persistence/PersistenceArguments.java new file mode 100644 index 00000000000000..8971680223c050 --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/persistence/PersistenceArguments.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.commandline.persistence; + +import java.util.List; + +/** + * Arguments of "persistence cleaning" command. + */ +public class PersistenceArguments { + /** */ + private PersistenceSubcommands cmd; + + /** */ + private CleanAndBackupSubcommandArg cleanArg; + + /** */ + private List<String> cachesList; + + /** + * @param cmd + */ + public PersistenceArguments(PersistenceSubcommands cmd, CleanAndBackupSubcommandArg cleanArg, List<String> cachesList) { + this.cmd = cmd; + this.cleanArg = cleanArg; + this.cachesList = cachesList; + } + + /** */ + public PersistenceSubcommands subcommand() { + return cmd; + } + + /** */ + public CleanAndBackupSubcommandArg cleanArg() { + return cleanArg; + } + + /** */ + public List<String> cachesList() { + return cachesList; + } + + /** Builder of {@link PersistenceArguments}. */ + public static class Builder { + /** */ + private PersistenceSubcommands subCmd; + + /** */ + private CleanAndBackupSubcommandArg cleanSubCmdArg; + + /** */ + private List<String> cacheNames; + + /** + * @param subCmd Subcommand. 
+ */ + public Builder(PersistenceSubcommands subCmd) { + this.subCmd = subCmd; + } + + /** */ + public Builder withCleanAndBackupSubcommandArg(CleanAndBackupSubcommandArg cleanSubCmdArg) { + this.cleanSubCmdArg = cleanSubCmdArg; + + return this; + } + + public Builder withCacheNames(List<String> cacheNames) { + this.cacheNames = cacheNames; + + return this; + } + + public PersistenceArguments build() { + return new PersistenceArguments( + subCmd, + cleanSubCmdArg, + cacheNames + ); + } + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/persistence/PersistenceSubcommands.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/persistence/PersistenceSubcommands.java new file mode 100644 index 00000000000000..d674316abd51d9 --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/persistence/PersistenceSubcommands.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.commandline.persistence; + +import org.apache.ignite.internal.visor.persistence.PersistenceOperation; +import org.jetbrains.annotations.Nullable; + +/** + * + */ +public enum PersistenceSubcommands { + /** Collects information about corrupted caches and cache groups and their file system paths. */ + INFO("info", PersistenceOperation.INFO), + + /** Cleans partition files of corrupted caches and cache groups. */ + CLEAN("clean", PersistenceOperation.CLEAN), + + /** */ + BACKUP("backup", PersistenceOperation.BACKUP); + + /** Subcommand name. */ + private final String name; + + /** Operation this subcommand triggers. */ + private final PersistenceOperation operation; + + /** + * @param name String representation of subcommand. + * @param operation Operation this command triggers. + */ + PersistenceSubcommands(String name, PersistenceOperation operation) { + this.name = name; + this.operation = operation; + } + + /** + * @param strRep String representation of subcommand. + * @return Subcommand for its string representation. 
+ */ + public static @Nullable PersistenceSubcommands of(String strRep) { + for (PersistenceSubcommands cmd : values()) { + if (cmd.text().equals(strRep)) + return cmd; + } + + return null; + } + + /** */ + public String text() { + return name; + } + + /** */ + public PersistenceOperation operation() { + return operation; + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/PropertyCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/PropertyCommand.java index bee355f348d287..9a7a63b4d434b1 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/PropertyCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/PropertyCommand.java @@ -19,6 +19,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.processors.configuration.distributed.DistributedChangeableProperty; @@ -33,7 +34,7 @@ /** * Command to manage distributed properties (see {@link DistributedChangeableProperty}) */ -public class PropertyCommand implements Command { +public class PropertyCommand extends AbstractCommand { /** * */ @@ -51,13 +52,13 @@ public class PropertyCommand implements Command { LIST.toString() ); - usage(log, "Get the property value", + usage(log, "Get the property value:", PROPERTY, GET.toString(), PropertyArgs.NAME, "<property_name>"); - usage(log, "Set the property value", + usage(log, "Set the property value:", PROPERTY, SET.toString(), PropertyArgs.NAME, diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/subcommands/PropertyAbstractSubCommand.java 
b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/subcommands/PropertyAbstractSubCommand.java index 251ae21d54507b..f852459b9b6d3b 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/subcommands/PropertyAbstractSubCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/subcommands/PropertyAbstractSubCommand.java @@ -24,6 +24,7 @@ import org.apache.ignite.internal.client.GridClientConfiguration; import org.apache.ignite.internal.client.GridClientDisconnectedException; import org.apache.ignite.internal.client.GridClientNode; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -35,7 +36,7 @@ public abstract class PropertyAbstractSubCommand< MetadataArgsDto extends IgniteDataTransferObject, MetadataResultDto extends IgniteDataTransferObject> - implements Command { + extends AbstractCommand { /** */ private MetadataArgsDto args; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/subcommands/PropertyHelpCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/subcommands/PropertyHelpCommand.java index 61f3080d1b077e..dcf2e6f4f2a3fa 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/subcommands/PropertyHelpCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/subcommands/PropertyHelpCommand.java @@ -20,8 +20,8 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClientConfiguration; import org.apache.ignite.internal.commandline.Command; -import org.apache.ignite.internal.commandline.meta.MetadataCommand; import 
org.apache.ignite.internal.commandline.meta.MetadataSubCommandsList; +import org.apache.ignite.internal.commandline.property.PropertyCommand; /** */ public class PropertyHelpCommand implements Command { @@ -32,7 +32,7 @@ public class PropertyHelpCommand implements Command { /** {@inheritDoc} */ @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { - new MetadataCommand().printUsage(log); + new PropertyCommand().printUsage(log); return null; } diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/query/KillCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/query/KillCommand.java index 38b170d210a3f7..af2dbd565751af 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/query/KillCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/query/KillCommand.java @@ -23,6 +23,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -69,7 +70,7 @@ * @see ComputeMXBean * @see TransactionsMXBean */ -public class KillCommand implements Command { +public class KillCommand extends AbstractCommand { /** Command argument. 
*/ private Object taskArgs; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/snapshot/SnapshotCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/snapshot/SnapshotCommand.java index 2c77e6997aae58..2f9597e5838f48 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/snapshot/SnapshotCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/snapshot/SnapshotCommand.java @@ -20,6 +20,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -41,7 +42,7 @@ * @see SnapshotMXBean * @see IgniteSnapshotManager */ -public class SnapshotCommand implements Command { +public class SnapshotCommand extends AbstractCommand { /** Command argument. 
*/ private Object taskArgs; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/systemview/SystemViewCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/systemview/SystemViewCommand.java index cef934477002f8..14b544f8304a57 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/systemview/SystemViewCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/systemview/SystemViewCommand.java @@ -29,6 +29,7 @@ import java.util.stream.Collectors; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -49,7 +50,7 @@ import static org.apache.ignite.internal.visor.systemview.VisorSystemViewTask.SimpleType.STRING; /** Represents command for {@link SystemView} content printing. */ -public class SystemViewCommand implements Command { +public class SystemViewCommand extends AbstractCommand { /** Column separator. 
*/ public static final String COLUMN_SEPARATOR = " "; diff --git a/modules/control-utility/src/test/java/org/apache/ignite/internal/processors/security/GridCommandHandlerSslWithSecurityTest.java b/modules/control-utility/src/test/java/org/apache/ignite/internal/processors/security/GridCommandHandlerSslWithSecurityTest.java index 18a60ca2e20314..a34c619846a06c 100644 --- a/modules/control-utility/src/test/java/org/apache/ignite/internal/processors/security/GridCommandHandlerSslWithSecurityTest.java +++ b/modules/control-utility/src/test/java/org/apache/ignite/internal/processors/security/GridCommandHandlerSslWithSecurityTest.java @@ -17,22 +17,29 @@ package org.apache.ignite.internal.processors.security; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.Handler; +import java.util.logging.Logger; import org.apache.ignite.configuration.ConnectorConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.commandline.CommandHandler; import org.apache.ignite.internal.commandline.NoopConsole; import org.apache.ignite.internal.processors.security.impl.TestSecurityPluginProvider; +import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.junit.Test; +import static org.apache.ignite.cluster.ClusterState.ACTIVE; import static org.apache.ignite.internal.commandline.CommandHandler.EXIT_CODE_OK; import static org.apache.ignite.internal.commandline.CommandList.DEACTIVATE; import static org.apache.ignite.plugin.security.SecurityPermissionSetBuilder.ALLOW_ALL; +import static org.apache.ignite.testframework.GridTestUtils.assertContains; import static org.apache.ignite.testframework.GridTestUtils.keyStorePassword; import static 
org.apache.ignite.testframework.GridTestUtils.keyStorePath; import static org.apache.ignite.testframework.GridTestUtils.sslTrustedFactory; @@ -47,13 +54,51 @@ public class GridCommandHandlerSslWithSecurityTest extends GridCommonAbstractTes /** Password. */ private final String pwd = "testPwd"; + /** System out. */ + protected static PrintStream sysOut; + + /** + * Test out - can be injected via {@link #injectTestSystemOut()} instead of System.out and analyzed in test. + * Will be as well passed as a handler output for an anonymous logger in the test. + */ + protected static ByteArrayOutputStream testOut; + + /** {@inheritDoc} */ + @Override protected void beforeTestsStarted() throws Exception { + super.beforeTestsStarted(); + + testOut = new ByteArrayOutputStream(16 * 1024); + + sysOut = System.out; + } + /** {@inheritDoc} */ @Override protected void afterTest() throws Exception { super.afterTest(); + System.setOut(sysOut); + + testOut.reset(); + stopAllGrids(); } + /** + * Sets test output stream. + */ + protected void injectTestSystemOut() { + System.setOut(new PrintStream(testOut)); + } + + /** + * Flushes all Logger handlers to make log data available to test. + * @param hnd Command handler. 
+ */ + private void flushCommandOutput(CommandHandler hnd) { + Logger log = U.field(hnd, "logger"); + Arrays.stream(log.getHandlers()).forEach(Handler::flush); + } + /** {@inheritDoc} */ @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { return super.getConfiguration(igniteInstanceName) @@ -77,7 +122,7 @@ public class GridCommandHandlerSslWithSecurityTest extends GridCommonAbstractTes public void testInputKeyTrustStorePwdOnlyOnce() throws Exception { IgniteEx crd = startGrid(); - crd.cluster().active(true); + crd.cluster().state(ACTIVE); CommandHandler cmd = new CommandHandler(); @@ -129,7 +174,9 @@ else if (fmt.contains("truststore")) { public void testConnector() throws Exception { IgniteEx crd = startGrid(); - crd.cluster().active(true); + crd.cluster().state(ACTIVE); + + injectTestSystemOut(); CommandHandler hnd = new CommandHandler(); @@ -143,5 +190,13 @@ public void testConnector() throws Exception { "--truststore-password", keyStorePassword())); assertEquals(EXIT_CODE_OK, exitCode); + + flushCommandOutput(hnd); + + // Make sure all sensitive information is masked. 
+ String testOutput = testOut.toString(); + assertContains(log, testOutput, "--password *****"); + assertContains(log, testOutput, "--keystore-password *****"); + assertContains(log, testOutput, "--truststore-password *****"); } } diff --git a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerAbstractTest.java b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerAbstractTest.java index bc31ce71304a62..8dfbb371df4052 100644 --- a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerAbstractTest.java +++ b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerAbstractTest.java @@ -40,6 +40,7 @@ import org.apache.ignite.configuration.ConnectorConfiguration; import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.EncryptionConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.IgniteEx; @@ -68,6 +69,8 @@ import static java.util.Objects.nonNull; import static org.apache.ignite.IgniteSystemProperties.IGNITE_ENABLE_EXPERIMENTAL_COMMAND; import static org.apache.ignite.configuration.DataStorageConfiguration.DFLT_CHECKPOINT_FREQ; +import static org.apache.ignite.configuration.EncryptionConfiguration.DFLT_REENCRYPTION_BATCH_SIZE; +import static org.apache.ignite.configuration.EncryptionConfiguration.DFLT_REENCRYPTION_RATE_MBPS; import static org.apache.ignite.internal.encryption.AbstractEncryptionTest.KEYSTORE_PASSWORD; import static org.apache.ignite.internal.encryption.AbstractEncryptionTest.KEYSTORE_PATH; import static org.apache.ignite.internal.processors.cache.verify.VerifyBackupPartitionsDumpTask.IDLE_DUMP_FILE_PREFIX; @@ -85,6 +88,9 @@ public abstract class GridCommandHandlerAbstractTest extends GridCommonAbstractT /** */ protected static final String 
CLIENT_NODE_NAME_PREFIX = "client"; + /** */ + protected static final String DAEMON_NODE_NAME_PREFIX = "daemon"; + /** Option is used for auto confirmation. */ protected static final String CMD_AUTO_CONFIRMATION = "--yes"; @@ -113,7 +119,13 @@ public abstract class GridCommandHandlerAbstractTest extends GridCommonAbstractT protected boolean autoConfirmation = true; /** {@code True} if encription is enabled. */ - protected boolean encriptionEnabled; + protected boolean encryptionEnabled; + + /** Re-encryption rate limit in megabytes per second. */ + protected double reencryptSpeed = DFLT_REENCRYPTION_RATE_MBPS; + + /** The number of pages that is scanned during re-encryption under checkpoint lock. */ + protected int reencryptBatchSize = DFLT_REENCRYPTION_BATCH_SIZE; /** Last operation result. */ protected Object lastOperationResult; @@ -171,7 +183,7 @@ protected boolean persistenceEnable() { testOut.reset(); - encriptionEnabled = false; + encryptionEnabled = false; GridClientFactory.stopAll(false); } @@ -233,13 +245,22 @@ protected boolean idleVerifyRes(Path p) { cfg.setClientMode(igniteInstanceName.startsWith(CLIENT_NODE_NAME_PREFIX)); - if (encriptionEnabled) { + cfg.setDaemon(igniteInstanceName.startsWith(DAEMON_NODE_NAME_PREFIX)); + + if (encryptionEnabled) { KeystoreEncryptionSpi encSpi = new KeystoreEncryptionSpi(); encSpi.setKeyStorePath(KEYSTORE_PATH); encSpi.setKeyStorePassword(KEYSTORE_PASSWORD.toCharArray()); cfg.setEncryptionSpi(encSpi); + + EncryptionConfiguration encCfg = new EncryptionConfiguration(); + + encCfg.setReencryptionRateLimit(reencryptSpeed); + encCfg.setReencryptionBatchSize(reencryptBatchSize); + + dsCfg.setEncryptionConfiguration(encCfg); } return cfg; @@ -418,7 +439,8 @@ protected void createCacheAndPreload( CacheConfiguration ccfg = new CacheConfiguration<>(DEFAULT_CACHE_NAME) .setAffinity(new RendezvousAffinityFunction(false, partitions)) - .setBackups(1); + .setBackups(1) + .setEncryptionEnabled(encryptionEnabled); if (filter != 
null) ccfg.setNodeFilter(filter); diff --git a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerClusterByClassTest.java b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerClusterByClassTest.java index 4403e38ed3411f..a8dc39878157c2 100644 --- a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerClusterByClassTest.java +++ b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerClusterByClassTest.java @@ -891,15 +891,18 @@ public void testCacheIdleVerifyDumpForCorruptedDataOnSystemCache() throws Except corruptDataEntry(storedSysCacheCtx.caches().get(0), new GridCacheInternalKeyImpl("sq" + parts / 2, "default-ds-group"), false, true); - CacheGroupContext memorySysCacheCtx = ignite.context().cache().cacheGroup(CU.cacheId("default-volatile-ds-group")); + CacheGroupContext memoryVolatileCacheCtx = ignite.context().cache().cacheGroup(CU.cacheId( + "default-volatile-ds-group@volatileDsMemPlc")); - assertNotNull(memorySysCacheCtx); + assertNotNull(memoryVolatileCacheCtx); + assertEquals("volatileDsMemPlc", memoryVolatileCacheCtx.dataRegion().config().getName()); + assertEquals(false, memoryVolatileCacheCtx.dataRegion().config().isPersistenceEnabled()); - corruptDataEntry(memorySysCacheCtx.caches().get(0), new GridCacheInternalKeyImpl("s0", - "default-volatile-ds-group"), true, false); + corruptDataEntry(memoryVolatileCacheCtx.caches().get(0), new GridCacheInternalKeyImpl("s0", + "default-volatile-ds-group@volatileDsMemPlc"), true, false); - corruptDataEntry(memorySysCacheCtx.caches().get(0), new GridCacheInternalKeyImpl("s" + parts / 2, - "default-volatile-ds-group"), false, true); + corruptDataEntry(memoryVolatileCacheCtx.caches().get(0), new GridCacheInternalKeyImpl("s" + parts / 2, + "default-volatile-ds-group@volatileDsMemPlc"), false, true); assertEquals(EXIT_CODE_OK, execute("--cache", "idle_verify", "--dump", "--cache-filter", "SYSTEM")); @@ -910,7 +913,8 
@@ public void testCacheIdleVerifyDumpForCorruptedDataOnSystemCache() throws Except U.log(log, dumpWithConflicts); - assertContains(log, dumpWithConflicts, "found 4 conflict partitions: [counterConflicts=2, " + + // Non-persistent caches do not have counter conflicts + assertContains(log, dumpWithConflicts, "found 3 conflict partitions: [counterConflicts=1, " + "hashConflicts=2]"); } else diff --git a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerPropertiesTest.java b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerPropertiesTest.java index 111b5c0c0d8462..2c800d7b9ea3af 100644 --- a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerPropertiesTest.java +++ b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerPropertiesTest.java @@ -47,6 +47,29 @@ public void init() { public void clear() { } + /** + * Check the command '--property help'. + * Steps: + */ + @Test + public void testHelp() { + assertEquals(EXIT_CODE_OK, execute("--property", "help")); + + String out = testOut.toString(); + + assertContains(log, out, "Print property command help:"); + assertContains(log, out, "control.(sh|bat) --property help"); + + assertContains(log, out, "Print list of available properties:"); + assertContains(log, out, "control.(sh|bat) --property list"); + + assertContains(log, out, "Get the property value:"); + assertContains(log, out, "control.(sh|bat) --property get --name "); + + assertContains(log, out, "Set the property value:"); + assertContains(log, out, "control.(sh|bat) --property set --name --val "); + } + /** * Check the command '--property list'. 
* Steps: diff --git a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java index a2ddf0b218730a..f95d79a535185a 100644 --- a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java +++ b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java @@ -21,7 +21,9 @@ import java.io.IOException; import java.io.RandomAccessFile; import java.io.Serializable; +import java.lang.reflect.Field; import java.nio.file.Files; +import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; @@ -32,6 +34,7 @@ import java.util.Map; import java.util.Set; import java.util.TreeMap; +import java.util.TreeSet; import java.util.UUID; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ThreadLocalRandom; @@ -40,6 +43,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.LongAdder; +import java.util.function.Function; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; @@ -64,6 +68,7 @@ import org.apache.ignite.internal.GridJobExecuteResponse; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.IgniteNodeAttributes; import org.apache.ignite.internal.TestRecordingCommunicationSpi; import org.apache.ignite.internal.client.GridClientFactory; import org.apache.ignite.internal.client.impl.GridClientImpl; @@ -83,6 +88,7 @@ import org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockResponse; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxFinishRequest; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal; +import 
org.apache.ignite.internal.processors.cache.persistence.CheckpointState; import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; import org.apache.ignite.internal.processors.cache.persistence.db.IgniteCacheGroupsWithRestartsTest; import org.apache.ignite.internal.processors.cache.persistence.diagnostic.pagelocktracker.dumpprocessors.ToFileDumpProcessor; @@ -95,8 +101,10 @@ import org.apache.ignite.internal.processors.cache.warmup.WarmUpTestPluginProvider; import org.apache.ignite.internal.processors.cluster.GridClusterStateProcessor; import org.apache.ignite.internal.util.lang.GridAbsPredicate; +import org.apache.ignite.internal.util.lang.GridFunc; import org.apache.ignite.internal.util.typedef.G; import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.internal.visor.cache.VisorFindAndDeleteGarbageInPersistenceTaskResult; import org.apache.ignite.internal.visor.tx.VisorTxInfo; @@ -113,6 +121,7 @@ import org.apache.ignite.transactions.TransactionRollbackException; import org.apache.ignite.transactions.TransactionTimeoutException; import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; import org.junit.Test; import static java.io.File.separatorChar; @@ -132,6 +141,12 @@ import static org.apache.ignite.internal.commandline.CommandHandler.EXIT_CODE_OK; import static org.apache.ignite.internal.commandline.CommandHandler.EXIT_CODE_UNEXPECTED_ERROR; import static org.apache.ignite.internal.commandline.CommandList.DEACTIVATE; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.CACHE_GROUP_KEY_IDS; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.CHANGE_CACHE_GROUP_KEY; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_RATE; +import static 
org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_RESUME; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_STATUS; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_SUSPEND; import static org.apache.ignite.internal.encryption.AbstractEncryptionTest.MASTER_KEY_NAME_2; import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.IGNITE_PDS_SKIP_CHECKPOINT_ON_NODE_STOP; import static org.apache.ignite.internal.processors.cache.persistence.snapshot.AbstractSnapshotSelfTest.doSnapshotCancellationTest; @@ -140,6 +155,7 @@ import static org.apache.ignite.internal.processors.cache.verify.IdleVerifyUtility.GRID_NOT_IDLE_MSG; import static org.apache.ignite.internal.processors.diagnostic.DiagnosticProcessor.DEFAULT_TARGET_FOLDER; import static org.apache.ignite.testframework.GridTestUtils.assertContains; +import static org.apache.ignite.testframework.GridTestUtils.assertThrows; import static org.apache.ignite.testframework.GridTestUtils.runAsync; import static org.apache.ignite.testframework.GridTestUtils.waitForCondition; import static org.apache.ignite.transactions.TransactionConcurrency.OPTIMISTIC; @@ -247,6 +263,365 @@ public void testClientsLeakage() throws Exception { assertTrue("Still opened clients: " + new ArrayList<>(clnts.values()), clntsBefore.equals(clntsAfter2)); } + private CacheConfiguration cacheConfiguration(String cacheName) { + CacheConfiguration ccfg = new CacheConfiguration(cacheName) + .setAtomicityMode(TRANSACTIONAL) + .setAffinity(new RendezvousAffinityFunction(false, 32)) + .setBackups(1); + + return ccfg; + } + + /** + * Starts cluster of two nodes and prepares situation of corrupted PDS on node2 + * so it enters maintenance mode on restart. + * + * @param cachesToStart Configurations of caches that should be started in cluster. 
+ * @param cacheToCorrupt Function determining should cache with given name be corrupted or not. + */ + private File startGridAndPutNodeToMaintenance(CacheConfiguration[] cachesToStart, + @Nullable Function cacheToCorrupt) throws Exception { + assert cachesToStart != null && cachesToStart.length > 0; + + IgniteEx ig0 = startGrid(0); + IgniteEx ig1 = startGrid(1); + + String ig1Folder = ig1.context().pdsFolderResolver().resolveFolders().folderName(); + File dbDir = U.resolveWorkDirectory(ig1.configuration().getWorkDirectory(), "db", false); + + File ig1LfsDir = new File(dbDir, ig1Folder); + + ig0.cluster().baselineAutoAdjustEnabled(false); + ig0.cluster().state(ACTIVE); + + IgniteCache dfltCache = ig0.getOrCreateCache(cachesToStart[0]); + + if (cachesToStart.length > 1) { + for (int i = 1; i < cachesToStart.length; i++) + ig0.getOrCreateCache(cachesToStart[i]); + } + + for (int k = 0; k < 1000; k++) + dfltCache.put(k, k); + + GridCacheDatabaseSharedManager dbMrg0 = (GridCacheDatabaseSharedManager) ig0.context().cache().context().database(); + GridCacheDatabaseSharedManager dbMrg1 = (GridCacheDatabaseSharedManager) ig1.context().cache().context().database(); + + dbMrg0.forceCheckpoint("cp").futureFor(CheckpointState.FINISHED).get(); + dbMrg1.forceCheckpoint("cp").futureFor(CheckpointState.FINISHED).get(); + + Arrays.stream(cachesToStart) + .map(ccfg -> ccfg.getName()) + .filter(name -> cacheToCorrupt.apply(name)) + .forEach(name -> ig0.cluster().disableWal(name)); + + for (int k = 1000; k < 2000; k++) + dfltCache.put(k, k); + + stopGrid(1); + + File[] cpMarkers = new File(ig1LfsDir, "cp").listFiles(); + + for (File cpMark : cpMarkers) { + if (cpMark.getName().contains("-END")) + cpMark.delete(); + } + + assertThrows(log, () -> startGrid(1), Exception.class, null); + + return ig1LfsDir; + } + + /** + * Test verifies persistence clean command with explicit list of caches to be cleaned. + * + * @throws Exception If failed. 
+ */ + @Test + public void testPersistenceCleanSpecifiedCachesCommand() throws Exception { + String cacheName0 = DEFAULT_CACHE_NAME + "0"; + String cacheName1 = DEFAULT_CACHE_NAME + "1"; + String cacheName2 = DEFAULT_CACHE_NAME + "2"; + String cacheName3 = DEFAULT_CACHE_NAME + "3"; + + String nonExistingCacheName = DEFAULT_CACHE_NAME + "4"; + + File mntcNodeWorkDir = startGridAndPutNodeToMaintenance( + new CacheConfiguration[]{ + cacheConfiguration(cacheName0), + cacheConfiguration(cacheName1), + cacheConfiguration(cacheName2), + cacheConfiguration(cacheName3) + }, + s -> !s.equals(cacheName3)); + + IgniteEx ig1 = startGrid(1); + + String port = ig1.localNode().attribute(IgniteNodeAttributes.ATTR_REST_TCP_PORT).toString(); + + assertEquals(EXIT_CODE_INVALID_ARGUMENTS, execute("--persistence", "clean", "caches", + nonExistingCacheName, + "--host", "localhost", "--port", port)); + + assertEquals(EXIT_CODE_OK, execute("--persistence", "clean", "caches", + cacheName0 + "," + cacheName1, + "--host", "localhost", "--port", port)); + + boolean cleanedEmpty = Arrays.stream(mntcNodeWorkDir.listFiles()) + .filter(f -> f.getName().contains(cacheName0) || f.getName().contains(cacheName1)) + .map(f -> f.listFiles().length == 1) + .reduce(true, (t, u) -> t && u); + + assertTrue(cleanedEmpty); + + boolean nonCleanedNonEmpty = Arrays.stream(mntcNodeWorkDir.listFiles()) + .filter(f -> f.getName().contains(cacheName2) || f.getName().contains(cacheName3)) + .map(f -> f.listFiles().length > 1) + .reduce(true, (t, u) -> t && u); + + assertTrue(nonCleanedNonEmpty); + + stopGrid(1); + + ig1 = startGrid(1); + + assertTrue(ig1.context().maintenanceRegistry().isMaintenanceMode()); + + assertEquals(EXIT_CODE_OK, execute("--persistence", "clean", "caches", + cacheName2, + "--host", "localhost", "--port", port)); + + stopGrid(1); + + ig1 = startGrid(1); + + assertFalse(ig1.context().maintenanceRegistry().isMaintenanceMode()); + } + + /** + * Test verifies persistence clean command cleaning 
only corrupted caches and not touching others. + * + * @throws Exception If failed. + */ + @Test + public void testPersistenceCleanCorruptedCachesCommand() throws Exception { + String cacheName0 = DEFAULT_CACHE_NAME + "0"; + String cacheName1 = DEFAULT_CACHE_NAME + "1"; + String cacheName2 = DEFAULT_CACHE_NAME + "2"; + String cacheName3 = DEFAULT_CACHE_NAME + "3"; + + File mntcNodeWorkDir = startGridAndPutNodeToMaintenance( + new CacheConfiguration[]{ + cacheConfiguration(cacheName0), + cacheConfiguration(cacheName1), + cacheConfiguration(cacheName2), + cacheConfiguration(cacheName3) + }, + s -> !s.equals(cacheName3)); + + IgniteEx ig1 = startGrid(1); + + String port = ig1.localNode().attribute(IgniteNodeAttributes.ATTR_REST_TCP_PORT).toString(); + + assertEquals(EXIT_CODE_OK, execute("--persistence", "clean", "corrupted", + "--host", "localhost", "--port", port)); + + boolean cleanedEmpty = Arrays.stream(mntcNodeWorkDir.listFiles()) + .filter(f -> + f.getName().contains(cacheName0) + || f.getName().contains(cacheName1) + || f.getName().contains(cacheName2) + ) + .map(f -> f.listFiles().length == 1) + .reduce(true, (t, u) -> t && u); + + assertTrue(cleanedEmpty); + + stopGrid(1); + + ig1 = startGrid(1); + + assertFalse(ig1.context().maintenanceRegistry().isMaintenanceMode()); + } + + /** + * Test verifies persistence clean all command that cleans all cache directories. 
+ * + * @throws Exception + */ + @Test + public void testPersistenceCleanAllCachesCommand() throws Exception { + String cacheName0 = DEFAULT_CACHE_NAME + "0"; + String cacheName1 = DEFAULT_CACHE_NAME + "1"; + + File mntcNodeWorkDir = startGridAndPutNodeToMaintenance( + new CacheConfiguration[]{ + cacheConfiguration(cacheName0), + cacheConfiguration(cacheName1) + }, + s -> s.equals(cacheName0)); + + IgniteEx ig1 = startGrid(1); + + String port = ig1.localNode().attribute(IgniteNodeAttributes.ATTR_REST_TCP_PORT).toString(); + + assertEquals(EXIT_CODE_OK, execute("--persistence", "clean", "all", + "--host", "localhost", "--port", port)); + + boolean allEmpty = Arrays.stream(mntcNodeWorkDir.listFiles()) + .filter(File::isDirectory) + .filter(f -> f.getName().startsWith("cache-")) + .map(f -> f.listFiles().length == 1) + .reduce(true, (t, u) -> t && u); + + assertTrue(allEmpty); + + stopGrid(1); + + ig1 = startGrid(1); + + assertFalse(ig1.context().maintenanceRegistry().isMaintenanceMode()); + } + + /** + * Test verifies that persistence backup command to backup all caches backs up all cache directories. + * + * @throws Exception If failed. 
+ */ + @Test + public void testPersistenceBackupAllCachesCommand() throws Exception { + String cacheName0 = DEFAULT_CACHE_NAME + "0"; + String cacheName1 = DEFAULT_CACHE_NAME + "1"; + + File mntcNodeWorkDir = startGridAndPutNodeToMaintenance( + new CacheConfiguration[]{ + cacheConfiguration(cacheName0), + cacheConfiguration(cacheName1) + }, + s -> s.equals(cacheName0)); + + IgniteEx ig1 = startGrid(1); + + String port = ig1.localNode().attribute(IgniteNodeAttributes.ATTR_REST_TCP_PORT).toString(); + + assertEquals(EXIT_CODE_OK, execute("--persistence", "backup", "all", + "--host", "localhost", "--port", port)); + + Set backedUpCacheDirs = Arrays.stream(mntcNodeWorkDir.listFiles()) + .filter(File::isDirectory) + .filter(f -> f.getName().startsWith("backup_")) + .map(f -> f.getName().substring("backup_".length())) + .collect(Collectors.toCollection(TreeSet::new)); + + Set allCacheDirs = Arrays.stream(mntcNodeWorkDir.listFiles()) + .filter(File::isDirectory) + .filter(f -> f.getName().startsWith("cache-")) + .map(File::getName) + .collect(Collectors.toCollection(TreeSet::new)); + + assertEqualsCollections(backedUpCacheDirs, allCacheDirs); + + checkCacheAndBackupDirsContent(mntcNodeWorkDir); + } + + /** + * Test verifies that persistence backup command copies all corrupted caches content to backup directory + * but does not touch other directories. + * + * @throws Exception If failed. 
+ */ + @Test + public void testPersistenceBackupCorruptedCachesCommand() throws Exception { + String cacheName0 = DEFAULT_CACHE_NAME + "0"; + String cacheName1 = DEFAULT_CACHE_NAME + "1"; + + File mntcNodeWorkDir = startGridAndPutNodeToMaintenance( + new CacheConfiguration[]{ + cacheConfiguration(cacheName0), + cacheConfiguration(cacheName1) + }, + s -> s.equals(cacheName0)); + + IgniteEx ig1 = startGrid(1); + + String port = ig1.localNode().attribute(IgniteNodeAttributes.ATTR_REST_TCP_PORT).toString(); + + assertEquals(EXIT_CODE_OK, execute("--persistence", "backup", "corrupted", + "--host", "localhost", "--port", port)); + + long backedUpCachesCnt = Arrays.stream(mntcNodeWorkDir.listFiles()) + .filter(File::isDirectory) + .filter(f -> f.getName().startsWith("backup_")) + .filter(f -> f.getName().contains(cacheName0)) + .count(); + + assertEquals(1, backedUpCachesCnt); + + checkCacheAndBackupDirsContent(mntcNodeWorkDir); + } + + /** + * Test verifies that persistence backup command with specified caches copied only content of that caches and + * doesn't touch other directories. + * + * @throws Exception If failed. 
+ */ + @Test + public void testPersistenceBackupSpecifiedCachesCommand() throws Exception { + String cacheName0 = DEFAULT_CACHE_NAME + "0"; + String cacheName1 = DEFAULT_CACHE_NAME + "1"; + String cacheName2 = DEFAULT_CACHE_NAME + "2"; + + String nonExistingCacheName = "nonExistingCache"; + + File mntcNodeWorkDir = startGridAndPutNodeToMaintenance( + new CacheConfiguration[]{ + cacheConfiguration(cacheName0), + cacheConfiguration(cacheName1), + cacheConfiguration(cacheName2) + }, + s -> s.equals(cacheName0) || s.equals(cacheName2)); + + IgniteEx ig1 = startGrid(1); + + String port = ig1.localNode().attribute(IgniteNodeAttributes.ATTR_REST_TCP_PORT).toString(); + + assertEquals(EXIT_CODE_INVALID_ARGUMENTS, execute("--persistence", "backup", "caches", + nonExistingCacheName, + "--host", "localhost", "--port", port)); + + assertEquals(EXIT_CODE_OK, execute("--persistence", "backup", "caches", + cacheName0 + "," + cacheName2, + "--host", "localhost", "--port", port)); + + long backedUpCachesCnt = Arrays.stream(mntcNodeWorkDir.listFiles()) + .filter(File::isDirectory) + .filter(f -> f.getName().startsWith("backup_")) + .count(); + + assertEquals(2, backedUpCachesCnt); + + checkCacheAndBackupDirsContent(mntcNodeWorkDir); + } + + /** */ + private void checkCacheAndBackupDirsContent(File mntcNodeWorkDir) { + List backupDirs = Arrays.stream(mntcNodeWorkDir.listFiles()) + .filter(File::isDirectory) + .filter(f -> f.getName().startsWith("backup_")) + .collect(Collectors.toList()); + + Path mntcNodeWorkDirPath = mntcNodeWorkDir.toPath(); + + for (File bDir : backupDirs) { + File origCacheDir = mntcNodeWorkDirPath.resolve(bDir.getName().substring("backup_".length())).toFile(); + + assertTrue(origCacheDir.isDirectory()); + + assertEquals(origCacheDir.listFiles().length, bDir.listFiles().length); + } + } + /** * Test enabling/disabling read-only mode works via control.sh * @@ -606,13 +981,49 @@ private void setState(Ignite ignite, ClusterState state, String strState, String */ 
@Test public void testBaselineCollect() throws Exception { - Ignite ignite = startGrids(1); + Ignite ignite = startGrid( + optimize(getConfiguration(getTestIgniteInstanceName(0))).setLocalHost("0.0.0.0")); + + Field addresses = ignite.cluster().node().getClass().getDeclaredField("addrs"); + addresses.setAccessible(true); + addresses.set(ignite.cluster().node(), Arrays.asList("127.0.0.1", "0:0:0:0:0:0:0:1", "10.19.112.175", "188.166.164.247")); + Field hostNames = ignite.cluster().node().getClass().getDeclaredField("hostNames"); + hostNames.setAccessible(true); + hostNames.set(ignite.cluster().node(), Arrays.asList("10.19.112.175.hostname")); assertFalse(ignite.cluster().active()); ignite.cluster().active(true); - assertEquals(EXIT_CODE_OK, execute("--baseline")); + injectTestSystemOut(); + + { // non verbose mode + assertEquals(EXIT_CODE_OK, execute("--baseline")); + + List nodesInfo = findBaselineNodesInfo(); + assertEquals(1, nodesInfo.size()); + assertContains(log, nodesInfo.get(0), "Address=188.166.164.247.hostname/188.166.164.247, "); + } + + { // verbose mode + assertEquals(EXIT_CODE_OK, execute("--verbose", "--baseline")); + + List nodesInfo = findBaselineNodesInfo(); + assertEquals(1, nodesInfo.size()); + assertContains(log, nodesInfo.get(0), "Addresses=188.166.164.247.hostname/188.166.164.247,10.19.112.175.hostname/10.19.112.175"); + } + + { // empty resolved addresses + addresses.set(ignite.cluster().node(), Collections.emptyList()); + hostNames.set(ignite.cluster().node(), Collections.emptyList()); + + assertEquals(EXIT_CODE_OK, execute("--verbose", "--baseline")); + + List nodesInfo = findBaselineNodesInfo(); + assertEquals(1, nodesInfo.size()); + assertContains(log, nodesInfo.get(0), "ConsistentId=" + + grid(0).cluster().localNode().consistentId() + ", State="); + } assertEquals(1, ignite.cluster().currentBaselineTopology().size()); } @@ -637,7 +1048,7 @@ public void testBaselineCollectCrd() throws Exception { String crdStr = findCrdInfo(); 
assertEquals("(Coordinator: ConsistentId=" + - grid(0).cluster().localNode().consistentId() + ", Order=1)", crdStr); + grid(0).cluster().localNode().consistentId() + ", Address=127.0.0.1.hostname/127.0.0.1" + ", Order=1)", crdStr); stopGrid(0); @@ -646,7 +1057,7 @@ public void testBaselineCollectCrd() throws Exception { crdStr = findCrdInfo(); assertEquals("(Coordinator: ConsistentId=" + - grid(1).cluster().localNode().consistentId() + ", Order=2)", crdStr); + grid(1).cluster().localNode().consistentId() + ", Address=127.0.0.1.hostname/127.0.0.1" + ", Order=2)", crdStr); startGrid(0); @@ -655,7 +1066,7 @@ public void testBaselineCollectCrd() throws Exception { crdStr = findCrdInfo(); assertEquals("(Coordinator: ConsistentId=" + - grid(1).cluster().localNode().consistentId() + ", Order=2)", crdStr); + grid(1).cluster().localNode().consistentId() + ", Address=127.0.0.1.hostname/127.0.0.1" + ", Order=2)", crdStr); stopGrid(1); @@ -664,7 +1075,7 @@ public void testBaselineCollectCrd() throws Exception { crdStr = findCrdInfo(); assertEquals("(Coordinator: ConsistentId=" + - grid(0).cluster().localNode().consistentId() + ", Order=4)", crdStr); + grid(0).cluster().localNode().consistentId() + ", Address=127.0.0.1.hostname/127.0.0.1" + ", Order=4)", crdStr); } /** @@ -682,6 +1093,30 @@ private String findCrdInfo() { return crdStr.substring(0, crdStr.indexOf('\n')).trim(); } + /** + * @return utility information about baseline nodes + */ + private List findBaselineNodesInfo() { + String outStr = testOut.toString(); + + int i = outStr.indexOf("Baseline nodes:"); + + assertTrue("Baseline nodes information is not found", i != -1); + + int j = outStr.indexOf("\n", i) + 1; + + int beginOfNodeDesc = -1; + + List nodesInfo = new ArrayList<>(); + + while ((beginOfNodeDesc = outStr.indexOf("ConsistentId=", j) ) != -1) { + j = outStr.indexOf("\n", beginOfNodeDesc); + nodesInfo.add(outStr.substring(beginOfNodeDesc, j).trim()); + } + + return nodesInfo; + } + /** * @param ignites 
Ignites. * @return Local node consistent ID. @@ -2199,7 +2634,7 @@ public void testCacheIdleVerifyPrintLostPartitions() throws Exception { /** @throws Exception If failed. */ @Test public void testMasterKeyChange() throws Exception { - encriptionEnabled = true; + encryptionEnabled = true; injectTestSystemOut(); @@ -2234,10 +2669,189 @@ public void testMasterKeyChange() throws Exception { "Master key change was rejected. Unable to get the master key digest."); } + /** @throws Exception If failed. */ + @Test + public void testCacheGroupKeyChange() throws Exception { + encryptionEnabled = true; + + injectTestSystemOut(); + + int srvNodes = 2; + + IgniteEx ignite = startGrids(srvNodes); + + startGrid(CLIENT_NODE_NAME_PREFIX); + startGrid(DAEMON_NODE_NAME_PREFIX); + + ignite.cluster().state(ACTIVE); + + List srvGrids = GridFunc.asList(grid(0), grid(1)); + + enableCheckpoints(srvGrids, false); + + createCacheAndPreload(ignite, 1000); + + int ret = execute("--encryption", CACHE_GROUP_KEY_IDS.toString(), DEFAULT_CACHE_NAME); + + assertEquals(EXIT_CODE_OK, ret); + assertContains(log, testOut.toString(), "Encryption key identifiers for cache: " + DEFAULT_CACHE_NAME); + assertEquals(srvNodes, countSubstrs(testOut.toString(), "0 (active)")); + + ret = execute("--encryption", CHANGE_CACHE_GROUP_KEY.toString(), DEFAULT_CACHE_NAME); + + assertEquals(EXIT_CODE_OK, ret); + assertContains(log, testOut.toString(), + "The encryption key has been changed for the cache group \"" + DEFAULT_CACHE_NAME + '"'); + + ret = execute("--encryption", CACHE_GROUP_KEY_IDS.toString(), DEFAULT_CACHE_NAME); + + assertEquals(testOut.toString(), EXIT_CODE_OK, ret); + assertContains(log, testOut.toString(), "Encryption key identifiers for cache: " + DEFAULT_CACHE_NAME); + assertEquals(srvNodes, countSubstrs(testOut.toString(), "1 (active)")); + + GridTestUtils.waitForCondition(() -> { + execute("--encryption", REENCRYPTION_STATUS.toString(), DEFAULT_CACHE_NAME); + + return srvNodes == 
countSubstrs(testOut.toString(), + "re-encryption will be completed after the next checkpoint"); + }, getTestTimeout()); + + enableCheckpoints(srvGrids, true); + forceCheckpoint(srvGrids); + + GridTestUtils.waitForCondition(() -> { + execute("--encryption", REENCRYPTION_STATUS.toString(), DEFAULT_CACHE_NAME); + + return srvNodes == countSubstrs(testOut.toString(), "re-encryption completed or not required"); + }, getTestTimeout()); + } + + /** @throws Exception If failed. */ + @Test + public void testChangeReencryptionRate() throws Exception { + int srvNodes = 2; + + IgniteEx ignite = startGrids(srvNodes); + + ignite.cluster().state(ACTIVE); + + injectTestSystemOut(); + + int ret = execute("--encryption", REENCRYPTION_RATE.toString()); + + assertEquals(EXIT_CODE_OK, ret); + assertEquals(srvNodes, countSubstrs(testOut.toString(), "re-encryption rate is not limited.")); + + double newRate = 0.01; + + ret = execute("--encryption", REENCRYPTION_RATE.toString(), Double.toString(newRate)); + + assertEquals(EXIT_CODE_OK, ret); + assertEquals(srvNodes, countSubstrs(testOut.toString(), + String.format("re-encryption rate has been limited to %.2f MB/s.", newRate))); + + ret = execute("--encryption", REENCRYPTION_RATE.toString()); + + assertEquals(EXIT_CODE_OK, ret); + assertEquals(srvNodes, countSubstrs(testOut.toString(), + String.format("re-encryption rate is limited to %.2f MB/s.", newRate))); + + ret = execute("--encryption", REENCRYPTION_RATE.toString(), "0"); + + assertEquals(EXIT_CODE_OK, ret); + assertEquals(srvNodes, countSubstrs(testOut.toString(), "re-encryption rate is not limited.")); + } + + /** @throws Exception If failed. 
*/ + @Test + public void testReencryptionSuspendAndResume() throws Exception { + encryptionEnabled = true; + reencryptSpeed = 0.01; + reencryptBatchSize = 1; + + int srvNodes = 2; + + IgniteEx ignite = startGrids(srvNodes); + + ignite.cluster().state(ACTIVE); + + injectTestSystemOut(); + + createCacheAndPreload(ignite, 10_000); + + ignite.encryption().changeCacheGroupKey(Collections.singleton(DEFAULT_CACHE_NAME)).get(); + + assertTrue(isReencryptionStarted(DEFAULT_CACHE_NAME)); + + int ret = execute("--encryption", REENCRYPTION_STATUS.toString(), DEFAULT_CACHE_NAME); + + assertEquals(EXIT_CODE_OK, ret); + + Pattern ptrn = Pattern.compile("(?m)Node [-0-9a-f]{36}:\n\\s+(?\\d+) KB of data.+"); + Matcher matcher = ptrn.matcher(testOut.toString()); + int matchesCnt = 0; + + while (matcher.find()) { + assertEquals(1, matcher.groupCount()); + + int pagesLeft = Integer.parseInt(matcher.group("left")); + + assertTrue(pagesLeft > 0); + + matchesCnt++; + } + + assertEquals(srvNodes, matchesCnt); + + ret = execute("--encryption", REENCRYPTION_SUSPEND.toString(), DEFAULT_CACHE_NAME); + + assertEquals(EXIT_CODE_OK, ret); + assertEquals(srvNodes, countSubstrs(testOut.toString(), + "re-encryption of the cache group \"" + DEFAULT_CACHE_NAME + "\" has been suspended.")); + assertFalse(isReencryptionStarted(DEFAULT_CACHE_NAME)); + + ret = execute("--encryption", REENCRYPTION_SUSPEND.toString(), DEFAULT_CACHE_NAME); + + assertEquals(EXIT_CODE_OK, ret); + assertEquals(srvNodes, countSubstrs(testOut.toString(), + "re-encryption of the cache group \"" + DEFAULT_CACHE_NAME + "\" has already been suspended.")); + + ret = execute("--encryption", REENCRYPTION_RESUME.toString(), DEFAULT_CACHE_NAME); + + assertEquals(EXIT_CODE_OK, ret); + assertEquals(srvNodes, countSubstrs(testOut.toString(), + "re-encryption of the cache group \"" + DEFAULT_CACHE_NAME + "\" has been resumed.")); + assertTrue(isReencryptionStarted(DEFAULT_CACHE_NAME)); + + ret = execute("--encryption", 
REENCRYPTION_RESUME.toString(), DEFAULT_CACHE_NAME); + + assertEquals(EXIT_CODE_OK, ret); + assertEquals(srvNodes, countSubstrs(testOut.toString(), + "re-encryption of the cache group \"" + DEFAULT_CACHE_NAME + "\" has already been resumed.")); + } + + /** + * @param cacheName Cache name. + * @return {@code True} if re-encryption of the specified cache is started on all server nodes. + */ + private boolean isReencryptionStarted(String cacheName) { + for (Ignite grid : G.allGrids()) { + ClusterNode locNode = grid.cluster().localNode(); + + if (locNode.isClient() || locNode.isDaemon()) + continue; + + if (((IgniteEx)grid).context().encryption().reencryptionFuture(CU.cacheId(cacheName)).isDone()) + return false; + } + + return true; + } + /** @throws Exception If failed. */ @Test public void testMasterKeyChangeOnInactiveCluster() throws Exception { - encriptionEnabled = true; + encryptionEnabled = true; injectTestSystemOut(); @@ -2407,4 +3021,18 @@ private VisorFindAndDeleteGarbageInPersistenceTaskResult executeTaskViaControlCo return hnd.getLastOperationResult(); } + + /** + * @param str String. + * @param substr Substring to find in the specified string. + * @return The number of substrings found in the specified string. 
+ */ + private int countSubstrs(String str, String substr) { + int cnt = 0; + + for (int off = 0; (off = str.indexOf(substr, off)) != -1; off++) + ++cnt; + + return cnt; + } } diff --git a/modules/control-utility/src/test/java/org/apache/ignite/util/SystemViewCommandTest.java b/modules/control-utility/src/test/java/org/apache/ignite/util/SystemViewCommandTest.java index a67ca87eeed645..e23554930355b3 100644 --- a/modules/control-utility/src/test/java/org/apache/ignite/util/SystemViewCommandTest.java +++ b/modules/control-utility/src/test/java/org/apache/ignite/util/SystemViewCommandTest.java @@ -469,15 +469,17 @@ public void testTable() { List row = sqlTablesView.get(0); - assertEquals("T1", row.get(0)); // TABLE_NAME - assertEquals(DFLT_SCHEMA, row.get(1)); // SCHEMA_NAME - assertEquals("SQL_PUBLIC_T1", row.get(2)); // CACHE_NAME - assertEquals(Integer.toString(cacheId("SQL_PUBLIC_T1")), row.get(3)); // CACHE_ID - assertEquals("null", row.get(4)); // AFFINITY_KEY_COLUMN - assertEquals("ID", row.get(5)); // KEY_ALIAS - assertEquals("null", row.get(6)); // VALUE_ALIAS - assertEquals("java.lang.Long", row.get(7)); // KEY_TYPE_NAME - assertFalse("null".equals(row.get(8))); // VALUE_TYPE_NAME + assertEquals(Integer.toString(cacheId("SQL_PUBLIC_T1")), row.get(0)); // CACHE_GROUP_ID + assertEquals("SQL_PUBLIC_T1", row.get(1)); // CACHE_GROUP_NAME + assertEquals(Integer.toString(cacheId("SQL_PUBLIC_T1")), row.get(2)); // CACHE_ID + assertEquals("SQL_PUBLIC_T1", row.get(3)); // CACHE_NAME + assertEquals(DFLT_SCHEMA, row.get(4)); // SCHEMA_NAME + assertEquals("T1", row.get(5)); // TABLE_NAME + assertEquals("null", row.get(6)); // AFFINITY_KEY_COLUMN + assertEquals("ID", row.get(7)); // KEY_ALIAS + assertEquals("null", row.get(8)); // VALUE_ALIAS + assertEquals("java.lang.Long", row.get(9)); // KEY_TYPE_NAME + assertFalse("null".equals(row.get(10))); // VALUE_TYPE_NAME executeSql(ignite0, "CREATE TABLE T2(ID LONG PRIMARY KEY, NAME VARCHAR)"); diff --git 
a/modules/core/src/main/java/org/apache/ignite/DataStorageMetrics.java b/modules/core/src/main/java/org/apache/ignite/DataStorageMetrics.java index c2534cbb3414d7..e90d255d1a10e4 100644 --- a/modules/core/src/main/java/org/apache/ignite/DataStorageMetrics.java +++ b/modules/core/src/main/java/org/apache/ignite/DataStorageMetrics.java @@ -98,6 +98,13 @@ public interface DataStorageMetrics { */ public long getLastCheckpointDuration(); + /** + * Returns time when the last checkpoint was started. + * + * @return Time when the last checkpoint was started. + */ + public long getLastCheckpointStarted(); + /** * Gets the duration of last checkpoint lock wait in milliseconds. * diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteEncryption.java b/modules/core/src/main/java/org/apache/ignite/IgniteEncryption.java index 68439b7516f705..debc7797b9b2cb 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteEncryption.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteEncryption.java @@ -17,6 +17,7 @@ package org.apache.ignite; +import java.util.Collection; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.lang.IgniteFuture; @@ -70,4 +71,19 @@ public interface IgniteEncryption { * @return Future for this operation. */ public IgniteFuture changeMasterKey(String masterKeyName); + + /** + * Starts cache group encryption key change process. + *

    + * NOTE: Node join is rejected during rotation of cache group encryption key. Background re-encryption of + * existing data in the specified cache group(s) begins after the encryption key(s) is changed. During + * re-encryption, node join is not rejected, the cluster remains fully functional, it is fault-tolerant operation + * that automatically continues after restart. Secondary rotation of the encryption key of a cache group is only + * possible after background re-encryption of existing data in this cache group is completed. + * + * @param cacheOrGrpNames Cache or group names. + * @return Future which will be completed when new encryption key(s) are set for writing on all nodes in the cluster + * and re-encryption of existing cache data is initiated. + */ + public IgniteFuture changeCacheGroupKey(Collection cacheOrGrpNames); } diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java b/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java index 61a944fec126ef..9c0948a115faeb 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java @@ -31,6 +31,10 @@ import org.apache.ignite.internal.jdbc.JdbcConnection; import org.apache.ignite.internal.jdbc.JdbcDriverPropertyInfo; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_KEY_ALGORITHM; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_SSL_PROTOCOL; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_STORE_TYPE; + /** * JDBC driver implementation for In-Memory Data Grid. *

    @@ -485,10 +489,10 @@ public class IgniteJdbcDriver implements Driver { info.getProperty("ignite.client.ssl.enabled", "false"), "Flag indicating that SSL is needed for connection."), new JdbcDriverPropertyInfo("ignite.client.ssl.protocol", - info.getProperty("ignite.client.ssl.protocol", "TLS"), + info.getProperty("ignite.client.ssl.protocol", DFLT_SSL_PROTOCOL), "SSL protocol."), new JdbcDriverPropertyInfo("ignite.client.ssl.key.algorithm", - info.getProperty("ignite.client.ssl.key.algorithm", "SunX509"), + info.getProperty("ignite.client.ssl.key.algorithm", DFLT_KEY_ALGORITHM), "Key manager algorithm."), new JdbcDriverPropertyInfo("ignite.client.ssl.keystore.location", info.getProperty("ignite.client.ssl.keystore.location", ""), @@ -497,7 +501,7 @@ public class IgniteJdbcDriver implements Driver { info.getProperty("ignite.client.ssl.keystore.password", ""), "Key store password."), new JdbcDriverPropertyInfo("ignite.client.ssl.keystore.type", - info.getProperty("ignite.client.ssl.keystore.type", "jks"), + info.getProperty("ignite.client.ssl.keystore.type", DFLT_STORE_TYPE), "Key store type."), new JdbcDriverPropertyInfo("ignite.client.ssl.truststore.location", info.getProperty("ignite.client.ssl.truststore.location", ""), @@ -506,7 +510,7 @@ public class IgniteJdbcDriver implements Driver { info.getProperty("ignite.client.ssl.truststore.password", ""), "Trust store password."), new JdbcDriverPropertyInfo("ignite.client.ssl.truststore.type", - info.getProperty("ignite.client.ssl.truststore.type", "jks"), + info.getProperty("ignite.client.ssl.truststore.type", DFLT_STORE_TYPE), "Trust store type."), new JdbcDriverPropertyInfo("ignite.client.credentials", info.getProperty("ignite.client.credentials", ""), diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java b/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java index 949e9c6f6ed7c4..aa12e541b06e2b 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java @@ -81,6 +81,7 @@ import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition.DFLT_ATOMIC_CACHE_DELETE_HISTORY_SIZE; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition.DFLT_CACHE_REMOVE_ENTRIES_TTL; import static org.apache.ignite.internal.processors.cache.mvcc.MvccCachingManager.DFLT_MVCC_TX_SIZE_CACHING_THRESHOLD; +import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.DFLT_DEFRAGMENTATION_REGION_SIZE_PERCENTAGE; import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.DFLT_PDS_WAL_REBALANCE_THRESHOLD; import static org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointHistory.DFLT_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE; import static org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointWorkflow.DFLT_CHECKPOINT_PARALLEL_SORT_THRESHOLD; @@ -1935,6 +1936,32 @@ public final class IgniteSystemProperties { defaults = "" + DFLT_DUMP_TX_COLLISIONS_INTERVAL) public static final String IGNITE_DUMP_TX_COLLISIONS_INTERVAL = "IGNITE_DUMP_TX_COLLISIONS_INTERVAL"; + /** + * Set to true only during the junit tests. + * Signals that the cluster is running in a test environment. + * + * Can be used for changing behaviour of tightly coupled code pieces during the tests. + * Use it as a last resort only, prefer another toolchain like DI, mocks and etc. if possible + */ + @SystemProperty(value = "Set to true only during the junit tests. " + + "Can be used for changing behaviour of tightly coupled code pieces during the tests. " + + "Use it as a last resort only, prefer another toolchain like DI, mocks and etc. 
if possible", + type = Boolean.class) + public static final String IGNITE_TEST_ENV = "IGNITE_TEST_ENV"; + + /** + * Defragmentation region size percentage of configured region size. + * This percentage will be calculated from largest configured region size and then proportionally subtracted + * from all configured regions. + */ + @SystemProperty(value = "Defragmentation region size percentage of configured region size. " + + "This percentage will be calculated from largest configured region size and then proportionally subtracted " + + "from all configured regions", + type = Integer.class, + defaults = "" + DFLT_DEFRAGMENTATION_REGION_SIZE_PERCENTAGE) + public static final String IGNITE_DEFRAGMENTATION_REGION_SIZE_PERCENTAGE = + "IGNITE_DEFRAGMENTATION_REGION_SIZE_PERCENTAGE"; + /** * Enforces singleton. */ diff --git a/modules/core/src/main/java/org/apache/ignite/cache/query/SqlFieldsQuery.java b/modules/core/src/main/java/org/apache/ignite/cache/query/SqlFieldsQuery.java index 6d6e16733c8deb..882fd1f7dea05a 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/query/SqlFieldsQuery.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/query/SqlFieldsQuery.java @@ -409,6 +409,8 @@ public int getUpdateBatchSize() { * @return {@code this} for chaining. 
*/ public SqlFieldsQuery setUpdateBatchSize(int updateBatchSize) { + A.ensure(updateBatchSize >= 1, "updateBatchSize cannot be lower than 1"); + + this.updateBatchSize = updateBatchSize; return this; diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java index 3b70891a0aa5e6..2a1927b79fdaaf 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java @@ -314,6 +314,9 @@ public class DataStorageConfiguration implements Serializable { /** Default warm-up configuration. */ @Nullable private WarmUpConfiguration dfltWarmUpCfg; + /** Encryption configuration. */ + private EncryptionConfiguration encCfg = new EncryptionConfiguration(); + /** * Creates valid durable memory configuration with all default values. */ @@ -1118,6 +1121,27 @@ public DataStorageConfiguration setWalPageCompressionLevel(Integer walPageCompre return this; } + /** + * Gets encryption configuration. + * + * @return Encryption configuration. + */ + public EncryptionConfiguration getEncryptionConfiguration() { + return encCfg; + } + + /** + * Sets encryption configuration. + * + * @param encCfg Encryption configuration. + * @return {@code this} for chaining. + */ + public DataStorageConfiguration setEncryptionConfiguration(EncryptionConfiguration encCfg) { + this.encCfg = encCfg; + + return this; + } + /** * Sets default warm-up configuration. 
* diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/EncryptionConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/EncryptionConfiguration.java new file mode 100644 index 00000000000000..6b9345d4c75501 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/configuration/EncryptionConfiguration.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.configuration; + +import java.io.Serializable; +import org.apache.ignite.internal.util.typedef.internal.A; + +/** + * Encryption configuration. + */ +public class EncryptionConfiguration implements Serializable { + /** */ + private static final long serialVersionUID = 0L; + + /** Default re-encryption rate limit. The value is {@code 0}, which means that scan speed is not limited. */ + public static final double DFLT_REENCRYPTION_RATE_MBPS = 0.0; + + /** Default number of pages that is scanned during reencryption under checkpoint lock. The value is {@code 100}. */ + public static final int DFLT_REENCRYPTION_BATCH_SIZE = 100; + + /** Re-encryption rate limit in megabytes per second (set {@code 0} for unlimited scanning). 
*/ + private double reencryptionRateLimit = DFLT_REENCRYPTION_RATE_MBPS; + + /** The number of pages that is scanned during re-encryption under checkpoint lock. */ + private int reencryptionBatchSize = DFLT_REENCRYPTION_BATCH_SIZE; + + /** + * Creates valid encryption configuration with all default values. + */ + public EncryptionConfiguration() { + // No-op. + } + + /** + * Constructs the copy of the configuration. + * + * @param cfg Configuration to copy. + */ + public EncryptionConfiguration(EncryptionConfiguration cfg) { + assert cfg != null; + + reencryptionBatchSize = cfg.getReencryptionBatchSize(); + reencryptionRateLimit = cfg.getReencryptionRateLimit(); + } + + /** + * Gets re-encryption rate limit. + * + * @return Re-encryption rate limit in megabytes per second. + */ + public double getReencryptionRateLimit() { + return reencryptionRateLimit; + } + + /** + * Sets re-encryption rate limit. + * + * @param reencryptionRateLimit Re-encryption rate limit in megabytes per second. + * @return {@code this} for chaining. + */ + public EncryptionConfiguration setReencryptionRateLimit(double reencryptionRateLimit) { + A.ensure(reencryptionRateLimit >= 0, + "Re-encryption rate limit (" + reencryptionRateLimit + ") must be non-negative."); + + this.reencryptionRateLimit = reencryptionRateLimit; + + return this; + } + + /** + * Gets the number of pages that is scanned during re-encryption under checkpoint lock. + * + * @return The number of pages that is scanned during re-encryption under checkpoint lock. + */ + public int getReencryptionBatchSize() { + return reencryptionBatchSize; + } + + /** + * Sets the number of pages that is scanned during re-encryption under checkpoint lock. + * + * @param reencryptionBatchSize The number of pages that is scanned during re-encryption under checkpoint lock. + * @return {@code this} for chaining. 
+ */ + public EncryptionConfiguration setReencryptionBatchSize(int reencryptionBatchSize) { + A.ensure(reencryptionBatchSize > 0, + "Reencryption batch size(" + reencryptionBatchSize + ") must be positive."); + + this.reencryptionBatchSize = reencryptionBatchSize; + + return this; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/events/EventType.java b/modules/core/src/main/java/org/apache/ignite/events/EventType.java index db1a88e443c140..3da980109d9063 100644 --- a/modules/core/src/main/java/org/apache/ignite/events/EventType.java +++ b/modules/core/src/main/java/org/apache/ignite/events/EventType.java @@ -24,6 +24,7 @@ import org.apache.ignite.IgniteEvents; import org.apache.ignite.IgniteSnapshot; import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.GridComponent; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.spi.eventstorage.NoopEventStorageSpi; @@ -922,6 +923,29 @@ public interface EventType { */ public static final int EVT_CLUSTER_SNAPSHOT_FAILED = 151; + /** + * Built-in event type: query execution. + *

    + * NOTE: all types in range from 1 to 1000 are reserved for + * internal Ignite events and should not be used by user-defined events. + * + * @see SqlQueryExecutionEvent + */ + public static final int EVT_SQL_QUERY_EXECUTION = 160; + + /** + * Built-in event type: node validation failed. + *
    + * This event is triggered if a node join fails due to a node validation failure. + *

    + * NOTE: all types in range from 1 to 1000 are reserved for + * internal Ignite events and should not be used by user-defined events. + * + * @see NodeValidationFailedEvent + * @see GridComponent#validateNode + */ + public static final int EVT_NODE_VALIDATION_FAILED = 170; + /** * All cluster snapshot events. This array can be directly passed into * {@link IgniteEvents#localListen(IgnitePredicate, int...)} method to diff --git a/modules/core/src/main/java/org/apache/ignite/events/NodeValidationFailedEvent.java b/modules/core/src/main/java/org/apache/ignite/events/NodeValidationFailedEvent.java new file mode 100644 index 00000000000000..74cf3a9f5a19fb --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/events/NodeValidationFailedEvent.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.events; + +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.internal.GridComponent; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.spi.IgniteNodeValidationResult; + +import static org.apache.ignite.events.EventType.EVT_NODE_VALIDATION_FAILED; + +/** + * This event is triggered if any of {@link GridComponent}s fail to validate the joining node + * while join message processing. + * + * @see EventType#EVT_NODE_VALIDATION_FAILED + * @see GridComponent#validateNode + */ +public class NodeValidationFailedEvent extends EventAdapter { + /** */ + private static final long serialVersionUID = 0L; + + /** The node that attempted to join cluster. */ + private final ClusterNode evtNode; + + /** Validation result. */ + private final IgniteNodeValidationResult res; + + /** + * Creates new node validation event with given parameters. + * + * @param node Local node. + * @param evtNode Node which couldn't join the topology due to a validation failure. + * @param res Joining node validation result. + */ + public NodeValidationFailedEvent(ClusterNode node, ClusterNode evtNode, IgniteNodeValidationResult res) { + super(node, res.message(), EVT_NODE_VALIDATION_FAILED); + + this.evtNode = evtNode; + this.res = res; + } + + /** @return Node that couldn't join the topology due to a validation failure. */ + public ClusterNode eventNode() { + return evtNode; + } + + /** @return Joining node validation result. 
*/ + public IgniteNodeValidationResult validationResult() { + return res; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(NodeValidationFailedEvent.class, this, "parent", super.toString()); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/events/SqlQueryExecutionEvent.java b/modules/core/src/main/java/org/apache/ignite/events/SqlQueryExecutionEvent.java new file mode 100644 index 00000000000000..4700d7b9fff15b --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/events/SqlQueryExecutionEvent.java @@ -0,0 +1,138 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.events; + +import java.util.UUID; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.internal.util.tostring.GridToStringInclude; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.jetbrains.annotations.Nullable; + +import static org.apache.ignite.events.EventType.EVT_SQL_QUERY_EXECUTION; + +/** + * Query execution event. + *

    + * Grid events are used for notification about what happens within the grid. Note that by + * design Ignite keeps all events generated on the local node locally and it provides + * APIs for performing a distributed queries across multiple nodes: + *

      + *
    • + * {@link org.apache.ignite.IgniteEvents#remoteQuery(org.apache.ignite.lang.IgnitePredicate, long, int...)} - + * asynchronously querying events occurred on the nodes specified, including remote nodes. + *
    • + *
    • + * {@link org.apache.ignite.IgniteEvents#localQuery(org.apache.ignite.lang.IgnitePredicate, int...)} - + * querying only local events stored on this local node. + *
    • + *
    • + * {@link org.apache.ignite.IgniteEvents#localListen(org.apache.ignite.lang.IgnitePredicate, int...)} - + * listening to local grid events (events from remote nodes not included). + *
    • + *
    + * User can also wait for events using method {@link org.apache.ignite.IgniteEvents#waitForLocal(org.apache.ignite.lang.IgnitePredicate, int...)}. + *

    Events and Performance

    + * Note that by default all events in Ignite are enabled and therefore generated and stored + * by whatever event storage SPI is configured. Ignite can and often does generate thousands events per seconds + * under the load and therefore it creates a significant additional load on the system. If these events are + * not needed by the application this load is unnecessary and leads to significant performance degradation. + *

    + * It is highly recommended to enable only those events that your application logic requires + * by using {@link org.apache.ignite.configuration.IgniteConfiguration#getIncludeEventTypes()} method in Ignite configuration. Note that certain + * events are required for Ignite's internal operations and such events will still be generated but not stored by + * event storage SPI if they are disabled in Ignite configuration. + * + * @see EventType#EVT_SQL_QUERY_EXECUTION + */ +public class SqlQueryExecutionEvent extends EventAdapter { + /** */ + private static final long serialVersionUID = 0L; + + /** Query text. */ + private final String text; + + /** Query arguments. */ + @GridToStringInclude(sensitive = true) + private final Object[] args; + + /** Security subject ID. */ + private final UUID subjId; + + /** + * @param node Node where event was fired. + * @param msg Event message. + * @param text Query text. + * @param args Query arguments. + * @param subjId Security subject ID. + */ + public SqlQueryExecutionEvent( + ClusterNode node, + String msg, + @Nullable String text, + @Nullable Object[] args, + @Nullable UUID subjId + ) { + super(node, msg, EVT_SQL_QUERY_EXECUTION); + + this.text = text; + this.args = args; + this.subjId = subjId; + } + + /** + * Gets query text. + *

    + * Applicable for {@code SQL}, {@code SQL fields} queries. + * + * @return Query text. + */ + @Nullable public String text() { + return text; + } + + /** + * Gets query arguments. + *

    + * Applicable for {@code SQL} and {@code SQL fields} queries. + * + * @return Query arguments. + */ + @Nullable public Object[] arguments() { + return args.clone(); + } + + /** + * Gets security subject ID. + * + * @return Security subject ID. + */ + @Nullable public UUID subjectId() { + return subjId; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(SqlQueryExecutionEvent.class, this, + "nodeId8", U.id8(node().id()), + "msg", message(), + "type", name(), + "tstamp", timestamp()); + } +} + diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridJobSessionImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/GridJobSessionImpl.java index 75434d39eb6d04..6f1fb32f976dbd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridJobSessionImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridJobSessionImpl.java @@ -25,6 +25,7 @@ import org.apache.ignite.compute.ComputeJobSibling; import org.apache.ignite.compute.ComputeTaskSessionAttributeListener; import org.apache.ignite.compute.ComputeTaskSessionScope; +import org.apache.ignite.internal.managers.deployment.GridDeployment; import org.apache.ignite.internal.util.future.IgniteFinishedFutureImpl; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; @@ -62,6 +63,15 @@ public GridJobSessionImpl(GridKernalContext ctx, GridTaskSessionImpl ses, Ignite this.jobId = jobId; } + /** + * Grid job deployment. + * + * @return Grid deployment. 
+ */ + public GridDeployment deployment() { + return ses.deployment(); + } + /** {@inheritDoc} */ @Override public GridTaskSessionInternal session() { return ses; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteFeatures.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteFeatures.java index 11ef19886aa16c..f9704aecadc30c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteFeatures.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteFeatures.java @@ -18,6 +18,8 @@ package org.apache.ignite.internal; import java.util.BitSet; +import java.util.Collection; +import org.apache.ignite.IgniteEncryption; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.cluster.ClusterState; import org.apache.ignite.internal.managers.discovery.IgniteDiscoverySpi; @@ -106,6 +108,9 @@ public enum IgniteFeatures { /** Distributed change timeout for dump long operations. */ DISTRIBUTED_CHANGE_LONG_OPERATIONS_DUMP_TIMEOUT(30), + /** New region for volatile data. */ + VOLATILE_DATA_STRUCTURES_REGION(33), + /** Check secondary indexes inline size on join/by control utility request. */ CHECK_INDEX_INLINE_SIZES(36), @@ -128,7 +133,10 @@ public enum IgniteFeatures { SPECIFIED_SEQ_PK_KEYS(45), /** Compatibility support for new fields which are configured split. */ - SPLITTED_CACHE_CONFIGURATIONS_V2(46); + SPLITTED_CACHE_CONFIGURATIONS_V2(46), + + /** Cache encryption key change. See {@link IgniteEncryption#changeCacheGroupKey(Collection)}. */ + CACHE_GROUP_KEY_CHANGE(47); /** * Unique feature identifier. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java index bd35e7a6c64710..1bb3e6c6711dde 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java @@ -1195,7 +1195,7 @@ public void start( // Assign discovery manager to context before other processors start so they // are able to register custom event listener. - GridManager discoMgr = new GridDiscoveryManager(ctx); + GridDiscoveryManager discoMgr = new GridDiscoveryManager(ctx); ctx.add(discoMgr, false); @@ -1210,10 +1210,25 @@ public void start( startProcessor(mntcProcessor); if (mntcProcessor.isMaintenanceMode()) { + if (log.isInfoEnabled()) { + log.info( + "Node is being started in maintenance mode. " + + "Starting IsolatedDiscoverySpi instead of configured discovery SPI." + ); + } + + cfg.setClusterStateOnStart(ClusterState.INACTIVE); + + if (log.isInfoEnabled()) + log.info("Overriding 'clusterStateOnStart' configuration to 'INACTIVE'."); + ctx.config().setDiscoverySpi(new IsolatedDiscoverySpi()); discoMgr = new GridDiscoveryManager(ctx); + // Reinitialized discovery manager won't have a valid consistentId on creation. + discoMgr.consistentId(ctx.pdsFolderResolver().resolveFolders().consistentId()); + ctx.add(discoMgr, false); } @@ -1295,7 +1310,7 @@ public void start( throw e; } - // All components exept Discovery are started, time to check if maintenance is still needed + // All components exept Discovery are started, time to check if maintenance is still needed. 
mntcProcessor.prepareAndExecuteMaintenance(); gw.writeLock(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java index a02c5ea5254002..310426919c0680 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java @@ -1476,6 +1476,16 @@ public static DependencyResolver dependencyResolver() { return dependencyResolver.get(); } + /** + * @param name Grid name (possibly {@code null} for default grid). + * @return true when all managers, processors, and plugins have started and ignite kernal start method has fully + * completed. + */ + public static boolean hasKernalStarted(String name) { + IgniteNamedInstance grid = name != null ? grids.get(name) : dfltGrid; + return grid != null && grid.hasStartLatchCompleted(); + } + /** * Start context encapsulates all starting parameters. */ @@ -1849,11 +1859,16 @@ private void start0(GridStartContext startCtx, IgniteConfiguration cfg, TimeBag WorkersRegistry workerRegistry = new WorkersRegistry( new IgniteBiInClosure() { - @Override public void apply(GridWorker deadWorker, FailureType failureType) { + @Override public void apply(GridWorker worker, FailureType failureType) { + IgniteException ex = new IgniteException(S.toString(GridWorker.class, worker)); + + Thread runner = worker.runner(); + + if (runner != null && runner != Thread.currentThread()) + ex.setStackTrace(runner.getStackTrace()); + if (grid != null) - grid.context().failure().process(new FailureContext( - failureType, - new IgniteException(S.toString(GridWorker.class, deadWorker)))); + grid.context().failure().process(new FailureContext(failureType, ex)); } }, IgniteSystemProperties.getLong(IGNITE_SYSTEM_WORKER_BLOCKED_TIMEOUT, @@ -1899,6 +1914,7 @@ private void start0(GridStartContext startCtx, IgniteConfiguration cfg, TimeBag // Note, that we do not pre-start threads 
here as class loading pool may // not be needed. validateThreadPoolSize(cfg.getPeerClassLoadingThreadPoolSize(), "peer class loading"); + p2pExecSvc = new IgniteThreadPoolExecutor( "p2p", cfg.getIgniteInstanceName(), @@ -3209,6 +3225,13 @@ public void setCounter(int cnt) { this.cnt = cnt; } } + + /** + * @return whether the startLatch has been counted down, thereby indicating that the kernal has full started. + */ + public boolean hasStartLatchCompleted() { + return startLatch.getCount() == 0; + } } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryEnumObjectImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryEnumObjectImpl.java index 5d5eb3ef3636fd..b8a3bc24068f76 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryEnumObjectImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryEnumObjectImpl.java @@ -320,7 +320,12 @@ private T uncachedValue(Class cls) throws BinaryObjectException { /** {@inheritDoc} */ @Nullable @Override public T value(CacheObjectValueContext ctx, boolean cpy) { - return deserialize(); + return value(ctx, cpy, null); + } + + /** {@inheritDoc} */ + @Nullable @Override public T value(CacheObjectValueContext ctx, boolean cpy, ClassLoader ldr) { + return deserialize(ldr); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectImpl.java index dfef1b900065c1..6b4eea249b7780 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectImpl.java @@ -136,10 +136,19 @@ public BinaryObjectImpl(BinaryContext ctx, byte[] arr, int start) { /** {@inheritDoc} */ @Nullable @Override public T value(CacheObjectValueContext ctx, boolean cpy) { + return value(ctx, cpy, null); + } + + /** {@inheritDoc} 
*/ + @Nullable @Override public T value(CacheObjectValueContext ctx, boolean cpy, ClassLoader ldr) { Object obj0 = obj; - if (obj0 == null || (cpy && needCopy(ctx))) - obj0 = deserializeValue(ctx); + if (obj0 == null || (cpy && needCopy(ctx))) { + if (ldr != null) + obj0 = deserialize(ldr); + else + obj0 = deserializeValue(ctx); + } return (T)obj0; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectOffheapImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectOffheapImpl.java index efe1c20723595e..b9ff9c2a618860 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectOffheapImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectOffheapImpl.java @@ -456,7 +456,12 @@ else if (fieldOffLen == BinaryUtils.OFFSET_2) /** {@inheritDoc} */ @Nullable @Override public T value(CacheObjectValueContext ctx, boolean cpy) { - return (T)deserializeValue(); + return value(ctx, cpy, null); + } + + /** {@inheritDoc} */ + @Nullable @Override public T value(CacheObjectValueContext ctx, boolean cpy, ClassLoader ldr) { + return deserialize(ldr); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/streams/BinaryByteBufferInputStream.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/streams/BinaryByteBufferInputStream.java index d277948212f1df..fe138e6507a331 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/streams/BinaryByteBufferInputStream.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/streams/BinaryByteBufferInputStream.java @@ -18,14 +18,14 @@ package org.apache.ignite.internal.binary.streams; import java.nio.ByteBuffer; -import org.apache.ignite.binary.BinaryObjectException; +import java.util.Arrays; /** - * + * Input stream over {@link ByteBuffer}. 
*/ public class BinaryByteBufferInputStream implements BinaryInputStream { /** */ - private ByteBuffer buf; + private final ByteBuffer buf; /** * @param buf Buffer to wrap. @@ -44,15 +44,11 @@ public static BinaryByteBufferInputStream create(ByteBuffer buf) { /** {@inheritDoc} */ @Override public byte readByte() { - ensureHasData(1); - return buf.get(); } /** {@inheritDoc} */ @Override public byte[] readByteArray(int cnt) { - ensureHasData(cnt); - byte[] data = new byte[cnt]; buf.get(data); @@ -62,22 +58,16 @@ public static BinaryByteBufferInputStream create(ByteBuffer buf) { /** {@inheritDoc} */ @Override public int read(byte[] arr, int off, int cnt) { - ensureHasData(cnt); - return 0; } /** {@inheritDoc} */ @Override public boolean readBoolean() { - ensureHasData(1); - - return false; + return readByte() == 1; } /** {@inheritDoc} */ @Override public boolean[] readBooleanArray(int cnt) { - ensureHasData(cnt); - boolean[] res = new boolean[cnt]; for (int i = 0; i < cnt; i++) @@ -88,15 +78,11 @@ public static BinaryByteBufferInputStream create(ByteBuffer buf) { /** {@inheritDoc} */ @Override public short readShort() { - ensureHasData(2); - return buf.getShort(); } /** {@inheritDoc} */ @Override public short[] readShortArray(int cnt) { - ensureHasData(2 * cnt); - short[] res = new short[cnt]; for (int i = 0; i < cnt; i++) @@ -107,15 +93,11 @@ public static BinaryByteBufferInputStream create(ByteBuffer buf) { /** {@inheritDoc} */ @Override public char readChar() { - ensureHasData(2); - return buf.getChar(); } /** {@inheritDoc} */ @Override public char[] readCharArray(int cnt) { - ensureHasData(2 * cnt); - char[] res = new char[cnt]; for (int i = 0; i < cnt; i++) @@ -126,15 +108,11 @@ public static BinaryByteBufferInputStream create(ByteBuffer buf) { /** {@inheritDoc} */ @Override public int readInt() { - ensureHasData(4); - return buf.getInt(); } /** {@inheritDoc} */ @Override public int[] readIntArray(int cnt) { - ensureHasData(4 * cnt); - int[] res = new int[cnt]; 
for (int i = 0; i < cnt; i++) @@ -145,15 +123,11 @@ public static BinaryByteBufferInputStream create(ByteBuffer buf) { /** {@inheritDoc} */ @Override public float readFloat() { - ensureHasData(4); - return buf.getFloat(); } /** {@inheritDoc} */ @Override public float[] readFloatArray(int cnt) { - ensureHasData(4 * cnt); - float[] res = new float[cnt]; for (int i = 0; i < cnt; i++) @@ -164,15 +138,11 @@ public static BinaryByteBufferInputStream create(ByteBuffer buf) { /** {@inheritDoc} */ @Override public long readLong() { - ensureHasData(8); - return buf.getLong(); } /** {@inheritDoc} */ @Override public long[] readLongArray(int cnt) { - ensureHasData(8 * cnt); - long[] res = new long[cnt]; for (int i = 0; i < cnt; i++) @@ -183,15 +153,11 @@ public static BinaryByteBufferInputStream create(ByteBuffer buf) { /** {@inheritDoc} */ @Override public double readDouble() { - ensureHasData(8); - return buf.getDouble(); } /** {@inheritDoc} */ @Override public double[] readDoubleArray(int cnt) { - ensureHasData(8 * cnt); - double[] res = new double[cnt]; for (int i = 0; i < cnt; i++) @@ -207,47 +173,17 @@ public static BinaryByteBufferInputStream create(ByteBuffer buf) { /** {@inheritDoc} */ @Override public byte readBytePositioned(int pos) { - int oldPos = buf.position(); - - buf.position(pos); - - ensureHasData(1); - - byte res = buf.get(); - - buf.position(oldPos); - - return res; + return buf.get(pos); } /** {@inheritDoc} */ @Override public short readShortPositioned(int pos) { - int oldPos = buf.position(); - - buf.position(pos); - - ensureHasData(2); - - short res = buf.getShort(); - - buf.position(oldPos); - - return res; + return buf.getShort(pos); } /** {@inheritDoc} */ @Override public int readIntPositioned(int pos) { - int oldPos = buf.position(); - - buf.position(pos); - - ensureHasData(4); - - byte res = buf.get(); - - buf.position(oldPos); - - return res; + return buf.getInt(pos); } /** {@inheritDoc} */ @@ -277,7 +213,9 @@ public static 
BinaryByteBufferInputStream create(ByteBuffer buf) { /** {@inheritDoc} */ @Override public byte[] arrayCopy() { - return buf.array(); + byte[] arr = buf.array(); + + return Arrays.copyOf(arr, arr.length); } /** {@inheritDoc} */ @@ -289,13 +227,4 @@ public static BinaryByteBufferInputStream create(ByteBuffer buf) { @Override public boolean hasArray() { return false; } - - /** - * @param cnt Remaining bytes. - */ - private void ensureHasData(int cnt) { - if (buf.remaining() < cnt) - throw new BinaryObjectException("Not enough data to read the value " + - "[requiredBytes=" + cnt + ", remainingBytes=" + buf.remaining() + ']'); - } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/GridClientConfiguration.java b/modules/core/src/main/java/org/apache/ignite/internal/client/GridClientConfiguration.java index 8e373ae2355308..c0135a5e57fcd4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/GridClientConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/GridClientConfiguration.java @@ -39,6 +39,10 @@ import org.apache.ignite.plugin.security.SecurityCredentialsProvider; import org.jetbrains.annotations.Nullable; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_KEY_ALGORITHM; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_SSL_PROTOCOL; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_STORE_TYPE; + /** * Java client configuration. 
*/ @@ -721,8 +725,8 @@ public void load(String prefix, Properties in) throws GridClientException { String sslEnabled = in.getProperty(prefix + "ssl.enabled"); - String sslProto = in.getProperty(prefix + "ssl.protocol", "TLS"); - String sslKeyAlg = in.getProperty(prefix + "ssl.key.algorithm", "SunX509"); + String sslProto = in.getProperty(prefix + "ssl.protocol"); + String sslKeyAlg = in.getProperty(prefix + "ssl.key.algorithm"); String keyStorePath = in.getProperty(prefix + "ssl.keystore.location"); String keyStorePwd = in.getProperty(prefix + "ssl.keystore.password"); @@ -780,8 +784,8 @@ public void load(String prefix, Properties in) throws GridClientException { if (!F.isEmpty(sslEnabled) && Boolean.parseBoolean(sslEnabled)) { GridSslBasicContextFactory factory = new GridSslBasicContextFactory(); - factory.setProtocol(F.isEmpty(sslProto) ? "TLS" : sslProto); - factory.setKeyAlgorithm(F.isEmpty(sslKeyAlg) ? "SunX509" : sslKeyAlg); + factory.setProtocol(F.isEmpty(sslProto) ? DFLT_SSL_PROTOCOL : sslProto); + factory.setKeyAlgorithm(F.isEmpty(sslKeyAlg) ? DFLT_KEY_ALGORITHM : sslKeyAlg); if (F.isEmpty(keyStorePath)) throw new IllegalArgumentException("SSL key store location is not specified."); @@ -791,7 +795,7 @@ public void load(String prefix, Properties in) throws GridClientException { if (keyStorePwd != null) factory.setKeyStorePassword(keyStorePwd.toCharArray()); - factory.setKeyStoreType(F.isEmpty(keyStoreType) ? "jks" : keyStoreType); + factory.setKeyStoreType(F.isEmpty(keyStoreType) ? DFLT_STORE_TYPE : keyStoreType); if (F.isEmpty(trustStorePath)) factory.setTrustManagers(GridSslBasicContextFactory.getDisabledTrustManager()); @@ -801,7 +805,7 @@ public void load(String prefix, Properties in) throws GridClientException { if (trustStorePwd != null) factory.setTrustStorePassword(trustStorePwd.toCharArray()); - factory.setTrustStoreType(F.isEmpty(trustStoreType) ? "jks" : trustStoreType); + factory.setTrustStoreType(F.isEmpty(trustStoreType) ? 
DFLT_STORE_TYPE : trustStoreType); } setSslContextFactory(factory); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/ssl/GridSslBasicContextFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/client/ssl/GridSslBasicContextFactory.java index d4075c1c71c350..e500705414de16 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/ssl/GridSslBasicContextFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/ssl/GridSslBasicContextFactory.java @@ -37,6 +37,11 @@ import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.A; import org.apache.ignite.ssl.SSLContextWrapper; +import org.apache.ignite.ssl.SslContextFactory; + +import static org.apache.ignite.ssl.SslContextFactory.DFLT_KEY_ALGORITHM; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_SSL_PROTOCOL; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_STORE_TYPE; /** * Basic ssl context factory that provides ssl context configuration with specified key @@ -54,15 +59,6 @@ */ @Deprecated public class GridSslBasicContextFactory implements GridSslContextFactory { - /** Default key store type. */ - public static final String DFLT_STORE_TYPE = "JKS"; - - /** Default SSL protocol. */ - public static final String DFLT_SSL_PROTOCOL = "TLS"; - - /** Default key manager algorithm. */ - public static final String DFLT_KEY_ALGORITHM = "SunX509"; - /** SSL protocol. */ private String proto = DFLT_SSL_PROTOCOL; @@ -106,8 +102,8 @@ public String getKeyStoreType() { } /** - * Sets key store type used in context initialization. If not provided, {@link #DFLT_STORE_TYPE} will - * be used. + * Sets key store type used in context initialization. If not provided, {@link SslContextFactory#DFLT_STORE_TYPE} + * will be used. * * @param keyStoreType Key store type. */ @@ -127,8 +123,8 @@ public String getTrustStoreType() { } /** - * Sets trust store type used in context initialization. 
If not provided, {@link #DFLT_STORE_TYPE} will - * be used. + * Sets trust store type used in context initialization. If not provided, {@link SslContextFactory#DFLT_STORE_TYPE} + * will be used. * * @param trustStoreType Trust store type. */ @@ -148,7 +144,7 @@ public String getProtocol() { } /** - * Sets protocol for secure transport. If not specified, {@link #DFLT_SSL_PROTOCOL} will be used. + * Sets protocol for secure transport. If not specified, {@link SslContextFactory#DFLT_SSL_PROTOCOL} will be used. * * @param proto SSL protocol name. */ @@ -159,7 +155,7 @@ public void setProtocol(String proto) { } /** - * Gets algorithm that will be used to create a key manager. If not specified, {@link #DFLT_KEY_ALGORITHM} + * Gets algorithm that will be used to create a key manager. If not specified, {@link SslContextFactory#DFLT_KEY_ALGORITHM} * will be used. * * @return Key manager algorithm. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientComputeImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientComputeImpl.java index d4cb4153d1f962..65d1c2d618e039 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientComputeImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientComputeImpl.java @@ -17,6 +17,7 @@ package org.apache.ignite.internal.client.thin; +import java.nio.ByteBuffer; import java.util.Collection; import java.util.HashMap; import java.util.Map; @@ -40,7 +41,7 @@ import org.apache.ignite.client.IgniteClientFuture; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.binary.BinaryRawWriterEx; -import org.apache.ignite.internal.binary.streams.BinaryHeapInputStream; +import org.apache.ignite.internal.binary.streams.BinaryByteBufferInputStream; import org.apache.ignite.internal.processors.platform.client.ClientStatus; import org.apache.ignite.internal.util.IgniteUtils; import 
org.apache.ignite.internal.util.future.GridFutureAdapter; @@ -353,11 +354,11 @@ private void writeExecuteTaskRequest( ClientChannel ch, ClientOperation op, long rsrcId, - byte[] payload, + ByteBuffer payload, Exception err ) { if (op == ClientOperation.COMPUTE_TASK_FINISHED) { - Object res = payload == null ? null : utils.readObject(new BinaryHeapInputStream(payload), false); + Object res = payload == null ? null : utils.readObject(BinaryByteBufferInputStream.create(payload), false); ClientComputeTask task = addTask(ch, rsrcId); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientSslUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientSslUtils.java new file mode 100644 index 00000000000000..4f964d86908f4c --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientSslUtils.java @@ -0,0 +1,293 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.client.thin; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.security.KeyManagementException; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.UnrecoverableKeyException; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; +import java.util.function.BiFunction; +import java.util.function.Predicate; +import java.util.stream.Stream; +import javax.cache.configuration.Factory; +import javax.net.ssl.KeyManager; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManager; +import javax.net.ssl.TrustManagerFactory; +import javax.net.ssl.X509TrustManager; + +import org.apache.ignite.client.SslMode; +import org.apache.ignite.client.SslProtocol; +import org.apache.ignite.configuration.ClientConfiguration; + +import static org.apache.ignite.ssl.SslContextFactory.DFLT_KEY_ALGORITHM; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_STORE_TYPE; + +public class ClientSslUtils { + /** */ + public static final char[] EMPTY_CHARS = new char[0]; + + /** Trust manager ignoring all certificate checks. */ + private static final TrustManager ignoreErrorsTrustMgr = new X509TrustManager() { + /** */ + @Override public X509Certificate[] getAcceptedIssuers() { + return null; + } + + /** */ + @Override public void checkServerTrusted(X509Certificate[] arg0, String arg1) { + // No-op. + } + + /** */ + @Override public void checkClientTrusted(X509Certificate[] arg0, String arg1) { + // No-op. + } + }; + + /** + * Gets SSL context for the given client configuration. + * + * @param cfg Configuration. + * @return {@link SSLContext} when SSL is enabled in the configuration; null otherwise. 
+ */ + public static SSLContext getSslContext(ClientConfiguration cfg) { + if (cfg.getSslMode() == SslMode.DISABLED) + return null; + + Factory sslCtxFactory = cfg.getSslContextFactory(); + + if (sslCtxFactory != null) { + try { + return sslCtxFactory.create(); + } + catch (Exception e) { + throw new ClientError("SSL Context Factory failed", e); + } + } + + BiFunction or = (val, dflt) -> val == null || val.isEmpty() ? dflt : val; + + String keyStore = or.apply( + cfg.getSslClientCertificateKeyStorePath(), + System.getProperty("javax.net.ssl.keyStore") + ); + + String keyStoreType = or.apply( + cfg.getSslClientCertificateKeyStoreType(), + or.apply(System.getProperty("javax.net.ssl.keyStoreType"), DFLT_STORE_TYPE) + ); + + String keyStorePwd = or.apply( + cfg.getSslClientCertificateKeyStorePassword(), + System.getProperty("javax.net.ssl.keyStorePassword") + ); + + String trustStore = or.apply( + cfg.getSslTrustCertificateKeyStorePath(), + System.getProperty("javax.net.ssl.trustStore") + ); + + String trustStoreType = or.apply( + cfg.getSslTrustCertificateKeyStoreType(), + or.apply(System.getProperty("javax.net.ssl.trustStoreType"), DFLT_STORE_TYPE) + ); + + String trustStorePwd = or.apply( + cfg.getSslTrustCertificateKeyStorePassword(), + System.getProperty("javax.net.ssl.trustStorePassword") + ); + + String algorithm = or.apply(cfg.getSslKeyAlgorithm(), DFLT_KEY_ALGORITHM); + + String proto = toString(cfg.getSslProtocol()); + + if (Stream.of(keyStore, keyStorePwd, keyStoreType, trustStore, trustStorePwd, trustStoreType) + .allMatch(s -> s == null || s.isEmpty()) + ) { + try { + return SSLContext.getDefault(); + } + catch (NoSuchAlgorithmException e) { + throw new ClientError("Default SSL context cryptographic algorithm is not available", e); + } + } + + KeyManager[] keyManagers = getKeyManagers(algorithm, keyStore, keyStoreType, keyStorePwd); + + TrustManager[] trustManagers = cfg.isSslTrustAll() ? 
+ new TrustManager[] {ignoreErrorsTrustMgr} : + getTrustManagers(algorithm, trustStore, trustStoreType, trustStorePwd); + + try { + SSLContext sslCtx = SSLContext.getInstance(proto); + + sslCtx.init(keyManagers, trustManagers, null); + + return sslCtx; + } + catch (NoSuchAlgorithmException e) { + throw new ClientError("SSL context cryptographic algorithm is not available", e); + } + catch (KeyManagementException e) { + throw new ClientError("Failed to create SSL Context", e); + } + } + + /** + * @return String representation of {@link SslProtocol} as required by {@link SSLContext}. + */ + private static String toString(SslProtocol proto) { + switch (proto) { + case TLSv1_1: + return "TLSv1.1"; + + case TLSv1_2: + return "TLSv1.2"; + + default: + return proto.toString(); + } + } + + /** */ + private static KeyManager[] getKeyManagers( + String algorithm, + String keyStore, + String keyStoreType, + String keyStorePwd + ) { + KeyManagerFactory keyMgrFactory; + + try { + keyMgrFactory = KeyManagerFactory.getInstance(algorithm); + } + catch (NoSuchAlgorithmException e) { + throw new ClientError("Key manager cryptographic algorithm is not available", e); + } + + Predicate empty = s -> s == null || s.isEmpty(); + + if (!empty.test(keyStore) && !empty.test(keyStoreType)) { + char[] pwd = (keyStorePwd == null) ? 
EMPTY_CHARS : keyStorePwd.toCharArray(); + + KeyStore store = loadKeyStore("Client", keyStore, keyStoreType, pwd); + + try { + keyMgrFactory.init(store, pwd); + } + catch (UnrecoverableKeyException e) { + throw new ClientError("Could not recover key store key", e); + } + catch (KeyStoreException e) { + throw new ClientError( + String.format("Client key store provider of type [%s] is not available", keyStoreType), + e + ); + } + catch (NoSuchAlgorithmException e) { + throw new ClientError("Client key store integrity check algorithm is not available", e); + } + } + + return keyMgrFactory.getKeyManagers(); + } + + /** */ + private static TrustManager[] getTrustManagers( + String algorithm, + String trustStore, + String trustStoreType, + String trustStorePwd + ) { + TrustManagerFactory trustMgrFactory; + + try { + trustMgrFactory = TrustManagerFactory.getInstance(algorithm); + } + catch (NoSuchAlgorithmException e) { + throw new ClientError("Trust manager cryptographic algorithm is not available", e); + } + + Predicate empty = s -> s == null || s.isEmpty(); + + if (!empty.test(trustStore) && !empty.test(trustStoreType)) { + char[] pwd = (trustStorePwd == null) ? 
EMPTY_CHARS : trustStorePwd.toCharArray(); + + KeyStore store = loadKeyStore("Trust", trustStore, trustStoreType, pwd); + + try { + trustMgrFactory.init(store); + } + catch (KeyStoreException e) { + throw new ClientError( + String.format("Trust key store provider of type [%s] is not available", trustStoreType), + e + ); + } + } + + return trustMgrFactory.getTrustManagers(); + } + + /** */ + private static KeyStore loadKeyStore(String lb, String path, String type, char[] pwd) { + KeyStore store; + + try { + store = KeyStore.getInstance(type); + } + catch (KeyStoreException e) { + throw new ClientError( + String.format("%s key store provider of type [%s] is not available", lb, type), + e + ); + } + + try (InputStream in = new FileInputStream(new File(path))) { + + store.load(in, pwd); + + return store; + } + catch (FileNotFoundException e) { + throw new ClientError(String.format("%s key store file [%s] does not exist", lb, path), e); + } + catch (NoSuchAlgorithmException e) { + throw new ClientError( + String.format("%s key store integrity check algorithm is not available", lb), + e + ); + } + catch (CertificateException e) { + throw new ClientError(String.format("Could not load certificate from %s key store", lb), e); + } + catch (IOException e) { + throw new ClientError(String.format("Could not read %s key store", lb), e); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientUtils.java index 1e62a5fa83ce5e..ad2b96dc554b21 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientUtils.java @@ -524,6 +524,17 @@ void write(SqlFieldsQuery qry, BinaryOutputStream out) { out.writeBoolean(qry.isLazy()); out.writeLong(qry.getTimeout()); out.writeBoolean(true); // include column names + + if (qry.getPartitions() != null) { + 
out.writeInt(qry.getPartitions().length); + + for (int part : qry.getPartitions()) + out.writeInt(part); + } + else + out.writeInt(-1); + + out.writeInt(qry.getUpdateBatchSize()); } /** Write Ignite binary object to output stream. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/NotificationListener.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/NotificationListener.java index ae1b7fac9b473a..3aee48304efa0c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/NotificationListener.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/NotificationListener.java @@ -17,6 +17,8 @@ package org.apache.ignite.internal.client.thin; +import java.nio.ByteBuffer; + /** * Server to client notification listener. */ @@ -30,5 +32,5 @@ interface NotificationListener { * @param payload Notification payload or {@code null} if there is no payload. * @param err Error. */ - public void acceptNotification(ClientChannel ch, ClientOperation op, long rsrcId, byte[] payload, Exception err); + public void acceptNotification(ClientChannel ch, ClientOperation op, long rsrcId, ByteBuffer payload, Exception err); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/PayloadInputChannel.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/PayloadInputChannel.java index 76af7f2ae92e27..f9d5978f4ec679 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/PayloadInputChannel.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/PayloadInputChannel.java @@ -17,7 +17,9 @@ package org.apache.ignite.internal.client.thin; -import org.apache.ignite.internal.binary.streams.BinaryHeapInputStream; +import java.nio.ByteBuffer; + +import org.apache.ignite.internal.binary.streams.BinaryByteBufferInputStream; import org.apache.ignite.internal.binary.streams.BinaryInputStream; /** @@ -33,8 +35,8 @@ class 
PayloadInputChannel { /** * Constructor. */ - PayloadInputChannel(ClientChannel ch, byte[] payload) { - in = new BinaryHeapInputStream(payload); + PayloadInputChannel(ClientChannel ch, ByteBuffer payload) { + in = BinaryByteBufferInputStream.create(payload); this.ch = ch; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ProtocolBitmaskFeature.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ProtocolBitmaskFeature.java index 3577a5cf31da0a..0e90c1be97b559 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ProtocolBitmaskFeature.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ProtocolBitmaskFeature.java @@ -43,7 +43,10 @@ public enum ProtocolBitmaskFeature { SERVICE_INVOKE(5), /** Feature for use default query timeout if the qry timeout isn't set explicitly. */ - DEFAULT_QRY_TIMEOUT(6); + DEFAULT_QRY_TIMEOUT(6), + + /** Additional SqlFieldsQuery properties: partitions, updateBatchSize */ + QRY_PARTITIONS_BATCH_SIZE(7); /** */ private static final EnumSet ALL_FEATURES_AS_ENUM_SET = diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ReliableChannel.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ReliableChannel.java index e7005be764d54f..195088de3cd40f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ReliableChannel.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ReliableChannel.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.client.thin; import java.net.InetSocketAddress; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -31,13 +32,11 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import 
java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; +import java.util.concurrent.ForkJoinPool; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Collectors; @@ -51,26 +50,21 @@ import org.apache.ignite.client.IgniteClientFuture; import org.apache.ignite.configuration.ClientConfiguration; import org.apache.ignite.configuration.ClientConnectorConfiguration; +import org.apache.ignite.internal.client.thin.io.ClientConnectionMultiplexer; +import org.apache.ignite.internal.client.thin.io.gridnioserver.GridNioClientConnectionMultiplexer; import org.apache.ignite.internal.util.HostAndPortRange; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.U; -import org.jetbrains.annotations.NotNull; /** * Communication channel with failover and partition awareness. */ final class ReliableChannel implements AutoCloseable, NotificationListener { - /** Timeout to wait for executor service to shutdown (in milliseconds). */ - private static final long EXECUTOR_SHUTDOWN_TIMEOUT = 10_000L; - /** Do nothing helper function. */ private static final Consumer DO_NOTHING = (v) -> {}; - /** Async runner thread name. */ - static final String ASYNC_RUNNER_THREAD_NAME = "thin-client-channel-async-init"; - /** Channel factory. */ - private final Function chFactory; + private final BiFunction chFactory; /** Client channel holders for each configured address. */ private volatile List channels; @@ -96,19 +90,6 @@ final class ReliableChannel implements AutoCloseable, NotificationListener { /** Listeners of channel close events. */ private final Collection> channelCloseLsnrs = new CopyOnWriteArrayList<>(); - /** Async tasks thread pool. 
*/ - private final ExecutorService asyncRunner = Executors.newSingleThreadExecutor( - new ThreadFactory() { - @Override public Thread newThread(@NotNull Runnable r) { - Thread thread = new Thread(r, ASYNC_RUNNER_THREAD_NAME); - - thread.setDaemon(true); - - return thread; - } - } - ); - /** Channels reinit was scheduled. */ private final AtomicBoolean scheduledChannelsReinit = new AtomicBoolean(); @@ -130,6 +111,9 @@ final class ReliableChannel implements AutoCloseable, NotificationListener { /** Guard channels and curChIdx together. */ private final ReadWriteLock curChannelsGuard = new ReentrantReadWriteLock(); + /** Connection manager. */ + private final ClientConnectionMultiplexer connMgr; + /** Cache addresses returned by {@code ThinClientAddressFinder}. */ private volatile String[] prevHostAddrs; @@ -137,9 +121,9 @@ final class ReliableChannel implements AutoCloseable, NotificationListener { * Constructor. */ ReliableChannel( - Function chFactory, - ClientConfiguration clientCfg, - IgniteBinary binary + BiFunction chFactory, + ClientConfiguration clientCfg, + IgniteBinary binary ) { if (chFactory == null) throw new NullPointerException("chFactory"); @@ -153,20 +137,16 @@ final class ReliableChannel implements AutoCloseable, NotificationListener { partitionAwarenessEnabled = clientCfg.isPartitionAwarenessEnabled(); affinityCtx = new ClientCacheAffinityContext(binary); + + connMgr = new GridNioClientConnectionMultiplexer(clientCfg); + connMgr.start(); } /** {@inheritDoc} */ @Override public synchronized void close() { closed = true; - asyncRunner.shutdown(); - - try { - asyncRunner.awaitTermination(EXECUTOR_SHUTDOWN_TIMEOUT, TimeUnit.MILLISECONDS); - } - catch (InterruptedException ignore) { - // No-op. 
- } + connMgr.stop(); List holders = channels; @@ -430,7 +410,7 @@ public void addChannelCloseListener(Consumer lsnr) { ClientChannel ch, ClientOperation op, long rsrcId, - byte[] payload, + ByteBuffer payload, Exception err ) { for (NotificationListener lsnr : notificationLsnrs) { @@ -579,7 +559,7 @@ private void onChannelFailure(ClientChannelHolder hld, ClientChannel ch) { * Asynchronously try to establish a connection to all configured servers. */ private void initAllChannelsAsync() { - asyncRunner.submit( + ForkJoinPool.commonPool().submit( () -> { List holders = channels; @@ -608,7 +588,7 @@ private void onTopologyChanged(ClientChannel ch) { if (scheduledChannelsReinit.compareAndSet(false, true)) { // If partition awareness is disabled then only schedule and wait for the default channel to fail. if (partitionAwarenessEnabled) - asyncRunner.submit(this::channelsInit); + ForkJoinPool.commonPool().submit(this::channelsInit); } } } @@ -867,6 +847,7 @@ private int getRetryLimit() { /** * Channels holder. */ + @SuppressWarnings("PackageVisibleInnerClass") // Visible for tests. class ClientChannelHolder { /** Channel configuration. */ private final ClientChannelConfiguration chCfg; @@ -937,7 +918,7 @@ private ClientChannel getOrCreateChannel(boolean ignoreThrottling) if (!ignoreThrottling && applyReconnectionThrottling()) throw new ClientConnectionException("Reconnect is not allowed due to applied throttling"); - ClientChannel channel = chFactory.apply(chCfg); + ClientChannel channel = chFactory.apply(chCfg, connMgr); if (channel.serverNodeId() != null) { channel.addTopologyChangeListener(ReliableChannel.this::onTopologyChanged); @@ -1008,6 +989,7 @@ InetSocketAddress getAddress() { /** * Get holders reference. For test purposes. */ + @SuppressWarnings("AssignmentOrReturnOfFieldWithMutableType") // For tests. List getChannelHolders() { return channels; } @@ -1015,6 +997,7 @@ List getChannelHolders() { /** * Get node channels reference. For test purposes. 
*/ + @SuppressWarnings("AssignmentOrReturnOfFieldWithMutableType") // For tests. Map getNodeChannels() { return nodeChannels; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java index 4f3ee409851227..109c2a9b0a08c6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java @@ -17,21 +17,9 @@ package org.apache.ignite.internal.client.thin; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; import java.net.InetSocketAddress; -import java.net.Socket; -import java.security.KeyManagementException; -import java.security.KeyStore; -import java.security.KeyStoreException; -import java.security.NoSuchAlgorithmException; -import java.security.UnrecoverableKeyException; -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; +import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Collection; import java.util.EnumSet; @@ -44,22 +32,8 @@ import java.util.concurrent.ForkJoinPool; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; -import java.util.function.Predicate; -import java.util.stream.Stream; -import javax.cache.configuration.Factory; -import javax.net.ssl.KeyManager; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLSocket; -import javax.net.ssl.SSLSocketFactory; -import javax.net.ssl.TrustManager; -import 
javax.net.ssl.TrustManagerFactory; -import javax.net.ssl.X509TrustManager; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.client.ClientAuthenticationException; import org.apache.ignite.client.ClientAuthorizationException; @@ -67,19 +41,20 @@ import org.apache.ignite.client.ClientException; import org.apache.ignite.client.ClientFeatureNotSupportedByServerException; import org.apache.ignite.client.ClientReconnectedException; -import org.apache.ignite.client.SslMode; -import org.apache.ignite.client.SslProtocol; import org.apache.ignite.configuration.ClientConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.binary.BinaryCachingMetadataHandler; import org.apache.ignite.internal.binary.BinaryContext; -import org.apache.ignite.internal.binary.BinaryPrimitives; import org.apache.ignite.internal.binary.BinaryReaderExImpl; import org.apache.ignite.internal.binary.BinaryWriterExImpl; -import org.apache.ignite.internal.binary.streams.BinaryHeapInputStream; +import org.apache.ignite.internal.binary.streams.BinaryByteBufferInputStream; import org.apache.ignite.internal.binary.streams.BinaryHeapOutputStream; import org.apache.ignite.internal.binary.streams.BinaryInputStream; import org.apache.ignite.internal.binary.streams.BinaryOutputStream; +import org.apache.ignite.internal.client.thin.io.ClientConnection; +import org.apache.ignite.internal.client.thin.io.ClientConnectionMultiplexer; +import org.apache.ignite.internal.client.thin.io.ClientConnectionStateHandler; +import org.apache.ignite.internal.client.thin.io.ClientMessageHandler; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.odbc.ClientListenerNioListener; import org.apache.ignite.internal.processors.odbc.ClientListenerRequest; @@ -107,13 +82,10 @@ /** * Implements {@link ClientChannel} over TCP. 
*/ -class TcpClientChannel implements ClientChannel { +class TcpClientChannel implements ClientChannel, ClientMessageHandler, ClientConnectionStateHandler { /** Protocol version used by default on first connection attempt. */ private static final ProtocolVersion DEFAULT_VERSION = LATEST_VER; - /** Receiver thread prefix. */ - static final String RECEIVER_THREAD_PREFIX = "thin-client-channel#"; - /** Supported protocol versions. */ private static final Collection supportedVers = Arrays.asList( V1_7_0, @@ -126,30 +98,24 @@ class TcpClientChannel implements ClientChannel { V1_0_0 ); + /** Preallocated empty bytes. */ + public static final byte[] EMPTY_BYTES = new byte[0]; + /** Protocol context. */ - private ProtocolContext protocolCtx; + private volatile ProtocolContext protocolCtx; /** Server node ID. */ - private UUID srvNodeId; + private volatile UUID srvNodeId; /** Server topology version. */ - private AffinityTopologyVersion srvTopVer; + private volatile AffinityTopologyVersion srvTopVer; /** Channel. */ - private final Socket sock; - - /** Output stream. */ - private final OutputStream out; - - /** Data input. */ - private final ByteCountingDataInput dataInput; + private final ClientConnection sock; /** Request id. */ private final AtomicLong reqId = new AtomicLong(1); - /** Send lock. */ - private final Lock sndLock = new ReentrantLock(); - /** Pending requests. */ private final Map pendingReqs = new ConcurrentHashMap<>(); @@ -165,26 +131,20 @@ class TcpClientChannel implements ClientChannel { /** Executor for async operation listeners. */ private final Executor asyncContinuationExecutor; - /** Receiver thread (processes incoming messages). */ - private Thread receiverThread; + /** Send/receive timeout in milliseconds. */ + private final int timeout; /** Constructor. 
*/ - TcpClientChannel(ClientChannelConfiguration cfg) + TcpClientChannel(ClientChannelConfiguration cfg, ClientConnectionMultiplexer connMgr) throws ClientConnectionException, ClientAuthenticationException, ClientProtocolError { validateConfiguration(cfg); Executor cfgExec = cfg.getAsyncContinuationExecutor(); asyncContinuationExecutor = cfgExec != null ? cfgExec : ForkJoinPool.commonPool(); - try { - sock = createSocket(cfg); + timeout = cfg.getTimeout(); - out = sock.getOutputStream(); - dataInput = new ByteCountingDataInput(sock.getInputStream()); - } - catch (IOException e) { - throw handleIOError("addr=" + cfg.getAddress(), e); - } + sock = connMgr.open(cfg.getAddress(), this, this); handshake(DEFAULT_VERSION, cfg.getUserName(), cfg.getUserPassword(), cfg.getUserAttributes()); } @@ -194,28 +154,25 @@ class TcpClientChannel implements ClientChannel { close(null); } + /** {@inheritDoc} */ + @Override public void onMessage(ByteBuffer buf) { + processNextMessage(buf); + } + + /** {@inheritDoc} */ + @Override public void onDisconnected(@Nullable Exception e) { + close(e); + } + /** * Close the channel with cause. */ private void close(Throwable cause) { if (closed.compareAndSet(false, true)) { - U.closeQuiet(dataInput); - U.closeQuiet(out); U.closeQuiet(sock); - sndLock.lock(); // Lock here to prevent creation of new pending requests. 
- - try { - for (ClientRequestFuture pendingReq : pendingReqs.values()) - pendingReq.onDone(new ClientConnectionException("Channel is closed", cause)); - - if (receiverThread != null) - receiverThread.interrupt(); - } - finally { - sndLock.unlock(); - } - + for (ClientRequestFuture pendingReq : pendingReqs.values()) + pendingReq.onDone(new ClientConnectionException("Channel is closed", cause)); } } @@ -240,7 +197,8 @@ private void close(Throwable cause) { ClientRequestFuture fut = send(op, payloadWriter); return receiveAsync(fut, payloadReader); - } catch (Throwable t) { + } + catch (Throwable t) { CompletableFuture fut = new CompletableFuture<>(); fut.completeExceptionally(t); @@ -257,15 +215,10 @@ private ClientRequestFuture send(ClientOperation op, Consumer T receive(ClientRequestFuture pendingReq, Function payloadReader) throws ClientException { try { - byte[] payload = pendingReq.get(); + ByteBuffer payload = timeout > 0 ? pendingReq.get(timeout) : pendingReq.get(); if (payload == null || payloadReader == null) return null; @@ -327,7 +278,7 @@ private CompletableFuture receiveAsync(ClientRequestFuture pendingReq, Fu pendingReq.listen(payloadFut -> asyncContinuationExecutor.execute(() -> { try { - byte[] payload = payloadFut.get(); + ByteBuffer payload = payloadFut.get(); if (payload == null || payloadReader == null) fut.complete(null); @@ -335,7 +286,8 @@ private CompletableFuture receiveAsync(ClientRequestFuture pendingReq, Fu T res = payloadReader.apply(new PayloadInputChannel(this, payload)); fut.complete(res); } - } catch (Throwable t) { + } + catch (Throwable t) { fut.completeExceptionally(convertException(t)); } })); @@ -377,59 +329,30 @@ private RuntimeException convertException(Throwable e) { return new ClientException(e.getMessage(), e); } - /** - * Init and start receiver thread if it wasn't started before. - * - * Note: Method should be called only under external synchronization. 
- */ - private void initReceiverThread() { - if (receiverThread == null) { - Socket sock = this.sock; - - String sockInfo = sock == null ? null : sock.getInetAddress().getHostName() + ":" + sock.getPort(); - - receiverThread = new Thread(() -> { - try { - while (!closed()) - processNextMessage(); - } - catch (Throwable e) { - close(e); - } - }, RECEIVER_THREAD_PREFIX + sockInfo); - - receiverThread.setDaemon(true); - - receiverThread.start(); - } - } - /** * Process next message from the input stream and complete corresponding future. */ - private void processNextMessage() throws ClientProtocolError, ClientConnectionException { - // blocking read a message header not to fall into a busy loop - int msgSize = dataInput.readInt(2048); - - if (msgSize <= 0) - throw new ClientProtocolError(String.format("Invalid message size: %s", msgSize)); + private void processNextMessage(ByteBuffer buf) throws ClientProtocolError, ClientConnectionException { + BinaryInputStream dataInput = BinaryByteBufferInputStream.create(buf); - long bytesReadOnStartMsg = dataInput.totalBytesRead(); + if (protocolCtx == null) { + // Process handshake. 
+ pendingReqs.remove(-1L).onDone(buf); + return; + } - long resId = dataInput.spinReadLong(); + long resId = dataInput.readLong(); int status = 0; ClientOperation notificationOp = null; - BinaryInputStream resIn; - if (protocolCtx.isFeatureSupported(PARTITION_AWARENESS)) { - short flags = dataInput.spinReadShort(); + short flags = dataInput.readShort(); if ((flags & ClientFlag.AFFINITY_TOPOLOGY_CHANGED) != 0) { - long topVer = dataInput.spinReadLong(); - int minorTopVer = dataInput.spinReadInt(); + long topVer = dataInput.readLong(); + int minorTopVer = dataInput.readInt(); srvTopVer = new AffinityTopologyVersion(topVer, minorTopVer); @@ -438,7 +361,7 @@ private void processNextMessage() throws ClientProtocolError, ClientConnectionEx } if ((flags & ClientFlag.NOTIFICATION) != 0) { - short notificationCode = dataInput.spinReadShort(); + short notificationCode = dataInput.readShort(); notificationOp = ClientOperation.fromCode(notificationCode); @@ -447,26 +370,25 @@ private void processNextMessage() throws ClientProtocolError, ClientConnectionEx } if ((flags & ClientFlag.ERROR) != 0) - status = dataInput.spinReadInt(); + status = dataInput.readInt(); } else - status = dataInput.spinReadInt(); + status = dataInput.readInt(); - int hdrSize = (int)(dataInput.totalBytesRead() - bytesReadOnStartMsg); + int hdrSize = dataInput.position(); + int msgSize = buf.limit(); - byte[] res = null; + ByteBuffer res = null; Exception err = null; if (status == 0) { if (msgSize > hdrSize) - res = dataInput.spinRead(msgSize - hdrSize); + res = buf; } else if (status == ClientStatus.SECURITY_VIOLATION) err = new ClientAuthorizationException(); else { - resIn = new BinaryHeapInputStream(dataInput.spinRead(msgSize - hdrSize)); - - String errMsg = ClientUtils.createBinaryReader(null, resIn).readString(); + String errMsg = ClientUtils.createBinaryReader(null, dataInput).readString(); err = new ClientServerError(errMsg, status, resId); } @@ -530,31 +452,21 @@ else if (addr.getPort() < 1024 || 
addr.getPort() > 49151) throw new IllegalArgumentException(error); } - /** Create socket. */ - private static Socket createSocket(ClientChannelConfiguration cfg) throws IOException { - Socket sock = cfg.getSslMode() == SslMode.REQUIRED ? - new ClientSslSocketFactory(cfg).create() : - new Socket(cfg.getAddress().getHostName(), cfg.getAddress().getPort()); - - sock.setTcpNoDelay(cfg.isTcpNoDelay()); - - if (cfg.getTimeout() > 0) - sock.setSoTimeout(cfg.getTimeout()); - - if (cfg.getSendBufferSize() > 0) - sock.setSendBufferSize(cfg.getSendBufferSize()); - - if (cfg.getReceiveBufferSize() > 0) - sock.setReceiveBufferSize(cfg.getReceiveBufferSize()); - - return sock; - } - /** Client handshake. */ private void handshake(ProtocolVersion ver, String user, String pwd, Map userAttrs) throws ClientConnectionException, ClientAuthenticationException, ClientProtocolError { + ClientRequestFuture fut = new ClientRequestFuture(); + pendingReqs.put(-1L, fut); + handshakeReq(ver, user, pwd, userAttrs); - handshakeRes(ver, user, pwd, userAttrs); + + try { + ByteBuffer res = timeout > 0 ? fut.get(timeout) : fut.get(); + handshakeRes(res, ver, user, pwd, userAttrs); + } + catch (IgniteCheckedException e) { + throw new ClientConnectionException(e.getMessage(), e); + } } /** Send handshake request. */ @@ -591,7 +503,7 @@ private void handshakeReq(ProtocolVersion proposedVer, String user, String pwd, writer.out().writeInt(0, writer.out().position() - 4);// actual size - write(writer.array(), writer.out().position()); + write(writer.out().arrayCopy(), writer.out().position()); } } @@ -608,20 +520,15 @@ private ProtocolContext protocolContextFromVersion(ProtocolVersion ver) { } /** Receive and handle handshake response. 
*/ - private void handshakeRes(ProtocolVersion proposedVer, String user, String pwd, Map userAttrs) + private void handshakeRes(ByteBuffer buf, ProtocolVersion proposedVer, String user, String pwd, Map userAttrs) throws ClientConnectionException, ClientAuthenticationException, ClientProtocolError { - int resSize = dataInput.readInt(); - - if (resSize <= 0) - throw new ClientProtocolError(String.format("Invalid handshake response size: %s", resSize)); - - BinaryInputStream res = new BinaryHeapInputStream(dataInput.read(resSize)); + BinaryInputStream res = BinaryByteBufferInputStream.create(buf); try (BinaryReaderExImpl reader = ClientUtils.createBinaryReader(null, res)) { boolean success = res.readBoolean(); if (success) { - byte[] features = new byte[0]; + byte[] features = EMPTY_BYTES; if (ProtocolContext.isFeatureSupported(proposedVer, BITMAP_FEATURES)) features = reader.readByteArray(); @@ -667,12 +574,13 @@ else if (!supportedVers.contains(srvVer) || /** Write bytes to the output stream. */ private void write(byte[] bytes, int len) throws ClientConnectionException { + ByteBuffer buf = ByteBuffer.wrap(bytes, 0, len); + try { - out.write(bytes, 0, len); - out.flush(); + sock.send(buf); } - catch (IOException e) { - throw handleIOError(e); + catch (IgniteCheckedException e) { + throw new ClientConnectionException(e.getMessage(), e); } } @@ -691,425 +599,9 @@ private ClientException handleIOError(String chInfo, @Nullable IOException ex) { return new ClientConnectionException("Ignite cluster is unavailable [" + chInfo + ']', ex); } - /** - * Auxiliary class to read byte buffers and numeric values, counting total bytes read. - * Numeric values are read in the little-endian byte order. - */ - private class ByteCountingDataInput implements AutoCloseable { - /** Input stream. */ - private final InputStream in; - - /** Total bytes read from the input stream. */ - private long totalBytesRead; - - /** Temporary buffer to read long, int and short values. 
*/ - private final byte[] tmpBuf = new byte[Long.BYTES]; - - /** - * @param in Input stream. - */ - public ByteCountingDataInput(InputStream in) { - this.in = in; - } - - /** Read bytes from the input stream. */ - public byte[] read(int len) throws ClientConnectionException { - byte[] bytes = new byte[len]; - - read(bytes, len, 0); - - return bytes; - } - - /** Read bytes from the input stream. */ - public byte[] spinRead(int len) { - byte[] bytes = new byte[len]; - - read(bytes, len, Integer.MAX_VALUE); - - return bytes; - } - - /** - * Read bytes from the input stream to the buffer. - * - * @param bytes Bytes buffer. - * @param len Length. - * @param tryReadCnt Number of reads before falling into blocking read. - */ - public void read(byte[] bytes, int len, int tryReadCnt) throws ClientConnectionException { - int offset = 0; - - try { - while (offset < len) { - int toRead; - - if (tryReadCnt == 0) - toRead = len - offset; - else if ((toRead = Math.min(in.available(), len - offset)) == 0) { - tryReadCnt--; - - continue; - } - - int read = in.read(bytes, offset, toRead); - - if (read < 0) - throw handleIOError(null); - - offset += read; - totalBytesRead += read; - } - } - catch (IOException e) { - throw handleIOError(e); - } - } - - /** - * Read long value from the input stream. - */ - public long readLong() throws ClientConnectionException { - return readLong(0); - } - - /** - * Read long value from the input stream. - */ - public long spinReadLong() throws ClientConnectionException { - return readLong(Integer.MAX_VALUE); - } - - /** - * Read long value from the input stream. - * - * @param tryReadCnt Number of reads before falling into blocking read. - */ - private long readLong(int tryReadCnt) throws ClientConnectionException { - read(tmpBuf, Long.BYTES, tryReadCnt); - - return BinaryPrimitives.readLong(tmpBuf, 0); - } - - /** - * Read int value from the input stream. 
- */ - public int readInt() throws ClientConnectionException { - return readInt(0); - } - - /** - * Read int value from the input stream. - */ - public int spinReadInt() throws ClientConnectionException { - return readInt(Integer.MAX_VALUE); - } - - /** - * Read int value from the input stream. - * - * @param tryReadCnt Number of reads before falling into blocking read. - */ - private int readInt(int tryReadCnt) throws ClientConnectionException { - read(tmpBuf, Integer.BYTES, tryReadCnt); - - return BinaryPrimitives.readInt(tmpBuf, 0); - } - - /** - * Read short value from the input stream. - */ - public short readShort() throws ClientConnectionException { - return readShort(0); - } - - /** - * Read short value from the input stream. - */ - public short spinReadShort() throws ClientConnectionException { - return readShort(Integer.MAX_VALUE); - } - - /** - * Read short value from the input stream. - * - * @param tryReadCnt Number of reads before falling into blocking read. - */ - public short readShort(int tryReadCnt) throws ClientConnectionException { - read(tmpBuf, Short.BYTES, tryReadCnt); - - return BinaryPrimitives.readShort(tmpBuf, 0); - } - - /** - * Gets total bytes read from the input stream. - */ - public long totalBytesRead() { - return totalBytesRead; - } - - /** - * Close input stream. - */ - @Override public void close() throws IOException { - in.close(); - } - } - /** * */ - private static class ClientRequestFuture extends GridFutureAdapter { - } - - /** SSL Socket Factory. */ - private static class ClientSslSocketFactory { - /** Trust manager ignoring all certificate checks. */ - private static final TrustManager ignoreErrorsTrustMgr = new X509TrustManager() { - @Override public X509Certificate[] getAcceptedIssuers() { - return null; - } - - @Override public void checkServerTrusted(X509Certificate[] arg0, String arg1) { - } - - @Override public void checkClientTrusted(X509Certificate[] arg0, String arg1) { - } - }; - - /** Config. 
*/ - private final ClientChannelConfiguration cfg; - - /** Constructor. */ - ClientSslSocketFactory(ClientChannelConfiguration cfg) { - this.cfg = cfg; - } - - /** Create SSL socket. */ - SSLSocket create() throws IOException { - InetSocketAddress addr = cfg.getAddress(); - - SSLSocket sock = (SSLSocket)getSslSocketFactory(cfg).createSocket(addr.getHostName(), addr.getPort()); - - sock.setUseClientMode(true); - - sock.startHandshake(); - - return sock; - } - - /** Create SSL socket factory. */ - private static SSLSocketFactory getSslSocketFactory(ClientChannelConfiguration cfg) { - Factory sslCtxFactory = cfg.getSslContextFactory(); - - if (sslCtxFactory != null) { - try { - return sslCtxFactory.create().getSocketFactory(); - } - catch (Exception e) { - throw new ClientError("SSL Context Factory failed", e); - } - } - - BiFunction or = (val, dflt) -> val == null || val.isEmpty() ? dflt : val; - - String keyStore = or.apply( - cfg.getSslClientCertificateKeyStorePath(), - System.getProperty("javax.net.ssl.keyStore") - ); - - String keyStoreType = or.apply( - cfg.getSslClientCertificateKeyStoreType(), - or.apply(System.getProperty("javax.net.ssl.keyStoreType"), "JKS") - ); - - String keyStorePwd = or.apply( - cfg.getSslClientCertificateKeyStorePassword(), - System.getProperty("javax.net.ssl.keyStorePassword") - ); - - String trustStore = or.apply( - cfg.getSslTrustCertificateKeyStorePath(), - System.getProperty("javax.net.ssl.trustStore") - ); - - String trustStoreType = or.apply( - cfg.getSslTrustCertificateKeyStoreType(), - or.apply(System.getProperty("javax.net.ssl.trustStoreType"), "JKS") - ); - - String trustStorePwd = or.apply( - cfg.getSslTrustCertificateKeyStorePassword(), - System.getProperty("javax.net.ssl.trustStorePassword") - ); - - String algorithm = or.apply(cfg.getSslKeyAlgorithm(), "SunX509"); - - String proto = toString(cfg.getSslProtocol()); - - if (Stream.of(keyStore, keyStorePwd, keyStoreType, trustStore, trustStorePwd, trustStoreType) - 
.allMatch(s -> s == null || s.isEmpty()) - ) { - try { - return SSLContext.getDefault().getSocketFactory(); - } - catch (NoSuchAlgorithmException e) { - throw new ClientError("Default SSL context cryptographic algorithm is not available", e); - } - } - - KeyManager[] keyManagers = getKeyManagers(algorithm, keyStore, keyStoreType, keyStorePwd); - - TrustManager[] trustManagers = cfg.isSslTrustAll() ? - new TrustManager[] {ignoreErrorsTrustMgr} : - getTrustManagers(algorithm, trustStore, trustStoreType, trustStorePwd); - - try { - SSLContext sslCtx = SSLContext.getInstance(proto); - - sslCtx.init(keyManagers, trustManagers, null); - - return sslCtx.getSocketFactory(); - } - catch (NoSuchAlgorithmException e) { - throw new ClientError("SSL context cryptographic algorithm is not available", e); - } - catch (KeyManagementException e) { - throw new ClientError("Failed to create SSL Context", e); - } - } - - /** - * @return String representation of {@link SslProtocol} as required by {@link SSLContext}. - */ - private static String toString(SslProtocol proto) { - switch (proto) { - case TLSv1_1: - return "TLSv1.1"; - - case TLSv1_2: - return "TLSv1.2"; - - default: - return proto.toString(); - } - } - - /** */ - private static KeyManager[] getKeyManagers( - String algorithm, - String keyStore, - String keyStoreType, - String keyStorePwd - ) { - KeyManagerFactory keyMgrFactory; - - try { - keyMgrFactory = KeyManagerFactory.getInstance(algorithm); - } - catch (NoSuchAlgorithmException e) { - throw new ClientError("Key manager cryptographic algorithm is not available", e); - } - - Predicate empty = s -> s == null || s.isEmpty(); - - if (!empty.test(keyStore) && !empty.test(keyStoreType)) { - char[] pwd = (keyStorePwd == null) ? 
new char[0] : keyStorePwd.toCharArray(); - - KeyStore store = loadKeyStore("Client", keyStore, keyStoreType, pwd); - - try { - keyMgrFactory.init(store, pwd); - } - catch (UnrecoverableKeyException e) { - throw new ClientError("Could not recover key store key", e); - } - catch (KeyStoreException e) { - throw new ClientError( - String.format("Client key store provider of type [%s] is not available", keyStoreType), - e - ); - } - catch (NoSuchAlgorithmException e) { - throw new ClientError("Client key store integrity check algorithm is not available", e); - } - } - - return keyMgrFactory.getKeyManagers(); - } - - /** */ - private static TrustManager[] getTrustManagers( - String algorithm, - String trustStore, - String trustStoreType, - String trustStorePwd - ) { - TrustManagerFactory trustMgrFactory; - - try { - trustMgrFactory = TrustManagerFactory.getInstance(algorithm); - } - catch (NoSuchAlgorithmException e) { - throw new ClientError("Trust manager cryptographic algorithm is not available", e); - } - - Predicate empty = s -> s == null || s.isEmpty(); - - if (!empty.test(trustStore) && !empty.test(trustStoreType)) { - char[] pwd = (trustStorePwd == null) ? 
new char[0] : trustStorePwd.toCharArray(); - - KeyStore store = loadKeyStore("Trust", trustStore, trustStoreType, pwd); - - try { - trustMgrFactory.init(store); - } - catch (KeyStoreException e) { - throw new ClientError( - String.format("Trust key store provider of type [%s] is not available", trustStoreType), - e - ); - } - } - - return trustMgrFactory.getTrustManagers(); - } - - /** */ - private static KeyStore loadKeyStore(String lb, String path, String type, char[] pwd) { - KeyStore store; - - try { - store = KeyStore.getInstance(type); - } - catch (KeyStoreException e) { - throw new ClientError( - String.format("%s key store provider of type [%s] is not available", lb, type), - e - ); - } - - try (InputStream in = new FileInputStream(new File(path))) { - - store.load(in, pwd); - - return store; - } - catch (FileNotFoundException e) { - throw new ClientError(String.format("%s key store file [%s] does not exist", lb, path), e); - } - catch (NoSuchAlgorithmException e) { - throw new ClientError( - String.format("%s key store integrity check algorithm is not available", lb), - e - ); - } - catch (CertificateException e) { - throw new ClientError(String.format("Could not load certificate from %s key store", lb), e); - } - catch (IOException e) { - throw new ClientError(String.format("Could not read %s key store", lb), e); - } - } + private static class ClientRequestFuture extends GridFutureAdapter { } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpIgniteClient.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpIgniteClient.java index 9cea6a47ef2f33..c67184accc9689 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpIgniteClient.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpIgniteClient.java @@ -24,8 +24,8 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import java.util.function.BiFunction; 
import java.util.function.Consumer; -import java.util.function.Function; import org.apache.ignite.IgniteBinary; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.binary.BinaryObjectException; @@ -55,6 +55,7 @@ import org.apache.ignite.internal.binary.BinaryWriterExImpl; import org.apache.ignite.internal.binary.streams.BinaryInputStream; import org.apache.ignite.internal.binary.streams.BinaryOutputStream; +import org.apache.ignite.internal.client.thin.io.ClientConnectionMultiplexer; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.marshaller.MarshallerContext; @@ -101,8 +102,8 @@ private TcpIgniteClient(ClientConfiguration cfg) throws ClientException { * Constructor with custom channel factory. */ TcpIgniteClient( - Function chFactory, - ClientConfiguration cfg + BiFunction chFactory, + ClientConfiguration cfg ) throws ClientException { final ClientBinaryMetadataHandler metadataHandler = new ClientBinaryMetadataHandler(); @@ -116,18 +117,24 @@ private TcpIgniteClient(ClientConfiguration cfg) throws ClientException { ch = new ReliableChannel(chFactory, cfg, binary); - ch.channelsInit(); + try { + ch.channelsInit(); - ch.addChannelFailListener(() -> metadataHandler.onReconnect()); + ch.addChannelFailListener(() -> metadataHandler.onReconnect()); - transactions = new TcpClientTransactions(ch, marsh, - new ClientTransactionConfiguration(cfg.getTransactionConfiguration())); + transactions = new TcpClientTransactions(ch, marsh, + new ClientTransactionConfiguration(cfg.getTransactionConfiguration())); - cluster = new ClientClusterImpl(ch, marsh); + cluster = new ClientClusterImpl(ch, marsh); - compute = new ClientComputeImpl(ch, marsh, cluster.defaultClusterGroup()); + compute = new ClientComputeImpl(ch, marsh, cluster.defaultClusterGroup()); - services = new ClientServicesImpl(ch, marsh, cluster.defaultClusterGroup()); + services = new ClientServicesImpl(ch, marsh, 
cluster.defaultClusterGroup()); + } + catch (Exception e) { + ch.close(); + throw e; + } } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientConnection.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientConnection.java new file mode 100644 index 00000000000000..eed90b6bc7756a --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientConnection.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.client.thin.io; + +import java.nio.ByteBuffer; + +import org.apache.ignite.IgniteCheckedException; + +/** + * Client connection: abstracts away sending and receiving messages. + */ +public interface ClientConnection extends AutoCloseable { + /** + * Sends a message. + * + * @param msg Message buffer. + */ + void send(ByteBuffer msg) throws IgniteCheckedException; + + /** + * Closes the connection. 
+ */ + @Override void close(); +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientConnectionMultiplexer.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientConnectionMultiplexer.java new file mode 100644 index 00000000000000..891e2b350ec6bb --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientConnectionMultiplexer.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.client.thin.io; + +import java.net.InetSocketAddress; + +import org.apache.ignite.client.ClientConnectionException; + +/** + * Client connection multiplexer: manages multiple connections with a shared resource pool (worker threads, etc). + */ +public interface ClientConnectionMultiplexer { + /** + * Initializes this instance. + */ + void start(); + + /** + * Stops this instance. + */ + void stop(); + + /** + * Opens a new connection. + * + * @param addr Address. + * @param msgHnd Incoming message handler. + * @param stateHnd Connection state handler. + * @return Created connection. + * @throws ClientConnectionException when connection can't be established. 
+ */ + ClientConnection open( + InetSocketAddress addr, + ClientMessageHandler msgHnd, + ClientConnectionStateHandler stateHnd) + throws ClientConnectionException; +} diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheFlags.cs b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientConnectionStateHandler.java similarity index 67% rename from modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheFlags.cs rename to modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientConnectionStateHandler.java index e24d952a7af09d..3f9481e525d0af 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheFlags.cs +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientConnectionStateHandler.java @@ -1,4 +1,4 @@ -/* +/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. @@ -15,24 +15,17 @@ * limitations under the License. */ -namespace Apache.Ignite.Core.Impl.Client.Cache -{ - using System; +package org.apache.ignite.internal.client.thin.io; - /// - /// Cache operation flags. - /// - [Flags] - internal enum CacheFlags : byte - { - /// - /// No flags. - /// - None = 0x00, +import org.jetbrains.annotations.Nullable; - /// - /// Keep binary. - /// - KeepBinary = 0x01 - } +/** + * Handles thin client connection state. + */ +public interface ClientConnectionStateHandler { + /** + * Handles connection loss. + * @param e Exception that caused the disconnect, can be null. 
+ */ + void onDisconnected(@Nullable Exception e); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientMessageDecoder.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientMessageDecoder.java new file mode 100644 index 00000000000000..06ab441db76212 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientMessageDecoder.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.client.thin.io; + +import java.nio.ByteBuffer; + +/** + * Decodes thin client messages from partial buffers. + */ +public class ClientMessageDecoder { + /** */ + private byte[] data; + + /** */ + private int cnt = -4; + + /** */ + private int msgSize; + + /** + * Applies the next partial buffer. + * + * @param buf Buffer. + * @return Decoded message, or null when not yet complete. + */ + public byte[] apply(ByteBuffer buf) { + boolean msgReady = read(buf); + + return msgReady ? data : null; + } + + /** + * Reads the buffer. + * + * @param buf Buffer. + * @return True when a complete message has been received; false otherwise. 
+ */ + @SuppressWarnings("DuplicatedCode") // A little duplication is better than a little dependency. + private boolean read(ByteBuffer buf) { + if (cnt < 0) { + for (; cnt < 0 && buf.hasRemaining(); cnt++) + msgSize |= (buf.get() & 0xFF) << (8 * (4 + cnt)); + + if (cnt < 0) + return false; + + data = new byte[msgSize]; + } + + assert data != null; + assert cnt >= 0; + assert msgSize > 0; + + int remaining = buf.remaining(); + + if (remaining > 0) { + int missing = msgSize - cnt; + + if (missing > 0) { + int len = Math.min(missing, remaining); + + buf.get(data, cnt, len); + + cnt += len; + } + } + + if (cnt == msgSize) { + cnt = -4; + msgSize = 0; + + return true; + } + + return false; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientMessageHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientMessageHandler.java new file mode 100644 index 00000000000000..a52859ff43ce99 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientMessageHandler.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.client.thin.io; + +import java.nio.ByteBuffer; + +/** + * Handles thin client responses and server -> client notifications. + */ +public interface ClientMessageHandler { + /** + * Handles messages from the server. + * @param buf Buffer. + */ + void onMessage(ByteBuffer buf); +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientConnection.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientConnection.java new file mode 100644 index 00000000000000..e81d6f4c3a3218 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientConnection.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.client.thin.io.gridnioserver; + +import java.nio.ByteBuffer; + +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.client.thin.io.ClientConnection; +import org.apache.ignite.internal.client.thin.io.ClientConnectionStateHandler; +import org.apache.ignite.internal.client.thin.io.ClientMessageHandler; +import org.apache.ignite.internal.util.nio.GridNioSession; +import org.apache.ignite.internal.util.nio.GridNioSessionMetaKey; + +/** + * Client connection. + */ +class GridNioClientConnection implements ClientConnection { + /** */ + static final int SES_META_CONN = GridNioSessionMetaKey.nextUniqueKey(); + + /** */ + private final GridNioSession ses; + + /** */ + private final ClientMessageHandler msgHnd; + + /** */ + private final ClientConnectionStateHandler stateHnd; + + /** + * Ctor. + * + * @param ses Session. + */ + public GridNioClientConnection(GridNioSession ses, + ClientMessageHandler msgHnd, + ClientConnectionStateHandler stateHnd) { + assert ses != null; + assert msgHnd != null; + assert stateHnd != null; + + this.ses = ses; + this.msgHnd = msgHnd; + this.stateHnd = stateHnd; + + ses.addMeta(SES_META_CONN, this); + } + + /** {@inheritDoc} */ + @Override public void send(ByteBuffer msg) throws IgniteCheckedException { + ses.sendNoFuture(msg, null); + } + + /** {@inheritDoc} */ + @Override public void close() { + ses.close(); + } + + /** + * Handles incoming message. + * + * @param msg Message. + */ + void onMessage(ByteBuffer msg) { + assert msg != null; + + msgHnd.onMessage(msg); + } + + /** + * Handles disconnect. + * + * @param e Exception that caused the disconnect. 
+ */ + void onDisconnected(Exception e) { + stateHnd.onDisconnected(e); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientConnectionMultiplexer.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientConnectionMultiplexer.java new file mode 100644 index 00000000000000..74a70251e96cb9 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientConnectionMultiplexer.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.client.thin.io.gridnioserver; + +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.channels.SocketChannel; +import java.util.HashMap; +import java.util.Map; +import javax.net.ssl.SSLContext; + +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.client.ClientConnectionException; +import org.apache.ignite.configuration.ClientConfiguration; +import org.apache.ignite.internal.client.thin.ClientSslUtils; +import org.apache.ignite.internal.client.thin.io.ClientConnection; +import org.apache.ignite.internal.client.thin.io.ClientConnectionMultiplexer; +import org.apache.ignite.internal.client.thin.io.ClientConnectionStateHandler; +import org.apache.ignite.internal.client.thin.io.ClientMessageHandler; +import org.apache.ignite.internal.util.nio.GridNioCodecFilter; +import org.apache.ignite.internal.util.nio.GridNioFilter; +import org.apache.ignite.internal.util.nio.GridNioFuture; +import org.apache.ignite.internal.util.nio.GridNioFutureImpl; +import org.apache.ignite.internal.util.nio.GridNioServer; +import org.apache.ignite.internal.util.nio.GridNioSession; +import org.apache.ignite.internal.util.nio.ssl.GridNioSslFilter; +import org.apache.ignite.logger.NullLogger; + +/** + * Client connection multiplexer based on {@link org.apache.ignite.internal.util.nio.GridNioServer}. + */ +public class GridNioClientConnectionMultiplexer implements ClientConnectionMultiplexer { + /** Worker thread prefix. */ + private static final String THREAD_PREFIX = "thin-client-channel"; + + /** */ + private static final int CLIENT_MODE_PORT = -1; + + /** */ + private final GridNioServer srv; + + /** */ + private final SSLContext sslCtx; + + /** + * Constructor. + * + * @param cfg Client config. 
+ */ + public GridNioClientConnectionMultiplexer(ClientConfiguration cfg) { + IgniteLogger gridLog = new NullLogger(); + + GridNioFilter[] filters; + + GridNioFilter codecFilter = new GridNioCodecFilter(new GridNioClientParser(), gridLog, false); + + sslCtx = ClientSslUtils.getSslContext(cfg); + + if (sslCtx != null) { + GridNioSslFilter sslFilter = new GridNioSslFilter(sslCtx, true, ByteOrder.nativeOrder(), gridLog); + sslFilter.directMode(false); + filters = new GridNioFilter[] {codecFilter, sslFilter}; + } + else + filters = new GridNioFilter[] {codecFilter}; + + try { + srv = GridNioServer.builder() + .port(CLIENT_MODE_PORT) + .listener(new GridNioClientListener()) + .filters(filters) + .logger(gridLog) + .selectorCount(1) // Using more selectors does not seem to improve performance. + .byteOrder(ByteOrder.nativeOrder()) + .directBuffer(true) + .directMode(false) + .igniteInstanceName("thinClient") + .serverName(THREAD_PREFIX) + .idleTimeout(Long.MAX_VALUE) + .socketReceiveBufferSize(cfg.getReceiveBufferSize()) + .socketSendBufferSize(cfg.getSendBufferSize()) + .tcpNoDelay(true) + .build(); + } + catch (IgniteCheckedException e) { + throw new IgniteException(e); + } + } + + /** {@inheritDoc} */ + @Override public void start() { + srv.start(); + } + + /** {@inheritDoc} */ + @Override public void stop() { + srv.stop(); + } + + /** {@inheritDoc} */ + @Override public ClientConnection open(InetSocketAddress addr, + ClientMessageHandler msgHnd, + ClientConnectionStateHandler stateHnd) + throws ClientConnectionException { + try { + SocketChannel ch = SocketChannel.open(); + ch.socket().connect(new InetSocketAddress(addr.getHostName(), addr.getPort()), Integer.MAX_VALUE); + + Map meta = new HashMap<>(); + GridNioFuture sslHandshakeFut = null; + + if (sslCtx != null) { + sslHandshakeFut = new GridNioFutureImpl<>(null); + + meta.put(GridNioSslFilter.HANDSHAKE_FUT_META_KEY, sslHandshakeFut); + } + + GridNioSession ses = srv.createSession(ch, meta, false, null).get(); + + 
if (sslHandshakeFut != null) + sslHandshakeFut.get(); + + return new GridNioClientConnection(ses, msgHnd, stateHnd); + } + catch (Exception e) { + throw new ClientConnectionException(e.getMessage(), e); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientListener.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientListener.java new file mode 100644 index 00000000000000..f33835d9097545 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientListener.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.client.thin.io.gridnioserver; + +import java.nio.ByteBuffer; + +import org.apache.ignite.failure.FailureType; +import org.apache.ignite.internal.util.nio.GridNioServerListener; +import org.apache.ignite.internal.util.nio.GridNioSession; +import org.jetbrains.annotations.Nullable; + +/** + * Client event listener. + */ +class GridNioClientListener implements GridNioServerListener { + /** {@inheritDoc} */ + @Override public void onConnected(GridNioSession ses) { + // No-op. 
+ } + + /** {@inheritDoc} */ + @Override public void onDisconnected(GridNioSession ses, @Nullable Exception e) { + GridNioClientConnection conn = ses.meta(GridNioClientConnection.SES_META_CONN); + + // Conn can be null when connection fails during initialization in open method. + if (conn != null) + conn.onDisconnected(e); + } + + /** {@inheritDoc} */ + @Override public void onMessageSent(GridNioSession ses, ByteBuffer msg) { + // No-op. + } + + /** {@inheritDoc} */ + @Override public void onMessage(GridNioSession ses, ByteBuffer msg) { + GridNioClientConnection conn = ses.meta(GridNioClientConnection.SES_META_CONN); + + assert conn != null : "Session must have an associated connection"; + + conn.onMessage(msg); + } + + /** {@inheritDoc} */ + @Override public void onSessionWriteTimeout(GridNioSession ses) { + // No-op. + } + + /** {@inheritDoc} */ + @Override public void onSessionIdleTimeout(GridNioSession ses) { + // No-op. + } + + /** {@inheritDoc} */ + @Override public void onFailure(FailureType failureType, Throwable failure) { + // No-op. + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientParser.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientParser.java new file mode 100644 index 00000000000000..439c78a72c3068 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientParser.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.client.thin.io.gridnioserver; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; + +import org.apache.ignite.internal.client.thin.io.ClientMessageDecoder; +import org.apache.ignite.internal.util.nio.GridNioParser; +import org.apache.ignite.internal.util.nio.GridNioSession; +import org.apache.ignite.internal.util.nio.GridNioSessionMetaKey; +import org.jetbrains.annotations.Nullable; + +/** + * Client message parser. + */ +class GridNioClientParser implements GridNioParser { + /** */ + private static final int SES_META_DECODER = GridNioSessionMetaKey.nextUniqueKey(); + + /** {@inheritDoc} */ + @Override public @Nullable Object decode(GridNioSession ses, ByteBuffer buf) { + ClientMessageDecoder decoder = ses.meta(SES_META_DECODER); + + if (decoder == null) { + decoder = new ClientMessageDecoder(); + + ses.addMeta(SES_META_DECODER, decoder); + } + + byte[] bytes = decoder.apply(buf); + + if (bytes == null) + return null; // Message is not yet completely received. + + // Thin client protocol is little-endian. ByteBuffer will handle conversion as necessary on big-endian systems. 
+ return ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN); + } + + /** {@inheritDoc} */ + @Override public ByteBuffer encode(GridNioSession ses, Object msg) { + return (ByteBuffer)msg; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/dto/IgniteDataTransferObject.java b/modules/core/src/main/java/org/apache/ignite/internal/dto/IgniteDataTransferObject.java index 4d735d0b2514d6..279586d0a02572 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/dto/IgniteDataTransferObject.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/dto/IgniteDataTransferObject.java @@ -59,6 +59,12 @@ public abstract class IgniteDataTransferObject implements Externalizable { /** Version 7. */ protected static final byte V7 = 7; + /** Version 8. */ + protected static final byte V8 = 8; + + /** Version 9. */ + protected static final byte V9 = 9; + /** * @param col Source collection. * @param Collection type. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java index 9cc585331f9949..bacb797b93e6ed 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java @@ -138,7 +138,7 @@ false, new PropertyValidator() { /** SSL: Key algorithm name. */ private StringProperty sslKeyAlgorithm = new StringProperty("sslKeyAlgorithm", - "SSL key algorithm name", "SunX509", null, false, null); + "SSL key algorithm name", null, null, false, null); /** SSL: Client certificate key store url. 
*/ private StringProperty sslClientCertificateKeyStoreUrl = diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinSSLUtil.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinSSLUtil.java index d62f939cbe255f..e410e4d1fa2c2d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinSSLUtil.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinSSLUtil.java @@ -33,6 +33,10 @@ import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.ssl.SslContextFactory; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_KEY_ALGORITHM; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_SSL_PROTOCOL; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_STORE_TYPE; + /** * SSL utility method to create SSL connetion. */ @@ -136,7 +140,7 @@ private static SSLSocketFactory getSSLSocketFactory(ConnectionProperties connPro cliCertKeyStorePwd = System.getProperty("javax.net.ssl.keyStorePassword"); if (cliCertKeyStoreType == null) - cliCertKeyStoreType = System.getProperty("javax.net.ssl.keyStoreType", "JKS"); + cliCertKeyStoreType = System.getProperty("javax.net.ssl.keyStoreType", DFLT_STORE_TYPE); if (trustCertKeyStoreUrl == null) trustCertKeyStoreUrl = System.getProperty("javax.net.ssl.trustStore"); @@ -145,10 +149,13 @@ private static SSLSocketFactory getSSLSocketFactory(ConnectionProperties connPro trustCertKeyStorePwd = System.getProperty("javax.net.ssl.trustStorePassword"); if (trustCertKeyStoreType == null) - trustCertKeyStoreType = System.getProperty("javax.net.ssl.trustStoreType", "JKS"); + trustCertKeyStoreType = System.getProperty("javax.net.ssl.trustStoreType", DFLT_STORE_TYPE); if (sslProtocol == null) - sslProtocol = "TLS"; + sslProtocol = DFLT_SSL_PROTOCOL; + + if (keyAlgorithm == null) + keyAlgorithm = DFLT_KEY_ALGORITHM; SslContextFactory f = new SslContextFactory(); diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceFileStore.java b/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceFileStore.java index 02698a1c8a891e..4695cbf6b1a62f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceFileStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceFileStore.java @@ -23,7 +23,6 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.stream.Collectors; - import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.internal.processors.cache.persistence.file.FileIO; @@ -52,7 +51,7 @@ */ public class MaintenanceFileStore { /** */ - private static final String MAINTENANCE_FILE_NAME = "maintenance_tasks.mntc"; + public static final String MAINTENANCE_FILE_NAME = "maintenance_tasks.mntc"; /** */ private static final String TASKS_SEPARATOR = System.lineSeparator(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceProcessor.java index 8f85ceb64dc2aa..063bd475562ece 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceProcessor.java @@ -23,7 +23,6 @@ import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; import java.util.stream.Collectors; - import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.internal.GridKernalContext; @@ -162,12 +161,21 @@ public MaintenanceProcessor(GridKernalContext ctx) { ); } - if (!workflowCallbacks.isEmpty()) + if (!workflowCallbacks.isEmpty()) { + if (log.isInfoEnabled()) { + String mntcTasksNames = String.join(", ", workflowCallbacks.keySet()); + + 
log.info("Node requires maintenance, non-empty set of maintenance tasks is found: [" + + mntcTasksNames + ']'); + } + proceedWithMaintenance(); - else { - if (log.isInfoEnabled()) + } + else if (isMaintenanceMode()) { + if (log.isInfoEnabled()) { log.info("All maintenance tasks are fixed, no need to enter maintenance mode. " + "Restart the node to get it back to normal operations."); + } } } @@ -182,11 +190,11 @@ public MaintenanceProcessor(GridKernalContext ctx) { */ private void proceedWithMaintenance() { for (Map.Entry cbE : workflowCallbacks.entrySet()) { - MaintenanceAction mntcAction = cbE.getValue().automaticAction(); + MaintenanceAction mntcAct = cbE.getValue().automaticAction(); - if (mntcAction != null) { + if (mntcAct != null) { try { - mntcAction.execute(); + mntcAct.execute(); } catch (Throwable t) { log.warning("Failed to execute automatic action for maintenance task: " + @@ -236,7 +244,7 @@ private void proceedWithMaintenance() { if (inMemoryMode) throw new IgniteException(IN_MEMORY_MODE_ERR_MSG); - List actions = cb.allActions(); + List> actions = cb.allActions(); if (actions == null || actions.isEmpty()) throw new IgniteException("Maintenance workflow callback should provide at least one mainetance action"); @@ -263,7 +271,7 @@ private void proceedWithMaintenance() { } /** {@inheritDoc} */ - @Override public List actionsForMaintenanceTask(String maintenanceTaskName) { + @Override public List> actionsForMaintenanceTask(String maintenanceTaskName) { if (inMemoryMode) throw new IgniteException(IN_MEMORY_MODE_ERR_MSG); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java index f317024ade88d2..5d536d0bf5bd2c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java @@ -328,7 +328,7 @@ else if (log.isDebugEnabled()) if (isDeadClassLoader(meta)) return null; - if (meta.participants() != null && !meta.participants().isEmpty()) { + if (!F.isEmpty(meta.participants())) { Map participants = new LinkedHashMap<>(); for (Map.Entry e : meta.participants().entrySet()) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java index 0a8c8572fcdd64..65a331fcb0e049 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java @@ -169,6 +169,7 @@ import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_USER_NAME; import static org.apache.ignite.internal.IgniteVersionUtils.VER; import static org.apache.ignite.internal.events.DiscoveryCustomEvent.EVT_DISCOVERY_CUSTOM_EVT; +import static org.apache.ignite.internal.processors.metric.impl.MetricUtils.metricName; import static org.apache.ignite.internal.processors.security.SecurityUtils.isSecurityCompatibilityMode; import static org.apache.ignite.plugin.segmentation.SegmentationPolicy.NOOP; @@ -188,6 +189,9 @@ public class GridDiscoveryManager extends GridManagerAdapter { /** @see IgniteSystemProperties#IGNITE_DISCOVERY_HISTORY_SIZE */ public static final int DFLT_DISCOVERY_HISTORY_SIZE = 500; + /** Name of the discovery metrics registry. */ + public static final String DISCO_METRICS = metricName("io", "discovery"); + /** Predicate filtering out daemon nodes. 
*/ private static final IgnitePredicate FILTER_NOT_DAEMON = new P1() { @Override public boolean apply(ClusterNode n) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupEncryptionKeys.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupEncryptionKeys.java new file mode 100644 index 00000000000000..03b884bf581729 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupEncryptionKeys.java @@ -0,0 +1,376 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.managers.encryption; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.CopyOnWriteArrayList; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.spi.encryption.EncryptionSpi; +import org.jetbrains.annotations.Nullable; + +/** + * Serves for managing encryption keys and related datastructure located in the heap. + */ +class CacheGroupEncryptionKeys { + /** Group encryption keys. */ + private final Map> grpKeys = new ConcurrentHashMap<>(); + + /** + * WAL segments encrypted with previous encryption keys prevent keys from being deleted + * until the associated segment is deleted. + */ + private final Collection trackedWalSegments = new ConcurrentLinkedQueue<>(); + + /** Encryption spi. */ + private final EncryptionSpi encSpi; + + /** + * @param encSpi Encryption spi. + */ + CacheGroupEncryptionKeys(EncryptionSpi encSpi) { + this.encSpi = encSpi; + } + + /** + * Returns group encryption key, that was set for writing. + * + * @param grpId Cache group ID. + * @return Group encryption key with ID, that was set for writing. + */ + @Nullable GroupKey getActiveKey(int grpId) { + List keys = grpKeys.get(grpId); + + if (F.isEmpty(keys)) + return null; + + return keys.get(0); + } + + /** + * Returns group encryption key with specified ID. + * + * @param grpId Cache group ID. + * @param keyId Encryption key ID. + * @return Group encryption key. 
+ */ + @Nullable GroupKey getKey(int grpId, int keyId) { + List keys = grpKeys.get(grpId); + + if (keys == null) + return null; + + for (GroupKey groupKey : keys) { + if (groupKey.unsignedId() == keyId) + return groupKey; + } + + return null; + } + + /** + * Gets the existing encryption key IDs for the specified cache group. + * + * @param grpId Cache group ID. + * @return List of the key IDs. + */ + @Nullable List keyIds(int grpId) { + List keys = grpKeys.get(grpId); + + if (keys == null) + return null; + + List keyIds = new ArrayList<>(keys.size()); + + for (GroupKey groupKey : keys) + keyIds.add(groupKey.unsignedId()); + + return keyIds; + } + + /** + * @return Cache group IDs for which encryption keys are stored. + */ + Set groupIds() { + return grpKeys.keySet(); + } + + /** + * @return Local encryption keys. + */ + @Nullable HashMap getAll() { + if (F.isEmpty(grpKeys)) + return null; + + HashMap keys = U.newHashMap(grpKeys.size()); + + for (Map.Entry> entry : grpKeys.entrySet()) { + int grpId = entry.getKey(); + GroupKey grpKey = entry.getValue().get(0); + + keys.put(grpId, new GroupKeyEncrypted(grpKey.unsignedId(), encSpi.encryptKey(grpKey.key()))); + } + + return keys; + } + + /** + * @param grpId Cache group ID. + * + * @return Local encryption keys used for specified cache group. + */ + @Nullable List getAll(int grpId) { + List grpKeys = this.grpKeys.get(grpId); + + if (F.isEmpty(grpKeys)) + return null; + + List encryptedKeys = new ArrayList<>(grpKeys.size()); + + for (GroupKey grpKey : grpKeys) + encryptedKeys.add(new GroupKeyEncrypted(grpKey.unsignedId(), encSpi.encryptKey(grpKey.key()))); + + return encryptedKeys; + } + + /** + * Sets new encryption key for writing. + * + * @param grpId Cache group ID. + * @param keyId ID of the existing encryption key to be set for writing.. + * @return Previous encryption key used for writing. 
+ */ + GroupKey changeActiveKey(int grpId, int keyId) { + List keys = grpKeys.get(grpId); + + assert !F.isEmpty(keys) : "grpId=" + grpId; + + GroupKey prevKey = keys.get(0); + + assert prevKey.unsignedId() != keyId : "keyId=" + keyId; + + GroupKey newKey = null; + + for (ListIterator itr = keys.listIterator(keys.size()); itr.hasPrevious(); ) { + GroupKey key = itr.previous(); + + if (key.unsignedId() != keyId) + continue; + + newKey = key; + + break; + } + + assert newKey != null : "exp=" + keyId + ", act=" + keys; + + keys.add(0, newKey); + + // Remove the duplicate key(s) from the tail of the list. + keys.subList(1, keys.size()).removeIf(k -> k.unsignedId() == keyId); + + return prevKey; + } + + /** + * Adds new encryption key. + * + * @param grpId Cache group ID. + * @param newEncKey New encrypted key for writing. + * @return {@code True} If a key has been added, {@code False} if the specified key is already present. + */ + boolean addKey(int grpId, GroupKeyEncrypted newEncKey) { + List keys = grpKeys.computeIfAbsent(grpId, v -> new CopyOnWriteArrayList<>()); + + GroupKey grpKey = new GroupKey(newEncKey.id(), encSpi.decryptKey(newEncKey.key())); + + if (!keys.contains(grpKey)) + return keys.add(grpKey); + + return false; + } + + /** + * @param grpId Cache group ID. + * @param encryptedKeys Encrypted keys. + */ + void setGroupKeys(int grpId, List encryptedKeys) { + List keys = new CopyOnWriteArrayList<>(); + + for (GroupKeyEncrypted grpKey : encryptedKeys) + keys.add(new GroupKey(grpKey.id(), encSpi.decryptKey(grpKey.key()))); + + grpKeys.put(grpId, keys); + } + + /** + * Remove encrytion keys associated with the specified cache group. + * + * @param grpId Cache group ID. + * @return List of encryption keys of the removed cache group. + */ + List remove(int grpId) { + return grpKeys.remove(grpId); + } + + /** + * @param grpId Cache group ID. + * @param ids Key IDs for deletion. + * @return {@code True} if the keys have been deleted. 
+ */ + boolean removeKeysById(int grpId, Set ids) { + List keys = grpKeys.get(grpId); + + if (F.isEmpty(keys)) + return false; + + return keys.subList(1, keys.size()).removeIf(key -> ids.contains(key.unsignedId())); + } + + /** + * Remove unused keys. + * + * @param grpId Cache group ID. + * @return Removed key IDs, + */ + Set removeUnusedKeys(int grpId) { + List keys = grpKeys.get(grpId); + Set rmvKeyIds = U.newHashSet(keys.size() - 1); + + rmvKeyIds.addAll(F.viewReadOnly(keys.subList(1, keys.size()), GroupKey::unsignedId)); + + for (TrackedWalSegment segment : trackedWalSegments) { + if (segment.grpId != grpId) + continue; + + rmvKeyIds.remove(segment.keyId); + } + + if (keys.removeIf(key -> rmvKeyIds.contains(key.unsignedId()))) + return rmvKeyIds; + + return Collections.emptySet(); + } + + /** + * @return A collection of tracked (encrypted with previous encryption keys) WAL segments. + */ + Collection trackedWalSegments() { + return Collections.unmodifiableCollection(trackedWalSegments); + } + + /** + * @param segments WAL segments, mapped to cache group encryption key IDs. + */ + void trackedWalSegments(Collection segments) { + trackedWalSegments.addAll(segments); + } + + /** + * Associate WAL segment index with the specified key ID + * to prevent deletion of that encryption key before deleting the segment. + * + * @param grpId Cache group ID. + * @param keyId Encryption key ID. + * @param walIdx WAL segment index. + */ + void reserveWalKey(int grpId, int keyId, long walIdx) { + trackedWalSegments.add(new TrackedWalSegment(walIdx, grpId, keyId)); + } + + /** + * @param grpId Cache group ID. + * @param keyId Encryption key ID. + * @return Wal segment index or null if there no segment associated with the specified cache group ID and key ID. 
+ */ + @Nullable Long reservedSegment(int grpId, int keyId) { + for (TrackedWalSegment segment : trackedWalSegments) { + if (segment.grpId != grpId) + continue; + + if (segment.keyId == keyId) + return segment.idx; + } + + return null; + } + + /** + * Remove all of the segments that are not greater than the specified index. + * + * @param walIdx WAL segment index. + * @return Map of group IDs with key IDs that were associated with removed WAL segments. + */ + Map> releaseWalKeys(long walIdx) { + Map> rmvKeys = new HashMap<>(); + Iterator iter = trackedWalSegments.iterator(); + + while (iter.hasNext()) { + TrackedWalSegment segment = iter.next(); + + if (segment.idx > walIdx) + break; + + iter.remove(); + + rmvKeys.computeIfAbsent(segment.grpId, v -> new HashSet<>()).add(segment.keyId); + } + + return rmvKeys; + } + + /** + * A WAL segment encrypted with a specific encryption key ID. + */ + protected static final class TrackedWalSegment implements Serializable { + /** */ + private static final long serialVersionUID = 0L; + + /** WAL segment index. */ + private final long idx; + + /** Cache group ID. */ + private final int grpId; + + /** Encryption key ID. */ + private final int keyId; + + /** + * @param idx WAL segment index. + * @param grpId Cache group ID. + * @param keyId Encryption key ID. + */ + public TrackedWalSegment(long idx, int grpId, int keyId) { + this.idx = idx; + this.grpId = grpId; + this.keyId = keyId; + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupPageScanner.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupPageScanner.java new file mode 100644 index 00000000000000..c2a68950425d30 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupPageScanner.java @@ -0,0 +1,545 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.managers.encryption; + +import java.util.Collection; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.ReentrantLock; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.NodeStoppingException; +import org.apache.ignite.internal.managers.communication.GridIoPolicy; +import org.apache.ignite.internal.pagemem.PageIdAllocator; +import org.apache.ignite.internal.pagemem.PageIdUtils; +import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; +import 
org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointListener; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; +import org.apache.ignite.internal.processors.cache.persistence.partstate.GroupPartitionId; +import org.apache.ignite.internal.util.BasicRateLimiter; +import org.apache.ignite.internal.util.GridConcurrentHashSet; +import org.apache.ignite.internal.util.future.GridFinishedFuture; +import org.apache.ignite.internal.util.future.GridFutureAdapter; +import org.apache.ignite.internal.util.lang.IgniteInClosureX; +import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.thread.IgniteThreadPoolExecutor; +import org.apache.ignite.thread.OomExceptionHandler; + +import static org.apache.ignite.internal.util.IgniteUtils.MB; + +/** + * Cache group page stores scanner. + * Scans a range of pages and marks them as dirty to re-encrypt them with the last encryption key on disk. + */ +public class CacheGroupPageScanner implements CheckpointListener { + /** Thread prefix for scanning tasks. */ + private static final String REENCRYPT_THREAD_PREFIX = "reencrypt"; + + /** Kernal context. */ + private final GridKernalContext ctx; + + /** Logger. */ + private final IgniteLogger log; + + /** Lock. */ + private final ReentrantLock lock = new ReentrantLock(); + + /** Mapping of cache group ID to group scanning task. */ + private final Map grps = new ConcurrentHashMap<>(); + + /** Collection of groups waiting for a checkpoint. */ + private final Collection cpWaitGrps = new ConcurrentLinkedQueue<>(); + + /** Single-threaded executor to run cache group scan task. */ + private final ThreadPoolExecutor singleExecSvc; + + /** Number of pages that is scanned during reencryption under checkpoint lock. */ + private final int batchSize; + + /** Page scanning speed limiter. */ + private final BasicRateLimiter limiter; + + /** Stop flag. 
*/ + private boolean stopped; + + /** + * @param ctx Grid kernal context. + */ + public CacheGroupPageScanner(GridKernalContext ctx) { + this.ctx = ctx; + + log = ctx.log(getClass()); + + DataStorageConfiguration dsCfg = ctx.config().getDataStorageConfiguration(); + + if (ctx.clientNode() || !CU.isPersistenceEnabled(dsCfg)) { + batchSize = -1; + limiter = null; + singleExecSvc = null; + + return; + } + + double rateLimit = dsCfg.getEncryptionConfiguration().getReencryptionRateLimit(); + + limiter = new BasicRateLimiter(calcPermits(rateLimit, dsCfg)); + + batchSize = dsCfg.getEncryptionConfiguration().getReencryptionBatchSize(); + + singleExecSvc = new IgniteThreadPoolExecutor(REENCRYPT_THREAD_PREFIX, + ctx.igniteInstanceName(), + 1, + 1, + IgniteConfiguration.DFLT_THREAD_KEEP_ALIVE_TIME, + new LinkedBlockingQueue<>(), + GridIoPolicy.SYSTEM_POOL, + new OomExceptionHandler(ctx)); + + singleExecSvc.allowCoreThreadTimeOut(true); + } + + /** {@inheritDoc} */ + @Override public void onCheckpointBegin(Context cpCtx) { + // No-op. + } + + /** {@inheritDoc} */ + @Override public void beforeCheckpointBegin(Context cpCtx) { + Set completeCandidates = new HashSet<>(); + + cpWaitGrps.removeIf(completeCandidates::add); + + cpCtx.finishedStateFut().listen( + f -> { + // Retry if error occurs. + if (f.error() != null || f.isCancelled()) { + cpWaitGrps.addAll(completeCandidates); + + return; + } + + lock.lock(); + + try { + for (GroupScanTask grpScanTask : completeCandidates) { + grps.remove(grpScanTask.groupId()); + + grpScanTask.onDone(); + + if (log.isInfoEnabled()) + log.info("Cache group reencryption is finished [grpId=" + grpScanTask.groupId() + "]"); + } + + if (!grps.isEmpty()) + return; + + ((GridCacheDatabaseSharedManager)ctx.cache().context().database()). + removeCheckpointListener(this); + } + finally { + lock.unlock(); + } + } + ); + } + + /** {@inheritDoc} */ + @Override public void onMarkCheckpointBegin(Context ctx) { + // No-op. 
+ } + + /** + * Schedule scanning partitions. + * + * @param grpId Cache group ID. + */ + public IgniteInternalFuture schedule(int grpId) throws IgniteCheckedException { + CacheGroupContext grp = ctx.cache().cacheGroup(grpId); + + if (grp == null || !grp.affinityNode()) { + if (log.isInfoEnabled()) + log.info("Skip reencryption, cache group doesn't exist on the local node [grp=" + grpId + "]"); + + return new GridFinishedFuture<>(); + } + + lock.lock(); + + try { + if (stopped) + throw new NodeStoppingException("Operation has been cancelled (node is stopping)."); + + if (grps.isEmpty()) + ((GridCacheDatabaseSharedManager)ctx.cache().context().database()).addCheckpointListener(this); + + GroupScanTask prevState = grps.get(grpId); + + if (prevState != null && !prevState.isDone()) { + if (log.isDebugEnabled()) + log.debug("Reencryption already scheduled [grpId=" + grpId + "]"); + + return prevState; + } + + Set parts = new HashSet<>(); + long[] pagesLeft = new long[1]; + + forEachPageStore(grp, new IgniteInClosureX() { + @Override public void applyx(Integer partId) { + long encState = ctx.encryption().getEncryptionState(grpId, partId); + + if (encState == 0) { + if (log.isDebugEnabled()) + log.debug("Skipping partition reencryption [grp=" + grpId + ", p=" + partId + "]"); + + return; + } + + parts.add(partId); + + pagesLeft[0] += (ReencryptStateUtils.pageCount(encState) - ReencryptStateUtils.pageIndex(encState)); + } + }); + + GroupScanTask grpScan = new GroupScanTask(grp, parts, pagesLeft[0]); + + singleExecSvc.submit(grpScan); + + if (log.isInfoEnabled()) + log.info("Scheduled reencryption [grpId=" + grpId + "]"); + + grps.put(grpId, grpScan); + + return grpScan; + } + finally { + lock.unlock(); + } + } + + /** + * @param grpId Cache group ID. + * @return Future that will be completed when all partitions have been scanned and pages have been written to disk. 
+ */ + public IgniteInternalFuture statusFuture(int grpId) { + GroupScanTask grpScanTask = grps.get(grpId); + + return grpScanTask == null ? new GridFinishedFuture<>() : grpScanTask; + } + + /** + * Shutdown scanning and disable new tasks scheduling. + */ + public void stop() throws IgniteCheckedException { + lock.lock(); + + try { + stopped = true; + + for (GroupScanTask grpScanTask : grps.values()) + grpScanTask.cancel(); + + if (singleExecSvc != null) + singleExecSvc.shutdownNow(); + } finally { + lock.unlock(); + } + } + + /** + * Stop scannig the specified partition. + * + * @param grpId Cache group ID. + * @param partId Partition ID. + * @return {@code True} if reencryption was cancelled. + */ + public boolean excludePartition(int grpId, int partId) { + GroupScanTask grpScanTask = grps.get(grpId); + + if (grpScanTask == null) + return false; + + return grpScanTask.excludePartition(partId); + } + + /** + * Collect current number of pages in the specified cache group. + * + * @param grp Cache group. + * @return Partitions with current page count. + * @throws IgniteCheckedException If failed. + */ + public long[] pagesCount(CacheGroupContext grp) throws IgniteCheckedException { + // The last element of the array is used to store the status of the index partition. + long[] partStates = new long[grp.affinity().partitions() + 1]; + + ctx.cache().context().database().checkpointReadLock(); + + try { + forEachPageStore(grp, new IgniteInClosureX() { + @Override public void applyx(Integer partId) throws IgniteCheckedException { + int pagesCnt = ctx.cache().context().pageStore().pages(grp.groupId(), partId); + + partStates[Math.min(partId, partStates.length - 1)] = pagesCnt; + } + }); + } finally { + ctx.cache().context().database().checkpointReadUnlock(); + } + + return partStates; + } + + /** + * @param grpId Cache group ID. + * @return Number of remaining memory pages to scan. 
+ */ + public long remainingPagesCount(int grpId) { + GroupScanTask grpScanTask = grps.get(grpId); + + if (grpScanTask != null) + return grpScanTask.remainingPagesCount(); + + return 0; + } + + /** + * @return Re-encryption rate limit in megabytes per second ({@code 0} - unlimited). + */ + public double getRate() { + DataStorageConfiguration dsCfg = ctx.config().getDataStorageConfiguration(); + + if (CU.isPersistenceEnabled(dsCfg)) + return dsCfg.getPageSize() * limiter.getRate() / MB; + + return 0; + } + + /** + * @param rate Re-encryption rate limit in megabytes per second ({@code 0} - unlimited). + */ + public void setRate(double rate) { + DataStorageConfiguration dsCfg = ctx.config().getDataStorageConfiguration(); + + if (CU.isPersistenceEnabled(dsCfg)) + limiter.setRate(calcPermits(rate, dsCfg)); + } + + /** + * @param rate Maximum scan speed in megabytes per second + * @param dsCfg Datastorage configuration. + * @return The number of permits allowed per second. + */ + private double calcPermits(double rate, DataStorageConfiguration dsCfg) { + return rate * MB / + (dsCfg.getPageSize() == 0 ? DataStorageConfiguration.DFLT_PAGE_SIZE : dsCfg.getPageSize()); + } + + /** + * @param grp Cache group. + * @param hnd Partition handler. + */ + private void forEachPageStore(CacheGroupContext grp, IgniteInClosureX hnd) throws IgniteCheckedException { + int parts = grp.affinity().partitions(); + + IgnitePageStoreManager pageStoreMgr = ctx.cache().context().pageStore(); + + for (int p = 0; p < parts; p++) { + if (!pageStoreMgr.exists(grp.groupId(), p)) + continue; + + hnd.applyx(p); + } + + hnd.applyx(PageIdAllocator.INDEX_PARTITION); + } + + /** + * Cache group partition scanning task. + */ + private class GroupScanTask extends GridFutureAdapter implements Runnable { + /** Cache group ID. */ + private final CacheGroupContext grp; + + /** Partition IDs. */ + private final Set parts; + + /** Page memory. 
*/ + private final PageMemoryEx pageMem; + + /** Total memory pages left for reencryption. */ + private final AtomicLong remainingPagesCntr; + + /** + * @param grp Cache group. + */ + public GroupScanTask(CacheGroupContext grp, Set parts, long remainingPagesCnt) { + this.grp = grp; + this.parts = new GridConcurrentHashSet<>(parts); + + remainingPagesCntr = new AtomicLong(remainingPagesCnt); + pageMem = (PageMemoryEx)grp.dataRegion().pageMemory(); + } + + /** {@inheritDoc} */ + @Override public synchronized boolean cancel() throws IgniteCheckedException { + return onCancelled(); + } + + /** + * Stop reencryption of the specified partition. + * + * @param partId Partition ID. + * @return {@code True} if reencryption was cancelled. + */ + public synchronized boolean excludePartition(int partId) { + long state = ctx.encryption().getEncryptionState(groupId(), partId); + + remainingPagesCntr.addAndGet(ReencryptStateUtils.pageIndex(state) - ReencryptStateUtils.pageCount(state)); + + return parts.remove(partId); + } + + /** + * @return Cache group ID. + */ + public int groupId() { + return grp.groupId(); + } + + /** + * @return Number of remaining memory pages to scan. + */ + public long remainingPagesCount() { + return remainingPagesCntr.get(); + } + + /** {@inheritDoc} */ + @Override public void run() { + try { + for (int partId : parts) { + long state = ctx.encryption().getEncryptionState(grp.groupId(), partId); + + if (state == 0) + continue; + + scanPartition(partId, ReencryptStateUtils.pageIndex(state), ReencryptStateUtils.pageCount(state)); + + if (isDone()) + return; + } + + boolean added = cpWaitGrps.add(this); + + assert added; + } + catch (Throwable t) { + if (X.hasCause(t, NodeStoppingException.class)) + onCancelled(); + else + onDone(t); + } + } + + /** + * @param partId Partition ID. + * @param off Start page offset. + * @param cnt Count of pages to scan. 
+ */ + private void scanPartition(int partId, int off, int cnt) throws IgniteCheckedException { + if (log.isDebugEnabled()) { + log.debug("Partition reencryption is started [grpId=" + grp.groupId() + + ", p=" + partId + ", remain=" + (cnt - off) + ", total=" + cnt + "]"); + } + + while (off < cnt) { + int pagesCnt = Math.min(batchSize, cnt - off); + + limiter.acquire(pagesCnt); + + synchronized (this) { + if (isDone() || !parts.contains(partId)) + break; + + ctx.cache().context().database().checkpointReadLock(); + + try { + off += scanPages(partId, off, pagesCnt); + } + finally { + ctx.cache().context().database().checkpointReadUnlock(); + } + } + + remainingPagesCntr.addAndGet(-pagesCnt); + + ctx.encryption().setEncryptionState(grp, partId, off, cnt); + } + + if (log.isDebugEnabled()) { + log.debug("Partition reencryption is finished " + + "[grpId=" + grp.groupId() + + ", p=" + partId + + ", remain=" + (cnt - off) + + ", total=" + cnt + "]"); + } + } + + /** + * @param off Start page offset. + * @param cnt Count of pages to scan. + * @return Count of scanned pages. + * @throws IgniteCheckedException If failed. 
+ */ + private int scanPages(int partId, int off, int cnt) throws IgniteCheckedException { + int grpId = grp.groupId(); + byte flag = GroupPartitionId.getFlagByPartId(partId); + + for (int pageIdx = off; pageIdx < off + cnt; pageIdx++) { + long pageId = PageIdUtils.pageId(partId, flag, pageIdx); + long page = pageMem.acquirePage(grpId, pageId); + + try { + if (pageMem.isDirty(grpId, pageId, page)) + continue; + + pageMem.writeLock(grpId, pageId, page, true); + pageMem.writeUnlock(grpId, pageId, page, null, true); + } + finally { + pageMem.releasePage(grpId, pageId, page); + } + } + + return cnt; + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/ChangeCacheEncryptionRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/ChangeCacheEncryptionRequest.java new file mode 100644 index 00000000000000..54d7405df98bb3 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/ChangeCacheEncryptionRequest.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.managers.encryption; + +import java.io.Serializable; +import java.util.Objects; +import java.util.UUID; + +/** + * Change cache group encryption key request. + */ +@SuppressWarnings("AssignmentOrReturnOfFieldWithMutableType") +public class ChangeCacheEncryptionRequest implements Serializable { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** Request ID. */ + private final UUID reqId = UUID.randomUUID(); + + /** Cache group IDs. */ + private final int[] grpIds; + + /** Encryption keys. */ + private final byte[][] keys; + + /** Key identifiers. */ + private final byte[] keyIds; + + /** Master key digest. */ + private final byte[] masterKeyDigest; + + /** + * @param grpIds Cache group IDs. + * @param keys Encryption keys. + * @param keyIds Key identifiers. + * @param masterKeyDigest Master key digest. + */ + public ChangeCacheEncryptionRequest(int[] grpIds, byte[][] keys, byte[] keyIds, byte[] masterKeyDigest) { + this.grpIds = grpIds; + this.keys = keys; + this.keyIds = keyIds; + this.masterKeyDigest = masterKeyDigest; + } + + /** + * @return Request ID. + */ + public UUID requestId() { + return reqId; + } + + /** + * @return Cache group IDs. + */ + public int[] groupIds() { + return grpIds; + } + + /** + * @return Encryption keys. + */ + public byte[][] keys() { + return keys; + } + + /** + * @return Key identifiers. + */ + public byte[] keyIds() { return keyIds; } + + /** + * @return Master key digest. 
+ */ + public byte[] masterKeyDigest() { + return masterKeyDigest; + } + + /** {@inheritDoc} */ + @Override public boolean equals(Object o) { + if (this == o) + return true; + + if (o == null || getClass() != o.getClass()) + return false; + + return Objects.equals(reqId, ((ChangeCacheEncryptionRequest)o).reqId); + } + + /** {@inheritDoc} */ + @Override public int hashCode() { + return Objects.hash(reqId); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/EncryptionMXBeanImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/EncryptionMXBeanImpl.java index 027022286bd1ae..f2e31a6f5d5b31 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/EncryptionMXBeanImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/EncryptionMXBeanImpl.java @@ -17,6 +17,7 @@ package org.apache.ignite.internal.managers.encryption; +import java.util.Collections; import org.apache.ignite.internal.GridKernalContextImpl; import org.apache.ignite.mxbean.EncryptionMXBean; @@ -41,4 +42,9 @@ public EncryptionMXBeanImpl(GridKernalContextImpl ctx) { @Override public void changeMasterKey(String masterKeyName) { encryptionMgr.changeMasterKey(masterKeyName).get(); } + + /** {@inheritDoc} */ + @Override public void changeCacheGroupKey(String cacheOrGrpName) { + encryptionMgr.changeCacheGroupKey(Collections.singleton(cacheOrGrpName)).get(); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java index 78590d330cbf2a..d0d467fefb1238 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java @@ -40,6 +40,7 @@ import 
org.apache.ignite.IgniteException; import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.configuration.WALMode; import org.apache.ignite.failure.FailureContext; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.IgniteFeatures; @@ -47,7 +48,9 @@ import org.apache.ignite.internal.managers.GridManagerAdapter; import org.apache.ignite.internal.managers.communication.GridMessageListener; import org.apache.ignite.internal.managers.eventstorage.DiscoveryEventListener; -import org.apache.ignite.internal.pagemem.wal.record.MasterKeyChangeRecord; +import org.apache.ignite.internal.pagemem.wal.record.MasterKeyChangeRecordV2; +import org.apache.ignite.internal.pagemem.wal.record.ReencryptionStartRecord; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; import org.apache.ignite.internal.processors.cache.CacheGroupDescriptor; import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetastorageLifecycleListener; import org.apache.ignite.internal.processors.cache.persistence.metastorage.ReadOnlyMetastorage; @@ -61,6 +64,7 @@ import org.apache.ignite.internal.util.future.IgniteFutureImpl; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.T2; +import org.apache.ignite.internal.util.typedef.internal.A; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteFuture; @@ -72,8 +76,6 @@ import org.apache.ignite.spi.discovery.DiscoveryDataBag; import org.apache.ignite.spi.discovery.DiscoveryDataBag.GridDiscoveryData; import org.apache.ignite.spi.discovery.DiscoveryDataBag.JoiningNodeDiscoveryData; -import org.apache.ignite.spi.discovery.DiscoverySpi; -import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import org.apache.ignite.spi.encryption.EncryptionSpi; import org.jetbrains.annotations.Nullable; 
@@ -83,6 +85,7 @@ import static org.apache.ignite.failure.FailureType.CRITICAL_ERROR; import static org.apache.ignite.internal.GridComponent.DiscoveryDataExchangeType.ENCRYPTION_MGR; import static org.apache.ignite.internal.GridTopic.TOPIC_GEN_ENC_KEY; +import static org.apache.ignite.internal.IgniteFeatures.CACHE_GROUP_KEY_CHANGE; import static org.apache.ignite.internal.IgniteFeatures.MASTER_KEY_CHANGE; import static org.apache.ignite.internal.managers.communication.GridIoPolicy.SYSTEM_POOL; import static org.apache.ignite.internal.util.distributed.DistributedProcess.DistributedProcessType.MASTER_KEY_CHANGE_FINISH; @@ -115,6 +118,8 @@ *
      *
    • 1. If new key for group doesn't exists locally it added to local store.
    • *
    • 2. If new key for group exists locally, then received key skipped.
    • + *
    • 3. If a cache group is encrypted with a different (previous) encryption key, then background + * re-encryption of this group with a new key is started.
    • *
    * * @@ -136,6 +141,22 @@ public class GridEncryptionManager extends GridManagerAdapter imp */ private static final IgniteProductVersion CACHE_ENCRYPTION_SINCE = IgniteProductVersion.fromString("2.7.0"); + /** Prefix for a master key name. */ + public static final String MASTER_KEY_NAME_PREFIX = "encryption-master-key-name"; + + /** Prefix for a encryption group key in meta store, which contains encryption keys with identifiers. */ + public static final String ENCRYPTION_KEYS_PREFIX = "grp-encryption-keys-"; + + /** Initial identifier for cache group encryption key. */ + public static final int INITIAL_KEY_ID = 0; + + /** The name on the meta store key, that contains wal segments encrypted using previous encryption keys. */ + private static final String REENCRYPTED_WAL_SEGMENTS = "reencrypted-wal-segments"; + + /** Prefix for a encryption group key in meta store. */ + @Deprecated + private static final String ENCRYPTION_KEY_PREFIX = "grp-encryption-key-"; + /** Synchronization mutex. */ private final Object metaStorageMux = new Object(); @@ -154,14 +175,8 @@ public class GridEncryptionManager extends GridManagerAdapter imp /** Flag to enable/disable write to metastore on cluster state change. */ private volatile boolean writeToMetaStoreEnabled; - /** Prefix for a encryption group key in meta store. */ - public static final String ENCRYPTION_KEY_PREFIX = "grp-encryption-key-"; - - /** Prefix for a master key name. */ - public static final String MASTER_KEY_NAME_PREFIX = "encryption-master-key-name"; - - /** Group encryption keys. */ - private final ConcurrentHashMap grpEncKeys = new ConcurrentHashMap<>(); + /** Cache group encryption keys. */ + private CacheGroupEncryptionKeys grpKeys; /** Pending generate encryption key futures. */ private ConcurrentMap genEncKeyFuts = new ConcurrentHashMap<>(); @@ -182,7 +197,7 @@ public class GridEncryptionManager extends GridManagerAdapter imp private volatile boolean recoveryMasterKeyName; /** Master key change future. 
Not {@code null} on request initiator. */ - private MasterKeyChangeFuture masterKeyChangeFut; + private KeyChangeFuture masterKeyChangeFut; /** Pending master key request or {@code null} if there is no ongoing master key change process. */ private volatile MasterKeyChangeRequest masterKeyChangeRequest; @@ -194,10 +209,25 @@ public class GridEncryptionManager extends GridManagerAdapter imp * Master key change prepare process. Checks that all server nodes have the same new master key and then starts * finish process. */ - private DistributedProcess prepareMKChangeProc; + private DistributedProcess prepareMKChangeProc; /** Process to perform the master key change. Changes master key and reencrypt group keys. */ - private DistributedProcess performMKChangeProc; + private DistributedProcess performMKChangeProc; + + /** + * A two-phase distributed process that rotates the encryption keys of specified cache groups and initiates + * re-encryption of those cache groups. + */ + private GroupKeyChangeProcess grpKeyChangeProc; + + /** Cache groups for which encryption key was changed, and they must be re-encrypted. */ + private final Map reencryptGroups = new ConcurrentHashMap<>(); + + /** Cache groups for which encryption key was changed on node join. */ + private final Map reencryptGroupsForced = new ConcurrentHashMap<>(); + + /** Cache group page stores scanner. */ + private CacheGroupPageScanner pageScanner; /** * @param ctx Kernel context. 
@@ -283,11 +313,17 @@ public GridEncryptionManager(GridKernalContext ctx) { performMKChangeProc = new DistributedProcess<>(ctx, MASTER_KEY_CHANGE_FINISH, this::performMasterKeyChange, this::finishPerformMasterKeyChange); + + grpKeys = new CacheGroupEncryptionKeys(getSpi()); + pageScanner = new CacheGroupPageScanner(ctx); + grpKeyChangeProc = new GroupKeyChangeProcess(ctx, grpKeys); } /** {@inheritDoc} */ @Override public void stop(boolean cancel) throws IgniteCheckedException { stopSpi(); + + pageScanner.stop(); } /** {@inheritDoc} */ @@ -340,7 +376,7 @@ public GridEncryptionManager(GridKernalContext ctx) { * Callback for local join. */ public void onLocalJoin() { - if (!isCoordinator()) + if (!U.isLocalNodeCoordinator(ctx.discovery())) return; //We can't store keys before node join to cluster(on statically configured cache registration). @@ -350,7 +386,7 @@ public void onLocalJoin() { //And sends that keys to every joining node. synchronized (metaStorageMux) { //Keys read from meta storage. - HashMap knownEncKeys = knownEncryptionKeys(); + HashMap knownEncKeys = grpKeys.getAll(); //Generated(not saved!) keys for a new caches. //Configured statically in config, but doesn't stored on the disk. @@ -362,7 +398,7 @@ public void onLocalJoin() { //We can store keys to the disk, because we are on a coordinator. for (Map.Entry entry : newEncKeys.entrySet()) { - groupKey(entry.getKey(), entry.getValue()); + addGroupKey(entry.getKey(), new GroupKeyEncrypted(INITIAL_KEY_ID, entry.getValue())); U.quietAndInfo(log, "Added encryption key on local join [grpId=" + entry.getKey() + "]"); } @@ -392,6 +428,12 @@ public void onLocalJoin() { if (res != null) return res; + if (grpKeyChangeProc.inProgress()) { + return new IgniteNodeValidationResult(ctx.localNodeId(), + "Cache group key change is in progress! Node join is rejected. [node=" + node.id() + "]", + "Cache group key change is in progress! 
Node join is rejected."); + } + NodeEncryptionKeys nodeEncKeys = (NodeEncryptionKeys)discoData.joiningNodeData(); if (!discoData.hasJoiningNodeData() || nodeEncKeys == null) { @@ -406,23 +448,49 @@ public void onLocalJoin() { "Master key digest differs! Node join is rejected."); } + if (!IgniteFeatures.nodeSupports(node, CACHE_GROUP_KEY_CHANGE)) { + return new IgniteNodeValidationResult(ctx.localNodeId(), + "Joining node doesn't support multiple encryption keys for single group [node=" + node.id() + "]", + "Joining node doesn't support multiple encryption keys for single group."); + } + if (F.isEmpty(nodeEncKeys.knownKeys)) { U.quietAndInfo(log, "Joining node doesn't have stored group keys [node=" + node.id() + "]"); return null; } - for (Map.Entry entry : nodeEncKeys.knownKeys.entrySet()) { - Serializable locEncKey = grpEncKeys.get(entry.getKey()); + assert !F.isEmpty(nodeEncKeys.knownKeysWithIds); + + for (Map.Entry> entry : nodeEncKeys.knownKeysWithIds.entrySet()) { + int grpId = entry.getKey(); + + GroupKey locEncKey = groupKey(grpId); if (locEncKey == null) continue; - Serializable rmtKey = getSpi().decryptKey(entry.getValue()); + List rmtKeys = entry.getValue(); + + if (rmtKeys == null) + continue; + + GroupKeyEncrypted rmtKeyEncrypted = null; + + for (GroupKeyEncrypted rmtKey0 : rmtKeys) { + if (rmtKey0.id() != locEncKey.unsignedId()) + continue; + + rmtKeyEncrypted = rmtKey0; + + break; + } - if (F.eq(locEncKey, rmtKey)) + if (rmtKeyEncrypted == null || F.eq(locEncKey.key(), getSpi().decryptKey(rmtKeyEncrypted.key()))) continue; + // The remote node should not rotate the cache key to the current one + // until the old key (with an identifier that is currently active in the cluster) is removed. return new IgniteNodeValidationResult(ctx.localNodeId(), "Cache key differs! Node join is rejected. [node=" + node.id() + ", grp=" + entry.getKey() + "]", "Cache key differs! 
Node join is rejected."); @@ -433,13 +501,17 @@ public void onLocalJoin() { /** {@inheritDoc} */ @Override public void collectJoiningNodeData(DiscoveryDataBag dataBag) { - if (dataBag.isJoiningNodeClient()) + if (ctx.clientNode()) return; - HashMap knownEncKeys = knownEncryptionKeys(); + Set grpIds = grpKeys.groupIds(); - HashMap newKeys = - newEncryptionKeys(knownEncKeys == null ? Collections.EMPTY_SET : knownEncKeys.keySet()); + HashMap> knownEncKeys = U.newHashMap(grpIds.size()); + + for (int grpId : grpIds) + knownEncKeys.put(grpId, grpKeys.getAll(grpId)); + + HashMap newKeys = newEncryptionKeys(grpIds); if (log.isInfoEnabled()) { String knownGrps = F.isEmpty(knownEncKeys) ? null : F.concat(knownEncKeys.keySet(), ","); @@ -467,9 +539,9 @@ public void onLocalJoin() { for (Map.Entry entry : nodeEncryptionKeys.newKeys.entrySet()) { if (groupKey(entry.getKey()) == null) { U.quietAndInfo(log, "Store group key received from joining node [node=" + - data.joiningNodeId() + ", grp=" + entry.getKey() + "]"); + data.joiningNodeId() + ", grp=" + entry.getKey() + "]"); - groupKey(entry.getKey(), entry.getValue()); + addGroupKey(entry.getKey(), new GroupKeyEncrypted(INITIAL_KEY_ID, entry.getValue())); } else { U.quietAndInfo(log, "Skip group key received from joining node. Already exists. [node=" + @@ -483,16 +555,18 @@ public void onLocalJoin() { if (dataBag.isJoiningNodeClient() || dataBag.commonDataCollectedFor(ENCRYPTION_MGR.ordinal())) return; - HashMap knownEncKeys = knownEncryptionKeys(); + HashMap knownEncKeys = grpKeys.getAll(); HashMap newKeys = newEncryptionKeys(knownEncKeys == null ? 
Collections.EMPTY_SET : knownEncKeys.keySet()); - if (knownEncKeys == null) - knownEncKeys = newKeys; - else if (newKeys != null) { + if (!F.isEmpty(newKeys)) { + if (knownEncKeys == null) + knownEncKeys = new HashMap<>(); + for (Map.Entry entry : newKeys.entrySet()) { - byte[] old = knownEncKeys.putIfAbsent(entry.getKey(), entry.getValue()); + GroupKeyEncrypted old = + knownEncKeys.putIfAbsent(entry.getKey(), new GroupKeyEncrypted(INITIAL_KEY_ID, entry.getValue())); assert old == null; } @@ -506,55 +580,93 @@ else if (newKeys != null) { if (ctx.clientNode()) return; - Map encKeysFromCluster = (Map)data.commonData(); + Map encKeysFromCluster = (Map)data.commonData(); if (F.isEmpty(encKeysFromCluster)) return; - for (Map.Entry entry : encKeysFromCluster.entrySet()) { - if (groupKey(entry.getKey()) == null) { - U.quietAndInfo(log, "Store group key received from coordinator [grp=" + entry.getKey() + "]"); + for (Map.Entry entry : encKeysFromCluster.entrySet()) { + int grpId = entry.getKey(); - groupKey(entry.getKey(), entry.getValue()); - } - else { + GroupKeyEncrypted rmtKey; + + if (entry.getValue() instanceof GroupKeyEncrypted) + rmtKey = (GroupKeyEncrypted)entry.getValue(); + else + rmtKey = new GroupKeyEncrypted(INITIAL_KEY_ID, (byte[])entry.getValue()); + + GroupKey locGrpKey = groupKey(grpId); + + if (locGrpKey != null && locGrpKey.unsignedId() == rmtKey.id()) { U.quietAndInfo(log, "Skip group key received from coordinator. Already exists. 
[grp=" + - entry.getKey() + "]"); + grpId + ", keyId=" + rmtKey.id() + "]"); + + continue; } + + U.quietAndInfo(log, "Store group key received from coordinator [grp=" + grpId + + ", keyId=" + rmtKey.id() + "]"); + + grpKeys.addKey(grpId, rmtKey); + + if (locGrpKey == null) + continue; + + GroupKey prevKey = grpKeys.changeActiveKey(grpId, rmtKey.id()); + + if (ctx.config().getDataStorageConfiguration().getWalMode() != WALMode.NONE) + grpKeys.reserveWalKey(grpId, prevKey.unsignedId(), ctx.cache().context().wal().currentSegment()); + + reencryptGroupsForced.put(grpId, rmtKey.id()); } } /** * Returns group encryption key. * - * @param grpId Group id. - * @return Group encryption key. + * @param grpId Cache group ID. + * @return Group encryption key with identifier, that was set for writing. */ - @Nullable public Serializable groupKey(int grpId) { - if (grpEncKeys.isEmpty()) - return null; - - return grpEncKeys.get(grpId); + @Nullable public GroupKey groupKey(int grpId) { + return grpKeys.getActiveKey(grpId); } /** - * Store group encryption key. + * Returns group encryption key with specified identifier. * - * @param grpId Group id. - * @param encGrpKey Encrypted group key. + * @param grpId Cache group ID. + * @param keyId Encryption key ID. + * @return Group encryption key. */ - public void groupKey(int grpId, byte[] encGrpKey) { - assert !grpEncKeys.containsKey(grpId); + @Nullable public GroupKey groupKey(int grpId, int keyId) { + return grpKeys.getKey(grpId, keyId); + } - Serializable encKey = withMasterKeyChangeReadLock(() -> getSpi().decryptKey(encGrpKey)); + /** + * Gets the existing encryption key IDs for the specified cache group. + * + * @param grpId Cache group ID. + * @return List of the key identifiers. + */ + @Nullable public List groupKeyIds(int grpId) { + return grpKeys.keyIds(grpId); + } + /** + * Adds new cache group encryption key. + * + * @param grpId Cache group ID. + * @param key Encryption key. 
+ */ + void addGroupKey(int grpId, GroupKeyEncrypted key) { synchronized (metaStorageMux) { - if (log.isDebugEnabled()) - log.debug("Key added. [grp=" + grpId + "]"); - - grpEncKeys.put(grpId, encKey); + try { + grpKeys.addKey(grpId, key); - writeToMetaStore(grpId, encGrpKey); + writeGroupKeysToMetaStore(grpId, grpKeys.getAll(grpId)); + } catch (IgniteCheckedException e) { + throw new IgniteException("Failed to write cache group encryption key [grpId=" + grpId + ']', e); + } } } @@ -588,7 +700,7 @@ public void groupKey(int grpId, byte[] encGrpKey) { digest = masterKeyDigest(masterKeyName); } catch (Exception e) { return new IgniteFinishedFutureImpl<>(new IgniteException("Master key change was rejected. " + - "Unable to get the master key digest.")); + "Unable to get the master key digest.", e)); } MasterKeyChangeRequest request = new MasterKeyChangeRequest(UUID.randomUUID(), encryptKeyName(masterKeyName), @@ -611,7 +723,7 @@ public void groupKey(int grpId, byte[] encGrpKey) { "The previous change was not completed.")); } - masterKeyChangeFut = new MasterKeyChangeFuture(request.requestId()); + masterKeyChangeFut = new KeyChangeFuture(request.requestId()); prepareMKChangeProc.start(request.requestId(), request); @@ -627,22 +739,121 @@ public void groupKey(int grpId, byte[] encGrpKey) { return withMasterKeyChangeReadLock(() -> getSpi().getMasterKeyName()); } + /** {@inheritDoc} */ + @Override public IgniteFuture changeCacheGroupKey(Collection cacheOrGrpNames) { + A.notEmpty(cacheOrGrpNames, "cacheOrGrpNames"); + + synchronized (opsMux) { + if (stopped) { + return new IgniteFinishedFutureImpl<>(new IgniteException("Cache group key change was rejected. " + + "Node is stopping.")); + } + + return grpKeyChangeProc.start(cacheOrGrpNames); + } + } + + /** + * @param grpIds Cache group IDs. + * @param keyIds Encryption key IDs. + * @param keys Encryption keys. + * @throws IgniteCheckedException If failed. 
+ */ + protected void changeCacheGroupKeyLocal(int[] grpIds, byte[] keyIds, byte[][] keys) throws IgniteCheckedException { + Map encryptionStatus = U.newHashMap(grpIds.length); + + for (int i = 0; i < grpIds.length; i++) + encryptionStatus.put(grpIds[i], keyIds[i]); + + WALPointer ptr = ctx.cache().context().wal().log(new ReencryptionStartRecord(encryptionStatus)); + + if (ptr != null) + ctx.cache().context().wal().flush(ptr, false); + + for (int i = 0; i < grpIds.length; i++) { + int grpId = grpIds[i]; + int newKeyId = keyIds[i] & 0xff; + + withMasterKeyChangeReadLock(() -> { + synchronized (metaStorageMux) { + // Set new key as key for writing. Note that we cannot pass the encrypted key here because the master + // key may have changed in which case we will not be able to decrypt the cache encryption key. + GroupKey prevGrpKey = grpKeys.changeActiveKey(grpId, newKeyId); + + writeGroupKeysToMetaStore(grpId, grpKeys.getAll(grpId)); + + if (ptr == null) + return null; + + grpKeys.reserveWalKey(grpId, prevGrpKey.unsignedId(), ctx.cache().context().wal().currentSegment()); + + writeTrackedWalIdxsToMetaStore(); + } + + return null; + }); + + CacheGroupContext grp = ctx.cache().cacheGroup(grpId); + + if (grp != null && grp.affinityNode()) + reencryptGroups.put(grpId, pageScanner.pagesCount(grp)); + + if (log.isInfoEnabled()) + log.info("New encryption key for group was added [grpId=" + grpId + ", keyId=" + newKeyId + "]"); + } + + startReencryption(encryptionStatus.keySet()); + } + + /** + * @param grpId Cache group ID. + * @return Future that will be completed when reencryption of the specified group is finished. + */ + public IgniteInternalFuture reencryptionFuture(int grpId) { + return pageScanner.statusFuture(grpId); + } + + /** + * @param grpId Cache group ID. + * @return {@code True} If the specified cache group is currently being re-encrypted. 
+ */ + public boolean reencryptionInProgress(int grpId) { + // The method guarantees not only the completion of the re-encryption, but also that the clearing of + // unused keys is complete. + return reencryptGroups.containsKey(grpId); + } + + /** + * @return Re-encryption rate limit in megabytes per second ({@code 0} - unlimited). + */ + public double getReencryptionRate() { + return pageScanner.getRate(); + } + + /** + * @param rate Re-encryption rate limit in megabytes per second ({@code 0} - unlimited). + */ + public void setReencryptionRate(double rate) { + pageScanner.setRate(rate); + } + /** - * Removes encryption key. + * Removes encryption key(s). * - * @param grpId Group id. + * @param grpId Cache group ID. */ private void removeGroupKey(int grpId) { synchronized (metaStorageMux) { ctx.cache().context().database().checkpointReadLock(); try { - grpEncKeys.remove(grpId); + if (grpKeys.remove(grpId) == null) + return; - metaStorage.remove(ENCRYPTION_KEY_PREFIX + grpId); + metaStorage.remove(ENCRYPTION_KEYS_PREFIX + grpId); if (log.isDebugEnabled()) - log.debug("Key removed. [grp=" + grpId + "]"); + log.debug("Key(s) removed. [grp=" + grpId + "]"); } catch (IgniteCheckedException e) { U.error(log, "Failed to clear meta storage", e); @@ -655,19 +866,41 @@ private void removeGroupKey(int grpId) { /** * Callback for cache group start event. - * @param grpId Group id. + * + * @param grpId Cache group ID. * @param encKey Encryption key */ public void beforeCacheGroupStart(int grpId, @Nullable byte[] encKey) { if (encKey == null || ctx.clientNode()) return; - groupKey(grpId, encKey); + withMasterKeyChangeReadLock(() -> { + addGroupKey(grpId, new GroupKeyEncrypted(INITIAL_KEY_ID, encKey)); + + return null; + }); + } + + /** + * Callback is called before invalidate page memory. + * + * @param grpId Cache group ID. 
+ */ + public void onCacheGroupStop(int grpId) { + try { + reencryptionFuture(grpId).cancel(); + } + catch (IgniteCheckedException e) { + log.warning("Unable to cancel reencryption [grpId=" + grpId + "]", e); + } + + reencryptGroups.remove(grpId); } /** * Callback for cache group destroy event. - * @param grpId Group id. + * + * @param grpId Cache group ID. */ public void onCacheGroupDestroyed(int grpId) { if (groupKey(grpId) == null) @@ -676,6 +909,59 @@ public void onCacheGroupDestroyed(int grpId) { removeGroupKey(grpId); } + /** + * @param grp Cache group. + * @param partId Partition ID. + */ + public void onDestroyPartitionStore(CacheGroupContext grp, int partId) { + if (pageScanner.excludePartition(grp.groupId(), partId)) + setEncryptionState(grp, partId, 0, 0); + } + + /** + * Callback when WAL segment is removed. + * + * @param segmentIdx WAL segment index. + */ + public void onWalSegmentRemoved(long segmentIdx) { + withMasterKeyChangeReadLock(() -> { + synchronized (metaStorageMux) { + Map> rmvKeys = grpKeys.releaseWalKeys(segmentIdx); + + if (F.isEmpty(rmvKeys)) + return null; + + try { + writeTrackedWalIdxsToMetaStore(); + + for (Map.Entry> entry : rmvKeys.entrySet()) { + Integer grpId = entry.getKey(); + + if (reencryptGroups.containsKey(grpId)) + continue; + + Set keyIds = entry.getValue(); + + if (!grpKeys.removeKeysById(grpId, keyIds)) + continue; + + writeGroupKeysToMetaStore(grpId, grpKeys.getAll(grpId)); + + if (log.isInfoEnabled()) { + log.info("Previous encryption keys have been removed [grpId=" + grpId + + ", keyIds=" + keyIds + "]"); + } + } + } + catch (IgniteCheckedException e) { + log.error("Unable to remove encryption keys from metastore.", e); + } + } + + return null; + }); + } + /** {@inheritDoc} */ @Override public void onReadyForRead(ReadOnlyMetastorage metastorage) { try { @@ -690,17 +976,35 @@ public void onCacheGroupDestroyed(int grpId) { } } - metastorage.iterate(ENCRYPTION_KEY_PREFIX, (key, val) -> { - Integer grpId = 
Integer.valueOf(key.replace(ENCRYPTION_KEY_PREFIX, "")); + metastorage.iterate(ENCRYPTION_KEYS_PREFIX, (key, val) -> { + int grpId = Integer.parseInt(key.replace(ENCRYPTION_KEYS_PREFIX, "")); - byte[] encGrpKey = (byte[])val; + if (grpKeys.groupIds().contains(grpId)) + return; - grpEncKeys.computeIfAbsent(grpId, k -> getSpi().decryptKey(encGrpKey)); + grpKeys.setGroupKeys(grpId, (List)val); }, true); - if (!grpEncKeys.isEmpty()) { - U.quietAndInfo(log, "Encryption keys loaded from metastore. [grps=" + - F.concat(grpEncKeys.keySet(), ",") + ", masterKeyName=" + getSpi().getMasterKeyName() + ']'); + // Try to read keys in previous format. + if (grpKeys.groupIds().isEmpty()) { + metastorage.iterate(ENCRYPTION_KEY_PREFIX, (key, val) -> { + int grpId = Integer.parseInt(key.replace(ENCRYPTION_KEY_PREFIX, "")); + + GroupKeyEncrypted grpKey = new GroupKeyEncrypted(INITIAL_KEY_ID, (byte[])val); + + grpKeys.setGroupKeys(grpId, Collections.singletonList(grpKey)); + }, true); + } + + Serializable savedSegments = metastorage.read(REENCRYPTED_WAL_SEGMENTS); + + if (savedSegments != null) + grpKeys.trackedWalSegments((Collection)savedSegments); + + if (grpKeys.groupIds().isEmpty()) { + U.quietAndInfo(log, "Encryption keys loaded from metastore. 
" + + "[grps=" + F.concat(grpKeys.groupIds(), ",") + + ", masterKeyName=" + getSpi().getMasterKeyName() + ']'); } } catch (IgniteCheckedException e) { @@ -730,20 +1034,47 @@ public void onCacheGroupDestroyed(int grpId) { /** {@inheritDoc} */ @Override public void onReadyForReadWrite(ReadWriteMetastorage metaStorage) throws IgniteCheckedException { - synchronized (metaStorageMux) { - this.metaStorage = metaStorage; + withMasterKeyChangeReadLock(() -> { + synchronized (metaStorageMux) { + this.metaStorage = metaStorage; - writeToMetaStoreEnabled = true; + writeToMetaStoreEnabled = true; - if (recoveryMasterKeyName) - writeKeysToWal(); + if (recoveryMasterKeyName) + writeKeysToWal(); + + writeKeysToMetaStore(restoredFromWAL || recoveryMasterKeyName); + + restoredFromWAL = false; + + recoveryMasterKeyName = false; + } + + return null; + }); - writeKeysToMetaStore(restoredFromWAL || recoveryMasterKeyName); + for (Map.Entry entry : reencryptGroupsForced.entrySet()) { + int grpId = entry.getKey(); - restoredFromWAL = false; + if (reencryptGroups.containsKey(grpId)) + continue; + + if (entry.getValue() != groupKey(grpId).unsignedId()) + continue; + + CacheGroupContext grp = ctx.cache().cacheGroup(grpId); + + if (grp == null || !grp.affinityNode()) + continue; + + long[] offsets = pageScanner.pagesCount(grp); - recoveryMasterKeyName = false; + reencryptGroups.put(grpId, offsets); } + + reencryptGroupsForced.clear(); + + startReencryption(reencryptGroups.keySet()); } /** {@inheritDoc} */ @@ -767,6 +1098,45 @@ public void onCacheGroupDestroyed(int grpId) { } } + /** + * Set reencryption status for partition. + * + * @param grp Cache group. + * @param partId Partition ID. + * @param idx Index of the last reencrypted page. + * @param total Total pages to be reencrypted. + */ + public void setEncryptionState(CacheGroupContext grp, int partId, int idx, int total) { + // The last element of the array is used to store the status of the index partition. 
+ long[] states = reencryptGroups.computeIfAbsent(grp.groupId(), v -> new long[grp.affinity().partitions() + 1]); + + states[Math.min(partId, states.length - 1)] = ReencryptStateUtils.state(idx, total); + } + + /** + * Get reencryption status for partition. + * + * @param grpId Cache group ID. + * @param partId Parttiion ID. + * @return Index and count of pages to be reencrypted. + */ + public long getEncryptionState(int grpId, int partId) { + long[] states = reencryptGroups.get(grpId); + + if (states == null) + return 0; + + return states[Math.min(partId, states.length - 1)]; + } + + /** + * @param grpId Cache group ID. + * @return The number of bytes left for re-ecryption. + */ + public long getBytesLeftForReencryption(int grpId) { + return pageScanner.remainingPagesCount(grpId) * ctx.config().getDataStorageConfiguration().getPageSize(); + } + /** * @param keyCnt Count of keys to generate. * @return Future that will contain results of generation. @@ -811,6 +1181,77 @@ private void sendGenerateEncryptionKeyRequest(GenerateEncryptionKeyFuture fut) t ctx.io().sendToGridTopic(rndNode.id(), TOPIC_GEN_ENC_KEY, req, SYSTEM_POOL); } + /** + * Suspend re-encryption of the cache group. + * + * @param grpId Cache group ID. + */ + public boolean suspendReencryption(int grpId) throws IgniteCheckedException { + return reencryptionFuture(grpId).cancel(); + } + + /** + * Forces re-encryption of the cache group. + * + * @param grpId Cache group ID. + */ + public boolean resumeReencryption(int grpId) throws IgniteCheckedException { + if (!reencryptionFuture(grpId).isDone()) + return false; + + if (!reencryptionInProgress(grpId)) + throw new IgniteCheckedException("Re-encryption completed or not required [grpId=" + grpId + "]"); + + startReencryption(Collections.singleton(grpId)); + + return true; + } + + /** + * @param grpIds Cache group IDs. + * @throws IgniteCheckedException If failed. 
+ */ + private void startReencryption(Collection grpIds) throws IgniteCheckedException { + for (int grpId : grpIds) { + IgniteInternalFuture fut = pageScanner.schedule(grpId); + + fut.listen(f -> { + if (f.isCancelled() || f.error() != null) { + log.warning("Reencryption " + + (f.isCancelled() ? "cancelled" : "failed") + " [grp=" + grpId + "]", f.error()); + + return; + } + + withMasterKeyChangeReadLock(() -> { + synchronized (metaStorageMux) { + cleanupKeys(grpId); + + reencryptGroups.remove(grpId); + } + + return null; + }); + }); + } + } + + /** + * @param grpId Cache group ID. + * @throws IgniteCheckedException If failed. + */ + private void cleanupKeys(int grpId) throws IgniteCheckedException { + Set rmvKeyIds = grpKeys.removeUnusedKeys(grpId); + + if (rmvKeyIds.isEmpty()) + return; + + writeGroupKeysToMetaStore(grpId, grpKeys.getAll(grpId)); + + if (log.isInfoEnabled()) + log.info("Previous encryption keys were removed [grpId=" + grpId + ", keyIds=" + rmvKeyIds + "]"); + } + /** * Writes all unsaved grpEncKeys to metaStorage. * @@ -821,11 +1262,55 @@ private void writeKeysToMetaStore(boolean writeAll) throws IgniteCheckedExceptio if (writeAll) metaStorage.write(MASTER_KEY_NAME_PREFIX, getSpi().getMasterKeyName()); - for (Map.Entry entry : grpEncKeys.entrySet()) { - if (!writeAll && metaStorage.read(ENCRYPTION_KEY_PREFIX + entry.getKey()) != null) + if (!reencryptGroupsForced.isEmpty()) + writeTrackedWalIdxsToMetaStore(); + + for (Integer grpId : grpKeys.groupIds()) { + if (!writeAll && !reencryptGroupsForced.containsKey(grpId) && + metaStorage.read(ENCRYPTION_KEYS_PREFIX + grpId) != null) continue; - writeToMetaStore(entry.getKey(), getSpi().encryptKey(entry.getValue())); + writeGroupKeysToMetaStore(grpId, grpKeys.getAll(grpId)); + } + } + + /** + * Writes cache group encryption keys to metastore. + * + * @param grpId Cache group ID. 
+ */ + private void writeGroupKeysToMetaStore(int grpId, List keys) throws IgniteCheckedException { + assert Thread.holdsLock(metaStorageMux); + + if (metaStorage == null || !writeToMetaStoreEnabled || stopped) + return; + + ctx.cache().context().database().checkpointReadLock(); + + try { + metaStorage.write(ENCRYPTION_KEYS_PREFIX + grpId, (Serializable)keys); + } + finally { + ctx.cache().context().database().checkpointReadUnlock(); + } + } + + /** + * Writes tracked (encrypted with previous encryption keys) WAL segments to metastore. + */ + private void writeTrackedWalIdxsToMetaStore() throws IgniteCheckedException { + assert Thread.holdsLock(metaStorageMux); + + if (metaStorage == null || !writeToMetaStoreEnabled || stopped) + return; + + ctx.cache().context().database().checkpointReadLock(); + + try { + metaStorage.write(REENCRYPTED_WAL_SEGMENTS, (Serializable)grpKeys.trackedWalSegments()); + } + finally { + ctx.cache().context().database().checkpointReadUnlock(); } } @@ -850,29 +1335,6 @@ public void checkEncryptedCacheSupported() throws IgniteCheckedException { return ENCRYPTION_MGR; } - /** - * Writes encryption key to metastore. - * - * @param grpId Group id. - * @param encGrpKey Group encryption key. - */ - private void writeToMetaStore(int grpId, byte[] encGrpKey) { - if (metaStorage == null || !writeToMetaStoreEnabled) - return; - - ctx.cache().context().database().checkpointReadLock(); - - try { - metaStorage.write(ENCRYPTION_KEY_PREFIX + grpId, encGrpKey); - } - catch (IgniteCheckedException e) { - throw new IgniteException("Failed to write cache group encryption key [grpId=" + grpId + ']', e); - } - finally { - ctx.cache().context().database().checkpointReadUnlock(); - } - } - /** * @param knownKeys Saved keys set. * @return New keys for local cache groups. @@ -897,28 +1359,13 @@ private void writeToMetaStore(int grpId, byte[] encGrpKey) { return newKeys; } - /** - * @return Local encryption keys. 
- */ - @Nullable private HashMap knownEncryptionKeys() { - if (F.isEmpty(grpEncKeys)) - return null; - - HashMap knownKeys = new HashMap<>(); - - for (Map.Entry entry : grpEncKeys.entrySet()) - knownKeys.put(entry.getKey(), getSpi().encryptKey(entry.getValue())); - - return knownKeys; - } - /** * Generates required count of encryption keys. * * @param keyCnt Keys count. * @return Tuple of collection with newly generated encryption keys and master key digest. */ - private T2, byte[]> createKeys(int keyCnt) { + T2, byte[]> createKeys(int keyCnt) { return withMasterKeyChangeReadLock(() -> { if (keyCnt == 0) return new T2<>(Collections.emptyList(), getSpi().masterKeyDigest()); @@ -974,12 +1421,14 @@ private void doChangeMasterKey(String name) { /** Writes the record with the master key name and all keys to WAL. */ private void writeKeysToWal() throws IgniteCheckedException { - Map reencryptedKeys = new HashMap<>(); + List> reencryptedKeys = new ArrayList<>(); - for (Map.Entry entry : grpEncKeys.entrySet()) - reencryptedKeys.put(entry.getKey(), getSpi().encryptKey(entry.getValue())); + for (int grpId : grpKeys.groupIds()) { + for (GroupKeyEncrypted grpKey : grpKeys.getAll(grpId)) + reencryptedKeys.add(new T2<>(grpId, grpKey)); + } - MasterKeyChangeRecord rec = new MasterKeyChangeRecord(getSpi().getMasterKeyName(), reencryptedKeys); + MasterKeyChangeRecordV2 rec = new MasterKeyChangeRecordV2(getSpi().getMasterKeyName(), reencryptedKeys); WALPointer ptr = ctx.cache().context().wal().log(rec); @@ -991,7 +1440,7 @@ private void writeKeysToWal() throws IgniteCheckedException { * * @param rec Record. 
*/ - public void applyKeys(MasterKeyChangeRecord rec) { + public void applyKeys(MasterKeyChangeRecordV2 rec) { assert !writeToMetaStoreEnabled && !ctx.state().clusterState().active(); log.info("Master key name loaded from WAL [masterKeyName=" + rec.getMasterKeyName() + ']'); @@ -999,8 +1448,13 @@ public void applyKeys(MasterKeyChangeRecord rec) { try { getSpi().setMasterKeyName(rec.getMasterKeyName()); - for (Map.Entry entry : rec.getGrpKeys().entrySet()) - grpEncKeys.put(entry.getKey(), getSpi().decryptKey(entry.getValue())); + Map> keysMap = new HashMap<>(); + + for (T2 entry : rec.getGrpKeys()) + keysMap.computeIfAbsent(entry.getKey(), v -> new ArrayList<>()).add(entry.getValue()); + + for (Map.Entry> entry : keysMap.entrySet()) + grpKeys.setGroupKeys(entry.getKey(), entry.getValue()); restoredFromWAL = true; } catch (IgniteSpiException e) { @@ -1008,13 +1462,25 @@ public void applyKeys(MasterKeyChangeRecord rec) { } } + /** + * Start reencryption using logical WAL record. + * + * @param rec Reencryption start logical record. + */ + public void applyReencryptionStartRecord(ReencryptionStartRecord rec) { + assert !writeToMetaStoreEnabled; + + for (Map.Entry e : rec.groups().entrySet()) + reencryptGroupsForced.put(e.getKey(), e.getValue() & 0xff); + } + /** * Prepares master key change. Checks master key consistency. * * @param req Request. * @return Result future. */ - private IgniteInternalFuture prepareMasterKeyChange(MasterKeyChangeRequest req) { + private IgniteInternalFuture prepareMasterKeyChange(MasterKeyChangeRequest req) { if (masterKeyChangeRequest != null) { return new GridFinishedFuture<>(new IgniteException("Master key change was rejected. 
" + "The previous change was not completed.")); @@ -1044,7 +1510,7 @@ private IgniteInternalFuture prepareMasterKeyChange(Maste ctx.localNodeId() + ']', e)); } - return new GridFinishedFuture<>(new MasterKeyChangeResult()); + return new GridFinishedFuture<>(new EmptyResult()); } /** @@ -1054,14 +1520,14 @@ private IgniteInternalFuture prepareMasterKeyChange(Maste * @param res Results. * @param err Errors. */ - private void finishPrepareMasterKeyChange(UUID id, Map res, Map err) { + private void finishPrepareMasterKeyChange(UUID id, Map res, Map err) { if (!err.isEmpty()) { if (masterKeyChangeRequest != null && masterKeyChangeRequest.requestId().equals(id)) masterKeyChangeRequest = null; completeMasterKeyChangeFuture(id, err); } - else if (isCoordinator()) + else if (U.isLocalNodeCoordinator(ctx.discovery())) performMKChangeProc.start(id, masterKeyChangeRequest); } @@ -1071,7 +1537,7 @@ else if (isCoordinator()) * @param req Request. * @return Result future. */ - private IgniteInternalFuture performMasterKeyChange(MasterKeyChangeRequest req) { + private IgniteInternalFuture performMasterKeyChange(MasterKeyChangeRequest req) { if (masterKeyChangeRequest == null || !masterKeyChangeRequest.equals(req)) return new GridFinishedFuture<>(new IgniteException("Unknown master key change was rejected.")); @@ -1089,7 +1555,7 @@ private IgniteInternalFuture performMasterKeyChange(Maste masterKeyDigest = req.digest(); - return new GridFinishedFuture<>(new MasterKeyChangeResult()); + return new GridFinishedFuture<>(new EmptyResult()); } /** @@ -1099,7 +1565,7 @@ private IgniteInternalFuture performMasterKeyChange(Maste * @param res Results. * @param err Errors. */ - private void finishPerformMasterKeyChange(UUID id, Map res, Map err) { + private void finishPerformMasterKeyChange(UUID id, Map res, Map err) { completeMasterKeyChangeFuture(id, err); } @@ -1130,29 +1596,16 @@ private void completeMasterKeyChangeFuture(UUID reqId, Map err) * @param msg Error message. 
*/ private void cancelFutures(String msg) { + assert Thread.holdsLock(opsMux); + for (GenerateEncryptionKeyFuture fut : genEncKeyFuts.values()) fut.onDone(new IgniteFutureCancelledException(msg)); if (masterKeyChangeFut != null && !masterKeyChangeFut.isDone()) masterKeyChangeFut.onDone(new IgniteFutureCancelledException(msg)); - } - - /** - * Checks whether local node is coordinator. Nodes that are leaving or failed - * (but are still in topology) are removed from search. - * - * @return {@code true} if local node is coordinator. - */ - private boolean isCoordinator() { - DiscoverySpi spi = ctx.discovery().getInjectedDiscoverySpi(); - if (spi instanceof TcpDiscoverySpi) - return ((TcpDiscoverySpi)spi).isLocalNodeCoordinator(); - else { - ClusterNode crd = U.oldest(ctx.discovery().aliveServerNodes(), null); - - return crd != null && F.eq(ctx.localNodeId(), crd.id()); - } + if (grpKeyChangeProc != null) + grpKeyChangeProc.cancel(msg); } /** @return {@code True} if the master key change process in progress. */ @@ -1167,7 +1620,7 @@ public boolean isMasterKeyChangeInProgress() { * * @return Digest of last changed master key or {@code null} if master key was not changed. */ - public byte[] masterKeyDigest() { + @Nullable public byte[] masterKeyDigest() { return masterKeyDigest; } @@ -1175,7 +1628,7 @@ public byte[] masterKeyDigest() { * @param c Callable to run with master key change read lock. * @return Computed result. */ - private T withMasterKeyChangeReadLock(Callable c) { + T withMasterKeyChangeReadLock(Callable c) { masterKeyChangeLock.readLock().lock(); try { @@ -1339,24 +1792,38 @@ byte[] digest() { } /** */ - private static class MasterKeyChangeResult implements Serializable { + protected static class EmptyResult implements Serializable { /** Serial version uid. 
*/ private static final long serialVersionUID = 0L; } /** */ - public static class NodeEncryptionKeys implements Serializable { + protected static class NodeEncryptionKeys implements Serializable { /** */ private static final long serialVersionUID = 0L; /** */ - NodeEncryptionKeys(Map knownKeys, Map newKeys, byte[] masterKeyDigest) { - this.knownKeys = knownKeys; + NodeEncryptionKeys( + HashMap> knownKeysWithIds, + Map newKeys, + byte[] masterKeyDigest + ) { this.newKeys = newKeys; this.masterKeyDigest = masterKeyDigest; + + if (F.isEmpty(knownKeysWithIds)) + return; + + // To be able to join the old cluster. + knownKeys = U.newHashMap(knownKeysWithIds.size()); + + for (Map.Entry> entry : knownKeysWithIds.entrySet()) + knownKeys.put(entry.getKey(), entry.getValue().get(0).key()); + + this.knownKeysWithIds = knownKeysWithIds; } - /** Known i.e. stored in {@code ReadWriteMetastorage} keys from node. */ + /** Known i.e. stored in {@code ReadWriteMetastorage} keys from node (in compatible format). */ Map knownKeys; /** New keys i.e. keys for a local statically configured caches. */ @@ -1364,6 +1831,9 @@ public static class NodeEncryptionKeys implements Serializable { /** Master key digest. */ byte[] masterKeyDigest; + + /** Known i.e. stored in {@code ReadWriteMetastorage} keys from node. */ + Map> knownKeysWithIds; } /** */ @@ -1423,13 +1893,13 @@ public int keyCount() { } } - /** Master key change future. */ - private static class MasterKeyChangeFuture extends GridFutureAdapter { + /** Key change future. */ + protected static class KeyChangeFuture extends GridFutureAdapter { /** Request ID. */ private final UUID id; /** @param id Request ID. 
*/ - private MasterKeyChangeFuture(UUID id) { + KeyChangeFuture(UUID id) { this.id = id; } @@ -1440,7 +1910,7 @@ public UUID id() { /** {@inheritDoc} */ @Override public String toString() { - return S.toString(MasterKeyChangeFuture.class, this); + return S.toString(KeyChangeFuture.class, this); } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GroupKey.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GroupKey.java new file mode 100644 index 00000000000000..5182a18b952a2e --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GroupKey.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.managers.encryption; + +import java.io.Serializable; +import java.util.Objects; + +/** + * Cache group encryption key with identifier. + */ +public class GroupKey { + /** Encryption key ID. */ + private final int id; + + /** Encryption key. */ + private final Serializable key; + + /** + * @param id Encryption key ID. + * @param key Encryption key. 
+ */ + public GroupKey(int id, Serializable key) { + this.id = id; + this.key = key; + } + + /** + * @return Encryption key ID. + */ + public byte id() { + return (byte)id; + } + + /** + * @return Unsigned encryption key ID. + */ + public int unsignedId() { + return id & 0xff; + } + + /** + * @return Encryption key. + */ + public Serializable key() { + return key; + } + + /** {@inheritDoc} */ + @Override public boolean equals(Object o) { + if (this == o) + return true; + + if (o == null || getClass() != o.getClass()) + return false; + + GroupKey grpKey = (GroupKey)o; + + return id == grpKey.id && Objects.equals(key, grpKey.key); + } + + /** {@inheritDoc} */ + @Override public int hashCode() { + return Objects.hash(id, key); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return "GroupKey [id=" + id + ']'; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GroupKeyChangeProcess.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GroupKeyChangeProcess.java new file mode 100644 index 00000000000000..3e12351de27e62 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GroupKeyChangeProcess.java @@ -0,0 +1,356 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.managers.encryption; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.IgniteFeatures; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.managers.encryption.GridEncryptionManager.EmptyResult; +import org.apache.ignite.internal.managers.encryption.GridEncryptionManager.KeyChangeFuture; +import org.apache.ignite.internal.processors.cache.CacheGroupDescriptor; +import org.apache.ignite.internal.processors.cache.DynamicCacheDescriptor; +import org.apache.ignite.internal.util.distributed.DistributedProcess; +import org.apache.ignite.internal.util.future.GridFinishedFuture; +import org.apache.ignite.internal.util.future.GridFutureAdapter; +import org.apache.ignite.internal.util.future.IgniteFinishedFutureImpl; +import org.apache.ignite.internal.util.future.IgniteFutureImpl; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.T2; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteFuture; +import org.apache.ignite.lang.IgniteFutureCancelledException; + +import static org.apache.ignite.internal.IgniteFeatures.CACHE_GROUP_KEY_CHANGE; +import static org.apache.ignite.internal.util.distributed.DistributedProcess.DistributedProcessType.CACHE_GROUP_KEY_CHANGE_FINISH; +import static org.apache.ignite.internal.util.distributed.DistributedProcess.DistributedProcessType.CACHE_GROUP_KEY_CHANGE_PREPARE; + +/** + * A two-phase distributed process that rotates the encryption keys 
 of specified cache groups and initiates + * re-encryption of those cache groups. + */ +class GroupKeyChangeProcess { + /** Grid kernal context. */ + private final GridKernalContext ctx; + + /** Cache group encryption key change prepare phase. */ + private final DistributedProcess prepareGKChangeProc; + + /** Cache group encryption key change perform phase. */ + private final DistributedProcess performGKChangeProc; + + /** Group encryption keys. */ + private final CacheGroupEncryptionKeys keys; + + /** Cache group key change future. */ + private volatile GroupKeyChangeFuture fut; + + /** Cache group key change request. */ + private volatile ChangeCacheEncryptionRequest req; + + /** + * @param ctx Grid kernal context. + * @param keys Cache group encryption keys. + */ + GroupKeyChangeProcess(GridKernalContext ctx, CacheGroupEncryptionKeys keys) { + this.ctx = ctx; + this.keys = keys; + + prepareGKChangeProc = + new DistributedProcess<>(ctx, CACHE_GROUP_KEY_CHANGE_PREPARE, this::prepare, this::finishPrepare); + performGKChangeProc = + new DistributedProcess<>(ctx, CACHE_GROUP_KEY_CHANGE_FINISH, this::perform, this::finishPerform); + } + + /** + * @return {@code True} if operation is still in progress. + */ + public boolean inProgress() { + return req != null; + } + + /** + * @param msg Error message. + */ + public void cancel(String msg) { + GridFutureAdapter keyChangeFut = fut; + + if (keyChangeFut != null && !keyChangeFut.isDone()) + keyChangeFut.onDone(new IgniteFutureCancelledException(msg)); + } + + /** + * Starts cache group encryption key change process. + * + * @param cacheOrGrpNames Cache or group names. 
+ */ + public IgniteFuture start(Collection cacheOrGrpNames) { + if (ctx.clientNode()) + throw new UnsupportedOperationException("Client and daemon nodes can not perform this operation."); + + if (!IgniteFeatures.allNodesSupports(ctx.grid().cluster().nodes(), CACHE_GROUP_KEY_CHANGE)) + throw new IllegalStateException("Not all nodes in the cluster support this operation."); + + if (!ctx.state().clusterState().state().active()) + throw new IgniteException("Operation was rejected. The cluster is inactive."); + + IgniteInternalFuture fut0 = fut; + + if (fut0 != null && !fut0.isDone()) { + return new IgniteFinishedFutureImpl<>(new IgniteException("Cache group key change was rejected. " + + "The previous change was not completed.")); + } + + int[] grpIds = new int[cacheOrGrpNames.size()]; + byte[] keyIds = new byte[grpIds.length]; + + int n = 0; + + for (String cacheOrGroupName : cacheOrGrpNames) { + CacheGroupDescriptor grpDesc = ctx.cache().cacheGroupDescriptor(CU.cacheId(cacheOrGroupName)); + + if (grpDesc == null) { + DynamicCacheDescriptor cacheDesc = ctx.cache().cacheDescriptor(cacheOrGroupName); + + if (cacheDesc == null) { + throw new IgniteException("Cache group key change was rejected. " + + "Cache or group \"" + cacheOrGroupName + "\" doesn't exists"); + } + + int grpId = cacheDesc.groupId(); + + grpDesc = ctx.cache().cacheGroupDescriptor(grpId); + + if (grpDesc.sharedGroup()) { + throw new IgniteException("Cache group key change was rejected. " + + "Cache or group \"" + cacheOrGroupName + "\" is a part of group \"" + + grpDesc.groupName() + "\". Provide group name instead of cache name for shared groups."); + } + } + + if (!grpDesc.config().isEncryptionEnabled()) { + throw new IgniteException("Cache group key change was rejected. " + + "Cache or group \"" + cacheOrGroupName + "\" is not encrypted."); + } + + if (ctx.encryption().reencryptionInProgress(grpDesc.groupId())) { + throw new IgniteException("Cache group key change was rejected. 
" + + "Cache group reencryption is in progress [grp=" + cacheOrGroupName + "]"); + } + + grpIds[n] = grpDesc.groupId(); + keyIds[n] = (byte)(ctx.encryption().groupKey(grpDesc.groupId()).unsignedId() + 1); + + n += 1; + } + + T2, byte[]> keysAndDigest = ctx.encryption().createKeys(grpIds.length); + + ChangeCacheEncryptionRequest req = new ChangeCacheEncryptionRequest( + grpIds, + keysAndDigest.get1().toArray(new byte[grpIds.length][]), + keyIds, + keysAndDigest.get2() + ); + + fut = new GroupKeyChangeFuture(req); + + prepareGKChangeProc.start(req.requestId(), req); + + return new IgniteFutureImpl<>(fut); + } + + /** + * Validates existing keys. + * + * @param req Request. + * @return Result future. + */ + private IgniteInternalFuture prepare(ChangeCacheEncryptionRequest req) { + if (ctx.clientNode()) + return new GridFinishedFuture<>(); + + if (inProgress()) { + return new GridFinishedFuture<>(new IgniteException("Cache group key change was rejected. " + + "The previous change was not completed.")); + } + + this.req = req; + + try { + for (int i = 0; i < req.groupIds().length; i++) { + int grpId = req.groupIds()[i]; + int keyId = req.keyIds()[i] & 0xff; + + if (ctx.encryption().reencryptionInProgress(grpId)) { + return new GridFinishedFuture<>(new IgniteException("Cache group key change was rejected. " + + "Cache group reencryption is in progress [grpId=" + grpId + "]")); + } + + List keyIds = ctx.encryption().groupKeyIds(grpId); + + if (keyIds == null) { + return new GridFinishedFuture<>(new IgniteException("Cache group key change was rejected." + + "Encrypted cache group not found [grpId=" + grpId + "]")); + } + + GroupKey currKey = ctx.encryption().groupKey(grpId); + + for (int locKeyId : keyIds) { + if (locKeyId != keyId) + continue; + + Long walSegment = keys.reservedSegment(grpId, keyId); + + // Can overwrite inactive key if it was added during prepare phase. 
+ if (walSegment == null && currKey.id() != (byte)keyId) + continue; + + return new GridFinishedFuture<>( + new IgniteException("Cache group key change was rejected. Cannot add new key identifier, " + + "it's already present. There existing WAL segments that encrypted with this key [" + + "grpId=" + grpId + ", newId=" + keyId + ", currId=" + currKey.unsignedId() + + ", walSegment=" + walSegment + "].")); + } + } + + return ctx.encryption().withMasterKeyChangeReadLock(() -> { + if (!Arrays.equals(ctx.config().getEncryptionSpi().masterKeyDigest(), req.masterKeyDigest())) { + return new GridFinishedFuture<>(new IgniteException("Cache group key change was rejected. " + + "Master key has been changed.")); + } + + for (int i = 0; i < req.groupIds().length; i++) { + // Save the new key as inactive, because the master key may change later + // and there will be no way to decrypt the received keys. + GroupKeyEncrypted grpKey = new GroupKeyEncrypted(req.keyIds()[i] & 0xff, req.keys()[i]); + + ctx.encryption().addGroupKey(req.groupIds()[i], grpKey); + } + + return new GridFinishedFuture<>(new EmptyResult()); + }); + + } + catch (Exception e) { + return new GridFinishedFuture<>(new IgniteException("Cache group key change was rejected [nodeId=" + + ctx.localNodeId() + ']', e)); + } + } + + /** + * Starts group key change if there are no errors. + * + * @param id Request id. + * @param res Results. + * @param err Errors. + */ + private void finishPrepare(UUID id, Map res, Map err) { + if (!err.isEmpty()) { + if (req != null && req.requestId().equals(id)) + req = null; + + completeFuture(id, err, fut); + } + else if (U.isLocalNodeCoordinator(ctx.discovery())) + performGKChangeProc.start(id, req); + } + + /** + * Sets new encrpytion key as active (for writing) and starts background reencryption. + * + * @param req Request. + * @return Result future. 
+ */ + private IgniteInternalFuture perform(ChangeCacheEncryptionRequest req) { + if (this.req == null || !this.req.equals(req)) + return new GridFinishedFuture<>(new IgniteException("Unknown cache group key change was rejected.")); + + try { + if (!ctx.state().clusterState().state().active()) + throw new IgniteException("Cache group key change was rejected. The cluster is inactive."); + + if (!ctx.clientNode()) + ctx.encryption().changeCacheGroupKeyLocal(req.groupIds(), req.keyIds(), req.keys()); + } catch (Exception e) { + return new GridFinishedFuture<>(e); + } finally { + this.req = null; + } + + return new GridFinishedFuture<>(new EmptyResult()); + } + + /** + * Finishes cache encryption key rotation. + * + * @param id Request id. + * @param res Results. + * @param err Errors. + */ + private void finishPerform(UUID id, Map res, Map err) { + completeFuture(id, err, fut); + } + + /** + * @param reqId Request id. + * @param err Exception. + * @param fut Key change future. + * @return {@code True} if future was completed by this call. + */ + private boolean completeFuture(UUID reqId, Map err, GroupKeyChangeFuture fut) { + boolean isInitiator = fut != null && fut.id().equals(reqId); + + if (!isInitiator || fut.isDone()) + return false; + + return !F.isEmpty(err) ? fut.onDone(F.firstValue(err)) : fut.onDone(); + } + + /** Cache group key change future. */ + private static class GroupKeyChangeFuture extends KeyChangeFuture { + /** Request. */ + private final ChangeCacheEncryptionRequest req; + + /** + * @param req Request. + */ + GroupKeyChangeFuture(ChangeCacheEncryptionRequest req) { + super(req.requestId()); + + this.req = req; + } + + /** @return Topology version. 
*/ + public ChangeCacheEncryptionRequest request() { + return req; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(GroupKeyChangeFuture.class, this); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GroupKeyEncrypted.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GroupKeyEncrypted.java new file mode 100644 index 00000000000000..6b2ed0543038b2 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GroupKeyEncrypted.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.managers.encryption; + +import java.io.Serializable; + +/** + * Cache group encryption key with identifier. Key is encrypted. + */ +public class GroupKeyEncrypted implements Serializable { + /** Serial version UID. */ + private static final long serialVersionUID = 0L; + + /** Encryption key ID. */ + private final int id; + + /** Encryption key. */ + private final byte[] key; + + /** + * @param id Encryption key ID. + * @param key Encryption key. 
+ */ + public GroupKeyEncrypted(int id, byte[] key) { + this.id = id; + this.key = key; + } + + /** + * @return Encryption key ID. + */ + public int id() { + return id; + } + + /** + * @return Encryption key. + */ + public byte[] key() { + return key; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/ReencryptStateUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/ReencryptStateUtils.java new file mode 100644 index 00000000000000..37292d99ceee21 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/ReencryptStateUtils.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.managers.encryption; + +/** */ +public class ReencryptStateUtils { + /** + * @param idx Index of the last reencrypted page. + * @param total Total pages to be reencrypted. + * @return Reencryption status. + */ + public static long state(int idx, int total) { + return ((long)idx) << Integer.SIZE | (total & 0xffffffffL); + } + + /** + * @param state Reencryption status. + * @return Index of the last reencrypted page. 
+ */ + public static int pageIndex(long state) { + return (int)(state >> Integer.SIZE); + } + + /** + * @param state Reencryption status. + * @return Total pages to be reencrypted. + */ + public static int pageCount(long state) { + return (int)state; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdAllocator.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdAllocator.java index d91d31da329571..f97ada7aa794a0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdAllocator.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdAllocator.java @@ -19,16 +19,32 @@ import org.apache.ignite.IgniteCheckedException; +import static org.apache.ignite.internal.pagemem.PageIdUtils.pageId; + /** * Allocates page ID's. */ public interface PageIdAllocator { - /** */ + /** + * Flag for Data page. + * Also used by partition meta and tracking pages. + * This type doesn't use Page ID rotation mechanism. + */ public static final byte FLAG_DATA = 1; - /** */ + /** + * Flag for index page. + * Also used by internal structure in inmemory caches. + * This type uses Page ID rotation mechanism. + */ public static final byte FLAG_IDX = 2; + /** + * Flag for internal structure page. + * This type uses Page ID rotation mechanism. + */ + public static final byte FLAG_AUX = 4; + /** Max partition ID that can be used by affinity. */ public static final int MAX_PARTITION_ID = 65500; @@ -41,6 +57,9 @@ public interface PageIdAllocator { /** Special partition reserved for metastore space. */ public static final int METASTORE_PARTITION = 0x1; + /** Cache group meta page id. */ + public static final long META_PAGE_ID = pageId(INDEX_PARTITION, FLAG_IDX, 0); + /** * Allocates a page from the space for the given partition ID and the given flags. 
* diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdUtils.java index c48f4a899f7904..395586c8c508b0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdUtils.java @@ -108,7 +108,7 @@ public static int pageIndex(long pageId) { * @return Page ID. */ public static long pageId(long link) { - return flag(link) == PageIdAllocator.FLAG_IDX ? link : link & PAGE_ID_MASK; + return flag(link) == PageIdAllocator.FLAG_DATA ? link & PAGE_ID_MASK : link; } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/IgnitePageStoreManager.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/IgnitePageStoreManager.java index 8216b96a707e10..da606a6809065f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/IgnitePageStoreManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/IgnitePageStoreManager.java @@ -19,6 +19,7 @@ import java.nio.ByteBuffer; import java.util.Map; +import java.util.function.LongConsumer; import java.util.function.Predicate; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.configuration.CacheConfiguration; @@ -27,13 +28,13 @@ import org.apache.ignite.internal.processors.cache.CacheGroupDescriptor; import org.apache.ignite.internal.processors.cache.GridCacheSharedManager; import org.apache.ignite.internal.processors.cache.StoredCacheData; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageReadWriteManager; import org.apache.ignite.internal.processors.cluster.IgniteChangeGlobalStateSupport; -import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; /** * */ -public interface IgnitePageStoreManager extends GridCacheSharedManager, IgniteChangeGlobalStateSupport { +public 
interface IgnitePageStoreManager extends GridCacheSharedManager, IgniteChangeGlobalStateSupport, PageReadWriteManager { /** * Invoked before starting checkpoint recover. */ @@ -53,7 +54,7 @@ public interface IgnitePageStoreManager extends GridCacheSharedManager, IgniteCh * @param tracker Allocation tracker. * @throws IgniteCheckedException If failed. */ - void initialize(int cacheId, int partitions, String workingDir, LongAdderMetric tracker) + void initialize(int cacheId, int partitions, String workingDir, LongConsumer tracker) throws IgniteCheckedException; /** @@ -101,16 +102,6 @@ public void initializeForCache(CacheGroupDescriptor grpDesc, StoredCacheData cac */ public void onPartitionDestroyed(int grpId, int partId, int tag) throws IgniteCheckedException; - /** - * Reads a page for the given cache ID. Cache ID may be {@code 0} if the page is a meta page. - * - * @param grpId Cache group ID. - * @param pageId PageID to read. - * @param pageBuf Page buffer to write to. - * @throws IgniteCheckedException If failed to read the page. - */ - public void read(int grpId, long pageId, ByteBuffer pageBuf) throws IgniteCheckedException; - /** * Checks if partition store exists. * @@ -139,7 +130,7 @@ public void initializeForCache(CacheGroupDescriptor grpDesc, StoredCacheData cac * @param pageBuf Page buffer to write. * @throws IgniteCheckedException If failed to write page. */ - public void write(int grpId, long pageId, ByteBuffer pageBuf, int tag) throws IgniteCheckedException; + @Override public PageStore write(int grpId, long pageId, ByteBuffer pageBuf, int tag, boolean calculateCrc) throws IgniteCheckedException; /** * Gets page offset within the page store file. @@ -171,12 +162,12 @@ public void initializeForCache(CacheGroupDescriptor grpDesc, StoredCacheData cac * Allocates a page for the given page space. * * @param grpId Cache group ID. - * @param partId Partition ID. Used only if {@code flags} is equal to {@link PageMemory#FLAG_DATA}. 
+ * @param partId Partition ID. Used only if {@code flags} is not equal to {@link PageMemory#FLAG_IDX}. * @param flags Page allocation flags. * @return Allocated page ID. * @throws IgniteCheckedException If IO exception occurred while allocating a page ID. */ - public long allocatePage(int grpId, int partId, byte flags) throws IgniteCheckedException; + @Override public long allocatePage(int grpId, int partId, byte flags) throws IgniteCheckedException; /** * Gets total number of allocated pages for the given space. @@ -188,14 +179,6 @@ public void initializeForCache(CacheGroupDescriptor grpDesc, StoredCacheData cac */ public int pages(int grpId, int partId) throws IgniteCheckedException; - /** - * Gets meta page ID for specified cache. - * - * @param grpId Cache group ID. - * @return Meta page ID. - */ - public long metaPageId(int grpId); - /** * @return Saved cache configurations. * @throws IgniteCheckedException If failed. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/PageStore.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/PageStore.java index 1d9e5014b9e566..528c682e69b47e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/PageStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/PageStore.java @@ -26,6 +26,12 @@ * Persistent store of pages. */ public interface PageStore extends Closeable { + /** Type for regular affinity partitions. */ + public static byte TYPE_DATA = 1; + + /** Type for index partition. */ + public static byte TYPE_IDX = 2; + /** * @param lsnr Page write listener to set. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/PageStoreCollection.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/PageStoreCollection.java new file mode 100644 index 00000000000000..8caf0409fab3f2 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/PageStoreCollection.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.pagemem.store; + +import java.util.Collection; +import org.apache.ignite.IgniteCheckedException; + +/** + * A collection that contains {@link PageStore} elements. + */ +public interface PageStoreCollection { + /** + * @param grpId Cache group ID. + * @param partId Partition ID. + * @return Page store for the corresponding parameters. + * @throws IgniteCheckedException If cache or partition with the given ID was not created. + */ + public PageStore getStore(int grpId, int partId) throws IgniteCheckedException; + + /** + * @param grpId Cache group ID. + * @return Collection of related page stores. + * @throws IgniteCheckedException If failed. 
+ */ + public Collection getStores(int grpId) throws IgniteCheckedException; +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java index 40e70b37acbd82..cb4fc306cdb49a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java @@ -166,6 +166,11 @@ public WALIterator replay( */ public void notchLastCheckpointPtr(WALPointer ptr); + /** + * @return Current segment index. + */ + public long currentSegment(); + /** * @return Total number of segments in the WAL archive. */ @@ -209,4 +214,19 @@ public WALIterator replay( * @param grpId Group id. */ public boolean disabled(int grpId); + + /** + * Getting local WAL segment size. + * + * @param idx Absolute segment index. + * @return Segment size, {@code 0} if size is unknown. + */ + long segmentSize(long idx); + + /** + * Get last written pointer. + * + * @return Last written pointer. + */ + WALPointer lastWritePointer(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/MasterKeyChangeRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/MasterKeyChangeRecord.java index 583ff733d41262..dba07b8cbb23e5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/MasterKeyChangeRecord.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/MasterKeyChangeRecord.java @@ -24,7 +24,10 @@ /** * Logical record that stores encryption keys. Written to the WAL on the master key change. + * + * @deprecated Replaced by MasterKeyChangeRecordV2. */ +@Deprecated public class MasterKeyChangeRecord extends WALRecord { /** Master key name. 
*/ private final String masterKeyName; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/MasterKeyChangeRecordV2.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/MasterKeyChangeRecordV2.java new file mode 100644 index 00000000000000..10145d822118d2 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/MasterKeyChangeRecordV2.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.pagemem.wal.record; + +import java.util.List; +import org.apache.ignite.internal.managers.encryption.GroupKeyEncrypted; +import org.apache.ignite.internal.util.typedef.T2; + +import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.MASTER_KEY_CHANGE_RECORD_V2; + +/** + * Logical record that stores encryption keys. Written to the WAL on the master key change. + */ +public class MasterKeyChangeRecordV2 extends WALRecord { + /** Master key name. */ + private final String masterKeyName; + + /** Group keys encrypted by the master key. */ + private final List> grpKeys; + + /** + * @param masterKeyName Master key name. + * @param grpKeys Encrypted group keys. 
+ */ + public MasterKeyChangeRecordV2(String masterKeyName, List> grpKeys) { + this.masterKeyName = masterKeyName; + this.grpKeys = grpKeys; + } + + /** @return Master key name. */ + public String getMasterKeyName() { + return masterKeyName; + } + + /** @return Encrypted group keys. */ + public List> getGrpKeys() { + return grpKeys; + } + + /** {@inheritDoc} */ + @Override public RecordType type() { + return MASTER_KEY_CHANGE_RECORD_V2; + } + + /** @return Record data size. */ + public int dataSize() { + int size = /*Master key name length*/4 + masterKeyName.getBytes().length + /*list size*/4; + + for (T2 entry : grpKeys) + size += /*grpId*/4 + /*grp key size*/4 + /*grp key id size*/1 + entry.get2().key().length; + + return size; + } +} + diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/ReencryptionStartRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/ReencryptionStartRecord.java new file mode 100644 index 00000000000000..c8b08d2f2b089a --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/ReencryptionStartRecord.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.pagemem.wal.record; + +import java.util.Map; + +/** + * Logical record to restart reencryption with the latest encryption key. + */ +public class ReencryptionStartRecord extends WALRecord { + /** Map of reencrypted cache groups with encryption key identifiers. */ + private final Map grps; + + /** + * @param grps Map of reencrypted cache groups with encryption key identifiers. + */ + public ReencryptionStartRecord(Map grps) { + this.grps = grps; + } + + /** + * @return Map of reencrypted cache groups with encryption key identifiers. + */ + public Map groups() { + return grps; + } + + /** {@inheritDoc} */ + @Override public RecordType type() { + return RecordType.REENCRYPTION_START_RECORD; + } + + /** @return Record data size. */ + public int dataSize() { + return 4 + ((/*grpId*/4 + /*keyId*/1) * grps.size()); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java index 8a01467c3eb2b6..f07b71a82fc897 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java @@ -234,7 +234,25 @@ public enum RecordType { TRACKING_PAGE_REPAIR_DELTA(61, PHYSICAL), /** Atomic out-of-order update. */ - OUT_OF_ORDER_UPDATE(62, LOGICAL); + OUT_OF_ORDER_UPDATE(62, LOGICAL), + + /** Encrypted WAL-record. */ + ENCRYPTED_RECORD_V2(63, PHYSICAL), + + /** Ecnrypted data record. */ + ENCRYPTED_DATA_RECORD_V2(64, LOGICAL), + + /** Master key change record containing multiple keys for single cache group. */ + MASTER_KEY_CHANGE_RECORD_V2(65, LOGICAL), + + /** Logical record to restart reencryption with the latest encryption key. */ + REENCRYPTION_START_RECORD(66, LOGICAL), + + /** Partition meta page delta record includes encryption status data. 
*/ + PARTITION_META_PAGE_DELTA_RECORD_V3(67, PHYSICAL), + + /** Index meta page delta record includes encryption status data. */ + INDEX_META_PAGE_DELTA_RECORD(68, PHYSICAL); /** Index for serialization. Should be consistent throughout all versions. */ private final int idx; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdateIndexDataRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdateIndexDataRecord.java new file mode 100644 index 00000000000000..e2f83c0ef3d390 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdateIndexDataRecord.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.pagemem.wal.record.delta; + +import java.io.DataInput; +import java.io.IOException; +import java.nio.ByteBuffer; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.pagemem.PageIdUtils; +import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageMetaIOV2; +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * Meta page delta record, includes encryption status data. + */ +public class MetaPageUpdateIndexDataRecord extends PageDeltaRecord { + /** Index of the last reencrypted page. */ + private int encryptPageIdx; + + /** Total pages to be reencrypted. */ + private int encryptPageCnt; + + /** + * @param grpId Cache group ID. + * @param pageId Page ID. + * @param encryptPageIdx Index of the last reencrypted page. + * @param encryptPageCnt Total pages to be reencrypted. + */ + public MetaPageUpdateIndexDataRecord(int grpId, long pageId, int encryptPageIdx, int encryptPageCnt) { + super(grpId, pageId); + + this.encryptPageIdx = encryptPageIdx; + this.encryptPageCnt = encryptPageCnt; + } + + /** {@inheritDoc} */ + @Override public void applyDelta(PageMemory pageMem, long pageAddr) throws IgniteCheckedException { + if (PageIO.getVersion(pageAddr) < 2) + ((PageMetaIOV2)PageMetaIOV2.VERSIONS.latest()).upgradePage(pageAddr); + + PageMetaIOV2 io = (PageMetaIOV2)PageMetaIOV2.VERSIONS.forPage(pageAddr); + + io.setEncryptedPageIndex(pageAddr, encryptPageIdx); + io.setEncryptedPageCount(pageAddr, encryptPageCnt); + } + + /** {@inheritDoc} */ + @Override public RecordType type() { + return RecordType.INDEX_META_PAGE_DELTA_RECORD; + } + + /** + * @param in Input. 
+ */ + public MetaPageUpdateIndexDataRecord(DataInput in) throws IOException { + super(in.readInt(), in.readLong()); + + encryptPageIdx = in.readInt(); + encryptPageCnt = in.readInt(); + } + + /** + * @param buf Buffer. + */ + public void toBytes(ByteBuffer buf) { + buf.putInt(groupId()); + buf.putLong(pageId()); + + buf.putInt(encryptionPagesIndex()); + buf.putInt(encryptionPagesCount()); + } + + /** + * @return Index of the last reencrypted page. + */ + private int encryptionPagesIndex() { + return encryptPageIdx; + } + + /** + * @return Total pages to be reencrypted. + */ + private int encryptionPagesCount() { + return encryptPageCnt; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(MetaPageUpdateIndexDataRecord.class, this, "partId", + PageIdUtils.partId(pageId()), "super", super.toString()); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdatePartitionDataRecordV3.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdatePartitionDataRecordV3.java new file mode 100644 index 00000000000000..c7a71570560bf0 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdatePartitionDataRecordV3.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.pagemem.wal.record.delta; + +import java.io.DataInput; +import java.io.IOException; +import java.nio.ByteBuffer; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.pagemem.PageIdUtils; +import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PagePartitionMetaIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PagePartitionMetaIOV3; +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * Partition meta page delta record includes encryption status data. + */ +public class MetaPageUpdatePartitionDataRecordV3 extends MetaPageUpdatePartitionDataRecordV2 { + /** Index of the last reencrypted page. */ + private int encryptedPageIdx; + + /** Total pages to be reencrypted. */ + private int encryptedPageCnt; + + /** + * @param grpId Group id. + * @param pageId Page id. + * @param updateCntr Update counter. + * @param globalRmvId Global remove id. + * @param partSize Partition size. + * @param cntrsPageId Cntrs page id. + * @param state State. + * @param allocatedIdxCandidate Allocated index candidate. + * @param link Link. + * @param encryptedPageIdx Index of the last reencrypted page. + * @param encryptedPageCnt Total pages to be reencrypted. 
+ */ + public MetaPageUpdatePartitionDataRecordV3( + int grpId, + long pageId, + long updateCntr, + long globalRmvId, + int partSize, + long cntrsPageId, + byte state, + int allocatedIdxCandidate, + long link, + int encryptedPageIdx, + int encryptedPageCnt) { + super(grpId, pageId, updateCntr, globalRmvId, partSize, cntrsPageId, state, allocatedIdxCandidate, link); + + this.encryptedPageIdx = encryptedPageIdx; + this.encryptedPageCnt = encryptedPageCnt; + } + + /** + * @param in Input. + */ + public MetaPageUpdatePartitionDataRecordV3(DataInput in) throws IOException { + super(in); + + encryptedPageIdx = in.readInt(); + encryptedPageCnt = in.readInt(); + } + + /** {@inheritDoc} */ + @Override public void applyDelta(PageMemory pageMem, long pageAddr) throws IgniteCheckedException { + super.applyDelta(pageMem, pageAddr); + + PagePartitionMetaIOV3 io = (PagePartitionMetaIOV3)PagePartitionMetaIO.VERSIONS.forPage(pageAddr); + + io.setEncryptedPageIndex(pageAddr, encryptedPageIdx); + io.setEncryptedPageCount(pageAddr, encryptedPageCnt); + } + + /** + * @return Index of the last reencrypted page. + */ + public int encryptedPageIndex() { + return encryptedPageIdx; + } + + /** + * @return Total pages to be reencrypted. 
+ */ + public int encryptedPageCount() { + return encryptedPageCnt; + } + + /** {@inheritDoc} */ + @Override public void toBytes(ByteBuffer buf) { + super.toBytes(buf); + + buf.putInt(encryptedPageIndex()); + buf.putInt(encryptedPageCount()); + } + + /** {@inheritDoc} */ + @Override public RecordType type() { + return RecordType.PARTITION_META_PAGE_DELTA_RECORD_V3; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(MetaPageUpdatePartitionDataRecordV3.class, this, "partId", + PageIdUtils.partId(pageId()), "super", super.toString()); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java index 05fa72ef0c35a8..66aa98d1cadfcd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java @@ -46,7 +46,6 @@ import org.apache.ignite.events.DiscoveryEvent; import org.apache.ignite.events.Event; import org.apache.ignite.internal.IgniteInternalFuture; -import org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException; import org.apache.ignite.internal.events.DiscoveryCustomEvent; import org.apache.ignite.internal.managers.discovery.DiscoCache; import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage; @@ -54,7 +53,6 @@ import org.apache.ignite.internal.processors.affinity.AffinityAssignment; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.affinity.GridAffinityAssignmentCache; -import org.apache.ignite.internal.processors.cache.distributed.dht.ClientCacheDhtTopologyFuture; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtAffinityAssignmentResponse; import 
org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtAssignmentFetchFuture; import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.CacheGroupAffinityMessage; @@ -581,30 +579,32 @@ else if (!crd && !fetchFuts.containsKey(grp.groupId())) { fetchFut); GridDhtPartitionFullMap partMap; - ClientCacheDhtTopologyFuture topFut; if (res != null) { partMap = res.partitionMap(); assert partMap != null : res; - - topFut = new ClientCacheDhtTopologyFuture(topVer); } - else { + else partMap = new GridDhtPartitionFullMap(cctx.localNodeId(), cctx.localNode().order(), 1); - topFut = new ClientCacheDhtTopologyFuture(topVer, - new ClusterTopologyServerNotFoundException("All server nodes left grid.")); - } + GridDhtPartitionsExchangeFuture exchFut = context().exchange().lastFinishedFuture(); - grp.topology().updateTopologyVersion(topFut, + grp.topology().updateTopologyVersion(exchFut, discoCache, -1, false); - grp.topology().update(topVer, partMap, null, Collections.emptySet(), null, null, null, null); + GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(grp.groupId()); + + Set lostParts = clientTop == null ? 
null : clientTop.lostPartitions(); + + grp.topology().update(topVer, partMap, null, Collections.emptySet(), null, null, null, lostParts); + + if (clientTop == null) + grp.topology().detectLostPartitions(topVer, exchFut); - topFut.validate(grp, discoCache.allNodes()); + exchFut.validate(grp); } catch (IgniteCheckedException e) { cctx.cache().closeCaches(startedCaches, false); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheCompressionManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheCompressionManager.java index a3900c3bfa3493..3bf2b8bbb48b80 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheCompressionManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheCompressionManager.java @@ -25,6 +25,7 @@ import org.apache.ignite.configuration.DiskPageCompression; import org.apache.ignite.internal.pagemem.store.PageStore; import org.apache.ignite.internal.processors.compress.CompressionProcessor; +import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.U; import static org.apache.ignite.internal.processors.compress.CompressionProcessor.checkCompressionLevelBounds; @@ -45,7 +46,9 @@ public class CacheCompressionManager extends GridCacheManagerAdapter { /** {@inheritDoc} */ @Override protected void start0() throws IgniteCheckedException { - if (cctx.kernalContext().clientNode()) { + CacheConfiguration cfg = cctx.config(); + + if (cctx.kernalContext().clientNode() || !CU.isPersistentCache(cfg, cctx.gridConfig().getDataStorageConfiguration())) { diskPageCompression = DiskPageCompression.DISABLED; return; @@ -53,8 +56,6 @@ public class CacheCompressionManager extends GridCacheManagerAdapter { compressProc = cctx.kernalContext().compress(); - CacheConfiguration cfg = cctx.config(); - diskPageCompression = cctx.kernalContext().config().isClientMode() ? 
null : cfg.getDiskPageCompression(); if (diskPageCompression != DiskPageCompression.DISABLED) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java index 3621d0b4273b28..19944576c6615e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java @@ -200,7 +200,7 @@ public class CacheGroupContext { * @param persistenceEnabled Persistence enabled flag. * @param walEnabled Wal enabled flag. */ - CacheGroupContext( + public CacheGroupContext( GridCacheSharedContext ctx, int grpId, UUID rcvdFrom, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsImpl.java index 15a989decd166a..b7c0300b8defbf 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsImpl.java @@ -175,6 +175,16 @@ public void onTopologyInitialized() { mreg.register("TotalAllocatedSize", this::getTotalAllocatedSize, "Total size of memory allocated for group, in bytes."); + + if (ctx.config().isEncryptionEnabled()) { + mreg.register("ReencryptionFinished", + () -> !ctx.shared().kernalContext().encryption().reencryptionInProgress(ctx.groupId()), + "The flag indicates whether reencryption is finished or not."); + + mreg.register("ReencryptionBytesLeft", + () -> ctx.shared().kernalContext().encryption().getBytesLeftForReencryption(ctx.groupId()), + "The number of bytes left for re-ecryption."); + } } /** */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheLazyEntry.java 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheLazyEntry.java index 1a1645fd0b67d6..45adbfebf75988 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheLazyEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheLazyEntry.java @@ -21,13 +21,17 @@ import org.apache.ignite.cache.CacheInterceptorEntry; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; /** * */ public class CacheLazyEntry extends CacheInterceptorEntry { /** Cache context. */ - protected GridCacheContext cctx; + protected final GridCacheContext cctx; + + /** Keep binary flag. */ + private final boolean keepBinary; /** Key cache object. */ protected KeyCacheObject keyObj; @@ -43,9 +47,6 @@ public class CacheLazyEntry extends CacheInterceptorEntry { @GridToStringInclude(sensitive = true) protected V val; - /** Keep binary flag. */ - private boolean keepBinary; - /** Update counter. */ private Long updateCntr; @@ -56,10 +57,13 @@ public class CacheLazyEntry extends CacheInterceptorEntry { * @param keepBinary Keep binary flag. */ public CacheLazyEntry(GridCacheContext cctx, KeyCacheObject keyObj, CacheObject valObj, boolean keepBinary) { - this.cctx = cctx; - this.keyObj = keyObj; - this.valObj = valObj; - this.keepBinary = keepBinary; + this(cctx, + keyObj, + null, + valObj, + null, + keepBinary, + null); } /** @@ -69,10 +73,13 @@ public CacheLazyEntry(GridCacheContext cctx, KeyCacheObject keyObj, CacheObject * @param cctx Cache context. 
*/ public CacheLazyEntry(GridCacheContext cctx, KeyCacheObject keyObj, V val, boolean keepBinary) { - this.cctx = cctx; - this.keyObj = keyObj; - this.val = val; - this.keepBinary = keepBinary; + this(cctx, + keyObj, + null, + null, + val, + keepBinary, + null); } /** @@ -81,7 +88,6 @@ public CacheLazyEntry(GridCacheContext cctx, KeyCacheObject keyObj, V val, boole * @param key Key value. * @param valObj Cache object * @param keepBinary Keep binary flag. - * @param updateCntr Partition update counter. * @param val Cache value. */ public CacheLazyEntry(GridCacheContext ctx, @@ -89,16 +95,15 @@ public CacheLazyEntry(GridCacheContext ctx, K key, CacheObject valObj, V val, - boolean keepBinary, - Long updateCntr + boolean keepBinary ) { - this.cctx = ctx; - this.keyObj = keyObj; - this.key = key; - this.valObj = valObj; - this.val = val; - this.keepBinary = keepBinary; - this.updateCntr = updateCntr; + this(ctx, + keyObj, + key, + valObj, + val, + keepBinary, + null); } /** @@ -107,6 +112,7 @@ public CacheLazyEntry(GridCacheContext ctx, * @param key Key value. * @param valObj Cache object * @param keepBinary Keep binary flag. + * @param updateCntr Partition update counter. * @param val Cache value. 
*/ public CacheLazyEntry(GridCacheContext ctx, @@ -114,7 +120,8 @@ public CacheLazyEntry(GridCacheContext ctx, K key, CacheObject valObj, V val, - boolean keepBinary + boolean keepBinary, + Long updateCntr ) { this.cctx = ctx; this.keyObj = keyObj; @@ -122,12 +129,13 @@ public CacheLazyEntry(GridCacheContext ctx, this.valObj = valObj; this.val = val; this.keepBinary = keepBinary; + this.updateCntr = updateCntr; } /** {@inheritDoc} */ @Override public K getKey() { if (key == null) - key = (K)cctx.unwrapBinaryIfNeeded(keyObj, keepBinary); + key = (K)cctx.unwrapBinaryIfNeeded(keyObj, keepBinary, U.deploymentClassLoader(cctx.kernalContext(), U.contextDeploymentClassLoaderId(cctx.kernalContext()))); return key; } @@ -145,7 +153,7 @@ public CacheLazyEntry(GridCacheContext ctx, */ public V getValue(boolean keepBinary) { if (val == null) - val = (V)cctx.unwrapBinaryIfNeeded(valObj, keepBinary, true); + val = (V)cctx.unwrapBinaryIfNeeded(valObj, keepBinary, true, U.deploymentClassLoader(cctx.kernalContext(), U.contextDeploymentClassLoaderId(cctx.kernalContext()))); return val; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObject.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObject.java index f9f384a7f97027..0f21c77c43852c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObject.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObject.java @@ -45,6 +45,16 @@ public interface CacheObject extends Message { */ @Nullable public T value(CacheObjectValueContext ctx, boolean cpy); + /** + * Deserializes a value from an internal representation. + * + * @param ctx Context. + * @param cpy If {@code true} need to copy value. + * @param ldr Class loader, if it is {@code null}, default class loader will be used. + * @return Value. 
+ */ + @Nullable public T value(CacheObjectValueContext ctx, boolean cpy, ClassLoader ldr); + /** * @param ctx Context. * @return Value bytes. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectByteArrayImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectByteArrayImpl.java index de5a9191950c0e..5c033b64212089 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectByteArrayImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectByteArrayImpl.java @@ -63,6 +63,11 @@ public CacheObjectByteArrayImpl(byte[] val) { /** {@inheritDoc} */ @Nullable @Override public T value(CacheObjectValueContext ctx, boolean cpy) { + return value(ctx, cpy, null); + } + + /** {@inheritDoc} */ + @Nullable @Override public T value(CacheObjectValueContext ctx, boolean cpy, ClassLoader ldr) { if (cpy) return (T)Arrays.copyOf(val, val.length); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectContext.java index d121a5ecc3b992..01f751b426e164 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectContext.java @@ -19,6 +19,7 @@ import org.apache.ignite.cache.affinity.AffinityKeyMapper; import org.apache.ignite.internal.GridKernalContext; +import org.jetbrains.annotations.Nullable; /** * @@ -127,12 +128,13 @@ public boolean customAffinityMapper() { * @param o Object to unwrap. * @param keepBinary Keep binary flag. * @param cpy Copy value flag. + * @param ldr Class loader, used for deserialization from binary representation. * @return Unwrapped object. 
*/ - public Object unwrapBinaryIfNeeded(Object o, boolean keepBinary, boolean cpy) { + public Object unwrapBinaryIfNeeded(Object o, boolean keepBinary, boolean cpy, @Nullable ClassLoader ldr) { if (o == null) return null; - return CacheObjectUtils.unwrapBinaryIfNeeded(this, o, keepBinary, cpy); + return CacheObjectUtils.unwrapBinaryIfNeeded(this, o, keepBinary, cpy, ldr); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectImpl.java index 6ca700b6efffce..46ec782ee9c48d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectImpl.java @@ -54,7 +54,12 @@ public CacheObjectImpl(Object val, byte[] valBytes) { } /** {@inheritDoc} */ - @Nullable @Override public T value(CacheObjectValueContext ctx, boolean cpy) { + @Override public @Nullable T value(CacheObjectValueContext ctx, boolean cpy) { + return value(ctx, cpy, null); + } + + /** {@inheritDoc} */ + @Nullable @Override public T value(CacheObjectValueContext ctx, boolean cpy, ClassLoader ldr) { cpy = cpy && needCopy(ctx); try { @@ -69,16 +74,14 @@ public CacheObjectImpl(Object val, byte[] valBytes) { valBytes = proc.marshal(ctx, val); } - ClassLoader clsLdr; - - if (val != null) - clsLdr = val.getClass().getClassLoader(); - else if (kernalCtx.config().isPeerClassLoadingEnabled()) - clsLdr = kernalCtx.cache().context().deploy().globalLoader(); - else - clsLdr = null; + if (ldr == null) { + if (val != null) + ldr = val.getClass().getClassLoader(); + else if (kernalCtx.config().isPeerClassLoadingEnabled()) + ldr = kernalCtx.cache().context().deploy().globalLoader(); + } - return (T)proc.unmarshal(ctx, valBytes, clsLdr); + return (T)proc.unmarshal(ctx, valBytes, ldr); } if (val != null) diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectUtils.java index ed2d32491f0e7b..ed278b1e0dd93c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectUtils.java @@ -24,6 +24,7 @@ import org.apache.ignite.internal.util.MutableSingletonList; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.U; +import org.jetbrains.annotations.Nullable; /** * Cache object utility methods. @@ -36,16 +37,24 @@ public class CacheObjectUtils { * @return Unwrapped object. */ public static Object unwrapBinaryIfNeeded(CacheObjectValueContext ctx, CacheObject o, boolean keepBinary, boolean cpy) { - return unwrapBinary(ctx, o, keepBinary, cpy); + return unwrapBinary(ctx, o, keepBinary, cpy, null); } /** + * @param ctx Cache object context. * @param o Object to unwrap. * @param keepBinary Keep binary flag. * @param cpy Copy value flag. + * @param ldr Class loader, used for deserialization from binary representation. * @return Unwrapped object. 
*/ - public static Object unwrapBinaryIfNeeded(CacheObjectValueContext ctx, Object o, boolean keepBinary, boolean cpy) { + public static Object unwrapBinaryIfNeeded( + CacheObjectValueContext ctx, + Object o, + boolean keepBinary, + boolean cpy, + @Nullable ClassLoader ldr + ) { if (o == null) return null; @@ -55,16 +64,16 @@ public static Object unwrapBinaryIfNeeded(CacheObjectValueContext ctx, Object o, Object key = entry.getKey(); - Object uKey = unwrapBinary(ctx, key, keepBinary, cpy); + Object uKey = unwrapBinary(ctx, key, keepBinary, cpy, ldr); Object val = entry.getValue(); - Object uVal = unwrapBinary(ctx, val, keepBinary, cpy); + Object uVal = unwrapBinary(ctx, val, keepBinary, cpy, ldr); return (key != uKey || val != uVal) ? F.t(uKey, uVal) : o; } - return unwrapBinary(ctx, o, keepBinary, cpy); + return unwrapBinary(ctx, o, keepBinary, cpy, ldr); } /** @@ -90,7 +99,7 @@ private static Collection unwrapKnownCollection(CacheObjectValueContext assert col0 != null; for (Object obj : col) - col0.add(unwrapBinary(ctx, obj, keepBinary, cpy)); + col0.add(unwrapBinary(ctx, obj, keepBinary, cpy, null)); return (col0 instanceof MutableSingletonList) ? U.convertToSingletonList(col0) : col0; } @@ -112,8 +121,8 @@ private static Map unwrapBinariesIfNeeded(CacheObjectValueContex for (Map.Entry e : map.entrySet()) // TODO why don't we use keepBinary parameter here? 
map0.put( - unwrapBinary(ctx, e.getKey(), false, cpy), - unwrapBinary(ctx, e.getValue(), false, cpy)); + unwrapBinary(ctx, e.getKey(), false, cpy, null), + unwrapBinary(ctx, e.getValue(), false, cpy, null)); return map0; } @@ -132,7 +141,7 @@ private static Collection unwrapBinariesIfNeeded(CacheObjectValueContext col0 = new ArrayList<>(col.size()); for (Object obj : col) - col0.add(unwrapBinaryIfNeeded(ctx, obj, keepBinary, cpy)); + col0.add(unwrapBinaryIfNeeded(ctx, obj, keepBinary, cpy, null)); return col0; } @@ -153,16 +162,28 @@ private static Object[] unwrapBinariesInArrayIfNeeded(CacheObjectValueContext ct Object[] res = new Object[arr.length]; for (int i = 0; i < arr.length; i++) - res[i] = unwrapBinary(ctx, arr[i], keepBinary, cpy); + res[i] = unwrapBinary(ctx, arr[i], keepBinary, cpy, null); return res; } /** + * Unwraps an object for end user. + * + * @param ctx Cache object context. * @param o Object to unwrap. + * @param keepBinary False when need to deserialize object from a binary one, true otherwise. + * @param cpy True means the object will be copied before return, false otherwise. + * @param ldr Class loader, used for deserialization from binary representation. * @return Unwrapped object. 
*/ - private static Object unwrapBinary(CacheObjectValueContext ctx, Object o, boolean keepBinary, boolean cpy) { + private static Object unwrapBinary( + CacheObjectValueContext ctx, + Object o, + boolean keepBinary, + boolean cpy, + @Nullable ClassLoader ldr + ) { if (o == null) return o; @@ -173,7 +194,7 @@ private static Object unwrapBinary(CacheObjectValueContext ctx, Object o, boolea return o; // It may be a collection of binaries - o = co.value(ctx, cpy); + o = co.value(ctx, cpy, ldr); } if (BinaryUtils.knownCollection(o)) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java index 88a9fde320f289..96ca0072e35857 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java @@ -1864,9 +1864,10 @@ else if (joiningNodeData instanceof CacheJoinNodeDiscoveryData) /** * @param data Joining node data. + * @param joiningNodeClient Joining node is client flag. * @return Message with error or null if everything was OK. 
*/ - public String validateJoiningNodeData(DiscoveryDataBag.JoiningNodeDiscoveryData data) { + public String validateJoiningNodeData(DiscoveryDataBag.JoiningNodeDiscoveryData data, boolean joiningNodeClient) { if (data.hasJoiningNodeData()) { Serializable joiningNodeData = data.joiningNodeData(); @@ -1874,6 +1875,7 @@ public String validateJoiningNodeData(DiscoveryDataBag.JoiningNodeDiscoveryData CacheJoinNodeDiscoveryData joinData = (CacheJoinNodeDiscoveryData)joiningNodeData; Set problemCaches = null; + Set encClientCaches = null; for (CacheJoinNodeDiscoveryData.CacheInfo cacheInfo : joinData.caches().values()) { CacheConfiguration cfg = cacheInfo.cacheData().config(); @@ -1895,6 +1897,12 @@ public String validateJoiningNodeData(DiscoveryDataBag.JoiningNodeDiscoveryData problemCaches.add(cfg.getName()); } + else if (joiningNodeClient && cfg.isEncryptionEnabled()) { + if (encClientCaches == null) + encClientCaches = new HashSet<>(); + + encClientCaches.add(cfg.getName()); + } } } @@ -1903,6 +1911,14 @@ public String validateJoiningNodeData(DiscoveryDataBag.JoiningNodeDiscoveryData "Joining node has caches with data which are not presented on cluster, " + "it could mean that they were already destroyed, to add the node to cluster - " + "remove directories with the caches[", "]")); + + if (!F.isEmpty(encClientCaches)) { + return encClientCaches.stream().collect(Collectors.joining(", ", + "Joining node has encrypted caches which are not presented on the cluster, " + + "encrypted caches configured on client node cannot be started when such node joins " + + "the cluster, these caches can be started manually (dynamically) after node joined " + + "[caches=", "]")); + } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java index fa956bc9fc00a2..a6fcee81de904f 100644 ---
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java @@ -952,7 +952,7 @@ else if (modes.heap) { } } - Object val = ctx.unwrapBinaryIfNeeded(cacheVal, ctx.keepBinary(), false); + Object val = ctx.unwrapBinaryIfNeeded(cacheVal, ctx.keepBinary(), false, null); return (V)val; } @@ -1479,7 +1479,7 @@ private boolean evictx(K key, GridCacheVersion ver, V val = repairableGet(key, !keepBinary, false); if (ctx.config().getInterceptor() != null) { - key = keepBinary ? (K)ctx.unwrapBinaryIfNeeded(key, true, false) : key; + key = keepBinary ? (K)ctx.unwrapBinaryIfNeeded(key, true, false, null) : key; val = (V)ctx.config().getInterceptor().onGet(key, val); } @@ -1507,13 +1507,13 @@ private boolean evictx(K key, GridCacheVersion ver, = (EntryGetResult)repairableGet(key, !keepBinary, true); CacheEntry val = t != null ? new CacheEntryImplEx<>( - keepBinary ? (K)ctx.unwrapBinaryIfNeeded(key, true, false) : key, + keepBinary ? (K)ctx.unwrapBinaryIfNeeded(key, true, false, null) : key, (V)t.value(), t.version()) : null; if (ctx.config().getInterceptor() != null) { - key = keepBinary ? (K)ctx.unwrapBinaryIfNeeded(key, true, false) : key; + key = keepBinary ? (K)ctx.unwrapBinaryIfNeeded(key, true, false, null) : key; V val0 = (V)ctx.config().getInterceptor().onGet(key, t != null ? val.getValue() : null); @@ -1549,7 +1549,7 @@ private boolean evictx(K key, GridCacheVersion ver, if (ctx.config().getInterceptor() != null) fut = fut.chain(new CX1, V>() { @Override public V applyx(IgniteInternalFuture f) throws IgniteCheckedException { - K key = keepBinary ? (K)ctx.unwrapBinaryIfNeeded(key0, true, false) : key0; + K key = keepBinary ? 
(K)ctx.unwrapBinaryIfNeeded(key0, true, false, null) : key0; return (V)ctx.config().getInterceptor().onGet(key, f.get()); } @@ -1590,7 +1590,7 @@ private boolean evictx(K key, GridCacheVersion ver, throws IgniteCheckedException { EntryGetResult t = f.get(); - K key = keepBinary ? (K)ctx.unwrapBinaryIfNeeded(key0, true, false) : key0; + K key = keepBinary ? (K)ctx.unwrapBinaryIfNeeded(key0, true, false, null) : key0; CacheEntry val = t != null ? new CacheEntryImplEx<>( key, @@ -3173,7 +3173,7 @@ protected V getAndRemove0(final K key) throws IgniteCheckedException { V ret = fut.get().value(); if (ctx.config().getInterceptor() != null) { - K key = keepBinary ? (K)ctx.unwrapBinaryIfNeeded(key0, true, false) : key0; + K key = keepBinary ? (K)ctx.unwrapBinaryIfNeeded(key0, true, false, null) : key0; return (V)ctx.config().getInterceptor().onBeforeRemove(new CacheEntryImpl(key, ret)).get2(); } @@ -5214,6 +5214,9 @@ protected final void validateCacheKeys(Iterable keys) { * @param m Map to examine. */ protected void warnIfUnordered(Map m, BulkOperation op) { + if (ctx.atomic()) + return; + if (m == null || m.size() <= 1) return; @@ -5238,6 +5241,9 @@ protected void warnIfUnordered(Map m, BulkOperation op) { * @param coll Collection to examine. 
*/ protected void warnIfUnordered(Collection coll, BulkOperation op) { + if (ctx.atomic()) + return; + if (coll == null || coll.size() <= 1) return; @@ -5374,8 +5380,8 @@ private void advance() { KeyCacheObject key = entry.key(); - Object key0 = ctx.unwrapBinaryIfNeeded(key, !deserializeBinary, true); - Object val0 = ctx.unwrapBinaryIfNeeded(val, !deserializeBinary, true); + Object key0 = ctx.unwrapBinaryIfNeeded(key, !deserializeBinary, true, null); + Object val0 = ctx.unwrapBinaryIfNeeded(val, !deserializeBinary, true, null); return new CacheEntryImpl<>((K)key0, (V)val0, entry.version()); } @@ -7237,7 +7243,7 @@ private KeySetIterator(Iterator internalIterator, boolean kee @Override public K next() { current = internalIterator.next(); - return (K)ctx.unwrapBinaryIfNeeded(current.key(), keepBinary, true); + return (K)ctx.unwrapBinaryIfNeeded(current.key(), keepBinary, true, null); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java index a8def49124f45b..613a6be330a4b3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java @@ -1805,10 +1805,11 @@ public Collection unwrapBinariesIfNeeded(Collection col, boolean * * @param o Object to unwrap. * @param keepBinary Keep binary flag. + * @param ldr Class loader, used for deserialization from binary representation. * @return Unwrapped object. 
*/ - public Object unwrapBinaryIfNeeded(Object o, boolean keepBinary) { - return unwrapBinaryIfNeeded(o, keepBinary, true); + public Object unwrapBinaryIfNeeded(Object o, boolean keepBinary, @Nullable ClassLoader ldr) { + return unwrapBinaryIfNeeded(o, keepBinary, true, ldr); } /** @@ -1817,10 +1818,11 @@ public Object unwrapBinaryIfNeeded(Object o, boolean keepBinary) { * @param o Object to unwrap. * @param keepBinary Keep binary flag. * @param cpy Copy value flag. + * @param ldr Class loader, used for deserialization from binary representation. * @return Unwrapped object. */ - public Object unwrapBinaryIfNeeded(Object o, boolean keepBinary, boolean cpy) { - return cacheObjCtx.unwrapBinaryIfNeeded(o, keepBinary, cpy); + public Object unwrapBinaryIfNeeded(Object o, boolean keepBinary, boolean cpy, @Nullable ClassLoader ldr) { + return cacheObjCtx.unwrapBinaryIfNeeded(o, keepBinary, cpy, ldr); } /** @@ -1836,7 +1838,7 @@ public Map unwrapInvokeResult(@Nullable Map resMap if (invokeRes.result() != null) res = CacheInvokeResult.fromResult(unwrapBinaryIfNeeded(invokeRes.result(), - keepBinary, false)); + keepBinary, false, null)); } return res; @@ -1924,6 +1926,7 @@ public void validateKeyAndValue(KeyCacheObject key, CacheObject val) throws Igni * @param deserializeBinary Deserialize binary flag. * @param cpy Copy flag. * @param ver GridCacheVersion. + * @param ldr Class loader, used for deserialization from binary representation. */ public void addResult(Map map, KeyCacheObject key, @@ -1934,10 +1937,11 @@ public void addResult(Map map, boolean cpy, final GridCacheVersion ver, final long expireTime, - final long ttl) { + final long ttl, + @Nullable ClassLoader ldr) { // Creates EntryGetResult addResult(map, key, val, skipVals, keepCacheObjects, deserializeBinary, cpy, null, - ver, expireTime, ttl, ver != null); + ver, expireTime, ttl, ver != null, ldr); } /** @@ -1960,7 +1964,7 @@ public void addResult(Map map, boolean needVer) { // Uses getRes as result. 
addResult(map, key, getRes.value(), skipVals, keepCacheObjects, deserializeBinary, cpy, getRes, - null, 0, 0, needVer); + null, 0, 0, needVer, null); } /** @@ -1976,6 +1980,7 @@ public void addResult(Map map, * @param expireTime Entry expire time. * @param ttl Entry TTL. * @param needVer Need version flag. + * @param ldr Class loader, used for deserialization from binary representation. */ public void addResult(Map map, KeyCacheObject key, @@ -1988,14 +1993,15 @@ public void addResult(Map map, final GridCacheVersion ver, final long expireTime, final long ttl, - boolean needVer) { + boolean needVer, + @Nullable ClassLoader ldr) { assert key != null; assert val != null || skipVals; if (!keepCacheObjects) { - Object key0 = unwrapBinaryIfNeeded(key, !deserializeBinary, cpy); + Object key0 = unwrapBinaryIfNeeded(key, !deserializeBinary, cpy, ldr); - Object val0 = skipVals ? true : unwrapBinaryIfNeeded(val, !deserializeBinary, cpy); + Object val0 = skipVals ? true : unwrapBinaryIfNeeded(val, !deserializeBinary, cpy, ldr); assert key0 != null : key; assert val0 != null : val; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContextInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContextInfo.java index d1b5506a8dd19d..6a5f6c6aad1bfd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContextInfo.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContextInfo.java @@ -92,6 +92,15 @@ public String name() { return config.getName(); } + /** + * Returns name of cache group. + * + * @return Cache group name. + */ + public String groupName() { + return CacheGroupContext.cacheOrGroupName(config); + } + /** * @return Cache group id. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheDeploymentManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheDeploymentManager.java index 971de98985c95d..afd67b27dda05e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheDeploymentManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheDeploymentManager.java @@ -36,11 +36,9 @@ import org.apache.ignite.events.DiscoveryEvent; import org.apache.ignite.events.Event; import org.apache.ignite.internal.managers.deployment.GridDeployment; -import org.apache.ignite.internal.managers.deployment.GridDeploymentInfo; import org.apache.ignite.internal.managers.deployment.GridDeploymentInfoBean; import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearCacheAdapter; -import org.apache.ignite.internal.util.IgniteUtils; import org.apache.ignite.internal.util.lang.GridPeerDeployAware; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.CA; @@ -73,9 +71,6 @@ public class GridCacheDeploymentManager extends GridCacheSharedManagerAdap /** Per-thread deployment context. */ private ConcurrentMap> deps = new ConcurrentHashMap<>(); - /** Collection of all known participants (Node ID -> Loader ID). */ - private Map allParticipants = new ConcurrentHashMap<>(); - /** Discovery listener. */ private GridLocalEventListener discoLsnr; @@ -93,7 +88,7 @@ public class GridCacheDeploymentManager extends GridCacheSharedManagerAdap private boolean depEnabled; /** Class loader id for local thread. 
*/ - private ThreadLocal localLdrId = new ThreadLocal<>(); + private final ThreadLocal localLdrId = new ThreadLocal<>(); /** {@inheritDoc} */ @Override public void start0() throws IgniteCheckedException { @@ -124,8 +119,6 @@ public class GridCacheDeploymentManager extends GridCacheSharedManagerAdap log.debug("Removed cached info [d=" + d + ", deps=" + deps + ']'); } } - - allParticipants.remove(id); } }; @@ -454,104 +447,27 @@ public void p2pContext( break; } - if (cctx.discovery().node(sndId) == null) { - // Sender has left. + // Sender has left. + if (cctx.discovery().node(sndId) == null) deps.remove(ldrId, depInfo); - allParticipants.remove(sndId); - } - if (participants != null) { for (UUID id : participants.keySet()) { if (cctx.discovery().node(id) == null) { if (depInfo.removeParticipant(id)) deps.remove(ldrId, depInfo); - - allParticipants.remove(id); } } } } /** - * Adds deployment info to deployment contexts queue. + * Gets a local class loader id. * - * @param info Info to add. - */ - public void addDeploymentContext(GridDeploymentInfo info) { - IgniteUuid ldrId = info.classLoaderId(); - - while (true) { - CachedDeploymentInfo depInfo = deps.get(ldrId); - - if (depInfo == null) { - depInfo = new CachedDeploymentInfo<>(ldrId.globalId(), ldrId, info.userVersion(), info.deployMode(), - info.participants()); - - CachedDeploymentInfo old = deps.putIfAbsent(ldrId, depInfo); - - if (old != null) - depInfo = old; - else - break; - } - - Map participants = info.participants(); - - if (participants != null) { - if (!depInfo.addParticipants(participants, cctx)) { - deps.remove(ldrId, depInfo); - - continue; - } - } - - break; - } - } - - /** - * @param sndNodeId Sender node ID. - * @param sndLdrId Sender loader ID. - * @param participants Participants. - * @param locDepOwner {@code True} if local deployment owner. - * @return Added participants. + * @return Class loader uuid. 
*/ - @Nullable private Map addGlobalParticipants(UUID sndNodeId, IgniteUuid sndLdrId, - Map participants, boolean locDepOwner) { - Map added = null; - - if (participants != null) { - for (Map.Entry entry : participants.entrySet()) { - UUID nodeId = entry.getKey(); - IgniteUuid ldrVer = entry.getValue(); - - if (!ldrVer.equals(allParticipants.get(nodeId))) { - allParticipants.put(nodeId, ldrVer); - - if (added == null) - added = IgniteUtils.newHashMap(participants.size()); - - added.put(nodeId, ldrVer); - } - } - } - - if (locDepOwner) { - assert sndNodeId != null; - assert sndLdrId != null; - - if (!sndLdrId.equals(allParticipants.get(sndNodeId))) { - allParticipants.put(sndNodeId, sndLdrId); - - if (added == null) - added = U.newHashMap(1); - - added.put(sndNodeId, sndLdrId); - } - } - - return added; + public IgniteUuid locLoaderId() { + return localLdrId.get(); } /** @@ -730,25 +646,42 @@ private void checkDeploymentIsCorrect(GridDeploymentInfoBean deployment, GridCac if (cctx.gridConfig().getDeploymentMode() == CONTINUOUS) return null; + IgniteUuid localLdrId0 = localLdrId.get(); + + if (localLdrId0 != null) { + GridDeploymentInfoBean deploymentInfoBean = getDepBean(deps.get(localLdrId.get())); + + if (deploymentInfoBean != null) + return deploymentInfoBean; + } + for (CachedDeploymentInfo d : deps.values()) { - if (cctx.discovery().node(d.senderId()) == null) - // Sender has left. - continue; + GridDeploymentInfoBean deploymentInfoBean = getDepBean(d); + if (deploymentInfoBean != null) + return deploymentInfoBean; + } + + return null; + } - // Participants map. - Map participants = d.participants(); + @Nullable private GridDeploymentInfoBean getDepBean(CachedDeploymentInfo d) { + if (d == null || cctx.discovery().node(d.senderId()) == null) + // Sender has left. + return null; - if (participants != null) { - for (UUID id : participants.keySet()) { - if (cctx.discovery().node(id) != null) { - // At least 1 participant is still in the grid. 
- return new GridDeploymentInfoBean( - d.loaderId(), - d.userVersion(), - d.mode(), - participants - ); - } + // Participants map. + Map participants = d.participants(); + + if (participants != null) { + for (UUID id : participants.keySet()) { + if (cctx.discovery().node(id) != null) { + // At least 1 participant is still in the grid. + return new GridDeploymentInfoBean( + d.loaderId(), + d.userVersion(), + d.mode(), + participants + ); } } } @@ -762,7 +695,6 @@ private void checkDeploymentIsCorrect(GridDeploymentInfoBean deployment, GridCac X.println(">>> Cache deployment manager memory stats [igniteInstanceName=" + cctx.igniteInstanceName() + ']'); X.println(">>> Undeploys: " + undeploys.size()); X.println(">>> Cached deployments: " + deps.size()); - X.println(">>> All participants: " + allParticipants.size()); } /** @@ -955,8 +887,7 @@ private CachedDeploymentInfo(UUID sndId, IgniteUuid ldrId, String userVer, Deplo this.ldrId = ldrId; this.userVer = userVer; this.depMode = depMode; - this.participants = participants == null || participants.isEmpty() ? null : - new ConcurrentLinkedHashMap<>(participants); + this.participants = F.isEmpty(participants) ? null : new ConcurrentLinkedHashMap<>(participants); } /** @@ -974,8 +905,8 @@ boolean addParticipants(Map newParticipants, GridCacheSharedCo for (Map.Entry e : newParticipants.entrySet()) { assert e.getKey().equals(e.getValue().globalId()); - if (cctx.discovery().node(e.getKey()) != null) - // Participant has left. + // Participant has left.
+ if (cctx.discovery().node(e.getKey()) == null) continue; if (participants == null) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java index c095ebe27fdc71..e17d4cd16742cd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java @@ -313,9 +313,9 @@ public void addEvent( Object oldVal0; try { - key0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(key, keepBinary, false); - val0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(newVal, keepBinary, false); - oldVal0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(oldVal, keepBinary, false); + key0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(key, keepBinary, false, null); + val0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(newVal, keepBinary, false, null); + oldVal0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(oldVal, keepBinary, false, null); } catch (Exception e) { if (!cctx.cacheObjectContext().kernalContext().cacheObjects().isBinaryEnabled(cctx.config())) @@ -330,9 +330,9 @@ public void addEvent( forceKeepBinary = true; - key0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(key, true, false); - val0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(newVal, true, false); - oldVal0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(oldVal, true, false); + key0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(key, true, false, null); + val0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(newVal, true, false, null); + oldVal0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(oldVal, true, false, null); } IgniteUuid xid = tx == null ? 
null : tx.xid(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java index de4edb0949e98b..5f39b50ac2b3b5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java @@ -564,7 +564,8 @@ protected GridDhtLocalPartition localPartition() { checkObsolete(); if (isStartVersion() && ((flags & IS_UNSWAPPED_MASK) == 0)) { - assert row == null || row.key() == key : "Unexpected row key"; + assert row == null || Objects.equals(row.key(), key) : + "Unexpected row key [row.key=" + row.key() + ", cacheEntry.key=" + key + "]"; CacheDataRow read = row == null ? cctx.offheap().read(this) : row; @@ -1519,7 +1520,7 @@ else if (res.resultType() == ResultType.LOCKED) { intercept = !skipInterceptor(explicitVer); if (intercept) { - val0 = cctx.unwrapBinaryIfNeeded(val, keepBinary, false); + val0 = cctx.unwrapBinaryIfNeeded(val, keepBinary, false, null); CacheLazyEntry e = new CacheLazyEntry(cctx, key, old, keepBinary); @@ -2004,7 +2005,7 @@ else if (ttl == CU.TTL_NOT_CHANGED) updateTtl(expiryPlc); Object val = retval ? - cctx.cacheObjectContext().unwrapBinaryIfNeeded(CU.value(old, cctx, false), keepBinary, false) + cctx.cacheObjectContext().unwrapBinaryIfNeeded(CU.value(old, cctx, false), keepBinary, false, null) : null; return new T3<>(false, val, null); @@ -2243,7 +2244,7 @@ else if (op == DELETE && transformOp) return new GridTuple3<>(res, cctx.unwrapTemporary(interceptorRes != null ? 
interceptorRes.get2() : - cctx.cacheObjectContext().unwrapBinaryIfNeeded(old, keepBinary, false)), + cctx.cacheObjectContext().unwrapBinaryIfNeeded(old, keepBinary, false, null)), invokeRes); } @@ -2564,7 +2565,7 @@ else if (updateMetrics && REMOVE_NO_VAL.equals(updateRes.outcome()) if (val != null) return val; - return cctx.unwrapBinaryIfNeeded(cacheObj, keepBinary, cpy); + return cctx.unwrapBinaryIfNeeded(cacheObj, keepBinary, cpy, null); } /** @@ -5725,12 +5726,12 @@ private LazyValueEntry(KeyCacheObject key, boolean keepBinary) { /** {@inheritDoc} */ @Override public K getKey() { - return (K)cctx.cacheObjectContext().unwrapBinaryIfNeeded(key, keepBinary, true); + return (K)cctx.cacheObjectContext().unwrapBinaryIfNeeded(key, keepBinary, true, null); } /** {@inheritDoc} */ @Override public V getValue() { - return (V)cctx.cacheObjectContext().unwrapBinaryIfNeeded(peekVisibleValue(), keepBinary, true); + return (V)cctx.cacheObjectContext().unwrapBinaryIfNeeded(peekVisibleValue(), keepBinary, true, null); } /** {@inheritDoc} */ @@ -6429,7 +6430,7 @@ else if (newSysTtl == CU.TTL_ZERO) { } if (intercept && (conflictVer == null || !skipInterceptorOnConflict)) { - Object updated0 = cctx.unwrapBinaryIfNeeded(updated, keepBinary, false); + Object updated0 = cctx.unwrapBinaryIfNeeded(updated, keepBinary, false, null); CacheLazyEntry interceptEntry = new CacheLazyEntry<>(cctx, entry.key, null, oldVal, null, keepBinary); @@ -6780,6 +6781,12 @@ private IgniteBiTuple runEntryProcessor(CacheInvokeEntry startCachesOnLocalJoin( ctx.query().initQueryStructuresForNotStartedCache(cacheDesc); } catch (Exception e) { - log.error("Can't initialize query structures for not started cache [cacheName=" + cacheDesc.cacheName() + "]"); + log.error("Can't initialize query structures for not started cache [cacheName=" + + cacheDesc.cacheName() + "]", e); } }); @@ -3106,7 +3107,7 @@ private GridCacheSharedContext createSharedContext( if (!cachesInfo.isMergeConfigSupports(node)) return null; - 
String validationRes = cachesInfo.validateJoiningNodeData(discoData); + String validationRes = cachesInfo.validateJoiningNodeData(discoData, node.isClient()); if (validationRes != null) return new IgniteNodeValidationResult(node.id(), validationRes, validationRes); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheReturn.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheReturn.java index 695a733f3229f0..87cbd67fd84f71 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheReturn.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheReturn.java @@ -101,16 +101,25 @@ public GridCacheReturn(boolean loc, boolean success) { /** * @param cctx Cache context. * @param loc {@code True} if created on the node initiated cache operation. + * @param keepBinary True if the value should be deserialized from a binary representation, false otherwise. + * @param ldr Class loader, used for deserialization from binary representation. * @param v Value. * @param success Success flag. */ - public GridCacheReturn(GridCacheContext cctx, boolean loc, boolean keepBinary, Object v, boolean success) { + public GridCacheReturn( + GridCacheContext cctx, + boolean loc, + boolean keepBinary, + @Nullable ClassLoader ldr, + Object v, + boolean success + ) { this.loc = loc; this.success = success; if (v != null) { if (v instanceof CacheObject) - initValue(cctx, (CacheObject)v, keepBinary); + initValue(cctx, (CacheObject)v, keepBinary, ldr); else { assert loc; @@ -150,10 +159,12 @@ public void invokeResult(boolean invokeRes) { /** * @param cctx Cache context. * @param v Value. + * @param keepBinary Keep binary flag. + * @param ldr Class loader, used for deserialization from binary representation. * @return This instance for chaining.
*/ - public GridCacheReturn value(GridCacheContext cctx, CacheObject v, boolean keepBinary) { - initValue(cctx, v, keepBinary); + public GridCacheReturn value(GridCacheContext cctx, CacheObject v, boolean keepBinary, @Nullable ClassLoader ldr) { + initValue(cctx, v, keepBinary, ldr); return this; } @@ -170,17 +181,19 @@ public boolean success() { * @param cacheObj Value to set. * @param success Success flag to set. * @param keepBinary Keep binary flag. + * @param ldr Class loader, used for deserialization from binary representation. * @return This instance for chaining. */ public GridCacheReturn set( GridCacheContext cctx, @Nullable CacheObject cacheObj, boolean success, - boolean keepBinary + boolean keepBinary, + @Nullable ClassLoader ldr ) { this.success = success; - initValue(cctx, cacheObj, keepBinary); + initValue(cctx, cacheObj, keepBinary, ldr); return this; } @@ -189,10 +202,16 @@ public GridCacheReturn set( * @param cctx Cache context. * @param cacheObj Cache object. * @param keepBinary Keep binary flag. + * @param ldr Class loader, used for deserialization from binary representation. 
*/ - private void initValue(GridCacheContext cctx, @Nullable CacheObject cacheObj, boolean keepBinary) { + private void initValue( + GridCacheContext cctx, + @Nullable CacheObject cacheObj, + boolean keepBinary, + @Nullable ClassLoader ldr + ) { if (loc) - v = cctx.cacheObjectContext().unwrapBinaryIfNeeded(cacheObj, keepBinary, true); + v = cctx.cacheObjectContext().unwrapBinaryIfNeeded(cacheObj, keepBinary, true, ldr); else { assert cacheId == 0 || cacheId == cctx.cacheId(); @@ -346,7 +365,7 @@ public void finishUnmarshal(GridCacheContext ctx, ClassLoader ldr) throws Ignite if (cacheObj != null) { cacheObj.finishUnmarshal(ctx.cacheObjectContext(), ldr); - v = ctx.cacheObjectContext().unwrapBinaryIfNeeded(cacheObj, true, false); + v = ctx.cacheObjectContext().unwrapBinaryIfNeeded(cacheObj, true, false, ldr); } if (invokeRes && invokeResCol != null) { @@ -357,10 +376,10 @@ public void finishUnmarshal(GridCacheContext ctx, ClassLoader ldr) throws Ignite for (CacheInvokeDirectResult res : invokeResCol) { CacheInvokeResult res0 = res.error() == null ? 
- CacheInvokeResult.fromResult(ctx.cacheObjectContext().unwrapBinaryIfNeeded(res.result(), true, false)) : + CacheInvokeResult.fromResult(ctx.cacheObjectContext().unwrapBinaryIfNeeded(res.result(), true, false, null)) : CacheInvokeResult.fromError(res.error()); - map0.put(ctx.cacheObjectContext().unwrapBinaryIfNeeded(res.key(), true, false), res0); + map0.put(ctx.cacheObjectContext().unwrapBinaryIfNeeded(res.key(), true, false, null), res0); } v = map0; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSharedContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSharedContext.java index c1981c664e1894..f40d4d7b41c9af 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSharedContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSharedContext.java @@ -760,7 +760,7 @@ public IgniteSnapshotManager snapshotMgr() { /** * @return Write ahead log manager. */ - public IgniteWriteAheadLogManager wal() { + @Nullable public IgniteWriteAheadLogManager wal() { return walMgr; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java index cb0daedbc29f98..e260983fa71862 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java @@ -2021,9 +2021,13 @@ public static boolean isPersistenceEnabled(DataStorageConfiguration cfg) { * @return Page size without encryption overhead. */ public static int encryptedPageSize(int pageSize, EncryptionSpi encSpi) { + // If encryption is enabled, a space of one encryption block is reserved to store CRC and encryption key ID. 
+ // If encryption is disabled, NoopEncryptionSPI with a zero encryption block size is used. + assert encSpi.blockSize() >= /* CRC */ 4 + /* Key ID */ 1 || encSpi.blockSize() == 0; + return pageSize - (encSpi.encryptedSizeNoPadding(pageSize) - pageSize) - - encSpi.blockSize(); /* For CRC. */ + - encSpi.blockSize(); /* For CRC and encryption key ID. */ } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManager.java index d0fd64c4b67a15..ceebc72d190fa2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManager.java @@ -38,6 +38,7 @@ import org.apache.ignite.internal.processors.cache.persistence.partstate.GroupPartitionId; import org.apache.ignite.internal.processors.cache.persistence.partstorage.PartitionMetaStorage; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; +import org.apache.ignite.internal.processors.cache.tree.CacheDataTree; import org.apache.ignite.internal.processors.cache.tree.PendingEntriesTree; import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccUpdateResult; import org.apache.ignite.internal.processors.cache.tree.mvcc.search.MvccLinkAwareSearchRow; @@ -606,6 +607,12 @@ interface OffheapInvokeClosure extends IgniteTree.InvokeClosure { * */ interface CacheDataStore { + + /** + * @return Cache data tree object. + */ + public CacheDataTree tree(); + /** * Initialize data store if it exists. 
* diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java index a8ead2c0dfd27b..773297f23a26e1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java @@ -32,6 +32,7 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; import javax.cache.Cache; import javax.cache.processor.EntryProcessor; import org.apache.ignite.IgniteCheckedException; @@ -43,6 +44,8 @@ import org.apache.ignite.internal.NodeStoppingException; import org.apache.ignite.internal.metric.IoStatisticsHolder; import org.apache.ignite.internal.pagemem.FullPageId; +import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.pagemem.wal.record.delta.DataPageMvccMarkUpdatedRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.DataPageMvccUpdateNewTxStateHintRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.DataPageMvccUpdateTxStateHintRecord; @@ -152,10 +155,6 @@ public class IgniteCacheOffheapManagerImpl implements IgniteCacheOffheapManager /** The maximum number of entries that can be preloaded under checkpoint read lock. */ public static final int PRELOAD_SIZE_UNDER_CHECKPOINT_LOCK = 100; - /** */ - private final boolean failNodeOnPartitionInconsistency = Boolean.getBoolean( - IgniteSystemProperties.IGNITE_FAIL_NODE_ON_UNRECOVERABLE_PARTITION_INCONSISTENCY); - /** Batch size for cache removals during destroy. 
*/ private static final int BATCH_SIZE = 1000; @@ -183,9 +182,6 @@ public class IgniteCacheOffheapManagerImpl implements IgniteCacheOffheapManager /** */ protected final GridSpinBusyLock busyLock = new GridSpinBusyLock(); - /** */ - private int updateValSizeThreshold; - /** */ protected GridStripedLock partStoreLock = new GridStripedLock(Runtime.getRuntime().availableProcessors()); @@ -200,8 +196,6 @@ public class IgniteCacheOffheapManagerImpl implements IgniteCacheOffheapManager this.grp = grp; this.log = ctx.logger(getClass()); - updateValSizeThreshold = ctx.database().pageSize() / 2; - if (grp.affinityNode()) { ctx.database().checkpointReadLock(); @@ -243,7 +237,8 @@ protected void initPendingTree(GridCacheContext cctx) throws IgniteCheckedExcept rootPage, grp.reuseList(), true, - lsnr + lsnr, + FLAG_IDX ); } } @@ -805,8 +800,8 @@ private Iterator cacheData(boolean primary, boolean backup, Affi KeyCacheObject key = nextRow.key(); CacheObject val = nextRow.value(); - Object key0 = cctx.unwrapBinaryIfNeeded(key, keepBinary, false); - Object val0 = cctx.unwrapBinaryIfNeeded(val, keepBinary, false); + Object key0 = cctx.unwrapBinaryIfNeeded(key, keepBinary, false, null); + Object val0 = cctx.unwrapBinaryIfNeeded(val, keepBinary, false, null); next = new CacheEntryImplEx(key0, val0, nextRow.version()); @@ -1297,10 +1292,11 @@ protected CacheDataStore createCacheDataStore0(int p) throws IgniteCheckedExcept rowStore, rootPage, true, - lsnr + lsnr, + FLAG_IDX ); - return new CacheDataStoreImpl(p, rowStore, dataTree); + return new CacheDataStoreImpl(p, rowStore, dataTree, () -> pendingEntries, grp, busyLock, log); } /** {@inheritDoc} */ @@ -1447,7 +1443,7 @@ private int expireInternal( /** * */ - protected class CacheDataStoreImpl implements CacheDataStore { + public static class CacheDataStoreImpl implements CacheDataStore { /** */ private final int partId; @@ -1457,6 +1453,15 @@ protected class CacheDataStoreImpl implements CacheDataStore { /** */ private final 
CacheDataTree dataTree; + /** */ + private final Supplier pendingEntries; + + /** */ + private final CacheGroupContext grp; + + /** */ + private final GridSpinBusyLock busyLock; + /** Update counter. */ protected final PartitionUpdateCounter pCntr; @@ -1467,13 +1472,24 @@ protected class CacheDataStoreImpl implements CacheDataStore { private final IntMap cacheSizes = new IntRWHashMap(); /** Mvcc remove handler. */ - private final PageHandler mvccUpdateMarker = new MvccMarkUpdatedHandler(); + private final PageHandler mvccUpdateMarker; /** Mvcc update tx state hint handler. */ - private final PageHandler mvccUpdateTxStateHint = new MvccUpdateTxStateHintHandler(); + private final PageHandler mvccUpdateTxStateHint; /** */ - private final PageHandler mvccApplyChanges = new MvccApplyChangesHandler(); + private final PageHandler mvccApplyChanges; + + /** */ + private final IgniteLogger log; + + /** */ + private final Boolean failNodeOnPartitionInconsistency = Boolean.getBoolean( + IgniteSystemProperties.IGNITE_FAIL_NODE_ON_UNRECOVERABLE_PARTITION_INCONSISTENCY + ); + + /** */ + private final int updateValSizeThreshold; /** * @param partId Partition number. @@ -1483,18 +1499,37 @@ protected class CacheDataStoreImpl implements CacheDataStore { public CacheDataStoreImpl( int partId, CacheDataRowStore rowStore, - CacheDataTree dataTree + CacheDataTree dataTree, + Supplier pendingEntries, + CacheGroupContext grp, + GridSpinBusyLock busyLock, + IgniteLogger log ) { this.partId = partId; this.rowStore = rowStore; this.dataTree = dataTree; + this.pendingEntries = pendingEntries; + this.grp = grp; + this.busyLock = busyLock; + this.log = log; PartitionUpdateCounter delegate = grp.mvccEnabled() ? new PartitionUpdateCounterMvccImpl(grp) : !grp.persistenceEnabled() || grp.hasAtomicCaches() ? new PartitionUpdateCounterVolatileImpl(grp) : new PartitionUpdateCounterTrackingImpl(grp); - pCntr = ctx.logger(PartitionUpdateCounterDebugWrapper.class).isDebugEnabled() ? 
+ pCntr = grp.shared().logger(PartitionUpdateCounterDebugWrapper.class).isDebugEnabled() ? new PartitionUpdateCounterDebugWrapper(partId, delegate) : delegate; + + updateValSizeThreshold = grp.shared().database().pageSize() / 2; + + mvccUpdateMarker = new MvccMarkUpdatedHandler(grp); + mvccUpdateTxStateHint = new MvccUpdateTxStateHintHandler(grp); + mvccApplyChanges = new MvccApplyChangesHandler(grp); + } + + /** {@inheritDoc} */ + @Override public CacheDataTree tree() { + return dataTree; } /** @@ -1561,7 +1596,7 @@ void decrementSize(int cacheId) { return grp.mvccEnabled() ? dataTree.isEmpty() : storageSize.get() == 0; } catch (IgniteCheckedException e) { - U.error(log, "Failed to perform operation.", e); + U.error(grp.shared().logger(IgniteCacheOffheapManagerImpl.class), "Failed to perform operation.", e); return false; } @@ -1640,7 +1675,7 @@ void decrementSize(int cacheId) { grp.cacheOrGroupName() + ", partId=" + partId + ']', e); if (failNodeOnPartitionInconsistency) - ctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); + grp.shared().kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); } } @@ -2985,11 +3020,11 @@ private void afterRowFound(@Nullable CacheDataRow row, KeyCacheObject key) throw while (cur.next()) { if (++rmv == BATCH_SIZE) { - ctx.database().checkpointReadUnlock(); + grp.shared().database().checkpointReadUnlock(); rmv = 0; - ctx.database().checkpointReadLock(); + grp.shared().database().checkpointReadLock(); } CacheDataRow row = cur.get(); @@ -3019,9 +3054,9 @@ private void afterRowFound(@Nullable CacheDataRow row, KeyCacheObject key) throw throw new IgniteCheckedException("Fail destroy store", ex); // Allow checkpointer to progress if a partition contains less than BATCH_SIZE keys. 
- ctx.database().checkpointReadUnlock(); + grp.shared().database().checkpointReadUnlock(); - ctx.database().checkpointReadLock(); + grp.shared().database().checkpointReadLock(); } /** {@inheritDoc} */ @@ -3053,7 +3088,7 @@ public void restoreState(long size, long updCntr, @Nullable Map c /** {@inheritDoc} */ @Override public PendingEntriesTree pendingTree() { - return pendingEntries; + return pendingEntries.get(); } /** {@inheritDoc} */ @@ -3251,16 +3286,26 @@ public boolean found() { /** * Mvcc remove handler. */ - private final class MvccMarkUpdatedHandler extends PageHandler { + private static final class MvccMarkUpdatedHandler extends PageHandler { + /** */ + private final CacheGroupContext grp; + + private MvccMarkUpdatedHandler(CacheGroupContext grp) { + this.grp = grp; + } + /** {@inheritDoc} */ @Override public Boolean run(int cacheId, long pageId, long page, long pageAddr, PageIO io, Boolean walPlc, MvccUpdateDataRow updateDataRow, int itemId, IoStatisticsHolder statHolder) throws IgniteCheckedException { assert grp.mvccEnabled(); + PageMemory pageMem = grp.dataRegion().pageMemory(); + IgniteWriteAheadLogManager wal = grp.shared().wal(); + DataPageIO iox = (DataPageIO)io; int off = iox.getPayloadOffset(pageAddr, itemId, - grp.dataRegion().pageMemory().realPageSize(grp.groupId()), MVCC_INFO_SIZE); + pageMem.realPageSize(grp.groupId()), MVCC_INFO_SIZE); long newCrd = iox.newMvccCoordinator(pageAddr, off); long newCntr = iox.newMvccCounter(pageAddr, off); @@ -3273,8 +3318,8 @@ private final class MvccMarkUpdatedHandler extends PageHandler { + private static final class MvccUpdateTxStateHintHandler extends PageHandler { + /** */ + private final CacheGroupContext grp; + + /** */ + private MvccUpdateTxStateHintHandler(CacheGroupContext grp) { + this.grp = grp; + } + /** {@inheritDoc} */ @Override public Boolean run(int cacheId, long pageId, long page, long pageAddr, PageIO io, Boolean walPlc, Void ignore, int itemId, IoStatisticsHolder statHolder) throws 
IgniteCheckedException { DataPageIO iox = (DataPageIO)io; + PageMemory pageMem = grp.dataRegion().pageMemory(); + IgniteWriteAheadLogManager wal = grp.shared().wal(); + int off = iox.getPayloadOffset(pageAddr, itemId, - grp.dataRegion().pageMemory().realPageSize(grp.groupId()), MVCC_INFO_SIZE); + pageMem.realPageSize(grp.groupId()), MVCC_INFO_SIZE); long crd = iox.mvccCoordinator(pageAddr, off); long cntr = iox.mvccCounter(pageAddr, off); @@ -3305,8 +3361,8 @@ private final class MvccUpdateTxStateHintHandler extends PageHandler { + private static final class MvccApplyChangesHandler extends PageHandler { + /** */ + private final CacheGroupContext grp; + + /** */ + private MvccApplyChangesHandler(CacheGroupContext grp) { + this.grp = grp; + } + /** {@inheritDoc} */ @Override public Boolean run(int cacheId, long pageId, long page, long pageAddr, PageIO io, Boolean walPlc, MvccDataRow newRow, int itemId, IoStatisticsHolder statHolder) throws IgniteCheckedException { @@ -3345,8 +3409,11 @@ private final class MvccApplyChangesHandler extends PageHandler T value(CacheObjectValueContext ctx, boolean cpy) { + return value(ctx, cpy, null); + } + + /** {@inheritDoc} */ + @Nullable @Override public T value(CacheObjectValueContext ctx, boolean cpy, ClassLoader ldr) { assert val != null; return (T)val; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/LockedEntriesInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/LockedEntriesInfo.java new file mode 100644 index 00000000000000..039c6bd49cd223 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/LockedEntriesInfo.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * Helper class to acquire java level locks on unordered set of entries and avoid deadlocks. + */ +public class LockedEntriesInfo { + /** Deadlock detection timeout in milliseconds. */ + private static final long DEADLOCK_DETECTION_TIMEOUT = 500L; + + /** Locked entries info for each thread. */ + private final Map lockedEntriesPerThread = new ConcurrentHashMap<>(); + + /** + * Attempt to lock all provided entries avoiding deadlocks. + * + * @param entries Entries to lock. + * @return {@code True} if entries were successfully locked, {@code false} if possible deadlock detected or + * some entries are obsolete (lock attempt should be retried in this case). + */ + public boolean tryLockEntries(GridCacheEntryEx[] entries) { + long threadId = Thread.currentThread().getId(); + + LockedEntries lockedEntries = new LockedEntries(entries); + + lockedEntriesPerThread.put(threadId, lockedEntries); + + boolean wasInterrupted = false; + + try { + for (int i = 0; i < entries.length; i++) { + GridCacheEntryEx entry = entries[i]; + + if (entry == null) + continue; + + boolean retry = false; + + while (true) { + if (entry.tryLockEntry(DEADLOCK_DETECTION_TIMEOUT)) + break; // Successfully locked. 
+ else { + wasInterrupted |= Thread.interrupted(); // Clear thread interruption flag. + + if (hasLockCollisions(entry, lockedEntries)) { + // Possible deadlock detected, unlock all locked entries and retry again. + retry = true; + + break; + } + // Possible deadlock not detected, just retry lock on current entry. + } + } + + if (!retry && entry.obsolete()) { + entry.unlockEntry(); + + retry = true; + } + + if (retry) { + lockedEntries.lockedIdx = -1; + + // Unlock all previously locked. + for (int j = 0; j < i; j++) { + if (entries[j] != null) + entries[j].unlockEntry(); + } + + return false; + } + + lockedEntries.lockedIdx = i; + } + + return true; + } + finally { + if (wasInterrupted) + Thread.currentThread().interrupt(); + + // Already acquired all locks or released all locks here, deadlock is not possible by this thread anymore, + // can safely delete locks information. + lockedEntriesPerThread.remove(threadId); + } + } + + /** + * @param entry Entry. + * @param curLockedEntries Current locked entries info. + * @return {@code True} if another thread holds lock for this entry and started to lock entries earlier. + */ + private boolean hasLockCollisions(GridCacheEntryEx entry, LockedEntries curLockedEntries) { + for (Map.Entry other : lockedEntriesPerThread.entrySet()) { + LockedEntries otherLockedEntries = other.getValue(); + + if (otherLockedEntries == curLockedEntries || otherLockedEntries.ts > curLockedEntries.ts) + // Skip current thread and threads started to lock after the current thread. + continue; + + GridCacheEntryEx[] otherThreadLocks = otherLockedEntries.entries; + + int otherThreadLockedIdx = otherLockedEntries.lockedIdx; + + // Visibility guarantees provided by volatile lockedIdx field. + for (int i = 0; i <= otherThreadLockedIdx; i++) { + if (otherThreadLocks[i] == entry) + return true; + } + } + + return false; + } + + /** Per-thread locked entries info. */ + private static class LockedEntries { + /** Timestamp of lock.
*/ + private final long ts = System.nanoTime(); + + /** Entries to lock. */ + private final GridCacheEntryEx[] entries; + + /** Current locked entry index. */ + private volatile int lockedIdx = -1; + + /** */ + private LockedEntries(GridCacheEntryEx[] entries) { + this.entries = entries; + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/PartitionUpdateCounterMvccImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/PartitionUpdateCounterMvccImpl.java index cbb58961048263..2e3066a2a04922 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/PartitionUpdateCounterMvccImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/PartitionUpdateCounterMvccImpl.java @@ -17,6 +17,8 @@ package org.apache.ignite.internal.processors.cache; +import java.util.TreeMap; + /** * Update counter implementation for MVCC mode. */ @@ -42,4 +44,17 @@ public PartitionUpdateCounterMvccImpl(CacheGroupContext grp) { @Override protected PartitionUpdateCounterTrackingImpl createInstance() { return new PartitionUpdateCounterMvccImpl(grp); } + + /** {@inheritDoc} */ + @Override public PartitionUpdateCounter copy() { + PartitionUpdateCounterMvccImpl copy = new PartitionUpdateCounterMvccImpl(grp); + + copy.cntr.set(cntr.get()); + copy.first = first; + copy.queue = new TreeMap<>(queue); + copy.initCntr = initCntr; + copy.reserveCntr.set(reserveCntr.get()); + + return copy; + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/PartitionUpdateCounterTrackingImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/PartitionUpdateCounterTrackingImpl.java index 7671396ba8dc35..0f9d72c5b84bfd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/PartitionUpdateCounterTrackingImpl.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/PartitionUpdateCounterTrackingImpl.java @@ -68,16 +68,16 @@ public class PartitionUpdateCounterTrackingImpl implements PartitionUpdateCounte private static final byte VERSION = 1; /** Queue of applied out of order counter updates. */ - private NavigableMap queue = new TreeMap<>(); + protected NavigableMap queue = new TreeMap<>(); /** LWM. */ - private final AtomicLong cntr = new AtomicLong(); + protected final AtomicLong cntr = new AtomicLong(); /** HWM. */ protected final AtomicLong reserveCntr = new AtomicLong(); /** */ - private boolean first = true; + protected boolean first = true; /** */ protected final CacheGroupContext grp; @@ -86,7 +86,7 @@ public class PartitionUpdateCounterTrackingImpl implements PartitionUpdateCounte * Initial counter points to last sequential update after WAL recovery. * @deprecated TODO FIXME https://issues.apache.org/jira/browse/IGNITE-11794 */ - @Deprecated private volatile long initCntr; + @Deprecated protected volatile long initCntr; /** * @param grp Group. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java index 231d8e6e8316a4..173888bd0025ed 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java @@ -484,7 +484,7 @@ private void commitIfLocked() throws IgniteCheckedException { if (!near() && !local() && onePhaseCommit()) { if (needReturnValue()) { - ret = new GridCacheReturn(null, cctx.localNodeId().equals(otherNodeId()), true, null, true); + ret = new GridCacheReturn(null, cctx.localNodeId().equals(otherNodeId()), true, null, null, true); UUID origNodeId = otherNodeId(); // Originating node. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/CacheDistributedGetFutureAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/CacheDistributedGetFutureAdapter.java index 15730b2574a490..96147362d5b2e4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/CacheDistributedGetFutureAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/CacheDistributedGetFutureAdapter.java @@ -43,6 +43,7 @@ import org.apache.ignite.internal.processors.cache.distributed.near.GridNearGetRequest; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearGetResponse; import org.apache.ignite.internal.util.future.GridFutureAdapter; +import org.apache.ignite.internal.util.tostring.GridToStringExclude; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.C1; import 
org.apache.ignite.internal.util.typedef.CIX1; @@ -128,6 +129,10 @@ public abstract class CacheDistributedGetFutureAdapter /** */ protected final boolean recovery; + /** Deployment class loader id which will be used for deserialization of entries on a distributed task. */ + @GridToStringExclude + protected final IgniteUuid deploymentLdrId; + /** */ protected Map>> invalidNodes = Collections.emptyMap(); @@ -175,6 +180,7 @@ protected CacheDistributedGetFutureAdapter( this.needVer = needVer; this.keepCacheObjects = keepCacheObjects; this.recovery = recovery; + this.deploymentLdrId = U.contextDeploymentClassLoaderId(cctx.kernalContext()); futId = IgniteUuid.randomUuid(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/ClientCacheDhtTopologyFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/ClientCacheDhtTopologyFuture.java deleted file mode 100644 index 8fae639a1fa27d..00000000000000 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/ClientCacheDhtTopologyFuture.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.ignite.internal.processors.cache.distributed.dht; - -import java.util.Collection; -import org.apache.ignite.IgniteCheckedException; -import org.apache.ignite.cluster.ClusterNode; -import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; -import org.apache.ignite.internal.processors.cache.CacheGroupContext; -import org.apache.ignite.internal.util.typedef.internal.U; - -/** - * Topology future created for client cache start. - */ -public class ClientCacheDhtTopologyFuture extends GridDhtTopologyFutureAdapter { - /** */ - final AffinityTopologyVersion topVer; - - /** - * @param topVer Topology version. - */ - public ClientCacheDhtTopologyFuture(AffinityTopologyVersion topVer) { - assert topVer != null; - - this.topVer = topVer; - - onDone(topVer); - } - - /** - * @param topVer Topology version. - * @param e Error. - */ - public ClientCacheDhtTopologyFuture(AffinityTopologyVersion topVer, IgniteCheckedException e) { - assert e != null; - assert topVer != null; - - this.topVer = topVer; - - onDone(e); - } - - /** - * @param grp Cache group. - * @param topNodes Topology nodes. 
- */ - public void validate(CacheGroupContext grp, Collection topNodes) { - grpValidRes = U.newHashMap(1); - - CacheGroupValidation valRes = validateCacheGroup(grp, topNodes); - - if (!valRes.isValid() || valRes.hasLostPartitions()) - grpValidRes.put(grp.groupId(), valRes); - } - - /** {@inheritDoc} */ - @Override public AffinityTopologyVersion initialVersion() { - return topVer; - } - - /** {@inheritDoc} */ - @Override public boolean exchangeDone() { - throw new UnsupportedOperationException(); - } - - /** {@inheritDoc} */ - @Override public AffinityTopologyVersion topologyVersion() { - return topVer; - } - - /** {@inheritDoc} */ - @Override public boolean changedAffinity() { - return true; - } - - /** {@inheritDoc} */ - @Override public String toString() { - return "ClientCacheDhtTopologyFuture [topVer=" + topVer + ']'; - } -} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTopologyFutureAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTopologyFutureAdapter.java index b5e55906af35c8..3f637c525200ed 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTopologyFutureAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTopologyFutureAdapter.java @@ -33,7 +33,6 @@ import static java.lang.String.format; import static org.apache.ignite.cache.PartitionLossPolicy.READ_ONLY_ALL; import static org.apache.ignite.cache.PartitionLossPolicy.READ_ONLY_SAFE; -import static org.apache.ignite.internal.processors.cache.GridCacheProcessor.CLUSTER_READ_ONLY_MODE_ERROR_MSG_FORMAT; import static org.apache.ignite.internal.processors.cache.GridCacheUtils.isSystemCache; /** @@ -41,6 +40,10 @@ */ public abstract class GridDhtTopologyFutureAdapter extends GridFutureAdapter implements GridDhtTopologyFuture { + /** Error message format if cluster in read-only 
mode and write operation tries to execute.*/ + private static final String CLUSTER_READ_ONLY_ERROR_MSG = + "Failed to perform cache operation (cluster is in read-only mode) [cacheGrp=%s, cache=%s]"; + /** Cache groups validation results. */ protected volatile Map grpValidRes = Collections.emptyMap(); @@ -85,7 +88,7 @@ protected final CacheGroupValidation validateCacheGroup(CacheGroupContext grp, C if (!clusterIsActive) { return new CacheInvalidStateException( - "Failed to perform cache operation (cluster is not activated): " + cctx.name()); + "Failed to perform cache operation (cluster is not activated): " + cctx.name()); } if (cctx.cache() == null) @@ -96,7 +99,7 @@ protected final CacheGroupValidation validateCacheGroup(CacheGroupContext grp, C if (cctx.shared().readOnlyMode() && !read && !isSystemCache(cctx.name())) { return new CacheInvalidStateException(new IgniteClusterReadOnlyException( - format(CLUSTER_READ_ONLY_MODE_ERROR_MSG_FORMAT, "cache", cctx.group().name(), cctx.name()) + format(CLUSTER_READ_ONLY_ERROR_MSG, grp.name(), cctx.name()) )); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java index b2e7b30c6b965e..1831c1055d2ed7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java @@ -36,6 +36,7 @@ import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.CacheEntryPredicate; +import org.apache.ignite.internal.processors.cache.CacheInvalidStateException; import 
org.apache.ignite.internal.processors.cache.CacheObject; import org.apache.ignite.internal.processors.cache.CacheOperationContext; import org.apache.ignite.internal.processors.cache.GridCacheConcurrentMap; @@ -52,6 +53,7 @@ import org.apache.ignite.internal.processors.cache.distributed.GridDistributedUnlockRequest; import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtForceKeysRequest; import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtForceKeysResponse; +import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology; @@ -1145,6 +1147,16 @@ public IgniteInternalFuture lockAllAsync( tx.topologyVersion(req.topologyVersion()); } + + GridDhtPartitionsExchangeFuture lastFinishedFut = ctx.shared().exchange().lastFinishedFuture(); + + CacheOperationContext opCtx = ctx.operationContextPerCall(); + + CacheInvalidStateException validateCacheE = lastFinishedFut + .validateCache(ctx, opCtx != null && opCtx.recovery(), req.txRead(), null, keys); + + if (validateCacheE != null) + throw validateCacheE; } else { fut = new GridDhtLockFuture(ctx, @@ -1299,9 +1311,7 @@ else if (!b) } } catch (IgniteCheckedException | RuntimeException e) { - String err = "Failed to unmarshal at least one of the keys for lock request message: " + req; - - U.error(log, err, e); + U.error(log, req, e); if (tx != null) { try { @@ -1327,7 +1337,7 @@ else if (!b) } return new GridDhtFinishedFuture<>( - new IgniteCheckedException(err, e)); + new IgniteCheckedException(e)); } } diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxAbstractEnlistFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxAbstractEnlistFuture.java index 8d45780db4dcef..21b895d9d36980 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxAbstractEnlistFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxAbstractEnlistFuture.java @@ -187,6 +187,10 @@ public abstract class GridDhtTxAbstractEnlistFuture extends GridCacheFutureAd /** Map for tracking nodes to which first request was already sent in order to send smaller subsequent requests. */ private final Set firstReqSent = new HashSet<>(); + /** Deployment class loader id which will be used for deserialization of entries on a distributed task. */ + @GridToStringExclude + protected final IgniteUuid deploymentLdrId; + /** * @param nearNodeId Near node ID. * @param nearLockVer Near lock version. 
@@ -227,6 +231,7 @@ protected GridDhtTxAbstractEnlistFuture(UUID nearNodeId, this.tx = tx; this.filter = filter; this.keepBinary = keepBinary; + this.deploymentLdrId = U.contextDeploymentClassLoaderId(cctx.kernalContext()); lockVer = tx.xidVersion(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxEnlistFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxEnlistFuture.java index 1b14cf359a110c..5f7a3332dd7909 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxEnlistFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxEnlistFuture.java @@ -32,6 +32,7 @@ import org.apache.ignite.internal.processors.query.EnlistOperation; import org.apache.ignite.internal.processors.query.UpdateSourceIterator; import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteUuid; import org.jetbrains.annotations.Nullable; @@ -130,7 +131,7 @@ public GridDhtTxEnlistFuture(UUID nearNodeId, res.addEntryProcessResult(cctx, key, null, invokeRes.result(), invokeRes.error(), keepBinary); } else if (needRes) - res.set(cctx, txRes.prevValue(), txRes.success(), keepBinary); + res.set(cctx, txRes.prevValue(), txRes.success(), keepBinary, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java index 0aa3579622c46b..5afa379aad4b26 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java @@ -226,6 +226,10 @@ public final class GridDhtTxPrepareFuture extends GridCacheCompoundFuture invokeEntry = new CacheInvokeEntry<>(key, val, txEntry.cached().version(), keepBinary, txEntry.cached()); + EntryProcessor processor = t.get1(); + IgniteThread.onEntryProcessorEntered(false); - try { - EntryProcessor processor = t.get1(); + if (cctx.kernalContext().deploy().enabled() && + cctx.kernalContext().deploy().isGlobalLoader(processor.getClass().getClassLoader())) { + U.restoreDeploymentContext(cctx.kernalContext(), cctx.kernalContext() + .deploy().getClassLoaderId(processor.getClass().getClassLoader())); + } + try { procRes = processor.process(invokeEntry, t.get2()); val = cacheCtx.toCacheObject(invokeEntry.getValue(true)); @@ -495,7 +506,7 @@ private void onEntriesLocked() { } } else if (retVal) - ret.value(cacheCtx, val, keepBinary); + ret.value(cacheCtx, val, keepBinary, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)); } if (hasFilters && !cacheCtx.isAll(cached, txEntry.filters())) { @@ -1237,7 +1248,7 @@ private IgniteTxOptimisticCheckedException versionCheckError(IgniteTxEntry entry GridCacheContext cctx = entry.context(); try { - Object key = cctx.unwrapBinaryIfNeeded(entry.key(), entry.keepBinary(), false); + Object key = cctx.unwrapBinaryIfNeeded(entry.key(), entry.keepBinary(), false, null); assert key != null : entry.key(); @@ -1253,7 +1264,7 @@ private IgniteTxOptimisticCheckedException versionCheckError(IgniteTxEntry entry CacheObject cacheVal = entryEx != null ? entryEx.rawGet() : null; - Object val = cacheVal != null ? cctx.unwrapBinaryIfNeeded(cacheVal, entry.keepBinary(), false) : null; + Object val = cacheVal != null ? 
cctx.unwrapBinaryIfNeeded(cacheVal, entry.keepBinary(), false, null) : null; if (val != null) { if (S.includeSensitive()) @@ -1980,8 +1991,14 @@ void onResult(GridDhtTxPrepareResponse res) { null, null, EVT_CACHE_REBALANCE_OBJECT_LOADED, info.value(), true, null, false, null, null, null, false); - if (retVal && !invoke) - ret.value(cacheCtx, info.value(), false); + if (retVal && !invoke) { + ret.value( + cacheCtx, + info.value(), + false, + U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId) + ); + } } break; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedGetFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedGetFuture.java index e3ba39ab8fbfe3..84c3a3af71dea8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedGetFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedGetFuture.java @@ -567,7 +567,8 @@ private boolean localGet(AffinityTopologyVersion topVer, KeyCacheObject key, int ver, 0, 0, - needVer); + needVer, + U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)); return true; } @@ -650,7 +651,8 @@ private Map createResultMap(Collection infos) { false, needVer ? 
info.version() : null, 0, - 0); + 0, + U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)); } return map; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedSingleGetFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedSingleGetFuture.java index 57cfbd1b4fb0d3..1dcf4e4f0c5e43 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedSingleGetFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedSingleGetFuture.java @@ -57,6 +57,7 @@ import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; import org.apache.ignite.internal.util.lang.GridPlainRunnable; +import org.apache.ignite.internal.util.tostring.GridToStringExclude; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.CU; @@ -145,6 +146,10 @@ public class GridPartitionedSingleGetFuture extends GridCacheFutureAdapter extends GridDhtCacheAdapter { } }; + /** Locked entries info for each thread. */ + private final LockedEntriesInfo lockedEntriesInfo = new LockedEntriesInfo(); + /** Update reply closure. 
*/ @GridToStringExclude private UpdateReplyClosure updateReplyClos; @@ -892,7 +896,7 @@ private IgniteInternalFuture> invoke0( if (invokeRes.result() != null) res = CacheInvokeResult.fromResult((T)ctx.unwrapBinaryIfNeeded(invokeRes.result(), - keepBinary, false)); + keepBinary, false, null)); } return res; @@ -1562,7 +1566,8 @@ private IgniteInternalFuture> getAllAsync0(@Nullable Collection> getAllAsync0(@Nullable Collection lockEntries(GridNearAtomicAbstractUpdateRequest } } else { - List locked = new ArrayList<>(req.size()); + GridDhtCacheEntry[] locked = new GridDhtCacheEntry[req.size()]; while (true) { for (int i = 0; i < req.size(); i++) { GridDhtCacheEntry entry = entryExx(req.key(i), topVer); - locked.add(entry); - } - - boolean retry = false; - - for (int i = 0; i < locked.size(); i++) { - GridCacheMapEntry entry = locked.get(i); - - if (entry == null) - continue; - - entry.lockEntry(); - - if (entry.obsolete()) { - // Unlock all locked. - for (int j = 0; j <= i; j++) { - if (locked.get(j) != null) - locked.get(j).unlockEntry(); - } - - // Clear entries. - locked.clear(); - - // Retry. 
- retry = true; - - break; - } + locked[i] = entry; } - if (!retry) - return locked; + if (lockedEntriesInfo.tryLockEntries(locked)) + return Arrays.asList(locked); } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicAbstractUpdateFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicAbstractUpdateFuture.java index e388ba2d822f1c..b7cc08109a6aa2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicAbstractUpdateFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicAbstractUpdateFuture.java @@ -46,6 +46,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheReturn; import org.apache.ignite.internal.processors.cache.KeyCacheObject; import org.apache.ignite.internal.util.future.GridFutureAdapter; +import org.apache.ignite.internal.util.tostring.GridToStringExclude; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.S; @@ -117,6 +118,10 @@ public abstract class GridNearAtomicAbstractUpdateFuture extends GridCacheFuture /** Near cache flag. */ protected final boolean nearEnabled; + /** Deployment class loader id which will be used for deserialization of entries on a distributed task. */ + @GridToStringExclude + protected final IgniteUuid deploymentLdrId; + /** Topology locked flag. Set if atomic update is performed inside a TX or explicit lock. 
*/ protected boolean topLocked; @@ -198,6 +203,7 @@ protected GridNearAtomicAbstractUpdateFuture( this.skipStore = skipStore; this.keepBinary = keepBinary; this.recovery = recovery; + this.deploymentLdrId = U.contextDeploymentClassLoaderId(cctx.kernalContext()); nearEnabled = CU.isNearEnabled(cctx); @@ -350,7 +356,11 @@ else if (res.remapTopologyVersion() != null) */ final void completeFuture(@Nullable GridCacheReturn ret, Throwable err, @Nullable Long futId) { Object retval = ret == null ? null : rawRetval ? ret : (this.retval || op == TRANSFORM) ? - cctx.unwrapBinaryIfNeeded(ret.value(), keepBinary) : ret.success(); + cctx.unwrapBinaryIfNeeded( + ret.value(), + keepBinary, + U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId) + ) : ret.success(); if (op == TRANSFORM && retval == null) retval = Collections.emptyMap(); @@ -406,7 +416,12 @@ final void onPrimaryError(GridNearAtomicAbstractUpdateRequest req, GridNearAtomi for (KeyCacheObject key : keys0) { try { - keys.add(cctx.cacheObjectContext().unwrapBinaryIfNeeded(key, keepBinary, false)); + keys.add(cctx.cacheObjectContext().unwrapBinaryIfNeeded( + key, + keepBinary, + false, + U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)) + ); } catch (BinaryInvalidTypeException e) { keys.add(cctx.toCacheKeyObject(key)); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicSingleUpdateFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicSingleUpdateFuture.java index 2965dd84d28cb9..18d2684451551a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicSingleUpdateFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicSingleUpdateFuture.java @@ -486,7 +486,7 @@ private void updateNear(GridNearAtomicAbstractUpdateRequest req, 
GridNearAtomicU sendSingleRequest(reqState0.req.nodeId(), reqState0.req); if (syncMode == FULL_ASYNC) { - completeFuture(new GridCacheReturn(cctx, true, true, null, true), null, null); + completeFuture(new GridCacheReturn(cctx, true, true, null, null, true), null, null); return; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicUpdateFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicUpdateFuture.java index 3f108e36d0c199..6045b967c3f5fd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicUpdateFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicUpdateFuture.java @@ -723,7 +723,7 @@ else if (res.remapTopologyVersion() != null) } if (syncMode == FULL_ASYNC) - completeFuture(new GridCacheReturn(cctx, true, true, null, true), null, null); + completeFuture(new GridCacheReturn(cctx, true, true, null, null, true), null, null); } /** {@inheritDoc} */ @@ -816,7 +816,7 @@ private void map(AffinityTopologyVersion topVer, @Nullable Collection> loadAsync( row.version(), 0, 0, - needVer); + needVer, + U.deploymentClassLoader(ctx.kernalContext(), U.contextDeploymentClassLoaderId(ctx.kernalContext()))); if (evt) { ctx.events().readEvent(key, @@ -651,7 +652,8 @@ public final IgniteInternalFuture> loadAsync( ver, 0, 0, - needVer); + needVer, + U.deploymentClassLoader(ctx.kernalContext(), U.contextDeploymentClassLoaderId(ctx.kernalContext()))); } } else diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedLockFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedLockFuture.java index 8d0abde17e20dd..7ba91d7e947052 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedLockFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedLockFuture.java @@ -40,7 +40,9 @@ import org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.CacheEntryPredicate; +import org.apache.ignite.internal.processors.cache.CacheInvalidStateException; import org.apache.ignite.internal.processors.cache.CacheObject; +import org.apache.ignite.internal.processors.cache.CacheOperationContext; import org.apache.ignite.internal.processors.cache.CacheStoppedException; import org.apache.ignite.internal.processors.cache.GridCacheCompoundIdentityFuture; import org.apache.ignite.internal.processors.cache.GridCacheContext; @@ -53,6 +55,7 @@ import org.apache.ignite.internal.processors.cache.distributed.GridDistributedCacheEntry; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture; +import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockMapping; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockRequest; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockResponse; @@ -804,20 +807,6 @@ void map() { } } - for (GridDhtTopologyFuture fut : cctx.shared().exchange().exchangeFutures()) { - if (fut.exchangeDone() && fut.topologyVersion().equals(lastChangeVer)) { - Throwable err = fut.validateCache(cctx, recovery, read, null, keys); - - if (err != null) { - onDone(err); - - return; - } - - break; - } - } - // Continue mapping on the same 
topology version as it was before. synchronized (this) { if (this.topVer == null) @@ -1392,6 +1381,15 @@ private boolean mapAsPrimary(Collection keys, AffinityTopologyVe lockLocally(distributedKeys, topVer); } + GridDhtPartitionsExchangeFuture lastFinishedFut = cctx.shared().exchange().lastFinishedFuture(); + + CacheOperationContext opCtx = cctx.operationContextPerCall(); + + CacheInvalidStateException validateCacheE = lastFinishedFut.validateCache(cctx, opCtx != null && opCtx.recovery(), read, null, keys); + + if (validateCacheE != null) + onDone(validateCacheE); + return true; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java index e9f4d05e720a1f..4a9435c378c934 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java @@ -2672,6 +2672,19 @@ private String exchangeTimingsLogMessage(String header, List timings) { return false; } + /** + * @param grp Cache group. + */ + public void validate(CacheGroupContext grp) { + if (grpValidRes == null) + grpValidRes = new ConcurrentHashMap<>(); + + CacheGroupValidation valRes = validateCacheGroup(grp, events().lastEvent().topologyNodes()); + + if (!valRes.isValid() || valRes.hasLostPartitions()) + grpValidRes.put(grp.groupId(), valRes); + } + /** * Updates the {@link GridMetricManager#PME_OPS_BLOCKED_DURATION_HISTOGRAM} and {@link * GridMetricManager#PME_DURATION_HISTOGRAM} metrics if needed. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridClientPartitionTopology.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridClientPartitionTopology.java index ad0a52b2a9d8c6..4df827338b327d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridClientPartitionTopology.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridClientPartitionTopology.java @@ -849,7 +849,8 @@ private boolean shouldOverridePartitionMap(GridDhtPartitionMap currentMap, GridD consistencyCheck(); - this.lostParts = lostParts == null ? null : new TreeSet<>(lostParts); + if (exchangeVer != null) + this.lostParts = lostParts == null ? null : new TreeSet<>(lostParts); if (log.isDebugEnabled()) log.debug("Partition map after full update: " + fullMapString()); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetFuture.java index 1c7fa8151eeaad..4b4f194b4cd044 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetFuture.java @@ -586,13 +586,13 @@ private void addResult(KeyCacheObject key, CacheObject v, GridCacheVersion ver) add(new GridFinishedFuture<>(Collections.singletonMap(key0, val0))); } else { - K key0 = (K)cctx.unwrapBinaryIfNeeded(key, !deserializeBinary, false); + K key0 = (K)cctx.unwrapBinaryIfNeeded(key, !deserializeBinary, false, null); V val0 = needVer ? (V)new EntryGetResult(!skipVals ? 
- (V)cctx.unwrapBinaryIfNeeded(v, !deserializeBinary, false) : + (V)cctx.unwrapBinaryIfNeeded(v, !deserializeBinary, false, null) : (V)Boolean.TRUE, ver) : !skipVals ? - (V)cctx.unwrapBinaryIfNeeded(v, !deserializeBinary, false) : + (V)cctx.unwrapBinaryIfNeeded(v, !deserializeBinary, false, null) : (V)Boolean.TRUE; add(new GridFinishedFuture<>(Collections.singletonMap(key0, val0))); @@ -676,7 +676,8 @@ private Map loadEntries( false, needVer ? info.version() : null, 0, - 0); + 0, + U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)); } catch (GridCacheEntryRemovedException ignore) { if (log.isDebugEnabled()) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockFuture.java index 73b3aff5b65e7c..b88f74c722e356 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockFuture.java @@ -843,30 +843,6 @@ void map() { topVer = tx.topologyVersionSnapshot(); if (topVer != null) { - for (GridDhtTopologyFuture fut : cctx.shared().exchange().exchangeFutures()) { - if (fut.exchangeDone() && fut.topologyVersion().equals(topVer)) { - Throwable err = null; - - // Before cache validation, make sure that this topology future is already completed. - try { - fut.get(); - } - catch (IgniteCheckedException e) { - err = fut.error(); - } - - err = (err == null) ? fut.validateCache(cctx, recovery, read, null, keys) : err; - - if (err != null) { - onDone(err); - - return; - } - - break; - } - } - // Continue mapping on the same topology version as it was before. 
if (this.topVer == null) this.topVer = topVer; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFinishFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFinishFuture.java index fc239da9c5f0a0..8a25f86154e690 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFinishFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFinishFuture.java @@ -31,6 +31,7 @@ import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.NodeStoppingException; import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException; +import org.apache.ignite.internal.processors.cache.CacheInvalidStateException; import org.apache.ignite.internal.processors.cache.GridCacheCompoundIdentityFuture; import org.apache.ignite.internal.processors.cache.GridCacheFuture; import org.apache.ignite.internal.processors.cache.GridCacheReturn; @@ -58,11 +59,16 @@ import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.transactions.TransactionRollbackException; +import static java.util.Collections.emptySet; +import static java.util.stream.Stream.concat; +import static java.util.stream.Stream.of; import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_ASYNC; import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; import static org.apache.ignite.internal.processors.tracing.MTC.TraceSurroundings; import static org.apache.ignite.internal.processors.tracing.MTC.support; import static org.apache.ignite.internal.processors.tracing.SpanType.TX_NEAR_FINISH; +import static org.apache.ignite.transactions.TransactionState.COMMITTED; +import static org.apache.ignite.transactions.TransactionState.COMMITTING; import static org.apache.ignite.transactions.TransactionState.UNKNOWN; /** @@ -73,6 +79,10 @@ 
public final class GridNearTxFinishFuture extends GridCacheCompoundIdentit /** */ private static final long serialVersionUID = 0L; + /** All owners left grid message. */ + public static final String ALL_PARTITION_OWNERS_LEFT_GRID_MSG = + "Failed to commit a transaction (all partition owners have left the grid, partition data has been lost)"; + /** Tracing span. */ private Span span; @@ -979,6 +989,19 @@ public GridDistributedTxMapping mapping() { /** {@inheritDoc} */ @Override boolean onNodeLeft(UUID nodeId, boolean discoThread) { + if (tx.state() == COMMITTING || tx.state() == COMMITTED) { + if (concat(of(m.primary().id()), tx.transactionNodes().getOrDefault(nodeId, emptySet()).stream()) + .noneMatch(uuid -> cctx.discovery().alive(uuid))) { + onDone(new CacheInvalidStateException(ALL_PARTITION_OWNERS_LEFT_GRID_MSG + + m.entries().stream().map(e -> " [cacheName=" + e.cached().context().name() + + ", partition=" + e.key().partition() + + (S.includeSensitive() ? ", key=" + e.key() : "") + + "]").findFirst().orElse(""))); + + return true; + } + } + if (nodeId.equals(m.primary().id())) { if (msgLog.isDebugEnabled()) { msgLog.debug("Near finish fut, mini future node left [txId=" + tx.nearXidVersion() + diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java index 8b456773321114..f1f99d37e507cf 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java @@ -1493,7 +1493,13 @@ private boolean enlistWriteEntry(GridCacheContext cacheCtx, entryProcessor != null ? TRANSFORM : old != null ? 
UPDATE : CREATE; if (old != null && hasFilters && !filter(entry.context(), cacheKey, old, filter)) { - ret.set(cacheCtx, old, false, keepBinary); + ret.set( + cacheCtx, + old, + false, + keepBinary, + U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId) + ); if (!readCommitted()) { if (optimistic() && serializable()) { @@ -1581,7 +1587,7 @@ private boolean enlistWriteEntry(GridCacheContext cacheCtx, assert txEntry.op() != TRANSFORM : txEntry; if (retval) - ret.set(cacheCtx, null, true, keepBinary); + ret.set(cacheCtx, null, true, keepBinary, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)); else ret.success(true); } @@ -1594,7 +1600,7 @@ private boolean enlistWriteEntry(GridCacheContext cacheCtx, } if (retval && !transform) - ret.set(cacheCtx, old, true, keepBinary); + ret.set(cacheCtx, old, true, keepBinary, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)); else { if (txEntry.op() == TRANSFORM) { GridCacheVersion ver; @@ -1622,7 +1628,7 @@ private boolean enlistWriteEntry(GridCacheContext cacheCtx, // Pessimistic. 
else { if (retval && !transform) - ret.set(cacheCtx, old, true, keepBinary); + ret.set(cacheCtx, old, true, keepBinary, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)); else ret.success(true); } @@ -1648,7 +1654,7 @@ private boolean enlistWriteEntry(GridCacheContext cacheCtx, if (!del) { if (hasFilters && !filter(entry.context(), cacheKey, v, filter)) { - ret.set(cacheCtx, v, false, keepBinary); + ret.set(cacheCtx, v, false, keepBinary, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)); return loadMissed; } @@ -1702,7 +1708,7 @@ private boolean enlistWriteEntry(GridCacheContext cacheCtx, txEntry.markValid(); if (retval && !transform) - ret.set(cacheCtx, v, true, keepBinary); + ret.set(cacheCtx, v, true, keepBinary, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)); else ret.success(true); } @@ -1924,7 +1930,7 @@ private IgniteInternalFuture removeAllAsync0( try { txFut.get(); - return new GridCacheReturn(cacheCtx, true, keepBinary, + return new GridCacheReturn(cacheCtx, true, keepBinary, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId), implicitRes.value(), implicitRes.success()); } catch (IgniteCheckedException | RuntimeException e) { @@ -2147,7 +2153,7 @@ private IgniteInternalFuture updateAsync(GridCacheContext cache val = cacheCtx.unwrapInvokeResult((Map)val, keepBinary); } - return new GridCacheReturn(cacheCtx, true, keepBinary, val, futRes.success()); + return new GridCacheReturn(cacheCtx, true, keepBinary, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId), val, futRes.success()); } })); } @@ -2313,7 +2319,7 @@ public IgniteInternalFuture> getAllAsync( K keyVal = (K) (keepCacheObjects ? cacheKey : cacheCtx.cacheObjectContext().unwrapBinaryIfNeeded(cacheKey, !deserializeBinary, - true)); + true, null)); if (retMap.containsKey(keyVal)) // We already have a return value. 
@@ -2391,7 +2397,8 @@ public IgniteInternalFuture> getAllAsync( readVer, 0, 0, - needVer); + needVer, + U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)); if (readVer != null) txEntry.entryReadVersion(readVer); @@ -2480,7 +2487,8 @@ public IgniteInternalFuture> getAllAsync( getRes.version(), 0, 0, - needVer); + needVer, + U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)); } return Collections.emptyMap(); @@ -2541,7 +2549,7 @@ public IgniteInternalFuture> getAllAsync( K keyVal = (K)(keepCacheObjects ? cacheKey : cacheCtx.cacheObjectContext() - .unwrapBinaryIfNeeded(cacheKey, !deserializeBinary, false)); + .unwrapBinaryIfNeeded(cacheKey, !deserializeBinary, false, null)); if (retMap.containsKey(keyVal)) it.remove(); @@ -2685,7 +2693,7 @@ private Collection enlistRead( } cacheCtx.addResult(map, key, val, skipVals, keepCacheObjects, deserializeBinary, false, - ver, 0, 0); + ver, 0, 0, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)); } } else { @@ -2751,7 +2759,8 @@ private Collection enlistRead( readVer, 0, 0, - needVer); + needVer, + U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)); } else missed.put(key, txEntry.cached().version()); @@ -2831,7 +2840,8 @@ private Collection enlistRead( readVer, 0, 0, - needVer); + needVer, + U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)); } else missed.put(key, ver); @@ -2944,7 +2954,7 @@ private IgniteInternalFuture loadMissing( assert !hasFilters && !retval; assert val == null || Boolean.TRUE.equals(val) : val; - ret.set(cacheCtx, null, val != null, keepBinary); + ret.set(cacheCtx, null, val != null, keepBinary, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)); } else { CacheObject cacheVal = cacheCtx.toCacheObject(val); @@ -2983,7 +2993,7 @@ private IgniteInternalFuture loadMissing( else success = true; - ret.set(cacheCtx, cacheVal, success, keepBinary); + ret.set(cacheCtx, cacheVal, success, keepBinary, 
U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)); } } } @@ -3044,7 +3054,7 @@ private IgniteInternalFuture optimisticPutFuture( res = cacheCtx.unwrapInvokeResult((Map)res, keepBinary); } - return new GridCacheReturn(cacheCtx, true, keepBinary, res, implicitRes.success()); + return new GridCacheReturn(cacheCtx, true, keepBinary, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId), res, implicitRes.success()); } catch (IgniteCheckedException | RuntimeException e) { if (!(e instanceof NodeStoppingException)) @@ -4077,6 +4087,9 @@ public IgniteInternalFuture prepareNearTxLocal() { mapExplicitLocks(); + if (cctx.kernalContext().deploy().enabled() && deploymentLdrId != null) + U.restoreDeploymentContext(cctx.kernalContext(), deploymentLdrId); + fut.prepare(); return fut; @@ -4950,7 +4963,8 @@ private IgniteInternalFuture> checkMissed( false, needVer ? loadVer : null, 0, - 0); + 0, + U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)); } } else { @@ -4974,7 +4988,8 @@ private IgniteInternalFuture> checkMissed( false, needVer ? 
loadVer : null, 0, - 0); + 0, + U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)); } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/consistency/GridNearReadRepairCheckOnlyFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/consistency/GridNearReadRepairCheckOnlyFuture.java index e04ea2704b42b6..63638e103f496b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/consistency/GridNearReadRepairCheckOnlyFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/consistency/GridNearReadRepairCheckOnlyFuture.java @@ -138,7 +138,8 @@ public IgniteInternalFuture single() { getRes.version(), 0, 0, - needVer); + needVer, + null); } if (skipVals) { @@ -179,7 +180,8 @@ public IgniteInternalFuture> multi() { getRes.version(), 0, 0, - needVer); + needVer, + null); } return map; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/consistency/GridNearReadRepairFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/consistency/GridNearReadRepairFuture.java index f546fd76ec086d..21c748826421b6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/consistency/GridNearReadRepairFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/consistency/GridNearReadRepairFuture.java @@ -132,7 +132,8 @@ private void recordConsistencyViolation(Map fixe false, null, 0, - 0); + 0, + null); } Map> originalMap = new HashMap<>(); @@ -162,7 +163,8 @@ private void recordConsistencyViolation(Map fixe false, null, 0, - 0); + 0, + null); } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java index 6b2128de70815d..cde847ba3207c5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java @@ -54,6 +54,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheReturn; import org.apache.ignite.internal.processors.cache.IgniteCacheExpiryPolicy; import org.apache.ignite.internal.processors.cache.KeyCacheObject; +import org.apache.ignite.internal.processors.cache.LockedEntriesInfo; import org.apache.ignite.internal.processors.cache.local.GridLocalCache; import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.transactions.IgniteTxLocalEx; @@ -90,6 +91,9 @@ public class GridLocalAtomicCache extends GridLocalCache { /** */ private GridCachePreloader preldr; + /** Locked entries info for each thread. */ + private final LockedEntriesInfo lockedEntriesInfo = new LockedEntriesInfo(); + /** * Empty constructor required by {@link Externalizable}. */ @@ -424,7 +428,8 @@ private Map getAllInternal(@Nullable Collection keys, row.version(), 0, 0, - needVer); + needVer, + null); if (ctx.statisticsEnabled() && !skipVals) metrics0().onRead(true); @@ -506,7 +511,8 @@ private Map getAllInternal(@Nullable Collection keys, true, null, 0, - 0); + 0, + null); } else success = false; @@ -1007,8 +1013,14 @@ else if (res == null) if (err != null) throw err; - Object ret = res == null ? null : rawRetval ? new GridCacheReturn(ctx, true, keepBinary, res.get2(), res.get1()) : - (retval || op == TRANSFORM) ? res.get2() : res.get1(); + Object ret = res == null ? null : rawRetval ? 
new GridCacheReturn( + ctx, + true, + keepBinary, + U.deploymentClassLoader(ctx.kernalContext(), U.contextDeploymentClassLoaderId(ctx.kernalContext())), + res.get2(), + res.get1() + ) : (retval || op == TRANSFORM) ? res.get2() : res.get1(); if (op == TRANSFORM && ret == null) ret = Collections.emptyMap(); @@ -1476,11 +1488,13 @@ else if (op == UPDATE) { * @return Collection of locked entries. */ private List lockEntries(Collection keys) { - List locked = new ArrayList<>(keys.size()); + GridCacheEntryEx[] locked = new GridCacheEntryEx[keys.size()]; boolean nullKeys = false; while (true) { + int i = 0; + for (K key : keys) { if (key == null) { nullKeys = true; @@ -1490,40 +1504,24 @@ private List lockEntries(Collection keys) { GridCacheEntryEx entry = entryEx(ctx.toCacheKeyObject(key)); - locked.add(entry); + locked[i++] = entry; } if (nullKeys) break; - for (int i = 0; i < locked.size(); i++) { - GridCacheEntryEx entry = locked.get(i); - - entry.lockEntry(); - - if (entry.obsolete()) { - // Unlock all locked. - for (int j = 0; j <= i; j++) - locked.get(j).unlockEntry(); - - // Clear entries. - locked.clear(); - - // Retry. 
- break; - } - } - - if (!locked.isEmpty()) - return locked; + if (lockedEntriesInfo.tryLockEntries(locked)) + return Arrays.asList(locked); } assert nullKeys; AffinityTopologyVersion topVer = ctx.affinity().affinityTopologyVersion(); - for (GridCacheEntryEx entry : locked) - entry.touch(); + for (GridCacheEntryEx entry : locked) { + if (entry != null) + entry.touch(); + } throw new NullPointerException("Null key."); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccProcessorImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccProcessorImpl.java index 472c6815fdba9c..9fbf83c35473ab 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccProcessorImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccProcessorImpl.java @@ -34,7 +34,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import java.util.stream.Stream; - import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; @@ -405,7 +404,7 @@ private void txLogPageStoreInit(IgniteCacheDatabaseSharedManager mgr) throws Ign //noinspection ConstantConditions ctx.cache().context().pageStore().initialize(TX_LOG_CACHE_ID, 0, - TX_LOG_CACHE_NAME, mgr.dataRegion(TX_LOG_CACHE_NAME).memoryMetrics().totalAllocatedPages()); + TX_LOG_CACHE_NAME, mgr.dataRegion(TX_LOG_CACHE_NAME).memoryMetrics().totalAllocatedPages()::add); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLog.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLog.java index a41790f8bd44a3..8cf61a8ccec82c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLog.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLog.java @@ -33,6 +33,7 @@ import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRecord; import org.apache.ignite.internal.processors.cache.CacheDiagnosticManager; import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils; +import org.apache.ignite.internal.processors.cache.persistence.DataRegion; import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager; import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointListener; @@ -41,6 +42,7 @@ import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageMetaIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageMetaIOV2; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseListImpl; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler; @@ -101,6 +103,8 @@ private void init(GridKernalContext ctx) throws IgniteCheckedException { PageLockListener txLogLockLsnr = diagnosticMgr.pageLockTracker().createPageLockTracker(txLogName); + DataRegion txLogDataRegion = mgr.dataRegion(TX_LOG_CACHE_NAME); + if (CU.isPersistenceEnabled(ctx.config())) { String txLogReuseListName = TX_LOG_CACHE_NAME + "##ReuseList"; PageLockListener txLogReuseListLockLsnr = diagnosticMgr.pageLockTracker().createPageLockTracker(txLogReuseListName); @@ -109,9 +113,9 @@ private void init(GridKernalContext ctx) throws IgniteCheckedException { try { IgniteWriteAheadLogManager wal = ctx.cache().context().wal(); - PageMemoryEx pageMemory = 
(PageMemoryEx)mgr.dataRegion(TX_LOG_CACHE_NAME).pageMemory(); + PageMemoryEx pageMemory = (PageMemoryEx)txLogDataRegion.pageMemory(); - long metaId = pageMemory.metaPageId(TX_LOG_CACHE_ID); + long metaId = PageMemory.META_PAGE_ID; long metaPage = pageMemory.acquirePage(TX_LOG_CACHE_ID, metaId); long treeRoot, reuseListRoot; @@ -124,7 +128,7 @@ private void init(GridKernalContext ctx) throws IgniteCheckedException { try { if (PageIO.getType(pageAddr) != PageIO.T_META) { // Initialize new page. - PageMetaIO io = PageMetaIO.VERSIONS.latest(); + PageMetaIO io = PageMetaIOV2.VERSIONS.latest(); io.initNewPage(pageAddr, metaId, pageMemory.pageSize()); @@ -180,7 +184,8 @@ private void init(GridKernalContext ctx) throws IgniteCheckedException { isNew, txLogReuseListLockLsnr, ctx, - null + null, + FLAG_IDX ); tree = new TxLogTree( @@ -194,14 +199,14 @@ private void init(GridKernalContext ctx) throws IgniteCheckedException { txLogLockLsnr ); - ((GridCacheDatabaseSharedManager)mgr).addCheckpointListener(this); + ((GridCacheDatabaseSharedManager)mgr).addCheckpointListener(this, txLogDataRegion); } finally { mgr.checkpointReadUnlock(); } } else { - PageMemory pageMemory = mgr.dataRegion(TX_LOG_CACHE_NAME).pageMemory(); + PageMemory pageMemory = txLogDataRegion.pageMemory(); ReuseList reuseList1 = mgr.reuseList(TX_LOG_CACHE_NAME); long treeRoot; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLogTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLogTree.java index c8509283976f08..a4df8bd0e888f8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLogTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLogTree.java @@ -19,6 +19,7 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.pagemem.PageIdAllocator; import 
org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree; @@ -61,6 +62,7 @@ public TxLogTree( reuseList, TxLogInnerIO.VERSIONS, TxLogLeafIO.VERSIONS, + PageIdAllocator.FLAG_IDX, failureProcessor, lockLsnr ); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CheckCorruptedCacheStoresCleanAction.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CheckCorruptedCacheStoresCleanAction.java new file mode 100644 index 00000000000000..2073b9bc3ebeba --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CheckCorruptedCacheStoresCleanAction.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence; + +import java.io.File; + +import org.apache.ignite.maintenance.MaintenanceAction; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.CACHE_DATA_FILENAME; + +/** */ +public class CheckCorruptedCacheStoresCleanAction implements MaintenanceAction { + /** */ + public static final String ACTION_NAME = "check_cache_files_cleaned"; + + /** */ + private final File rootStoreDir; + + /** */ + private final String[] cacheStoreDirs; + + /** */ + public CheckCorruptedCacheStoresCleanAction(File rootStoreDir, String[] cacheStoreDirs) { + this.rootStoreDir = rootStoreDir; + this.cacheStoreDirs = cacheStoreDirs; + } + + /** {@inheritDoc} */ + @Override public Boolean execute() { + for (String cacheStoreDirName : cacheStoreDirs) { + File cacheStoreDir = new File(rootStoreDir, cacheStoreDirName); + + if (cacheStoreDir.exists() && cacheStoreDir.isDirectory()) { + for (File f : cacheStoreDir.listFiles()) { + if (!f.getName().equals(CACHE_DATA_FILENAME)) + return Boolean.FALSE; + } + } + } + + return Boolean.TRUE; + } + + /** {@inheritDoc} */ + @Override public @NotNull String name() { + return ACTION_NAME; + } + + /** {@inheritDoc} */ + @Override public @Nullable String description() { + return "Checks if all corrupted data files are cleaned from cache store directories"; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CorruptedPdsMaintenanceCallback.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CorruptedPdsMaintenanceCallback.java index 52a8f6f45f15af..7dfbd5c6cd058c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CorruptedPdsMaintenanceCallback.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CorruptedPdsMaintenanceCallback.java @@ -20,7 +20,6 @@ import java.io.File; import java.util.Arrays; import java.util.List; - import org.apache.ignite.maintenance.MaintenanceAction; import org.apache.ignite.maintenance.MaintenanceWorkflowCallback; import org.jetbrains.annotations.NotNull; @@ -68,12 +67,14 @@ public CorruptedPdsMaintenanceCallback(@NotNull File workDir, } /** {@inheritDoc} */ - @Override public List allActions() { - return Arrays.asList(new CleanCacheStoresMaintenanceAction(workDir, cacheStoreDirs.toArray(new String[0]))); + @Override public List> allActions() { + return Arrays.asList( + new CleanCacheStoresMaintenanceAction(workDir, cacheStoreDirs.toArray(new String[0])), + new CheckCorruptedCacheStoresCleanAction(workDir, cacheStoreDirs.toArray(new String[0]))); } /** {@inheritDoc} */ - @Override public MaintenanceAction automaticAction() { + @Override public MaintenanceAction automaticAction() { return null; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsImpl.java index 059eb88b099c94..80006d8a1c2e22 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsImpl.java @@ -62,6 +62,9 @@ public class DataStorageMetricsImpl implements DataStorageMetricsMXBean { /** */ private final AtomicLongMetric lastCpDuration; + /** */ + private final AtomicLongMetric lastCpStart; + /** */ private final AtomicLongMetric lastCpFsyncDuration; @@ -163,6 +166,9 @@ public DataStorageMetricsImpl( lastCpDuration = mreg.longMetric("LastCheckpointDuration", "Duration of the last checkpoint in milliseconds."); + lastCpStart = 
mreg.longMetric("LastCheckpointStart", + "Start timestamp of the last checkpoint."); + lastCpFsyncDuration = mreg.longMetric("LastCheckpointFsyncDuration", "Duration of the sync phase of the last checkpoint in milliseconds."); @@ -250,6 +256,14 @@ public DataStorageMetricsImpl( return lastCpDuration.value(); } + /** {@inheritDoc} */ + @Override public long getLastCheckpointStarted() { + if (!metricsEnabled) + return 0; + + return lastCpStart.value(); + } + /** {@inheritDoc} */ @Override public long getLastCheckpointLockWaitDuration() { if (!metricsEnabled) @@ -597,6 +611,7 @@ public void onCheckpoint( long pagesWriteDuration, long fsyncDuration, long duration, + long start, long totalPages, long dataPages, long cowPages @@ -607,6 +622,7 @@ public void onCheckpoint( lastCpPagesWriteDuration.value(pagesWriteDuration); lastCpFsyncDuration.value(fsyncDuration); lastCpDuration.value(duration); + lastCpStart.value(start); lastCpTotalPages.value(totalPages); lastCpDataPages.value(dataPages); lastCpCowPages.value(cowPages); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsSnapshot.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsSnapshot.java index e1aa7469b44eeb..ddb5705c1029b5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsSnapshot.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsSnapshot.java @@ -43,6 +43,9 @@ public class DataStorageMetricsSnapshot implements DataStorageMetrics { /** */ private long lastCpDuration; + /** */ + private long lastCpStart; + /** */ private long lastCpLockWaitDuration; @@ -119,6 +122,7 @@ public DataStorageMetricsSnapshot(DataStorageMetrics metrics) { walFsyncTimeAvg = metrics.getWalFsyncTimeAverage(); walBuffPollSpinsNum = metrics.getWalBuffPollSpinsRate(); lastCpDuration = 
metrics.getLastCheckpointDuration(); + lastCpStart = metrics.getLastCheckpointStarted(); lastCpLockWaitDuration = metrics.getLastCheckpointLockWaitDuration(); lastCpMmarkDuration = metrics.getLastCheckpointMarkDuration(); lastCpPagesWriteDuration = metrics.getLastCheckpointPagesWriteDuration(); @@ -173,6 +177,11 @@ public DataStorageMetricsSnapshot(DataStorageMetrics metrics) { return lastCpDuration; } + /** {@inheritDoc} */ + @Override public long getLastCheckpointStarted() { + return lastCpStart; + } + /** {@inheritDoc} */ @Override public long getLastCheckpointLockWaitDuration() { return lastCpLockWaitDuration; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStructure.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStructure.java index 300e98ac6237c7..8814d181f009e2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStructure.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStructure.java @@ -29,6 +29,7 @@ import org.apache.ignite.internal.pagemem.wal.record.delta.RecycleRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.RotatedIdPartRecord; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseBag; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler; @@ -66,18 +67,29 @@ public abstract class DataStructure { /** */ protected ReuseList reuseList; + /** */ + protected final PageIoResolver pageIoRslvr; + + /** */ + protected final byte pageFlag; + /** * @param cacheGrpId Cache group ID. * @param grpName Cache group name. * @param pageMem Page memory. 
* @param wal Write ahead log manager. + * @param lockLsnr Page lock listener. + * @param pageIoRslvr Page IO resolver. + * @param pageFlag Default flag value for allocated pages. */ public DataStructure( int cacheGrpId, String grpName, PageMemory pageMem, IgniteWriteAheadLogManager wal, - PageLockListener lockLsnr + PageLockListener lockLsnr, + PageIoResolver pageIoRslvr, + byte pageFlag ) { assert pageMem != null; @@ -86,6 +98,8 @@ public DataStructure( this.pageMem = pageMem; this.wal = wal; this.lockLsnr = lockLsnr == null ? NOOP_LSNR : lockLsnr; + this.pageIoRslvr = pageIoRslvr; + this.pageFlag = pageFlag; } /** @@ -123,16 +137,30 @@ protected final long allocatePage(ReuseBag bag) throws IgniteCheckedException { * @throws IgniteCheckedException If failed. */ protected final long allocatePage(ReuseBag bag, boolean useRecycled) throws IgniteCheckedException { - long pageId = bag != null ? bag.pollFreePage() : 0; + long pageId = 0; + + if (useRecycled && reuseList != null) { + pageId = bag != null ? bag.pollFreePage() : 0; - if (pageId == 0 && useRecycled && reuseList != null) - pageId = reuseList.takeRecycledPage(); + if (pageId == 0) + pageId = reuseList.takeRecycledPage(); + + // Recycled. "pollFreePage" result should be reinitialized to move rotatedId to itemId. 
+ if (pageId != 0) + pageId = reuseList.initRecycledPage(pageId, pageFlag, null); + } if (pageId == 0) pageId = allocatePageNoReuse(); assert pageId != 0; + assert PageIdUtils.flag(pageId) == FLAG_IDX && PageIdUtils.partId(pageId) == INDEX_PARTITION || + PageIdUtils.flag(pageId) != FLAG_IDX && PageIdUtils.partId(pageId) <= MAX_PARTITION_ID : + PageIdUtils.toDetailString(pageId); + + assert PageIdUtils.flag(pageId) != FLAG_DATA || PageIdUtils.itemId(pageId) == 0 : PageIdUtils.toDetailString(pageId); + return pageId; } @@ -152,7 +180,7 @@ protected long allocatePageNoReuse() throws IgniteCheckedException { */ protected final long acquirePage(long pageId, IoStatisticsHolder statHolder) throws IgniteCheckedException { assert PageIdUtils.flag(pageId) == FLAG_IDX && PageIdUtils.partId(pageId) == INDEX_PARTITION || - PageIdUtils.flag(pageId) == FLAG_DATA && PageIdUtils.partId(pageId) <= MAX_PARTITION_ID : + PageIdUtils.flag(pageId) != FLAG_IDX && PageIdUtils.partId(pageId) <= MAX_PARTITION_ID : U.hexLong(pageId) + " flag=" + PageIdUtils.flag(pageId) + " part=" + PageIdUtils.partId(pageId); return pageMem.acquirePage(grpId, pageId, statHolder); @@ -251,7 +279,7 @@ protected final R write( R lockFailed, IoStatisticsHolder statHolder) throws IgniteCheckedException { return PageHandler.writePage(pageMem, grpId, pageId, lockLsnr, h, - null, null, null, null, intArg, lockFailed, statHolder); + null, null, null, null, intArg, lockFailed, statHolder, pageIoRslvr); } /** @@ -272,7 +300,7 @@ protected final R write( R lockFailed, IoStatisticsHolder statHolder) throws IgniteCheckedException { return PageHandler.writePage(pageMem, grpId, pageId, lockLsnr, h, - null, null, null, arg, intArg, lockFailed, statHolder); + null, null, null, arg, intArg, lockFailed, statHolder, pageIoRslvr); } /** @@ -295,7 +323,7 @@ protected final R write( R lockFailed, IoStatisticsHolder statHolder) throws IgniteCheckedException { return PageHandler.writePage(pageMem, grpId, pageId, page, lockLsnr, h, - 
null, null, null, arg, intArg, lockFailed, statHolder); + null, null, null, arg, intArg, lockFailed, statHolder, pageIoRslvr); } /** @@ -318,7 +346,7 @@ protected final R write( R lockFailed, IoStatisticsHolder statHolder) throws IgniteCheckedException { return PageHandler.writePage(pageMem, grpId, pageId, lockLsnr, h, - init, wal, null, arg, intArg, lockFailed, statHolder); + init, wal, null, arg, intArg, lockFailed, statHolder, pageIoRslvr); } /** @@ -339,7 +367,7 @@ protected final R read( R lockFailed, IoStatisticsHolder statHolder) throws IgniteCheckedException { return PageHandler.readPage(pageMem, grpId, pageId, lockLsnr, - h, arg, intArg, lockFailed, statHolder); + h, arg, intArg, lockFailed, statHolder, pageIoRslvr); } /** @@ -362,7 +390,7 @@ protected final R read( R lockFailed, IoStatisticsHolder statHolder) throws IgniteCheckedException { return PageHandler.readPage(pageMem, grpId, pageId, page, lockLsnr, h, - arg, intArg, lockFailed, statHolder); + arg, intArg, lockFailed, statHolder, pageIoRslvr); } /** @@ -395,7 +423,7 @@ protected final long recyclePage( int rotatedIdPart = PageIO.getRotatedIdPart(pageAddr); if (rotatedIdPart != 0) { - recycled = PageIdUtils.link(pageId, rotatedIdPart > MAX_ITEMID_NUM ? 
1 : rotatedIdPart); + recycled = PageIdUtils.link(pageId, rotatedIdPart); PageIO.setRotatedIdPart(pageAddr, 0); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java index 50224f38629b5f..2c366ebba3f6ce 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java @@ -50,8 +50,8 @@ import java.util.function.Consumer; import java.util.function.Predicate; import java.util.function.ToLongFunction; +import java.util.regex.Pattern; import java.util.stream.Collectors; - import org.apache.ignite.DataRegionMetricsProvider; import org.apache.ignite.DataStorageMetrics; import org.apache.ignite.IgniteCheckedException; @@ -85,12 +85,13 @@ import org.apache.ignite.internal.pagemem.wal.record.CheckpointRecord; import org.apache.ignite.internal.pagemem.wal.record.DataEntry; import org.apache.ignite.internal.pagemem.wal.record.DataRecord; -import org.apache.ignite.internal.pagemem.wal.record.MasterKeyChangeRecord; +import org.apache.ignite.internal.pagemem.wal.record.MasterKeyChangeRecordV2; import org.apache.ignite.internal.pagemem.wal.record.MemoryRecoveryRecord; import org.apache.ignite.internal.pagemem.wal.record.MetastoreDataRecord; import org.apache.ignite.internal.pagemem.wal.record.MvccDataEntry; import org.apache.ignite.internal.pagemem.wal.record.MvccTxRecord; import org.apache.ignite.internal.pagemem.wal.record.PageSnapshot; +import org.apache.ignite.internal.pagemem.wal.record.ReencryptionStartRecord; import org.apache.ignite.internal.pagemem.wal.record.RollbackRecord; import org.apache.ignite.internal.pagemem.wal.record.WALRecord; import 
org.apache.ignite.internal.pagemem.wal.record.WalRecordCacheGroupAware; @@ -116,7 +117,11 @@ import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointProgress; import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointStatus; import org.apache.ignite.internal.processors.cache.persistence.checkpoint.Checkpointer; +import org.apache.ignite.internal.processors.cache.persistence.checkpoint.LightweightCheckpointManager; import org.apache.ignite.internal.processors.cache.persistence.checkpoint.ReservationReason; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.CachePartitionDefragmentationManager; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationPageReadWriteManager; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.maintenance.DefragmentationWorkflowCallback; import org.apache.ignite.internal.processors.cache.persistence.file.FileIO; import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory; import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStore; @@ -125,6 +130,7 @@ import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetastorageLifecycleListener; import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryImpl; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageReadWriteManager; import org.apache.ignite.internal.processors.cache.persistence.partstate.GroupPartitionId; import org.apache.ignite.internal.processors.cache.persistence.snapshot.IgniteCacheSnapshotManager; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; @@ -134,6 +140,7 @@ import org.apache.ignite.internal.processors.compress.CompressionProcessor; import org.apache.ignite.internal.processors.port.GridPortRecord; import 
org.apache.ignite.internal.processors.query.GridQueryProcessor; +import org.apache.ignite.internal.util.GridConcurrentHashSet; import org.apache.ignite.internal.util.GridCountDownCallback; import org.apache.ignite.internal.util.IgniteUtils; import org.apache.ignite.internal.util.StripedExecutor; @@ -142,6 +149,7 @@ import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.SB; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiPredicate; @@ -159,6 +167,7 @@ import static java.util.Objects.nonNull; import static java.util.function.Function.identity; +import static org.apache.ignite.IgniteSystemProperties.IGNITE_DEFRAGMENTATION_REGION_SIZE_PERCENTAGE; import static org.apache.ignite.IgniteSystemProperties.IGNITE_PDS_WAL_REBALANCE_THRESHOLD; import static org.apache.ignite.IgniteSystemProperties.IGNITE_PREFER_WAL_REBALANCE; import static org.apache.ignite.IgniteSystemProperties.IGNITE_RECOVERY_SEMAPHORE_PERMITS; @@ -168,13 +177,17 @@ import static org.apache.ignite.internal.pagemem.PageIdUtils.partId; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.CHECKPOINT_RECORD; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.MASTER_KEY_CHANGE_RECORD; +import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.MASTER_KEY_CHANGE_RECORD_V2; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.METASTORE_DATA_RECORD; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.OWNING; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.fromOrdinal; import static 
org.apache.ignite.internal.processors.cache.persistence.CheckpointState.FINISHED; import static org.apache.ignite.internal.processors.cache.persistence.CheckpointState.LOCK_RELEASED; import static org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointReadWriteLock.CHECKPOINT_LOCK_HOLD_COUNT; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.CachePartitionDefragmentationManager.DEFRAGMENTATION_MNTC_TASK_NAME; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.maintenance.DefragmentationParameters.fromStore; import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.CORRUPTED_DATA_FILES_MNTC_TASK_NAME; +import static org.apache.ignite.internal.util.IgniteUtils.GB; import static org.apache.ignite.internal.util.IgniteUtils.checkpointBufferSize; /** @@ -203,6 +216,12 @@ public class GridCacheDatabaseSharedManager extends IgniteCacheDatabaseSharedMan /** Description of the system view for a {@link MetaStorage}. */ public static final String METASTORE_VIEW_DESC = "Local metastorage data"; + /** */ + public static final String DEFRAGMENTATION_PART_REGION_NAME = "defragPartitionsDataRegion"; + + /** */ + public static final String DEFRAGMENTATION_MAPPING_REGION_NAME = "defragMappingDataRegion"; + /** * Threshold to calculate limit for pages list on-heap caches. *

    @@ -219,6 +238,9 @@ public class GridCacheDatabaseSharedManager extends IgniteCacheDatabaseSharedMan /** @see IgniteSystemProperties#IGNITE_PDS_WAL_REBALANCE_THRESHOLD */ public static final int DFLT_PDS_WAL_REBALANCE_THRESHOLD = 500; + /** @see IgniteSystemProperties#IGNITE_DEFRAGMENTATION_REGION_SIZE_PERCENTAGE */ + public static final int DFLT_DEFRAGMENTATION_REGION_SIZE_PERCENTAGE = 60; + /** */ private final int walRebalanceThreshold = getInteger(IGNITE_PDS_WAL_REBALANCE_THRESHOLD, DFLT_PDS_WAL_REBALANCE_THRESHOLD); @@ -230,6 +252,10 @@ public class GridCacheDatabaseSharedManager extends IgniteCacheDatabaseSharedMan private final String throttlingPolicyOverride = IgniteSystemProperties.getString( IgniteSystemProperties.IGNITE_OVERRIDE_WRITE_THROTTLING_ENABLED); + /** Defragmentation regions size percentage of configured ones. */ + private final int defragmentationRegionSizePercentageOfConfiguredSize = + getInteger(IGNITE_DEFRAGMENTATION_REGION_SIZE_PERCENTAGE, DFLT_DEFRAGMENTATION_REGION_SIZE_PERCENTAGE); + /** */ private static final String MBEAN_NAME = "DataStorageMetrics"; @@ -314,6 +340,12 @@ public class GridCacheDatabaseSharedManager extends IgniteCacheDatabaseSharedMan /** Lock for releasing history for preloading. */ private ReentrantLock releaseHistForPreloadingLock = new ReentrantLock(); + /** */ + private CachePartitionDefragmentationManager defrgMgr; + + /** Data regions which should be checkpointed. */ + protected final Set checkpointedDataRegions = new GridConcurrentHashSet<>(); + /** * @param ctx Kernal context. 
*/ @@ -440,6 +472,32 @@ private DataRegionConfiguration createMetastoreDataRegionConfig(DataStorageConfi return cfg; } + /** */ + private DataRegionConfiguration createDefragmentationDataRegionConfig(long regionSize) { + DataRegionConfiguration cfg = new DataRegionConfiguration(); + + cfg.setName(DEFRAGMENTATION_PART_REGION_NAME); + cfg.setInitialSize(regionSize); + cfg.setMaxSize(regionSize); + cfg.setPersistenceEnabled(true); + cfg.setLazyMemoryAllocation(false); + + return cfg; + } + + /** */ + private DataRegionConfiguration createDefragmentationMappingRegionConfig(long regionSize) { + DataRegionConfiguration cfg = new DataRegionConfiguration(); + + cfg.setName(DEFRAGMENTATION_MAPPING_REGION_NAME); + cfg.setInitialSize(regionSize); + cfg.setMaxSize(regionSize); + cfg.setPersistenceEnabled(true); + cfg.setLazyMemoryAllocation(false); + + return cfg; + } + /** {@inheritDoc} */ @Override protected void start0() throws IgniteCheckedException { super.start0(); @@ -468,7 +526,7 @@ private DataRegionConfiguration createMetastoreDataRegionConfig(DataStorageConfi persistenceCfg, storeMgr, this::isCheckpointInapplicableForWalRebalance, - this::dataRegions, + this::checkpointedDataRegions, this::cacheGroupContexts, this::getPageMemoryForCacheGroup, resolveThrottlingPolicy(), @@ -490,6 +548,104 @@ private DataRegionConfiguration createMetastoreDataRegionConfig(DataStorageConfi } } + /** {@inheritDoc} */ + @Override protected void initDataRegions(DataStorageConfiguration memCfg) throws IgniteCheckedException { + if (isDefragmentationScheduled() && !dataRegionsInitialized) { + //Region size configuration will be changed for defragmentation needs. + memCfg = configureDataRegionForDefragmentation(memCfg); + } + + super.initDataRegions(memCfg); + } + + /** + * Configure data regions: + *

    Size of configured cache data regions will be decreased in order of freeing space for

    + *

    defragmentation needs. * New defragmentation regions will be created which size would be based on freed space + * from previous step.

    + * + * @param memCfg Data storage configuration with data region configurations. + * @return New data storage configuration which contains data regions with changed size. + * @throws IgniteCheckedException If fail. + */ + private DataStorageConfiguration configureDataRegionForDefragmentation( + DataStorageConfiguration memCfg + ) throws IgniteCheckedException { + List regionConfs = new ArrayList<>(); + + DataStorageConfiguration dataConf = memCfg;//not do the changes in-place it's better to make the copy of memCfg. + + regionConfs.add(dataConf.getDefaultDataRegionConfiguration()); + + if (dataConf.getDataRegionConfigurations() != null) + regionConfs.addAll(Arrays.asList(dataConf.getDataRegionConfigurations())); + + long totalDefrRegionSize = 0; + long totalRegionsSize = 0; + + for (DataRegionConfiguration regionCfg : regionConfs) { + totalDefrRegionSize = Math.max( + totalDefrRegionSize, + (long)(regionCfg.getMaxSize() * 0.01 * defragmentationRegionSizePercentageOfConfiguredSize) + ); + + totalRegionsSize += regionCfg.getMaxSize(); + } + + double shrinkPercentage = 1d * (totalRegionsSize - totalDefrRegionSize) / totalRegionsSize; + + for (DataRegionConfiguration region : regionConfs) { + long newSize = (long)(region.getMaxSize() * shrinkPercentage); + long newInitSize = Math.min(region.getInitialSize(), newSize); + + log.info("Region size was reassigned by defragmentation reason: " + + "region = '" + region.getName() + "', " + + "oldInitialSize = '" + region.getInitialSize() + "', " + + "newInitialSize = '" + newInitSize + "', " + + "oldMaxSize = '" + region.getMaxSize() + "', " + + "newMaxSize = '" + newSize + ); + + region.setMaxSize(newSize); + region.setInitialSize(newInitSize); + region.setCheckpointPageBufferSize(0); + } + + long mappingRegionSize = Math.min(GB, (long)(totalDefrRegionSize * 0.1)); + + checkpointedDataRegions.remove( + addDataRegion( + memCfg, + createDefragmentationDataRegionConfig(totalDefrRegionSize - mappingRegionSize), + true, + new 
DefragmentationPageReadWriteManager(cctx.kernalContext(), "defrgPartitionsStore") + ) + ); + + checkpointedDataRegions.remove( + addDataRegion( + memCfg, + createDefragmentationMappingRegionConfig(mappingRegionSize), + true, + new DefragmentationPageReadWriteManager(cctx.kernalContext(), "defrgLinkMappingStore") + ) + ); + + return dataConf; + } + + /** + * @return {@code true} if maintenance mode is on and defragmentation task exists. + */ + private boolean isDefragmentationScheduled() { + return cctx.kernalContext().maintenanceRegistry().activeMaintenanceTask(DEFRAGMENTATION_MNTC_TASK_NAME) != null; + } + + /** */ + public Collection checkpointedDataRegions() { + return checkpointedDataRegions; + } + /** */ private Collection cacheGroupContexts() { return cctx.cache().cacheGroups(); @@ -591,6 +747,57 @@ private void releaseFileLock() { fileLockHolder.close(); } + /** */ + private void prepareCacheDefragmentation(List cacheGroupIds) throws IgniteCheckedException { + GridKernalContext kernalCtx = cctx.kernalContext(); + DataStorageConfiguration dsCfg = kernalCtx.config().getDataStorageConfiguration(); + + assert CU.isPersistenceEnabled(dsCfg); + + List regions = Arrays.asList( + dataRegion(DEFRAGMENTATION_MAPPING_REGION_NAME), + dataRegion(DEFRAGMENTATION_PART_REGION_NAME) + ); + + LightweightCheckpointManager lightCheckpointMgr = new LightweightCheckpointManager( + kernalCtx::log, + cctx.igniteInstanceName(), + "db-checkpoint-thread-defrag", + kernalCtx.workersRegistry(), + persistenceCfg, + () -> regions, + this::getPageMemoryForCacheGroup, + resolveThrottlingPolicy(), + snapshotMgr, + persistentStoreMetricsImpl(), + kernalCtx.longJvmPauseDetector(), + kernalCtx.failure(), + kernalCtx.cache() + ); + + lightCheckpointMgr.start(); + + defrgMgr = new CachePartitionDefragmentationManager( + cacheGroupIds, + cctx, + this, + (FilePageStoreManager)cctx.pageStore(), + checkpointManager, + lightCheckpointMgr, + persistenceCfg.getPageSize() + ); + } + + /** {@inheritDoc} 
*/ + @Override public DataRegion addDataRegion(DataStorageConfiguration dataStorageCfg, DataRegionConfiguration dataRegionCfg, + boolean trackable, PageReadWriteManager pmPageMgr) throws IgniteCheckedException { + DataRegion region = super.addDataRegion(dataStorageCfg, dataRegionCfg, trackable, pmPageMgr); + + checkpointedDataRegions.add(region); + + return region; + } + /** */ private void readMetastore() throws IgniteCheckedException { try { @@ -614,6 +821,16 @@ private void readMetastore() throws IgniteCheckedException { registerSystemView(); notifyMetastorageReadyForRead(); + + cctx.kernalContext().maintenanceRegistry() + .registerWorkflowCallbackIfTaskExists( + DEFRAGMENTATION_MNTC_TASK_NAME, + task -> { + prepareCacheDefragmentation(fromStore(task).cacheGroupIds()); + + return new DefragmentationWorkflowCallback(cctx.kernalContext()::log, defrgMgr); + } + ); } finally { metaStorage = null; @@ -769,7 +986,7 @@ private void finishRecovery() throws IgniteCheckedException { walTail = CheckpointStatus.NULL_PTR.equals(status.endPtr) ? 
null : status.endPtr; } - cctx.wal().resumeLogging(walTail); + resumeWalLogging(); walTail = null; @@ -926,10 +1143,11 @@ private long[] calculateFragmentSizes(int concLvl, long cacheSize, long chpBufSi DataStorageConfiguration memCfg, DataRegionConfiguration plcCfg, DataRegionMetricsImpl memMetrics, - final boolean trackable + final boolean trackable, + PageReadWriteManager pmPageMgr ) { if (!plcCfg.isPersistenceEnabled()) - return super.createPageMemory(memProvider, memCfg, plcCfg, memMetrics, trackable); + return super.createPageMemory(memProvider, memCfg, plcCfg, memMetrics, trackable, pmPageMgr); memMetrics.persistenceEnabled(true); @@ -970,6 +1188,7 @@ private long[] calculateFragmentSizes(int concLvl, long cacheSize, long chpBufSi chpBufSize ), cctx, + pmPageMgr, memCfg.getPageSize(), (fullId, pageBuf, tag) -> { memMetrics.onPageWritten(); @@ -978,7 +1197,7 @@ private long[] calculateFragmentSizes(int concLvl, long cacheSize, long chpBufSi snapshotMgr.beforePageWrite(fullId); // Write page to disk. - storeMgr.write(fullId.groupId(), fullId.pageId(), pageBuf, tag); + pmPageMgr.write(fullId.groupId(), fullId.pageId(), pageBuf, tag, true); getCheckpointer().currentProgress().updateEvictedPages(1); }, @@ -1255,6 +1474,9 @@ private String cacheInfo(GridCacheContext cacheCtx) { grpIds.add(tup.get1().groupId()); + if (gctx.config().isEncryptionEnabled()) + cctx.kernalContext().encryption().onCacheGroupStop(gctx.groupId()); + pageMem.onCacheGroupDestroyed(tup.get1().groupId()); if (tup.get2()) @@ -1296,6 +1518,7 @@ private String cacheInfo(GridCacheContext cacheCtx) { /** * Gets the checkpoint read lock. While this lock is held, checkpoint thread will not acquireSnapshotWorker memory * state. + * * @throws IgniteException If failed. */ @Override public void checkpointReadLock() { @@ -1551,11 +1774,19 @@ public File checkpointDirectory() { return checkpointManager.checkpointDirectory(); } + /** + * @param lsnr Listener. 
+ * @param dataRegion Data region for which listener is corresponded to. + */ + public void addCheckpointListener(CheckpointListener lsnr, DataRegion dataRegion) { + checkpointManager.addCheckpointListener(lsnr, dataRegion); + } + /** * @param lsnr Listener. */ public void addCheckpointListener(CheckpointListener lsnr) { - checkpointManager.addCheckpointListener(lsnr); + checkpointManager.addCheckpointListener(lsnr, null); } /** @@ -1590,7 +1821,7 @@ private CheckpointStatus readCheckpointStatus() throws IgniteCheckedException { mntcRegistry.registerWorkflowCallback(CORRUPTED_DATA_FILES_MNTC_TASK_NAME, new CorruptedPdsMaintenanceCallback(workDir, - Arrays.asList(mntcTask.parameters().split(File.separator))) + Arrays.asList(mntcTask.parameters().split(Pattern.quote(File.separator)))) ); return; @@ -1598,6 +1829,8 @@ private CheckpointStatus readCheckpointStatus() throws IgniteCheckedException { checkpointReadLock(); + RestoreLogicalState logicalState; + try { // Preform early regions startup before restoring state. 
initAndStartRegions(kctx.config().getDataStorageConfiguration()); @@ -1620,7 +1853,7 @@ private CheckpointStatus readCheckpointStatus() throws IgniteCheckedException { CheckpointStatus status = readCheckpointStatus(); - RestoreLogicalState logicalState = applyLogicalUpdates( + logicalState = applyLogicalUpdates( status, groupsWithEnabledWal(), logicalRecords(), @@ -1634,10 +1867,6 @@ private CheckpointStatus readCheckpointStatus() throws IgniteCheckedException { } startTimer.finishGlobalStage("Restore logical state"); - - walTail = tailPointer(logicalState); - - cctx.wal().onDeActivate(kctx); } catch (IgniteCheckedException e) { releaseFileLock(); @@ -1647,6 +1876,15 @@ private CheckpointStatus readCheckpointStatus() throws IgniteCheckedException { finally { checkpointReadUnlock(); } + + walTail = tailPointer(logicalState); + + cctx.wal().onDeActivate(kctx); + } + + /** */ + public void resumeWalLogging() throws IgniteCheckedException { + cctx.wal().resumeLogging(walTail); } /** @@ -2347,6 +2585,7 @@ private RestoreLogicalState applyLogicalUpdates( case MVCC_DATA_RECORD: case DATA_RECORD: case ENCRYPTED_DATA_RECORD: + case ENCRYPTED_DATA_RECORD_V2: DataRecord dataRec = (DataRecord)rec; for (DataEntry dataEntry : dataRec.writeEntries()) { @@ -2427,8 +2666,13 @@ private RestoreLogicalState applyLogicalUpdates( break; - case MASTER_KEY_CHANGE_RECORD: - cctx.kernalContext().encryption().applyKeys((MasterKeyChangeRecord)rec); + case MASTER_KEY_CHANGE_RECORD_V2: + cctx.kernalContext().encryption().applyKeys((MasterKeyChangeRecordV2)rec); + + break; + + case REENCRYPTION_START_RECORD: + cctx.kernalContext().encryption().applyReencryptionStartRecord((ReencryptionStartRecord)rec); break; @@ -3003,7 +3247,8 @@ else if (key.startsWith(WAL_GLOBAL_KEY_PREFIX)) * @param log Logger. * @throws IgniteCheckedException If failed. 
*/ - private static void dumpPartitionsInfo(GridCacheSharedContext cctx, IgniteLogger log) throws IgniteCheckedException { + private static void dumpPartitionsInfo(GridCacheSharedContext cctx, + IgniteLogger log) throws IgniteCheckedException { for (CacheGroupContext grp : cctx.cache().cacheGroups()) { if (grp.isLocal() || !grp.persistenceEnabled()) continue; @@ -3125,7 +3370,8 @@ private IgnitePredicate groupsWithEnabledWal() { * @return WAL records predicate that passes only Metastorage and encryption data records. */ private IgniteBiPredicate onlyMetastorageAndEncryptionRecords() { - return (type, ptr) -> type == METASTORE_DATA_RECORD || type == MASTER_KEY_CHANGE_RECORD; + return (type, ptr) -> type == METASTORE_DATA_RECORD || + type == MASTER_KEY_CHANGE_RECORD || type == MASTER_KEY_CHANGE_RECORD_V2; } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java index 1fba65daf224a3..50c3039d76edd7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java @@ -40,6 +40,8 @@ import org.apache.ignite.SystemProperty; import org.apache.ignite.failure.FailureContext; import org.apache.ignite.failure.FailureType; +import org.apache.ignite.internal.managers.encryption.GridEncryptionManager; +import org.apache.ignite.internal.managers.encryption.ReencryptStateUtils; import org.apache.ignite.internal.pagemem.FullPageId; import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageIdUtils; @@ -55,7 +57,8 @@ import org.apache.ignite.internal.pagemem.wal.record.RollbackRecord; import org.apache.ignite.internal.pagemem.wal.record.WALRecord; import 
org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRecord; -import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdatePartitionDataRecordV2; +import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateIndexDataRecord; +import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdatePartitionDataRecordV3; import org.apache.ignite.internal.pagemem.wal.record.delta.PartitionDestroyRecord; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.CacheDiagnosticManager; @@ -65,6 +68,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.GridCacheEntryEx; import org.apache.ignite.internal.processors.cache.GridCacheMvccEntryInfo; +import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.GridCacheTtlManager; import org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManagerImpl; import org.apache.ignite.internal.processors.cache.KeyCacheObject; @@ -88,11 +92,13 @@ import org.apache.ignite.internal.processors.cache.persistence.partstate.PartitionAllocationMap; import org.apache.ignite.internal.processors.cache.persistence.partstorage.PartitionMetaStorage; import org.apache.ignite.internal.processors.cache.persistence.partstorage.PartitionMetaStorageImpl; +import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageMetaIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageMetaIOV2; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PagePartitionCountersIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PagePartitionMetaIO; -import 
org.apache.ignite.internal.processors.cache.persistence.tree.io.PagePartitionMetaIOV2; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PagePartitionMetaIOV3; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseListImpl; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler; @@ -106,6 +112,7 @@ import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; import org.apache.ignite.internal.processors.query.GridQueryRowCacheCleaner; import org.apache.ignite.internal.util.GridLongList; +import org.apache.ignite.internal.util.GridSpinBusyLock; import org.apache.ignite.internal.util.lang.GridCursor; import org.apache.ignite.internal.util.lang.IgniteInClosure2X; import org.apache.ignite.internal.util.lang.IgnitePredicateX; @@ -199,7 +206,8 @@ public class GridCacheOffheapManager extends IgniteCacheOffheapManagerImpl imple reuseListRoot.isAllocated(), diagnosticMgr.pageLockTracker().createPageLockTracker(reuseListName), ctx.kernalContext(), - pageListCacheLimit + pageListCacheLimit, + PageIdAllocator.FLAG_IDX ); RootPage metastoreRoot = metas.treeRoot; @@ -221,7 +229,7 @@ public class GridCacheOffheapManager extends IgniteCacheOffheapManagerImpl imple persStoreMetrics = databaseSharedManager.persistentStoreMetricsImpl(); - databaseSharedManager.addCheckpointListener(this); + databaseSharedManager.addCheckpointListener(this, grp.dataRegion()); } /** @@ -239,7 +247,7 @@ public IndexStorage getIndexStorage() { boolean exists = ctx.pageStore() != null && ctx.pageStore().exists(grp.groupId(), p); - return new GridCacheDataStore(p, exists); + return createGridCacheDataStore(grp, p, exists, log); } /** {@inheritDoc} */ @@ -362,6 +370,9 @@ private void syncMetadata(Context ctx, Executor execSvc, boolean needSnapshot) t } }); } + + if (grp.config().isEncryptionEnabled()) + 
saveIndexReencryptionStatus(grp.groupId()); } /** @@ -389,8 +400,10 @@ private void saveStoreMetadata( PageMemoryEx pageMem = (PageMemoryEx)grp.dataRegion().pageMemory(); IgniteWriteAheadLogManager wal = this.ctx.wal(); + GridEncryptionManager encMgr = this.ctx.kernalContext().encryption(); - if (size > 0 || updCntr > 0 || !store.partUpdateCounter().sequential()) { + if (size > 0 || updCntr > 0 || !store.partUpdateCounter().sequential() || + (grp.config().isEncryptionEnabled() && encMgr.getEncryptionState(grp.groupId(), store.partId()) > 0)) { GridDhtPartitionState state = null; // localPartition will not acquire writeLock here because create=false. @@ -429,7 +442,7 @@ private void saveStoreMetadata( boolean changed = false; try { - PagePartitionMetaIOV2 io = PageIO.getPageIO(partMetaPageAddr); + PagePartitionMetaIOV3 io = PageIO.getPageIO(partMetaPageAddr); long link = io.getGapsLink(partMetaPageAddr); @@ -474,6 +487,27 @@ else if (updCntrsBytes != null && link != 0) { changed |= io.setGlobalRemoveId(partMetaPageAddr, rmvId); changed |= io.setSize(partMetaPageAddr, size); + int encryptIdx = 0; + int encryptCnt = 0; + + if (grp.config().isEncryptionEnabled()) { + long reencryptState = encMgr.getEncryptionState(grpId, store.partId()); + + if (reencryptState != 0) { + encryptIdx = ReencryptStateUtils.pageIndex(reencryptState); + encryptCnt = ReencryptStateUtils.pageCount(reencryptState); + + if (encryptIdx == encryptCnt) { + encMgr.setEncryptionState(grp, store.partId(), 0, 0); + + encryptIdx = encryptCnt = 0; + } + + changed |= io.setEncryptedPageIndex(partMetaPageAddr, encryptIdx); + changed |= io.setEncryptedPageCount(partMetaPageAddr, encryptCnt); + } + } + if (state != null) changed |= io.setPartitionState(partMetaPageAddr, (byte)state.ordinal()); else @@ -541,7 +575,7 @@ else if (state == MOVING || state == RENTING) { pageCnt = io.getCandidatePageCount(partMetaPageAddr); if (changed && PageHandler.isWalDeltaRecordNeeded(pageMem, grpId, partMetaId, partMetaPage, 
wal, null)) - wal.log(new MetaPageUpdatePartitionDataRecordV2( + wal.log(new MetaPageUpdatePartitionDataRecordV3( grpId, partMetaId, updCntr, @@ -550,7 +584,9 @@ else if (state == MOVING || state == RENTING) { cntrsPageId, state == null ? -1 : (byte)state.ordinal(), pageCnt, - link + link, + encryptIdx, + encryptCnt )); } finally { @@ -734,7 +770,7 @@ private GridDhtLocalPartition getPartition(CacheDataStore store) { * return null if counter page does not exist. * @throws IgniteCheckedException If page memory operation failed. */ - @Nullable private static Map readSharedGroupCacheSizes(PageSupport pageMem, int grpId, + @Nullable public static Map readSharedGroupCacheSizes(PageSupport pageMem, int grpId, long cntrsPageId) throws IgniteCheckedException { if (cntrsPageId == 0L) @@ -785,7 +821,7 @@ private GridDhtLocalPartition getPartition(CacheDataStore store) { * @return new counter page Id. Same as {@code cntrsPageId} or new value if cache size pages were initialized. * @throws IgniteCheckedException if page memory operation failed. 
*/ - private static long writeSharedGroupCacheSizes(PageMemory pageMem, int grpId, + public static long writeSharedGroupCacheSizes(PageMemory pageMem, int grpId, long cntrsPageId, int partId, Map sizes) throws IgniteCheckedException { byte[] data = PagePartitionCountersIO.VERSIONS.latest().serializeCacheSizes(sizes); @@ -793,7 +829,7 @@ private static long writeSharedGroupCacheSizes(PageMemory pageMem, int grpId, boolean init = cntrsPageId == 0; if (init && !sizes.isEmpty()) - cntrsPageId = pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_DATA); + cntrsPageId = pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_AUX); long nextId = cntrsPageId; int written = 0; @@ -824,7 +860,7 @@ private static long writeSharedGroupCacheSizes(PageMemory pageMem, int grpId, if (written != items && (init = nextId == 0)) { //allocate new counters page - nextId = pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_DATA); + nextId = pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_AUX); partCntrIo.setNextCountersPageId(curAddr, nextId); } } @@ -848,7 +884,7 @@ private void addPartitions(Context ctx) throws IgniteCheckedException { int grpId = grp.groupId(); PageMemoryEx pageMem = (PageMemoryEx)grp.dataRegion().pageMemory(); - long metaPageId = pageMem.metaPageId(grpId); + long metaPageId = PageMemory.META_PAGE_ID; long metaPage = pageMem.acquirePage(grpId, metaPageId); try { @@ -949,6 +985,9 @@ private static boolean addPartition( public void destroyPartitionStore(int grpId, int partId) throws IgniteCheckedException { PageMemoryEx pageMemory = (PageMemoryEx)grp.dataRegion().pageMemory(); + if (grp.config().isEncryptionEnabled()) + ctx.kernalContext().encryption().onDestroyPartitionStore(grp, partId); + int tag = pageMemory.invalidate(grp.groupId(), partId); if (grp.walEnabled()) @@ -1013,19 +1052,19 @@ private Metas getOrAllocateCacheMetas() throws IgniteCheckedException { IgniteWriteAheadLogManager wal = ctx.wal(); int grpId = grp.groupId(); - long metaId = 
pageMem.metaPageId(grpId); + long metaId = PageMemory.META_PAGE_ID; long metaPage = pageMem.acquirePage(grpId, metaId); try { final long pageAddr = pageMem.writeLock(grpId, metaId, metaPage); - boolean allocated = false; + boolean markDirty = false; try { long metastoreRoot, reuseListRoot; if (PageIO.getType(pageAddr) != PageIO.T_META) { - PageMetaIO pageIO = PageMetaIO.VERSIONS.latest(); + PageMetaIOV2 pageIO = (PageMetaIOV2)PageMetaIOV2.VERSIONS.latest(); pageIO.initNewPage(pageAddr, metaId, pageMem.realPageSize(grpId)); @@ -1048,25 +1087,38 @@ private Metas getOrAllocateCacheMetas() throws IgniteCheckedException { )); } - allocated = true; + markDirty = true; } else { - PageMetaIO pageIO = PageIO.getPageIO(pageAddr); + if (PageMetaIO.getVersion(pageAddr) < 2) { + ((PageMetaIOV2)PageMetaIOV2.VERSIONS.latest()).upgradePage(pageAddr); + + markDirty = true; + } + + PageMetaIOV2 pageIO = PageIO.getPageIO(pageAddr); metastoreRoot = pageIO.getTreeRoot(pageAddr); reuseListRoot = pageIO.getReuseListRoot(pageAddr); + int encrPageCnt = pageIO.getEncryptedPageCount(pageAddr); + + if (encrPageCnt > 0) { + ctx.kernalContext().encryption().setEncryptionState(grp, PageIdAllocator.INDEX_PARTITION, + pageIO.getEncryptedPageIndex(pageAddr), encrPageCnt); + } + assert reuseListRoot != 0L; } return new Metas( - new RootPage(new FullPageId(metastoreRoot, grpId), allocated), - new RootPage(new FullPageId(reuseListRoot, grpId), allocated), + new RootPage(new FullPageId(metastoreRoot, grpId), markDirty), + new RootPage(new FullPageId(reuseListRoot, grpId), markDirty), null, null); } finally { - pageMem.writeUnlock(grpId, metaId, metaPage, null, allocated); + pageMem.writeUnlock(grpId, metaId, metaPage, null, markDirty); } } finally { @@ -1143,7 +1195,7 @@ private Metas getOrAllocateCacheMetas() throws IgniteCheckedException { int cleared = 0; for (CacheDataStore store : cacheDataStores()) { - cleared += ((GridCacheDataStore)store).purgeExpired(cctx, c, amount - cleared); + cleared += 
((GridCacheDataStore)store).purgeExpired(cctx, c, unwindThrottlingTimeout, amount - cleared); if (amount != -1 && cleared >= amount) return true; @@ -1192,7 +1244,7 @@ long freeSpace() { for (CacheDataStore store : partDataStores.values()) { assert store instanceof GridCacheDataStore; - AbstractFreeList freeList = ((GridCacheDataStore)store).freeList; + AbstractFreeList freeList = ((GridCacheDataStore)store).getCacheStoreFreeList(); if (freeList == null) continue; @@ -1214,7 +1266,7 @@ long emptyDataPages() { for (CacheDataStore store : partDataStores.values()) { assert store instanceof GridCacheDataStore; - AbstractFreeList freeList = ((GridCacheDataStore)store).freeList; + AbstractFreeList freeList = ((GridCacheDataStore)store).getCacheStoreFreeList(); if (freeList == null) continue; @@ -1255,6 +1307,71 @@ public void findAndCleanupLostIndexesForStoppedCache(int cacheId) throws IgniteC } } + /** + * @param grpId Cache group ID. + * @throws IgniteCheckedException If failed. + */ + private void saveIndexReencryptionStatus(int grpId) throws IgniteCheckedException { + long state = ctx.kernalContext().encryption().getEncryptionState(grpId, PageIdAllocator.INDEX_PARTITION); + + if (state == 0) + return; + + PageMemoryEx pageMem = (PageMemoryEx)grp.dataRegion().pageMemory(); + + long metaPageId = PageIdAllocator.META_PAGE_ID; + long metaPage = pageMem.acquirePage(grpId, metaPageId); + + try { + boolean changed = false; + + long metaPageAddr = pageMem.writeLock(grpId, metaPageId, metaPage); + + try { + PageMetaIOV2 metaIo = PageMetaIO.getPageIO(metaPageAddr); + + int encryptIdx = ReencryptStateUtils.pageIndex(state); + int encryptCnt = ReencryptStateUtils.pageCount(state); + + if (encryptIdx == encryptCnt) { + ctx.kernalContext().encryption().setEncryptionState(grp, PageIdAllocator.INDEX_PARTITION, 0, 0); + + encryptIdx = encryptCnt = 0; + } + + changed |= metaIo.setEncryptedPageIndex(metaPageAddr, encryptIdx); + changed |= metaIo.setEncryptedPageCount(metaPageAddr, 
encryptCnt); + + IgniteWriteAheadLogManager wal = ctx.cache().context().wal(); + + if (changed && PageHandler.isWalDeltaRecordNeeded(pageMem, grpId, metaPageId, metaPage, wal, null)) + wal.log(new MetaPageUpdateIndexDataRecord(grpId, metaPageId, encryptIdx, encryptCnt)); + } + finally { + pageMem.writeUnlock(grpId, metaPageId, metaPage, null, changed); + } + } + finally { + pageMem.releasePage(grpId, metaPageId, metaPage); + } + } + + /** */ + public GridCacheDataStore createGridCacheDataStore( + CacheGroupContext grpCtx, + int partId, + boolean exists, + IgniteLogger log + ) { + return new GridCacheDataStore( + grpCtx, + partId, + exists, + busyLock, + log + ); + } + /** * */ @@ -1666,22 +1783,22 @@ private DataEntryRow(DataEntry entry) { /** * */ - private static class Metas { + static class Metas { /** */ @GridToStringInclude - private final RootPage reuseListRoot; + public final RootPage reuseListRoot; /** */ @GridToStringInclude - private final RootPage treeRoot; + public final RootPage treeRoot; /** */ @GridToStringInclude - private final RootPage pendingTreeRoot; + public final RootPage pendingTreeRoot; /** */ @GridToStringInclude - private final RootPage partMetastoreReuseListRoot; + public final RootPage partMetastoreReuseListRoot; /** * @param treeRoot Metadata storage root. 
@@ -1703,10 +1820,13 @@ private static class Metas { /** * */ - public class GridCacheDataStore implements CacheDataStore { + public static class GridCacheDataStore implements CacheDataStore { /** */ private final int partId; + /** */ + private final CacheGroupContext grp; + /** */ private volatile AbstractFreeList freeList; @@ -1734,19 +1854,38 @@ public class GridCacheDataStore implements CacheDataStore { /** */ private final boolean exists; + /** */ + private final GridSpinBusyLock busyLock; + + /** */ + private final IgniteLogger log; + /** */ private final AtomicBoolean init = new AtomicBoolean(); /** */ private final CountDownLatch latch = new CountDownLatch(1); + /** */ + private CacheDataTree dataTree; + /** * @param partId Partition. * @param exists {@code True} if store exists. */ - private GridCacheDataStore(int partId, boolean exists) { + public GridCacheDataStore(CacheGroupContext grp, int partId, boolean exists, + GridSpinBusyLock busyLock, + IgniteLogger log) { + this.grp = grp; this.partId = partId; this.exists = exists; + this.busyLock = busyLock; + this.log = log; + } + + /** */ + public AbstractFreeList getCacheStoreFreeList() { + return freeList; } /** @@ -1767,7 +1906,7 @@ private String partitionMetaStoreName() { * @return Name of data tree. 
*/ private String dataTreeName() { - return grp.cacheOrGroupName() + "-" + treeName(partId); + return grp.cacheOrGroupName() + "-" + BPlusTree.treeName("p-" + partId, "CacheData"); } /** @@ -1795,6 +1934,10 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException return null; } + final GridCacheSharedContext ctx = grp.shared(); + + AtomicLong pageListCacheLimit = ((GridCacheDatabaseSharedManager) ctx.database()).pageListCacheLimitHolder(grp.dataRegion()); + if (init.compareAndSet(false, true)) { IgniteCacheDatabaseSharedManager dbMgr = ctx.database(); @@ -1807,7 +1950,7 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException PageIdUtils.partId(metas.treeRoot.pageId().pageId()) != partId || PageIdUtils.partId(metas.pendingTreeRoot.pageId().pageId()) != partId || PageIdUtils.partId(metas.partMetastoreReuseListRoot.pageId().pageId()) != partId - ) { + ) { throw new IgniteCheckedException("Invalid meta root allocated [" + "cacheOrGroupName=" + grp.cacheOrGroupName() + ", partId=" + partId + @@ -1828,13 +1971,14 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException reuseRoot.isAllocated(), ctx.diagnostic().pageLockTracker().createPageLockTracker(freeListName), ctx.kernalContext(), - pageListCacheLimit + pageListCacheLimit, + PageIdAllocator.FLAG_AUX ) { /** {@inheritDoc} */ @Override protected long allocatePageNoReuse() throws IgniteCheckedException { - assert grp.shared().database().checkpointLockIsHeldByThread(); + assert ctx.database().checkpointLockIsHeldByThread(); - return pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_DATA); + return pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_AUX); } }; @@ -1853,13 +1997,14 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException partMetastoreReuseListRoot.isAllocated(), ctx.diagnostic().pageLockTracker().createPageLockTracker(partitionMetaStoreName), ctx.kernalContext(), - pageListCacheLimit + 
pageListCacheLimit, + PageIdAllocator.FLAG_AUX ) { /** {@inheritDoc} */ @Override protected long allocatePageNoReuse() throws IgniteCheckedException { - assert grp.shared().database().checkpointLockIsHeldByThread(); + assert ctx.database().checkpointLockIsHeldByThread(); - return pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_DATA); + return pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_AUX); } }; @@ -1869,20 +2014,21 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException RootPage treeRoot = metas.treeRoot; - CacheDataTree dataTree = new CacheDataTree( + dataTree = new CacheDataTree( grp, dataTreeName, freeList, rowStore, treeRoot.pageId().pageId(), treeRoot.isAllocated(), - ctx.diagnostic().pageLockTracker().createPageLockTracker(dataTreeName) + ctx.diagnostic().pageLockTracker().createPageLockTracker(dataTreeName), + PageIdAllocator.FLAG_AUX ) { /** {@inheritDoc} */ @Override protected long allocatePageNoReuse() throws IgniteCheckedException { - assert grp.shared().database().checkpointLockIsHeldByThread(); + assert ctx.database().checkpointLockIsHeldByThread(); - return pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_DATA); + return pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_AUX); } }; @@ -1897,19 +2043,29 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException pendingTreeRoot.pageId().pageId(), freeList, pendingTreeRoot.isAllocated(), - ctx.diagnostic().pageLockTracker().createPageLockTracker(pendingEntriesTreeName) + ctx.diagnostic().pageLockTracker().createPageLockTracker(pendingEntriesTreeName), + PageIdAllocator.FLAG_AUX ) { /** {@inheritDoc} */ @Override protected long allocatePageNoReuse() throws IgniteCheckedException { - assert grp.shared().database().checkpointLockIsHeldByThread(); + assert ctx.database().checkpointLockIsHeldByThread(); - return pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_DATA); + return pageMem.allocatePage(grpId, partId, 
PageIdAllocator.FLAG_AUX); } }; - PageMemoryEx pageMem = (PageMemoryEx)grp.dataRegion().pageMemory(); + PageMemoryEx pageMem = (PageMemoryEx) grp.dataRegion().pageMemory(); + + int grpId = grp.groupId(); - delegate0 = new CacheDataStoreImpl(partId, rowStore, dataTree) { + delegate0 = new CacheDataStoreImpl(partId, + rowStore, + dataTree, + () -> pendingTree0, + grp, + busyLock, + log + ) { /** {@inheritDoc} */ @Override public PendingEntriesTree pendingTree() { return pendingTree0; @@ -1922,20 +2078,20 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException if (pageStoreMgr == null) return; - final int pages = pageStoreMgr.pages(grp.groupId(), partId); + final int pages = pageStoreMgr.pages(grpId, partId); - long pageId = pageMem.partitionMetaPageId(grp.groupId(), partId); + long pageId = pageMem.partitionMetaPageId(grpId, partId); // For each page sequentially pin/unpin. for (int pageNo = 0; pageNo < pages; pageId++, pageNo++) { long pagePointer = -1; try { - pagePointer = pageMem.acquirePage(grp.groupId(), pageId); + pagePointer = pageMem.acquirePage(grpId, pageId); } finally { if (pagePointer != -1) - pageMem.releasePage(grp.groupId(), pageId, pagePointer); + pageMem.releasePage(grpId, pageId, pagePointer); } } } @@ -1946,7 +2102,6 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException if (!pendingTree0.isEmpty()) grp.caches().forEach(cctx -> cctx.ttl().hasPendingEntries(true)); - int grpId = grp.groupId(); long partMetaId = pageMem.partitionMetaPageId(grpId, partId); long partMetaPage = pageMem.acquirePage(grpId, partMetaId); @@ -1955,7 +2110,7 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException try { if (PageIO.getType(pageAddr) != 0) { - PagePartitionMetaIOV2 io = (PagePartitionMetaIOV2)PagePartitionMetaIO.VERSIONS.latest(); + PagePartitionMetaIOV3 io = (PagePartitionMetaIOV3)PagePartitionMetaIO.VERSIONS.latest(); Map cacheSizes = null; @@ -1968,7 +2123,14 @@ private 
CacheDataStore init0(boolean checkExists) throws IgniteCheckedException delegate0.restoreState(io.getSize(pageAddr), io.getUpdateCounter(pageAddr), cacheSizes, data); - globalRemoveId().setIfGreater(io.getGlobalRemoveId(pageAddr)); + int encrPageCnt = io.getEncryptedPageCount(pageAddr); + + if (encrPageCnt > 0) { + ctx.kernalContext().encryption().setEncryptionState( + grp, partId, io.getEncryptedPageIndex(pageAddr), encrPageCnt); + } + + grp.offheap().globalRemoveId().setIfGreater(io.getGlobalRemoveId(pageAddr)); } } finally { @@ -2012,7 +2174,7 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException */ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { PageMemoryEx pageMem = (PageMemoryEx)grp.dataRegion().pageMemory(); - IgniteWriteAheadLogManager wal = ctx.wal(); + IgniteWriteAheadLogManager wal = grp.shared().wal(); int grpId = grp.groupId(); long partMetaId = pageMem.partitionMetaPageId(grpId, partId); @@ -2035,19 +2197,19 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { // Initialize new page. 
if (PageIO.getType(pageAddr) != PageIO.T_PART_META) { - PagePartitionMetaIOV2 io = (PagePartitionMetaIOV2)PagePartitionMetaIO.VERSIONS.latest(); + PagePartitionMetaIOV3 io = (PagePartitionMetaIOV3)PagePartitionMetaIO.VERSIONS.latest(); io.initNewPage(pageAddr, partMetaId, pageMem.realPageSize(grpId)); - treeRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_DATA); - reuseListRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_DATA); - pendingTreeRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_DATA); - partMetaStoreReuseListRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_DATA); + treeRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_AUX); + reuseListRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_AUX); + pendingTreeRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_AUX); + partMetaStoreReuseListRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_AUX); - assert PageIdUtils.flag(treeRoot) == PageMemory.FLAG_DATA; - assert PageIdUtils.flag(reuseListRoot) == PageMemory.FLAG_DATA; - assert PageIdUtils.flag(pendingTreeRoot) == PageMemory.FLAG_DATA; - assert PageIdUtils.flag(partMetaStoreReuseListRoot) == PageMemory.FLAG_DATA; + assert PageIdUtils.flag(treeRoot) == PageMemory.FLAG_AUX; + assert PageIdUtils.flag(reuseListRoot) == PageMemory.FLAG_AUX; + assert PageIdUtils.flag(pendingTreeRoot) == PageMemory.FLAG_AUX; + assert PageIdUtils.flag(partMetaStoreReuseListRoot) == PageMemory.FLAG_AUX; io.setTreeRoot(pageAddr, treeRoot); io.setReuseListRoot(pageAddr, reuseListRoot); @@ -2069,21 +2231,21 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { int pageVer = PagePartitionMetaIO.getVersion(pageAddr); - if (pageVer < 2) { - assert pageVer == 1; + if (pageVer < 3) { + assert pageVer == 1 || pageVer == 2; if (log.isDebugEnabled()) - log.info("Upgrade partition meta page version: [part=" + partId + + log.debug("Upgrade partition meta page version: [part=" + partId + ", grpId=" + 
grpId + ", oldVer=" + pageVer + ", newVer=" + io.getVersion() ); io = PagePartitionMetaIO.VERSIONS.latest(); - ((PagePartitionMetaIOV2)io).upgradePage(pageAddr); + ((PagePartitionMetaIOV3)io).upgradePage(pageAddr); - pendingTreeRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_DATA); - partMetaStoreReuseListRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_DATA); + pendingTreeRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_AUX); + partMetaStoreReuseListRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_AUX); io.setPendingTreeRoot(pageAddr, pendingTreeRoot); io.setPartitionMetaStoreReuseListRoot(pageAddr, partMetaStoreReuseListRoot); @@ -2101,7 +2263,7 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { partMetaStoreReuseListRoot = io.getPartitionMetaStoreReuseListRoot(pageAddr); if (partMetaStoreReuseListRoot == 0) { - partMetaStoreReuseListRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_DATA); + partMetaStoreReuseListRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_AUX); if (PageHandler.isWalDeltaRecordNeeded(pageMem, grpId, partMetaId, partMetaPage, wal, null)) { @@ -2113,19 +2275,23 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { } } - if (PageIdUtils.flag(treeRoot) != PageMemory.FLAG_DATA) + if (PageIdUtils.flag(treeRoot) != PageMemory.FLAG_AUX + && PageIdUtils.flag(treeRoot) != PageMemory.FLAG_DATA) throw new StorageException("Wrong tree root page id flag: treeRoot=" + U.hexLong(treeRoot) + ", part=" + partId + ", grpId=" + grpId); - if (PageIdUtils.flag(reuseListRoot) != PageMemory.FLAG_DATA) + if (PageIdUtils.flag(reuseListRoot) != PageMemory.FLAG_AUX + && PageIdUtils.flag(reuseListRoot) != PageMemory.FLAG_DATA) throw new StorageException("Wrong reuse list root page id flag: reuseListRoot=" + U.hexLong(reuseListRoot) + ", part=" + partId + ", grpId=" + grpId); - if (PageIdUtils.flag(pendingTreeRoot) != PageMemory.FLAG_DATA) + if 
(PageIdUtils.flag(pendingTreeRoot) != PageMemory.FLAG_AUX + && PageIdUtils.flag(pendingTreeRoot) != PageMemory.FLAG_DATA) throw new StorageException("Wrong pending tree root page id flag: reuseListRoot=" - + U.hexLong(reuseListRoot) + ", part=" + partId + ", grpId=" + grpId); + + U.hexLong(pendingTreeRoot) + ", part=" + partId + ", grpId=" + grpId); - if (PageIdUtils.flag(partMetaStoreReuseListRoot) != PageMemory.FLAG_DATA) + if (PageIdUtils.flag(partMetaStoreReuseListRoot) != PageMemory.FLAG_AUX + && PageIdUtils.flag(partMetaStoreReuseListRoot) != PageMemory.FLAG_DATA) throw new StorageException("Wrong partition meta store list root page id flag: partMetaStoreReuseListRoot=" + U.hexLong(partMetaStoreReuseListRoot) + ", part=" + partId + ", grpId=" + grpId); } @@ -2146,6 +2312,11 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { } } + /** {@inheritDoc} */ + @Override public CacheDataTree tree() { + return dataTree; + } + /** {@inheritDoc} */ @Override public boolean init() { try { @@ -2391,7 +2562,7 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { long expireTime, @Nullable CacheDataRow oldRow ) throws IgniteCheckedException { - assert ctx.database().checkpointLockIsHeldByThread(); + assert grp.shared().database().checkpointLockIsHeldByThread(); CacheDataStore delegate = init0(false); @@ -2521,7 +2692,7 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { GridCacheVersion ver, long expireTime, @Nullable CacheDataRow oldRow) throws IgniteCheckedException { - assert ctx.database().checkpointLockIsHeldByThread(); + assert grp.shared().database().checkpointLockIsHeldByThread(); CacheDataStore delegate = init0(false); @@ -2554,7 +2725,7 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { /** {@inheritDoc} */ @Override public void invoke(GridCacheContext cctx, KeyCacheObject key, OffheapInvokeClosure c) throws IgniteCheckedException { - assert 
ctx.database().checkpointLockIsHeldByThread(); + assert grp.shared().database().checkpointLockIsHeldByThread(); CacheDataStore delegate = init0(false); @@ -2564,7 +2735,7 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { /** {@inheritDoc} */ @Override public void remove(GridCacheContext cctx, KeyCacheObject key, int partId) throws IgniteCheckedException { - assert ctx.database().checkpointLockIsHeldByThread(); + assert grp.shared().database().checkpointLockIsHeldByThread(); CacheDataStore delegate = init0(false); @@ -2724,7 +2895,7 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { /** {@inheritDoc} */ @Override public void clear(int cacheId) throws IgniteCheckedException { - assert ctx.database().checkpointLockIsHeldByThread(); + assert grp.shared().database().checkpointLockIsHeldByThread(); CacheDataStore delegate0 = init0(true); @@ -2775,6 +2946,7 @@ public long expiredSize() throws IgniteCheckedException { public int purgeExpired( GridCacheContext cctx, IgniteInClosure2X c, + long throttlingTimeout, int amount ) throws IgniteCheckedException { CacheDataStore delegate0 = init0(true); @@ -2792,7 +2964,7 @@ public int purgeExpired( if (cleared < amount) { lastThrottledCacheId = cctx.cacheId(); - nextStoreCleanTimeNanos = nowNanos + U.millisToNanos(unwindThrottlingTimeout); + nextStoreCleanTimeNanos = nowNanos + U.millisToNanos(throttlingTimeout); } return cleared; @@ -2934,7 +3106,8 @@ private int purgeExpiredInternal( } } - @Override public PartitionMetaStorage partStorage() { + /** */ + @Override public PartitionMetaStorage partStorage() { return partStorage; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java index 5f937342dc7a48..346b842585c5be 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java @@ -55,6 +55,7 @@ import org.apache.ignite.internal.mem.IgniteOutOfMemoryException; import org.apache.ignite.internal.mem.file.MappedFileMemoryProvider; import org.apache.ignite.internal.mem.unsafe.UnsafeMemoryProvider; +import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.impl.PageMemoryNoStoreImpl; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; @@ -75,6 +76,7 @@ import org.apache.ignite.internal.processors.cache.persistence.freelist.FreeList; import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetaStorage; import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetastorageLifecycleListener; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageReadWriteManager; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageLockListener; import org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer; @@ -103,6 +105,7 @@ import static org.apache.ignite.configuration.DataStorageConfiguration.DFLT_WAL_HISTORY_SIZE; import static org.apache.ignite.internal.processors.cache.mvcc.txlog.TxLog.TX_LOG_CACHE_NAME; import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.METASTORE_DATA_REGION_NAME; +import static org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor.VOLATILE_DATA_REGION_NAME; /** * @@ -144,7 +147,7 @@ public class IgniteCacheDatabaseSharedManager extends GridCacheSharedManagerAdap protected final Map memMetricsMap = new 
ConcurrentHashMap<>(); /** */ - private volatile boolean dataRegionsInitialized; + protected volatile boolean dataRegionsInitialized; /** */ private volatile boolean dataRegionsStarted; @@ -313,7 +316,8 @@ protected void initPageMemoryDataStructures(DataStorageConfiguration dbCfg) thro true, lsnr, cctx.kernalContext(), - null + null, + PageIdAllocator.FLAG_IDX ); freeListMap.put(memPlcCfg.getName(), freeList); @@ -363,6 +367,8 @@ protected void initDataRegions(DataStorageConfiguration memCfg) throws IgniteChe protected void initDataRegions0(DataStorageConfiguration memCfg) throws IgniteCheckedException { DataRegionConfiguration[] dataRegionCfgs = memCfg.getDataRegionConfigurations(); + boolean persistenceEnabled = CU.isPersistenceEnabled(memCfg); + if (dataRegionCfgs != null) { for (DataRegionConfiguration dataRegionCfg : dataRegionCfgs) addDataRegion(memCfg, dataRegionCfg, dataRegionCfg.isPersistenceEnabled()); @@ -379,9 +385,18 @@ protected void initDataRegions0(DataStorageConfiguration memCfg) throws IgniteCh createSystemDataRegion( memCfg.getSystemRegionInitialSize(), memCfg.getSystemRegionMaxSize(), - CU.isPersistenceEnabled(memCfg) + persistenceEnabled + ), + persistenceEnabled + ); + + addDataRegion( + memCfg, + createVolatileDataRegion( + memCfg.getSystemRegionInitialSize(), + memCfg.getSystemRegionMaxSize() ), - CU.isPersistenceEnabled(memCfg) + false ); for (DatabaseLifecycleListener lsnr : getDatabaseListeners(cctx.kernalContext())) @@ -401,10 +416,25 @@ protected List getDatabaseListeners(GridKernalContext * @param dataRegionCfg Data region config. * @throws IgniteCheckedException If failed to initialize swap path. */ - public void addDataRegion( + public DataRegion addDataRegion( DataStorageConfiguration dataStorageCfg, DataRegionConfiguration dataRegionCfg, boolean trackable + ) throws IgniteCheckedException { + return addDataRegion(dataStorageCfg, dataRegionCfg, trackable, cctx.pageStore()); + } + + /** + * @param dataStorageCfg Database config. 
+ * @param dataRegionCfg Data region config. + * @param pmPageMgr Page manager. + * @throws IgniteCheckedException If failed to initialize swap path. + */ + protected DataRegion addDataRegion( + DataStorageConfiguration dataStorageCfg, + DataRegionConfiguration dataRegionCfg, + boolean trackable, + PageReadWriteManager pmPageMgr ) throws IgniteCheckedException { String dataRegionName = dataRegionCfg.getName(); @@ -418,7 +448,7 @@ public void addDataRegion( cctx.kernalContext().metric(), dataRegionMetricsProvider(dataRegionCfg)); - DataRegion region = initMemory(dataStorageCfg, dataRegionCfg, memMetrics, trackable); + DataRegion region = initMemory(dataStorageCfg, dataRegionCfg, memMetrics, trackable, pmPageMgr); dataRegionMap.put(dataRegionName, region); @@ -429,6 +459,8 @@ public void addDataRegion( else if (dataRegionName.equals(DFLT_DATA_REG_DEFAULT_NAME)) U.warn(log, "Data Region with name 'default' isn't used as a default. " + "Please, check Data Region configuration."); + + return region; } /** @@ -533,8 +565,24 @@ private DataRegionConfiguration createSystemDataRegion( } /** - * Validation of memory configuration. + * @param volatileCacheInitSize Initial size of PageMemory to be created for volatile cache. + * @param volatileCacheMaxSize Maximum size of PageMemory to be created for volatile cache. * + * @return {@link DataRegionConfiguration configuration} of DataRegion for volatile cache. + */ + private DataRegionConfiguration createVolatileDataRegion(long volatileCacheInitSize, long volatileCacheMaxSize) { + DataRegionConfiguration res = new DataRegionConfiguration(); + + res.setName(VOLATILE_DATA_REGION_NAME); + res.setInitialSize(volatileCacheInitSize); + res.setMaxSize(volatileCacheMaxSize); + res.setPersistenceEnabled(false); + res.setLazyMemoryAllocation(true); + + return res; + } + + /** * @param memCfg configuration to validate. * @throws IgniteCheckedException In case of validation violation. 
*/ @@ -1175,6 +1223,7 @@ public void ensureFreeSpace(DataRegion memPlc) throws IgniteCheckedException { * @param memCfg memory configuration with common parameters. * @param plcCfg data region with PageMemory specific parameters. * @param memMetrics {@link DataRegionMetrics} object to collect memory usage metrics. + * @param pmPageMgr Page manager. * @return data region instance. * * @throws IgniteCheckedException If failed to initialize swap path. @@ -1183,9 +1232,10 @@ private DataRegion initMemory( DataStorageConfiguration memCfg, DataRegionConfiguration plcCfg, DataRegionMetricsImpl memMetrics, - boolean trackable + boolean trackable, + PageReadWriteManager pmPageMgr ) throws IgniteCheckedException { - PageMemory pageMem = createPageMemory(createOrReuseMemoryProvider(plcCfg), memCfg, plcCfg, memMetrics, trackable); + PageMemory pageMem = createPageMemory(createOrReuseMemoryProvider(plcCfg), memCfg, plcCfg, memMetrics, trackable, pmPageMgr); return new DataRegion(pageMem, plcCfg, memMetrics, createPageEvictionTracker(plcCfg, pageMem)); } @@ -1284,6 +1334,7 @@ protected PageEvictionTracker createPageEvictionTracker(DataRegionConfiguration * @param memCfg Memory configuartion. * @param memPlcCfg data region configuration. * @param memMetrics DataRegionMetrics to collect memory usage metrics. + * @param pmPageMgr Page manager. * @return PageMemory instance. 
*/ protected PageMemory createPageMemory( @@ -1291,7 +1342,8 @@ protected PageMemory createPageMemory( DataStorageConfiguration memCfg, DataRegionConfiguration memPlcCfg, DataRegionMetricsImpl memMetrics, - boolean trackable + boolean trackable, + PageReadWriteManager pmPageMgr ) { memMetrics.persistenceEnabled(false); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IndexStorageImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IndexStorageImpl.java index 1be0b973ccc65b..94f7feb2080158 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IndexStorageImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IndexStorageImpl.java @@ -24,6 +24,7 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.internal.pagemem.FullPageId; +import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.PageUtils; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; @@ -267,6 +268,7 @@ private MetaTree( reuseList, innerIos, leafIos, + PageIdAllocator.FLAG_IDX, failureProcessor, lockLsnr ); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointManager.java index 415e2cfbe4d3d2..7fe7002db5f049 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointManager.java @@ -170,7 +170,7 @@ public CheckpointManager( checkpointPagesWriterFactory = new CheckpointPagesWriterFactory( logger, 
snapshotMgr, - (fullPage, buf, tag) -> pageStoreManager.writeInternal(fullPage.groupId(), fullPage.pageId(), buf, tag, true), + (pageMemEx, fullPage, buf, tag) -> pageStoreManager.write(fullPage.groupId(), fullPage.pageId(), buf, tag, true), persStoreMetrics, throttlingPolicy, threadBuf, pageMemoryGroupResolver @@ -231,9 +231,10 @@ public void threadBuf(ThreadLocal threadBuf) { /** * @param lsnr Listener. + * @param dataRegion Data region for which listener is corresponded to. */ - public void addCheckpointListener(CheckpointListener lsnr) { - checkpointWorkflow.addCheckpointListener(lsnr); + public void addCheckpointListener(CheckpointListener lsnr, DataRegion dataRegion) { + checkpointWorkflow.addCheckpointListener(lsnr, dataRegion); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointPagesWriter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointPagesWriter.java index 77f9e2eb09ca31..79f774c087b73b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointPagesWriter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointPagesWriter.java @@ -178,7 +178,7 @@ private GridConcurrentMultiPairQueue writePages( CheckpointMetricsTracker tracker = persStoreMetrics.metricsEnabled() ? 
this.tracker : null; - PageStoreWriter pageStoreWriter = createPageStoreWriter(pagesToRetry); + Map pageStoreWriters = new HashMap<>(); ByteBuffer tmpWriteBuf = threadBuf.get(); @@ -201,6 +201,8 @@ private GridConcurrentMultiPairQueue writePages( tmpWriteBuf.rewind(); + PageStoreWriter pageStoreWriter = pageStoreWriters.computeIfAbsent(pageMem, pageMemEx -> createPageStoreWriter(pageMemEx, pagesToRetry)); + pageMem.checkpointWritePage(fullId, tmpWriteBuf, pageStoreWriter, tracker); if (throttlingEnabled) { @@ -227,18 +229,20 @@ private GridConcurrentMultiPairQueue writePages( /** * Factory method for create {@link PageStoreWriter}. * + * @param pageMemEx * @param pagesToRetry List pages for retry. * @return Checkpoint page write context. */ - private PageStoreWriter createPageStoreWriter(Map> pagesToRetry) { + private PageStoreWriter createPageStoreWriter( + PageMemoryEx pageMemEx, + Map> pagesToRetry + ) { return new PageStoreWriter() { /** {@inheritDoc} */ @Override public void writePage(FullPageId fullPageId, ByteBuffer buf, int tag) throws IgniteCheckedException { if (tag == PageMemoryImpl.TRY_AGAIN_TAG) { - PageMemoryEx pageMem = pageMemoryGroupResolver.apply(fullPageId.groupId()); - - pagesToRetry.computeIfAbsent(pageMem, k -> new ArrayList<>()).add(fullPageId); + pagesToRetry.computeIfAbsent(pageMemEx, k -> new ArrayList<>()).add(fullPageId); return; } @@ -258,7 +262,7 @@ private PageStoreWriter createPageStoreWriter(Map curCpProgress.updateWrittenPages(1); - PageStore store = pageWriter.write(fullPageId, buf, tag); + PageStore store = pageWriter.write(pageMemEx, fullPageId, buf, tag); updStores.computeIfAbsent(store, k -> new LongAdder()).increment(); } @@ -268,12 +272,15 @@ private PageStoreWriter createPageStoreWriter(Map /** Interface which allows to write one page to page store. */ public interface CheckpointPageWriter { /** + * + * @param pageMemEx Page memory from which page should be written. * @param fullPageId Full page id. 
* @param buf Byte buffer. * @param tag Page tag. * @return {@link PageStore} which was used to write. * @throws IgniteCheckedException if fail. */ - PageStore write(FullPageId fullPageId, ByteBuffer buf, int tag) throws IgniteCheckedException; + PageStore write(PageMemoryEx pageMemEx, FullPageId fullPageId, ByteBuffer buf, int tag) + throws IgniteCheckedException; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointPagesWriterFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointPagesWriterFactory.java index eb3607b76c89f3..8c882e1473eeb4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointPagesWriterFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointPagesWriterFactory.java @@ -19,6 +19,8 @@ import java.nio.ByteBuffer; import java.util.Collection; +import java.util.HashMap; +import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.atomic.LongAdder; @@ -144,22 +146,13 @@ Runnable buildRecovery( AtomicInteger cpPagesCnt ) { return () -> { - PageStoreWriter pageStoreWriter = (fullPageId, buf, tag) -> { - assert tag != PageMemoryImpl.TRY_AGAIN_TAG : "Lock is held by other thread for page " + fullPageId; - - // Write buf to page store. - PageStore store = checkpointPageWriter.write(fullPageId, buf, tag); - - // Save store for future fsync. - updStores.add(store); - }; - GridConcurrentMultiPairQueue.Result res = new GridConcurrentMultiPairQueue.Result<>(); int pagesWritten = 0; ByteBuffer tmpWriteBuf = threadBuf.get(); + Map pageStoreWriters = new HashMap<>(); try { while (pages.next(res)) { // Fail-fast break if some exception occurred. 
@@ -168,6 +161,19 @@ Runnable buildRecovery( PageMemoryEx pageMem = res.getKey(); + PageStoreWriter pageStoreWriter = pageStoreWriters.computeIfAbsent( + pageMem, + (pageMemEx) -> (fullPageId, buf, tag) -> { + assert tag != PageMemoryImpl.TRY_AGAIN_TAG : "Lock is held by other thread for page " + fullPageId; + + // Write buf to page store. + PageStore store = checkpointPageWriter.write(pageMemEx, fullPageId, buf, tag); + + // Save store for future fsync. + updStores.add(store); + } + ); + // Write page content to page store via pageStoreWriter. // Tracker is null, because no need to track checkpoint metrics on recovery. pageMem.checkpointWritePage(res.getValue(), tmpWriteBuf, pageStoreWriter, null); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointWorkflow.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointWorkflow.java index 6a97a188eae4c6..93b7fed55c8ef0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointWorkflow.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointWorkflow.java @@ -26,7 +26,6 @@ import java.util.Map; import java.util.Set; import java.util.UUID; -import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ExecutionException; import java.util.concurrent.ForkJoinPool; import java.util.concurrent.ForkJoinTask; @@ -39,6 +38,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import java.util.function.Supplier; +import java.util.stream.Collectors; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteInterruptedException; @@ -74,7 +74,9 @@ import org.apache.ignite.internal.util.worker.WorkProgressDispatcher; import org.apache.ignite.lang.IgniteFuture; import 
org.apache.ignite.thread.IgniteThreadPoolExecutor; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; +import org.jsr166.ConcurrentLinkedHashMap; + +import static org.apache.ignite.IgniteSystemProperties.getBoolean; +import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.LOST; @@ -106,6 +108,9 @@ public class CheckpointWorkflow { /** @see IgniteSystemProperties#CHECKPOINT_PARALLEL_SORT_THRESHOLD */ public static final int DFLT_CHECKPOINT_PARALLEL_SORT_THRESHOLD = 512 * 1024; + /** Marker region mapped to checkpoint listeners which are not bound to any specific data region. */ + private static final DataRegion NO_REGION = new DataRegion(null, null, null, null); + /** * Starting from this number of dirty pages in checkpoint, array will be sorted with {@link * Arrays#parallelSort(Comparable[])} in case of {@link CheckpointWriteOrder#SEQUENTIAL}. @@ -144,7 +149,7 @@ public class CheckpointWorkflow { private final CheckpointWriteOrder checkpointWriteOrder; /** Collections of checkpoint listeners. */ - private final Collection lsnrs = new CopyOnWriteArrayList<>(); + private final Map lsnrs = new ConcurrentLinkedHashMap<>(); /** Ignite instance name. */ private final String igniteInstanceName; @@ -228,7 +233,9 @@ public Checkpoint markCheckpointBegin( CheckpointMetricsTracker tracker, WorkProgressDispatcher workProgressDispatcher ) throws IgniteCheckedException { - List dbLsnrs = new ArrayList<>(lsnrs); + Collection checkpointedRegions = dataRegions.get(); + + List dbLsnrs = getRelevantCheckpointListeners(checkpointedRegions); CheckpointRecord cpRec = new CheckpointRecord(memoryRecoveryRecordPtr); @@ -283,7 +290,7 @@ curr, new PartitionAllocationMap(), checkpointCollectPagesInfoPool, workProgress fillCacheGroupState(cpRec); //There are allowable to replace pages only after checkpoint entry was stored to disk. 
- cpPagesHolder = beginAllCheckpoints(dataRegions.get(), curr.futureFor(MARKER_STORED_TO_DISK)); + cpPagesHolder = beginAllCheckpoints(checkpointedRegions, curr.futureFor(MARKER_STORED_TO_DISK)); curr.currentCheckpointPagesCount(cpPagesHolder.pagesNum()); @@ -293,7 +300,8 @@ curr, new PartitionAllocationMap(), checkpointCollectPagesInfoPool, workProgress if (dirtyPagesCount > 0 || curr.nextSnapshot() || hasPartitionsToDestroy) { // No page updates for this checkpoint are allowed from now on. - cpPtr = wal.log(cpRec); + if (wal != null) + cpPtr = wal.log(cpRec); if (cpPtr == null) cpPtr = CheckpointStatus.NULL_PTR; @@ -326,18 +334,22 @@ curr, new PartitionAllocationMap(), checkpointCollectPagesInfoPool, workProgress tracker.onWalCpRecordFsyncStart(); // Sync log outside the checkpoint write lock. - wal.flush(cpPtr, true); + if (wal != null) + wal.flush(cpPtr, true); tracker.onWalCpRecordFsyncEnd(); - CheckpointEntry checkpointEntry = checkpointMarkersStorage.writeCheckpointEntry( - cpTs, - cpRec.checkpointId(), - cpPtr, - cpRec, - CheckpointEntryType.START, - skipSync - ); + CheckpointEntry checkpointEntry = null; + + if (checkpointMarkersStorage != null) + checkpointEntry = checkpointMarkersStorage.writeCheckpointEntry( + cpTs, + cpRec.checkpointId(), + cpPtr, + cpRec, + CheckpointEntryType.START, + skipSync + ); curr.transitTo(MARKER_STORED_TO_DISK); @@ -351,7 +363,7 @@ curr, new PartitionAllocationMap(), checkpointCollectPagesInfoPool, workProgress return new Checkpoint(checkpointEntry, cpPages, curr); } else { - if (curr.nextSnapshot()) + if (curr.nextSnapshot() && wal != null) wal.flush(null, true); return new Checkpoint(null, GridConcurrentMultiPairQueue.EMPTY, curr); @@ -563,23 +575,28 @@ public void markCheckpointEnd(Checkpoint chp) throws IgniteCheckedException { } if (chp.hasDelta()) { - checkpointMarkersStorage.writeCheckpointEntry( - chp.cpEntry.timestamp(), - chp.cpEntry.checkpointId(), - chp.cpEntry.checkpointMark(), - null, - CheckpointEntryType.END, 
- skipSync - ); - - wal.notchLastCheckpointPtr(chp.cpEntry.checkpointMark()); + if (checkpointMarkersStorage != null) + checkpointMarkersStorage.writeCheckpointEntry( + chp.cpEntry.timestamp(), + chp.cpEntry.checkpointId(), + chp.cpEntry.checkpointMark(), + null, + CheckpointEntryType.END, + skipSync + ); + + if (wal != null) + wal.notchLastCheckpointPtr(chp.cpEntry.checkpointMark()); } - checkpointMarkersStorage.onCheckpointFinished(chp); + if (checkpointMarkersStorage != null) + checkpointMarkersStorage.onCheckpointFinished(chp); CheckpointContextImpl emptyCtx = new CheckpointContextImpl(chp.progress, null, null, null); - List dbLsnrs = new ArrayList<>(lsnrs); + Collection checkpointedRegions = dataRegions.get(); + + List dbLsnrs = getRelevantCheckpointListeners(checkpointedRegions); for (CheckpointListener lsnr : dbLsnrs) lsnr.afterCheckpointEnd(emptyCtx); @@ -587,6 +604,17 @@ public void markCheckpointEnd(Checkpoint chp) throws IgniteCheckedException { chp.progress.transitTo(FINISHED); } + /** + * @param checkpointedRegions Regions which will be checkpointed. + * @return Checkpoint listeners which should be handled. + */ + @NotNull private List getRelevantCheckpointListeners(Collection checkpointedRegions) { + return lsnrs.entrySet().stream() + .filter(entry -> entry.getValue() == NO_REGION || checkpointedRegions.contains(entry.getValue())) + .map(Map.Entry::getKey) + .collect(Collectors.toList()); + } + /** * This method makes sense if node was stopped during the checkpoint(Start marker was written to disk while end * marker are not). It is able to write all pages to disk and create end marker. @@ -687,10 +715,13 @@ public void memoryRecoveryRecordPtr(WALPointer memoryRecoveryRecordPtr) { } /** + * Adding the listener which will be called only when given data region will be checkpointed. + * * @param lsnr Listener. + * @param dataRegion Data region for which listener is corresponded to. 
*/ - public void addCheckpointListener(CheckpointListener lsnr) { - lsnrs.add(lsnr); + public void addCheckpointListener(CheckpointListener lsnr, DataRegion dataRegion) { + lsnrs.put(lsnr, dataRegion == null ? NO_REGION : dataRegion); } /** @@ -720,7 +751,8 @@ public void stop() { checkpointCollectPagesInfoPool = null; } - lsnrs.clear(); + for (CheckpointListener lsnr : lsnrs.keySet()) + lsnrs.remove(lsnr); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/Checkpointer.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/Checkpointer.java index c0368f410f7edc..6747e583a8ccc6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/Checkpointer.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/Checkpointer.java @@ -106,7 +106,8 @@ public class Checkpointer extends GridWorker { "walCpRecordFsyncDuration=%dms, " + "writeCheckpointEntryDuration=%dms, " + "splitAndSortCpPagesDuration=%dms, " + - "%s pages=%d, " + + "%s" + + "pages=%d, " + "reason='%s']"; /** Skip sync. */ @@ -408,8 +409,8 @@ private void doCheckpoint() { log.info( String.format( CHECKPOINT_STARTED_LOG_FORMAT, - chp.cpEntry.checkpointId(), - chp.cpEntry.checkpointMark(), + chp.cpEntry == null ? "" : chp.cpEntry.checkpointId(), + chp.cpEntry == null ? "" : chp.cpEntry.checkpointMark(), tracker.beforeLockDuration(), tracker.lockWaitDuration(), tracker.listenersExecuteDuration(), @@ -417,7 +418,7 @@ private void doCheckpoint() { tracker.walCpRecordFsyncDuration(), tracker.writeCheckpointEntryDuration(), tracker.splitAndSortCpPagesDuration(), - possibleJvmPauseDur > 0 ? "possibleJvmPauseDuration=" + possibleJvmPauseDur + "ms," : "", + possibleJvmPauseDur > 0 ? 
"possibleJvmPauseDuration=" + possibleJvmPauseDur + "ms, " : "", chp.pagesSize, chp.progress.reason() ) @@ -455,7 +456,7 @@ private void doCheckpoint() { if (chp.hasDelta() || destroyedPartitionsCnt > 0) { if (log.isInfoEnabled()) { - String walSegsCoveredMsg = prepareWalSegsCoveredMsg(chp.walSegsCoveredRange); + String walSegsCoveredMsg = chp.walSegsCoveredRange == null ? "" : prepareWalSegsCoveredMsg(chp.walSegsCoveredRange); log.info(String.format("Checkpoint finished [cpId=%s, pages=%d, markPos=%s, " + "walSegmentsCleared=%d, walSegmentsCovered=%s, markDuration=%dms, pagesWrite=%dms, fsync=%dms, " + @@ -581,6 +582,7 @@ private void updateMetrics(Checkpoint chp, CheckpointMetricsTracker tracker) { tracker.pagesWriteDuration(), tracker.fsyncDuration(), tracker.totalDuration(), + tracker.checkpointStartTime(), chp.pagesSize, tracker.dataPagesWritten(), tracker.cowPagesWritten() @@ -852,6 +854,9 @@ public void shutdownNow() { * Restart worker in IgniteThread. */ public void start() { + if (runner() != null) + return; + assert runner() == null : "Checkpointer is running."; new IgniteThread(this).start(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/LightweightCheckpointManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/LightweightCheckpointManager.java new file mode 100644 index 00000000000000..73bec40ec0b081 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/LightweightCheckpointManager.java @@ -0,0 +1,335 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.checkpoint; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.util.Collection; +import java.util.Collections; +import java.util.UUID; +import java.util.function.Function; +import java.util.function.Supplier; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.IgniteSystemProperties; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.LongJVMPauseDetector; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.cache.GridCacheProcessor; +import org.apache.ignite.internal.processors.cache.persistence.DataRegion; +import org.apache.ignite.internal.processors.cache.persistence.DataStorageMetricsImpl; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryImpl; +import org.apache.ignite.internal.processors.cache.persistence.snapshot.IgniteCacheSnapshotManager; +import org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer; +import org.apache.ignite.internal.processors.failure.FailureProcessor; +import org.apache.ignite.internal.util.StripedExecutor; 
+import org.apache.ignite.internal.util.lang.IgniteThrowableFunction; +import org.apache.ignite.internal.worker.WorkersRegistry; +import org.apache.ignite.lang.IgniteInClosure; +import org.jetbrains.annotations.Nullable; + +import static org.apache.ignite.IgniteSystemProperties.IGNITE_CHECKPOINT_READ_LOCK_TIMEOUT; + +/** + * Like a sharp checkpoint algorithm implemented in {@link CheckpointManager} this checkpoint ensures that + * all pages marked dirty under {@link #checkpointTimeoutLock()} will be consistently saved to disk. + * + * But unlike {@link CheckpointManager} lightweight checkpoint doesn't store any checkpoint markers to disk + * nor write cp-related records to WAL log. + * + * This allows using it in situations where no recovery is needed after a crash in the middle of checkpoint + * but work can simply be replayed from the beginning. + * + * Such situations include defragmentation and node recovery after a crash + * (regular sharp checkpoint cannot be used during recovery). + */ +public class LightweightCheckpointManager { + /** Checkpoint worker. */ + private volatile Checkpointer checkpointer; + + /** Main checkpoint steps. */ + private final CheckpointWorkflow checkpointWorkflow; + + /** Timeout checkpoint lock which should be used while write to memory happened. */ + final CheckpointTimeoutLock checkpointTimeoutLock; + + /** Checkpoint page writer factory. */ + private final CheckpointPagesWriterFactory checkpointPagesWriterFactory; + + /** Checkpointer builder. It allows to create a new checkpointer on each call. */ + private final Supplier checkpointerProvider; + + /** + * @param logger Logger producer. + * @param igniteInstanceName Ignite instance name. + * @param checkpointThreadName Name of main checkpoint thread. + * @param workersRegistry Workers registry. + * @param persistenceCfg Persistence configuration. + * @param dataRegions Data regions. + * @param pageMemoryGroupResolver Page memory resolver. 
+ * @param throttlingPolicy Throttling policy. + * @param snapshotMgr Snapshot manager. + * @param persStoreMetrics Persistence metrics. + * @param longJvmPauseDetector Long JVM pause detector. + * @param failureProcessor Failure processor. + * @param cacheProcessor Cache processor. + * @throws IgniteCheckedException if fail. + */ + public LightweightCheckpointManager( + Function, IgniteLogger> logger, + String igniteInstanceName, + String checkpointThreadName, + WorkersRegistry workersRegistry, + DataStorageConfiguration persistenceCfg, + Supplier> dataRegions, + IgniteThrowableFunction pageMemoryGroupResolver, + PageMemoryImpl.ThrottlingPolicy throttlingPolicy, + IgniteCacheSnapshotManager snapshotMgr, + DataStorageMetricsImpl persStoreMetrics, + LongJVMPauseDetector longJvmPauseDetector, + FailureProcessor failureProcessor, + GridCacheProcessor cacheProcessor + ) throws IgniteCheckedException { + CheckpointReadWriteLock lock = new CheckpointReadWriteLock(logger); + + checkpointWorkflow = new CheckpointWorkflow( + logger, + null, + snapshotMgr, + null, + lock, + persistenceCfg.getCheckpointWriteOrder(), + dataRegions, + Collections::emptyList, + persistenceCfg.getCheckpointThreads(), + igniteInstanceName + ); + + ThreadLocal threadBuf = new ThreadLocal() { + /** {@inheritDoc} */ + @Override protected ByteBuffer initialValue() { + ByteBuffer tmpWriteBuf = ByteBuffer.allocateDirect(persistenceCfg.getPageSize()); + + tmpWriteBuf.order(ByteOrder.nativeOrder()); + + return tmpWriteBuf; + } + }; + + checkpointPagesWriterFactory = new CheckpointPagesWriterFactory( + logger, + snapshotMgr, + (pageMemEx, fullPage, buf, tag) -> + pageMemEx.pageManager().write(fullPage.groupId(), fullPage.pageId(), buf, tag, true), + persStoreMetrics, + throttlingPolicy, + threadBuf, + pageMemoryGroupResolver + ); + + checkpointerProvider = () -> new Checkpointer( + igniteInstanceName, + checkpointThreadName, + workersRegistry, + logger, + longJvmPauseDetector, + failureProcessor, + 
snapshotMgr, + persStoreMetrics, + cacheProcessor, + checkpointWorkflow, + checkpointPagesWriterFactory, + persistenceCfg.getCheckpointFrequency(), + persistenceCfg.getCheckpointThreads() + ); + + checkpointer = checkpointerProvider.get(); + + Long cfgCheckpointReadLockTimeout = persistenceCfg != null + ? persistenceCfg.getCheckpointReadLockTimeout() + : null; + + long checkpointReadLockTimeout = IgniteSystemProperties.getLong(IGNITE_CHECKPOINT_READ_LOCK_TIMEOUT, + cfgCheckpointReadLockTimeout != null + ? cfgCheckpointReadLockTimeout + : workersRegistry.getSystemWorkerBlockedTimeout()); + + checkpointTimeoutLock = new CheckpointTimeoutLock( + logger, + failureProcessor, + dataRegions, + lock, + checkpointer, + checkpointReadLockTimeout + ); + } + + /** + * @return Checkpoint lock which can be used for protection of writing to memory. + */ + public CheckpointTimeoutLock checkpointTimeoutLock() { + return checkpointTimeoutLock; + } + + /** + * Replace thread local with buffers. Thread local should provide direct buffer with one page in length. + * + * @param threadBuf new thread-local with buffers for the checkpoint threads. + */ + public void threadBuf(ThreadLocal threadBuf) { + checkpointPagesWriterFactory.threadBuf(threadBuf); + } + + /** + * @param lsnr Listener. + * @param dataRegion Data region the listener is bound to; {@code null} means the listener is invoked on every checkpoint. + */ + public void addCheckpointListener(CheckpointListener lsnr, DataRegion dataRegion) { + checkpointWorkflow.addCheckpointListener(lsnr, dataRegion); + } + + /** + * @param lsnr Listener. + */ + public void removeCheckpointListener(CheckpointListener lsnr) { + checkpointWorkflow.removeCheckpointListener(lsnr); + } + + /** + * @param memoryRecoveryRecordPtr Memory recovery record pointer. + */ + public void memoryRecoveryRecordPtr(WALPointer memoryRecoveryRecordPtr) { + checkpointWorkflow.memoryRecoveryRecordPtr(memoryRecoveryRecordPtr); + } + + /** + * Start the new checkpoint immediately. + * + * @param reason Reason. + * @param lsnr Listener which will be called on finish. 
+ * @return Triggered checkpoint progress. + */ + public CheckpointProgress forceCheckpoint( + String reason, + IgniteInClosure> lsnr + ) { + Checkpointer cp = this.checkpointer; + + if (cp == null) + return null; + + return cp.scheduleCheckpoint(0, reason, lsnr); + } + + /** + * + */ + public Checkpointer getCheckpointer() { + return checkpointer; + } + + /** + * @param context Group context. Can be {@code null} in case of crash recovery. + * @param groupId Group ID. + * @param partId Partition ID. + */ + public void schedulePartitionDestroy(@Nullable CacheGroupContext context, int groupId, int partId) { + Checkpointer cp = checkpointer; + + if (cp != null) + cp.schedulePartitionDestroy(context, groupId, partId); + } + + /** + * For test use only. + */ + public IgniteInternalFuture enableCheckpoints(boolean enable) { + return checkpointer.enableCheckpoints(enable); + } + + /** + * @throws IgniteCheckedException If failed. + */ + public void finalizeCheckpointOnRecovery( + long ts, + UUID id, + WALPointer ptr, + StripedExecutor exec + ) throws IgniteCheckedException { + assert checkpointer != null : "Checkpointer hasn't initialized yet"; + + checkpointer.finalizeCheckpointOnRecovery(ts, id, ptr, exec); + } + + /** + * @param grpId Group ID. + * @param partId Partition ID. + */ + public void cancelOrWaitPartitionDestroy(int grpId, int partId) throws IgniteCheckedException { + Checkpointer cp = checkpointer; + + if (cp != null) + checkpointer.cancelOrWaitPartitionDestroy(grpId, partId); + } + + /** + * @param cancel Cancel flag. + */ + public void stop(boolean cancel) { + checkpointTimeoutLock.stop(); + + Checkpointer cp = this.checkpointer; + + if (cp != null) + cp.shutdownCheckpointer(cancel); + + checkpointWorkflow.stop(); + + this.checkpointer = null; + } + + /** + * Initialize the checkpoint and prepare it to work. It should be called if the stop was called before. 
+ */ + public void init() { + if (this.checkpointer == null) { + checkpointWorkflow.start(); + + this.checkpointer = checkpointerProvider.get(); + } + } + + /** + * Checkpoint starts to do their work after this method. + */ + public void start() { + assert checkpointer != null : "Checkpointer can't be null during the start"; + + this.checkpointer.start(); + } + + /** + * Checkpoint lock blocks when stop method is called. This method allows continuing the work with a checkpoint lock + * if needed. + */ + public void unblockCheckpointLock() { + checkpointTimeoutLock.start(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/CachePartitionDefragmentationManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/CachePartitionDefragmentationManager.java new file mode 100644 index 00000000000000..006fa8e90cb1ed --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/CachePartitionDefragmentationManager.java @@ -0,0 +1,827 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation; + +import java.io.File; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.LongConsumer; +import java.util.stream.Collectors; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.configuration.DataPageEvictionMode; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.metric.IoStatisticsHolderNoOp; +import org.apache.ignite.internal.pagemem.PageIdAllocator; +import org.apache.ignite.internal.pagemem.PageIdUtils; +import org.apache.ignite.internal.pagemem.store.PageStore; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.cache.CacheType; +import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; +import org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManager.CacheDataStore; +import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; +import org.apache.ignite.internal.processors.cache.persistence.CheckpointState; +import org.apache.ignite.internal.processors.cache.persistence.DataRegion; +import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; +import org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager; +import org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager.GridCacheDataStore; +import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointManager; +import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointTimeoutLock; +import 
org.apache.ignite.internal.processors.cache.persistence.checkpoint.LightweightCheckpointManager; +import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreFactory; +import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; +import org.apache.ignite.internal.processors.cache.persistence.freelist.AbstractFreeList; +import org.apache.ignite.internal.processors.cache.persistence.freelist.SimpleDataRow; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PagePartitionMetaIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PagePartitionMetaIOV3; +import org.apache.ignite.internal.processors.cache.tree.AbstractDataLeafIO; +import org.apache.ignite.internal.processors.cache.tree.CacheDataTree; +import org.apache.ignite.internal.processors.cache.tree.DataRow; +import org.apache.ignite.internal.processors.cache.tree.PendingEntriesTree; +import org.apache.ignite.internal.processors.cache.tree.PendingRow; +import org.apache.ignite.internal.processors.query.GridQueryIndexing; +import org.apache.ignite.internal.processors.query.GridQueryProcessor; +import org.apache.ignite.internal.util.collection.IntHashMap; +import org.apache.ignite.internal.util.collection.IntMap; +import org.apache.ignite.internal.util.future.GridCompoundFuture; +import org.apache.ignite.internal.util.future.GridFinishedFuture; +import org.apache.ignite.internal.util.future.GridFutureAdapter; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteInClosure; +import org.apache.ignite.lang.IgniteOutClosure; +import org.apache.ignite.maintenance.MaintenanceRegistry; + +import static 
java.util.stream.StreamSupport.stream; +import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_DATA; +import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_IDX; +import static org.apache.ignite.internal.processors.cache.persistence.CheckpointState.FINISHED; +import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.DEFRAGMENTATION_MAPPING_REGION_NAME; +import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.DEFRAGMENTATION_PART_REGION_NAME; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.batchRenameDefragmentedCacheGroupPartitions; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentedIndexTmpFile; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentedPartFile; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentedPartMappingFile; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentedPartTmpFile; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.renameTempIndexFile; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.renameTempPartitionFile; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.skipAlreadyDefragmentedCacheGroup; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.skipAlreadyDefragmentedPartition; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.writeDefragmentationCompletionMarker; + +/** + * 
Defragmentation manager is the core class that contains main defragmentation procedure. + */ +public class CachePartitionDefragmentationManager { + /** */ + public static final String DEFRAGMENTATION_MNTC_TASK_NAME = "defragmentationMaintenanceTask"; + + /** */ + private final Set cacheGroupsForDefragmentation; + + /** Cache shared context. */ + private final GridCacheSharedContext sharedCtx; + + /** Maintenance registry. */ + private final MaintenanceRegistry mntcReg; + + /** Logger. */ + private final IgniteLogger log; + + /** Database shared manager. */ + private final GridCacheDatabaseSharedManager dbMgr; + + /** File page store manager. */ + private final FilePageStoreManager filePageStoreMgr; + + /** + * Checkpoint for specific defragmentation regions which would store the data to new partitions + * during the defragmentation. + */ + private final LightweightCheckpointManager defragmentationCheckpoint; + + /** Default checkpoint for current node. */ + private final CheckpointManager nodeCheckpoint; + + /** Page size. */ + private final int pageSize; + + /** */ + private final DataRegion partDataRegion; + + /** */ + private final DataRegion mappingDataRegion; + + /** + * @param cacheGrpIds + * @param sharedCtx Cache shared context. + * @param dbMgr Database manager. + * @param filePageStoreMgr File page store manager. + * @param nodeCheckpoint Default checkpoint for this node. + * @param defragmentationCheckpoint Specific checkpoint for defragmentation. + * @param pageSize Page size. 
+ */ + public CachePartitionDefragmentationManager( + List cacheGrpIds, + GridCacheSharedContext sharedCtx, + GridCacheDatabaseSharedManager dbMgr, + FilePageStoreManager filePageStoreMgr, + CheckpointManager nodeCheckpoint, + LightweightCheckpointManager defragmentationCheckpoint, + int pageSize + ) throws IgniteCheckedException { + cacheGroupsForDefragmentation = new HashSet<>(cacheGrpIds); + + this.dbMgr = dbMgr; + this.filePageStoreMgr = filePageStoreMgr; + this.pageSize = pageSize; + this.sharedCtx = sharedCtx; + + this.mntcReg = sharedCtx.kernalContext().maintenanceRegistry(); + this.log = sharedCtx.logger(getClass()); + this.defragmentationCheckpoint = defragmentationCheckpoint; + this.nodeCheckpoint = nodeCheckpoint; + + partDataRegion = dbMgr.dataRegion(DEFRAGMENTATION_PART_REGION_NAME); + mappingDataRegion = dbMgr.dataRegion(DEFRAGMENTATION_MAPPING_REGION_NAME); + } + + /** */ + public void executeDefragmentation() throws IgniteCheckedException { + log.info("Defragmentation started."); + + try { + // Checkpointer must be enabled so all pages on disk are in their latest valid state. + dbMgr.resumeWalLogging(); + + dbMgr.onStateRestored(null); + + nodeCheckpoint.forceCheckpoint("beforeDefragmentation", null).futureFor(FINISHED).get(); + + sharedCtx.wal().onDeActivate(sharedCtx.kernalContext()); + + // Now the actual process starts. 
+ TreeIterator treeIter = new TreeIterator(pageSize); + + IgniteInternalFuture idxDfrgFut = null; + DataPageEvictionMode prevPageEvictionMode = null; + + for (CacheGroupContext oldGrpCtx : sharedCtx.cache().cacheGroups()) { + if (!oldGrpCtx.userCache()) + continue; + + int grpId = oldGrpCtx.groupId(); + + if (!cacheGroupsForDefragmentation.isEmpty() && !cacheGroupsForDefragmentation.contains(grpId)) + continue; + + File workDir = filePageStoreMgr.cacheWorkDir(oldGrpCtx.sharedGroup(), oldGrpCtx.cacheOrGroupName()); + + if (skipAlreadyDefragmentedCacheGroup(workDir, grpId, log)) + continue; + + GridCacheOffheapManager offheap = (GridCacheOffheapManager)oldGrpCtx.offheap(); + + List oldCacheDataStores = stream(offheap.cacheDataStores().spliterator(), false) + .filter(store -> { + try { + return filePageStoreMgr.exists(grpId, store.partId()); + } + catch (IgniteCheckedException e) { + throw new IgniteException(e); + } + }) + .collect(Collectors.toList()); + + if (workDir != null && !oldCacheDataStores.isEmpty()) { + // We can't start defragmentation of new group on the region that has wrong eviction mode. + // So waiting of the previous cache group defragmentation is inevitable. + DataPageEvictionMode curPageEvictionMode = oldGrpCtx.dataRegion().config().getPageEvictionMode(); + + if (prevPageEvictionMode == null || prevPageEvictionMode != curPageEvictionMode) { + prevPageEvictionMode = curPageEvictionMode; + + partDataRegion.config().setPageEvictionMode(curPageEvictionMode); + + if (idxDfrgFut != null) + idxDfrgFut.get(); + } + + IntMap cacheDataStores = new IntHashMap<>(); + + for (CacheDataStore store : offheap.cacheDataStores()) { + // Tree can be null for not yet initialized partitions. + // This would mean that these partitions are empty. 
+ assert store.tree() == null || store.tree().groupId() == grpId; + + if (store.tree() != null) + cacheDataStores.put(store.partId(), store); + } + + dbMgr.checkpointedDataRegions().remove(oldGrpCtx.dataRegion()); + + // Another cheat. Ttl cleanup manager knows too much about cache group internals. + oldGrpCtx.caches().stream() + .filter(cacheCtx -> cacheCtx.groupId() == grpId) + .forEach(cacheCtx -> cacheCtx.ttl().unregister()); + + // Technically wal is already disabled, but "PageHandler.isWalDeltaRecordNeeded" doesn't care and + // WAL records will be allocated anyway just to be ignored later if we don't disable WAL for + // cache group explicitly. + oldGrpCtx.localWalEnabled(false, false); + + boolean encrypted = oldGrpCtx.config().isEncryptionEnabled(); + + FilePageStoreFactory pageStoreFactory = filePageStoreMgr.getPageStoreFactory(grpId, encrypted); + + createIndexPageStore(grpId, workDir, pageStoreFactory, partDataRegion, val -> { + }); //TODO Allocated tracker. + + GridCompoundFuture cmpFut = new GridCompoundFuture<>(); + + PageMemoryEx oldPageMem = (PageMemoryEx)oldGrpCtx.dataRegion().pageMemory(); + + CacheGroupContext newGrpCtx = new CacheGroupContext( + sharedCtx, + grpId, + oldGrpCtx.receivedFrom(), + CacheType.USER, + oldGrpCtx.config(), + oldGrpCtx.affinityNode(), + partDataRegion, + oldGrpCtx.cacheObjectContext(), + null, + null, + oldGrpCtx.localStartVersion(), + true, + false, + true + ); + + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadLock(); + + try { + // This will initialize partition meta in index partition - meta tree and reuse list. 
+ newGrpCtx.start(); + } + finally { + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadUnlock(); + } + + IntMap linkMapByPart = new IntHashMap<>(); + + for (CacheDataStore oldCacheDataStore : oldCacheDataStores) { + int partId = oldCacheDataStore.partId(); + + PartitionContext partCtx = new PartitionContext( + workDir, + grpId, + partId, + partDataRegion, + mappingDataRegion, + oldGrpCtx, + newGrpCtx, + cacheDataStores.get(partId), + pageStoreFactory + ); + + if (skipAlreadyDefragmentedPartition(workDir, grpId, partId, log)) { + partCtx.createPageStore( + () -> defragmentedPartMappingFile(workDir, partId).toPath(), + partCtx.mappingPagesAllocated, + partCtx.mappingPageMemory + ); + + linkMapByPart.put(partId, partCtx.createLinkMapTree(false)); + + continue; + } + + partCtx.createPageStore( + () -> defragmentedPartMappingFile(workDir, partId).toPath(), + partCtx.mappingPagesAllocated, + partCtx.mappingPageMemory + ); + + linkMapByPart.put(partId, partCtx.createLinkMapTree(true)); + + partCtx.createPageStore( + () -> defragmentedPartTmpFile(workDir, partId).toPath(), + partCtx.partPagesAllocated, + partCtx.partPageMemory + ); + + partCtx.createNewCacheDataStore(offheap); + + copyPartitionData(partCtx, treeIter); + + IgniteInClosure> cpLsnr = fut -> { + if (fut.error() != null) + return; + + PageStore oldPageStore = null; + + try { + oldPageStore = filePageStoreMgr.getStore(grpId, partId); + } + catch (IgniteCheckedException ignore) { + } + + if (log.isDebugEnabled()) { + log.debug(S.toString( + "Partition defragmented", + "grpId", grpId, false, + "partId", partId, false, + "oldPages", oldPageStore.pages(), false, + "newPages", partCtx.partPagesAllocated.get() + 1, false, + "mappingPages", partCtx.mappingPagesAllocated.get() + 1, false, + "pageSize", pageSize, false, + "partFile", defragmentedPartFile(workDir, partId).getName(), false, + "workDir", workDir, false + )); + } + + oldPageMem.invalidate(grpId, partId); + + 
partCtx.partPageMemory.invalidate(grpId, partId); + + DefragmentationPageReadWriteManager pageMgr = (DefragmentationPageReadWriteManager)partCtx.partPageMemory.pageManager(); + + pageMgr.pageStoreMap().removePageStore(grpId, partId); // Yes, it'll be invalid in a second. + + renameTempPartitionFile(workDir, partId); + }; + + GridFutureAdapter cpFut = defragmentationCheckpoint + .forceCheckpoint("partition defragmented", null) + .futureFor(CheckpointState.FINISHED); + + cpFut.listen(cpLsnr); + + cmpFut.add((IgniteInternalFuture)cpFut); + } + + // A bit too general for now, but I like it more than saving only the last checkpoint future. + cmpFut.markInitialized().get(); + + idxDfrgFut = new GridFinishedFuture<>(); + + if (filePageStoreMgr.hasIndexStore(grpId)) { + defragmentIndexPartition(oldGrpCtx, newGrpCtx, linkMapByPart); + + idxDfrgFut = defragmentationCheckpoint + .forceCheckpoint("index defragmented", null) + .futureFor(CheckpointState.FINISHED); + } + + idxDfrgFut.listen(fut -> { + oldPageMem.invalidate(grpId, PageIdAllocator.INDEX_PARTITION); + + PageMemoryEx partPageMem = (PageMemoryEx)partDataRegion.pageMemory(); + + partPageMem.invalidate(grpId, PageIdAllocator.INDEX_PARTITION); + + DefragmentationPageReadWriteManager pageMgr = (DefragmentationPageReadWriteManager)partPageMem.pageManager(); + + pageMgr.pageStoreMap().removePageStore(grpId, PageIdAllocator.INDEX_PARTITION); + + PageMemoryEx mappingPageMem = (PageMemoryEx)mappingDataRegion.pageMemory(); + + pageMgr = (DefragmentationPageReadWriteManager)mappingPageMem.pageManager(); + + pageMgr.pageStoreMap().clear(grpId); + + renameTempIndexFile(workDir); + + writeDefragmentationCompletionMarker(filePageStoreMgr.getPageStoreFileIoFactory(), workDir, log); + + batchRenameDefragmentedCacheGroupPartitions(workDir, log); + }); + } + + // Wait for the index defragmentation of the last processed cache group to complete. 
+ if (idxDfrgFut != null) + idxDfrgFut.get(); + } + + mntcReg.unregisterMaintenanceTask(DEFRAGMENTATION_MNTC_TASK_NAME); + + log.info("Defragmentation completed. All partitions are defragmented."); + } + finally { + defragmentationCheckpoint.stop(true); + } + } + + /** */ + public void createIndexPageStore( + int grpId, + File workDir, + FilePageStoreFactory pageStoreFactory, + DataRegion partRegion, + LongConsumer allocatedTracker + ) throws IgniteCheckedException { + // Index partition file has to be deleted before we begin, otherwise there's a chance of reading corrupted file. + // There is a time period when index is already defragmented but marker file is not created yet. If node is + // failed in that time window then index will be defragmented once again. That's fine, situation is rare but code + // to fix that would add unnecessary complications. + U.delete(defragmentedIndexTmpFile(workDir)); + + PageStore idxPageStore; + + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadLock(); + try { + idxPageStore = pageStoreFactory.createPageStore( + FLAG_IDX, + () -> defragmentedIndexTmpFile(workDir).toPath(), + allocatedTracker + ); + } + finally { + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadUnlock(); + } + + idxPageStore.sync(); + + PageMemoryEx partPageMem = (PageMemoryEx)partRegion.pageMemory(); + + DefragmentationPageReadWriteManager partMgr = (DefragmentationPageReadWriteManager)partPageMem.pageManager(); + + partMgr.pageStoreMap().addPageStore(grpId, PageIdAllocator.INDEX_PARTITION, idxPageStore); + } + + /** + * Defragment partition. + * + * @param partCtx Partition context. + * @param treeIter Tree iterator. + * @throws IgniteCheckedException If failed. 
+ */ + private void copyPartitionData( + PartitionContext partCtx, + TreeIterator treeIter + ) throws IgniteCheckedException { + CacheDataTree tree = partCtx.oldCacheDataStore.tree(); + + CacheDataTree newTree = partCtx.newCacheDataStore.tree(); + PendingEntriesTree newPendingTree = partCtx.newCacheDataStore.pendingTree(); + AbstractFreeList freeList = partCtx.newCacheDataStore.getCacheStoreFreeList(); + + long cpLockThreshold = 150L; + + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadLock(); + + try { + AtomicLong lastCpLockTs = new AtomicLong(System.currentTimeMillis()); + AtomicInteger entriesProcessed = new AtomicInteger(); + + treeIter.iterate(tree, partCtx.cachePageMemory, (tree0, io, pageAddr, idx) -> { + if (System.currentTimeMillis() - lastCpLockTs.get() >= cpLockThreshold) { + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadUnlock(); + + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadLock(); + + lastCpLockTs.set(System.currentTimeMillis()); + } + + AbstractDataLeafIO leafIo = (AbstractDataLeafIO)io; + CacheDataRow row = tree.getRow(io, pageAddr, idx); + + int cacheId = row.cacheId(); + + // Reuse row that we just read. + row.link(0); + + // "insertDataRow" will corrupt page memory if we don't do this. + if (row instanceof DataRow && !partCtx.oldGrpCtx.storeCacheIdInDataPage()) + ((DataRow)row).cacheId(CU.UNDEFINED_CACHE_ID); + + freeList.insertDataRow(row, IoStatisticsHolderNoOp.INSTANCE); + + // Put it back. 
+ if (row instanceof DataRow) + ((DataRow)row).cacheId(cacheId); + + newTree.putx(row); + + long newLink = row.link(); + + partCtx.linkMap.put(leafIo.getLink(pageAddr, idx), newLink); + + if (row.expireTime() != 0) + newPendingTree.putx(new PendingRow(cacheId, row.expireTime(), newLink)); + + entriesProcessed.incrementAndGet(); + + return true; + }); + + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadUnlock(); + + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadLock(); + + freeList.saveMetadata(IoStatisticsHolderNoOp.INSTANCE); + + copyCacheMetadata(partCtx); + } + finally { + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadUnlock(); + } + } + + /** */ + private void copyCacheMetadata( + PartitionContext partCtx + ) throws IgniteCheckedException { + // Same for all page memories. Why does it need to be in PageMemory? + long partMetaPageId = partCtx.cachePageMemory.partitionMetaPageId(partCtx.grpId, partCtx.partId); + + long oldPartMetaPage = partCtx.cachePageMemory.acquirePage(partCtx.grpId, partMetaPageId); + + try { + long oldPartMetaPageAddr = partCtx.cachePageMemory.readLock(partCtx.grpId, partMetaPageId, oldPartMetaPage); + + try { + PagePartitionMetaIO oldPartMetaIo = PageIO.getPageIO(oldPartMetaPageAddr); + + // Newer meta versions may contain new data that we don't copy during defragmentation. + assert Arrays.asList(1, 2, 3).contains(oldPartMetaIo.getVersion()) + : "IO version " + oldPartMetaIo.getVersion() + " is not supported by current defragmentation algorithm." + + " Please implement copying of all data added in new version."; + + long newPartMetaPage = partCtx.partPageMemory.acquirePage(partCtx.grpId, partMetaPageId); + + try { + long newPartMetaPageAddr = partCtx.partPageMemory.writeLock(partCtx.grpId, partMetaPageId, newPartMetaPage); + + try { + PagePartitionMetaIOV3 newPartMetaIo = PageIO.getPageIO(newPartMetaPageAddr); + + // Copy partition state. 
+ byte partState = oldPartMetaIo.getPartitionState(oldPartMetaPageAddr); + newPartMetaIo.setPartitionState(newPartMetaPageAddr, partState); + + // Copy cache size for single cache group. + long size = oldPartMetaIo.getSize(oldPartMetaPageAddr); + newPartMetaIo.setSize(newPartMetaPageAddr, size); + + // Copy update counter value. + long updateCntr = oldPartMetaIo.getUpdateCounter(oldPartMetaPageAddr); + newPartMetaIo.setUpdateCounter(newPartMetaPageAddr, updateCntr); + + // Copy global remove Id. + long rmvId = oldPartMetaIo.getGlobalRemoveId(oldPartMetaPageAddr); + newPartMetaIo.setGlobalRemoveId(newPartMetaPageAddr, rmvId); + + // Copy cache sizes for shared cache group. + long oldCountersPageId = oldPartMetaIo.getCountersPageId(oldPartMetaPageAddr); + if (oldCountersPageId != 0L) { + Map sizes = GridCacheOffheapManager.readSharedGroupCacheSizes( + partCtx.cachePageMemory, + partCtx.grpId, + oldCountersPageId + ); + + long newCountersPageId = GridCacheOffheapManager.writeSharedGroupCacheSizes( + partCtx.partPageMemory, + partCtx.grpId, + 0L, + partCtx.partId, + sizes + ); + + newPartMetaIo.setCountersPageId(newPartMetaPageAddr, newCountersPageId); + } + + // Copy counter gaps. + long oldGapsLink = oldPartMetaIo.getGapsLink(oldPartMetaPageAddr); + if (oldGapsLink != 0L) { + byte[] gapsBytes = partCtx.oldCacheDataStore.partStorage().readRow(oldGapsLink); + + SimpleDataRow gapsDataRow = new SimpleDataRow(partCtx.partId, gapsBytes); + + partCtx.newCacheDataStore.partStorage().insertDataRow(gapsDataRow, IoStatisticsHolderNoOp.INSTANCE); + + newPartMetaIo.setGapsLink(newPartMetaPageAddr, gapsDataRow.link()); + } + + // Encryption stuff. 
+ newPartMetaIo.setEncryptedPageCount(newPartMetaPageAddr, 0); + newPartMetaIo.setEncryptedPageIndex(newPartMetaPageAddr, 0); + } + finally { + partCtx.partPageMemory.writeUnlock(partCtx.grpId, partMetaPageId, newPartMetaPage, null, true); + } + } + finally { + partCtx.partPageMemory.releasePage(partCtx.grpId, partMetaPageId, newPartMetaPage); + } + } + finally { + partCtx.cachePageMemory.readUnlock(partCtx.grpId, partMetaPageId, oldPartMetaPage); + } + } + finally { + partCtx.cachePageMemory.releasePage(partCtx.grpId, partMetaPageId, oldPartMetaPage); + } + } + + /** + * Defragmentate indexing partition. + * + * @param grpCtx + * @param mappingByPartition + * + * @throws IgniteCheckedException If failed. + */ + private void defragmentIndexPartition( + CacheGroupContext grpCtx, + CacheGroupContext newCtx, + IntMap mappingByPartition + ) throws IgniteCheckedException { + GridQueryProcessor query = grpCtx.caches().get(0).kernalContext().query(); + + if (!query.moduleEnabled()) + return; + + final GridQueryIndexing idx = query.getIndexing(); + + CheckpointTimeoutLock cpLock = defragmentationCheckpoint.checkpointTimeoutLock(); + + idx.defragment( + grpCtx, + newCtx, + (PageMemoryEx)partDataRegion.pageMemory(), + mappingByPartition, + cpLock + ); + } + + /** */ + @SuppressWarnings("PublicField") + private class PartitionContext { + /** */ + public final File workDir; + + /** */ + public final int grpId; + + /** */ + public final int partId; + + /** */ + public final DataRegion cacheDataRegion; + + /** */ + public final PageMemoryEx cachePageMemory; + + /** */ + public final PageMemoryEx partPageMemory; + + /** */ + public final PageMemoryEx mappingPageMemory; + + /** */ + public final CacheGroupContext oldGrpCtx; + + /** */ + public final CacheGroupContext newGrpCtx; + + /** */ + public final CacheDataStore oldCacheDataStore; + + /** */ + private GridCacheDataStore newCacheDataStore; + + /** */ + public final FilePageStoreFactory pageStoreFactory; + + /** */ + public 
final AtomicLong partPagesAllocated = new AtomicLong(); + + /** */ + public final AtomicLong mappingPagesAllocated = new AtomicLong(); + + /** */ + private LinkMap linkMap; + + /** */ + public PartitionContext( + File workDir, + int grpId, + int partId, + DataRegion partDataRegion, + DataRegion mappingDataRegion, + CacheGroupContext oldGrpCtx, + CacheGroupContext newGrpCtx, + CacheDataStore oldCacheDataStore, + FilePageStoreFactory pageStoreFactory + ) { + this.workDir = workDir; + this.grpId = grpId; + this.partId = partId; + cacheDataRegion = oldGrpCtx.dataRegion(); + + cachePageMemory = (PageMemoryEx)cacheDataRegion.pageMemory(); + partPageMemory = (PageMemoryEx)partDataRegion.pageMemory(); + mappingPageMemory = (PageMemoryEx)mappingDataRegion.pageMemory(); + + this.oldGrpCtx = oldGrpCtx; + this.newGrpCtx = newGrpCtx; + this.oldCacheDataStore = oldCacheDataStore; + this.pageStoreFactory = pageStoreFactory; + } + + /** */ + public PageStore createPageStore(IgniteOutClosure pathProvider, AtomicLong pagesAllocated, PageMemoryEx pageMemory) throws IgniteCheckedException { + PageStore partPageStore; + + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadLock(); + try { + partPageStore = pageStoreFactory.createPageStore( + FLAG_DATA, + pathProvider, + pagesAllocated::addAndGet + ); + } + finally { + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadUnlock(); + } + + partPageStore.sync(); + + DefragmentationPageReadWriteManager pageMgr = (DefragmentationPageReadWriteManager)pageMemory.pageManager(); + + pageMgr.pageStoreMap().addPageStore(grpId, partId, partPageStore); + + return partPageStore; + } + + /** */ + public LinkMap createLinkMapTree(boolean initNew) throws IgniteCheckedException { + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadLock(); + + try { + long mappingMetaPageId = initNew + ? 
mappingPageMemory.allocatePage(grpId, partId, FLAG_DATA) + : PageIdUtils.pageId(partId, FLAG_DATA, LinkMap.META_PAGE_IDX); + + assert PageIdUtils.pageIndex(mappingMetaPageId) == LinkMap.META_PAGE_IDX + : PageIdUtils.toDetailString(mappingMetaPageId); + + linkMap = new LinkMap(newGrpCtx, mappingPageMemory, mappingMetaPageId, initNew); + } + finally { + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadUnlock(); + } + + return linkMap; + } + + /** */ + public void createNewCacheDataStore(GridCacheOffheapManager offheap) { + GridCacheDataStore newCacheDataStore = offheap.createGridCacheDataStore( + newGrpCtx, + partId, + true, + log + ); + + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadLock(); + + try { + newCacheDataStore.init(); + } + finally { + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadUnlock(); + } + + this.newCacheDataStore = newCacheDataStore; + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/DefragmentationFileUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/DefragmentationFileUtils.java new file mode 100644 index 00000000000000..b4273cd78d15f3 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/DefragmentationFileUtils.java @@ -0,0 +1,401 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.internal.processors.cache.persistence.file.FileIO; +import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; + +import static java.nio.file.StandardCopyOption.ATOMIC_MOVE; +import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; +import static java.nio.file.StandardOpenOption.CREATE_NEW; +import static java.nio.file.StandardOpenOption.WRITE; +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.FILE_SUFFIX; +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.INDEX_FILE_NAME; +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.INDEX_FILE_PREFIX; +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.PART_FILE_PREFIX; +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.PART_FILE_TEMPLATE; +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.TMP_SUFFIX; + +/** + * Everything related to file management during 
defragmentation process. + */ +public class DefragmentationFileUtils { + /** Prefix for link mapping files. */ + private static final String DFRG_LINK_MAPPING_FILE_PREFIX = PART_FILE_PREFIX + "map-"; + + /** Link mapping file template. */ + private static final String DFRG_LINK_MAPPING_FILE_TEMPLATE = DFRG_LINK_MAPPING_FILE_PREFIX + "%d" + FILE_SUFFIX; + + /** Defragmentation completion marker file name. */ + private static final String DFRG_COMPLETION_MARKER_FILE_NAME = "dfrg-completion-marker"; + + /** Name of defragmented index partition file. */ + private static final String DFRG_INDEX_FILE_NAME = INDEX_FILE_PREFIX + "-dfrg" + FILE_SUFFIX; + + /** Name of defragmented index partition temporary file. */ + private static final String DFRG_INDEX_TMP_FILE_NAME = DFRG_INDEX_FILE_NAME + TMP_SUFFIX; + + /** Prefix for defragmented partition files. */ + private static final String DFRG_PARTITION_FILE_PREFIX = PART_FILE_PREFIX + "dfrg-"; + + /** Defragmented partition file template. */ + private static final String DFRG_PARTITION_FILE_TEMPLATE = DFRG_PARTITION_FILE_PREFIX + "%d" + FILE_SUFFIX; + + /** Defragmented partition temp file template. */ + private static final String DFRG_PARTITION_TMP_FILE_TEMPLATE = DFRG_PARTITION_FILE_TEMPLATE + TMP_SUFFIX; + + /** + * Performs cleanup of work dir before initializing file page stores. + * Will finish batch renaming if defragmentation was completed or delete garbage if it wasn't. + * + * @param workDir Cache group working directory. + * @param log Logger to write messages. + * @throws IgniteCheckedException If {@link IOException} occurred. 
+ */ + public static void beforeInitPageStores(File workDir, IgniteLogger log) throws IgniteCheckedException { + try { + batchRenameDefragmentedCacheGroupPartitions(workDir, log); + + U.delete(defragmentationCompletionMarkerFile(workDir)); + + for (File file : workDir.listFiles()) { + String fileName = file.getName(); + + if ( + fileName.startsWith(DFRG_PARTITION_FILE_PREFIX) + || fileName.startsWith(DFRG_INDEX_FILE_NAME) + || fileName.startsWith(DFRG_LINK_MAPPING_FILE_PREFIX) + ) + U.delete(file); + } + } + catch (IgniteException e) { + throw new IgniteCheckedException(e); + } + } + + /** + * Checks whether cache group defragmentation completed or not. Completes it if all that's left is renaming. + * + * @param workDir Cache group working directory. + * @param grpId Cache group Id of cache group belonging to the given working directory. + * @param log Logger to write messages. + * @return {@code true} if given cache group is already defragmented. + * @throws IgniteException If {@link IOException} occurred. + * + * @see DefragmentationFileUtils#defragmentationCompletionMarkerFile(File) + */ + public static boolean skipAlreadyDefragmentedCacheGroup(File workDir, int grpId, IgniteLogger log) throws IgniteException { + File completionMarkerFile = defragmentationCompletionMarkerFile(workDir); + + if (completionMarkerFile.exists()) { + if (log.isInfoEnabled()) { + log.info(S.toString( + "Skipping already defragmented page group", + "grpId", grpId, false, + "markerFileName", completionMarkerFile.getName(), false, + "workDir", workDir.getAbsolutePath(), false + )); + } + + batchRenameDefragmentedCacheGroupPartitions(workDir, log); + + return true; + } + + return false; + } + + /** + * Checks whether partition has already been defragmented or not. Cleans corrupted data if previous failed + * defragmentation attempt was found. + * + * @param workDir Cache group working directory. + * @param grpId Cache group Id of cache group belonging to the given working directory. 
+ * @param partId Partition index to check. + * @param log Logger to write messages. + * @return {@code true} if given partition is already defragmented. + * @throws IgniteException If {@link IOException} occurred. + * + * @see DefragmentationFileUtils#defragmentedPartTmpFile(File, int) + * @see DefragmentationFileUtils#defragmentedPartFile(File, int) + * @see DefragmentationFileUtils#defragmentedPartMappingFile(File, int) + */ + public static boolean skipAlreadyDefragmentedPartition(File workDir, int grpId, int partId, IgniteLogger log) throws IgniteException { + File defragmentedPartFile = defragmentedPartFile(workDir, partId); + File defragmentedPartMappingFile = defragmentedPartMappingFile(workDir, partId); + + if (defragmentedPartFile.exists() && defragmentedPartMappingFile.exists()) { + if (log.isInfoEnabled()) { + log.info(S.toString( + "Skipping already defragmented partition", + "grpId", grpId, false, + "partId", partId, false, + "partFileName", defragmentedPartFile.getName(), false, + "mappingFileName", defragmentedPartMappingFile.getName(), false, + "workDir", workDir.getAbsolutePath(), false + )); + } + + return true; + } + + File defragmentedPartTmpFile = defragmentedPartTmpFile(workDir, partId); + + try { + Files.deleteIfExists(defragmentedPartTmpFile.toPath()); + + Files.deleteIfExists(defragmentedPartFile.toPath()); + + Files.deleteIfExists(defragmentedPartMappingFile.toPath()); + } + catch (IOException e) { + throw new IgniteException(e); + } + + return false; + } + + /** + * Failure-tolerant batch rename of defragmented partition files. + * + * Deletes all link mapping files, old partition and index files, renaming defragmented files in the process. Can + * be run on the same folder multiple times if failed for some reason. + * + * Does something only if completion marker is present in the folder. This marker won't be deleted in the end. 
+ * Deletion of the marker must be done outside of defragmentation mode to prevent cache groups from being defragmented + * several times in case of failures. + * + * @param workDir Cache group working directory. + * @param log Logger to write messages. + * @throws IgniteException If {@link IOException} occurred. + * + * @see DefragmentationFileUtils#writeDefragmentationCompletionMarker(FileIOFactory, File, IgniteLogger) + */ + public static void batchRenameDefragmentedCacheGroupPartitions(File workDir, IgniteLogger log) throws IgniteException { + File completionMarkerFile = defragmentationCompletionMarkerFile(workDir); + + if (!completionMarkerFile.exists()) + return; + + try { + for (File mappingFile : workDir.listFiles((dir, name) -> name.startsWith(DFRG_LINK_MAPPING_FILE_PREFIX))) + Files.delete(mappingFile.toPath()); + + for (File partFile : workDir.listFiles((dir, name) -> name.startsWith(DFRG_PARTITION_FILE_PREFIX))) { + int partId = extractPartId(partFile.getName()); + + File oldPartFile = new File(workDir, String.format(PART_FILE_TEMPLATE, partId)); + + Files.move(partFile.toPath(), oldPartFile.toPath(), ATOMIC_MOVE, REPLACE_EXISTING); + } + + File idxFile = new File(workDir, DFRG_INDEX_FILE_NAME); + + if (idxFile.exists()) { + File oldIdxFile = new File(workDir, INDEX_FILE_NAME); + + Files.move(idxFile.toPath(), oldIdxFile.toPath(), ATOMIC_MOVE, REPLACE_EXISTING); + } + } + catch (IOException e) { + throw new IgniteException(e); + } + } + + /** + * Extracts partition number from file names like {@code part-dfrg-%d.bin}. + * + * @param dfrgPartFileName Defragmented partition file name. + * @return Partition index. 
+ * + * @see DefragmentationFileUtils#defragmentedPartFile(File, int) + */ + private static int extractPartId(String dfrgPartFileName) { + assert dfrgPartFileName.startsWith(DFRG_PARTITION_FILE_PREFIX) : dfrgPartFileName; + assert dfrgPartFileName.endsWith(FILE_SUFFIX) : dfrgPartFileName; + + String partIdStr = dfrgPartFileName.substring( + DFRG_PARTITION_FILE_PREFIX.length(), + dfrgPartFileName.length() - FILE_SUFFIX.length() + ); + + return Integer.parseInt(partIdStr); + } + + /** + * Return file named {@code index-dfrg.bin.tmp} in given folder. It will be used for storing defragmented index + * partition during the process. + * + * @param workDir Cache group working directory. + * @return File. + * + * @see DefragmentationFileUtils#defragmentedIndexFile(File) + */ + public static File defragmentedIndexTmpFile(File workDir) { + return new File(workDir, DFRG_INDEX_TMP_FILE_NAME); + } + + /** + * Return file named {@code index-dfrg.bin} in given folder. It will be used for storing defragmented index + * partition when the process is over. + * + * @param workDir Cache group working directory. + * @return File. + * + * @see DefragmentationFileUtils#defragmentedIndexTmpFile(File) + */ + public static File defragmentedIndexFile(File workDir) { + return new File(workDir, DFRG_INDEX_FILE_NAME); + } + + /** + * Rename temporary index defragmentation file to a finalized one. + * + * @param workDir Cache group working directory. + * @throws IgniteException If {@link IOException} occurred. 
+ * + * @see DefragmentationFileUtils#defragmentedIndexTmpFile(File) + * @see DefragmentationFileUtils#defragmentedIndexFile(File) + */ + public static void renameTempIndexFile(File workDir) throws IgniteException { + File defragmentedIdxTmpFile = defragmentedIndexTmpFile(workDir); + File defragmentedIdxFile = defragmentedIndexFile(workDir); + + try { + Files.move(defragmentedIdxTmpFile.toPath(), defragmentedIdxFile.toPath(), ATOMIC_MOVE); + } + catch (IOException e) { + throw new IgniteException(e); + } + } + + /** + * Return file named {@code part-dfrg-%d.bin.tmp} in given folder. It will be used for storing defragmented data + * partition during the process. + * + * @param workDir Cache group working directory. + * @param partId Partition index, will be substituted into file name. + * @return File. + * + * @see DefragmentationFileUtils#defragmentedPartFile(File, int) + */ + public static File defragmentedPartTmpFile(File workDir, int partId) { + return new File(workDir, String.format(DFRG_PARTITION_TMP_FILE_TEMPLATE, partId)); + } + + /** + * Return file named {@code part-dfrg-%d.bin} in given folder. It will be used for storing defragmented data + * partition when the process is over. + * + * @param workDir Cache group working directory. + * @param partId Partition index, will be substituted into file name. + * @return File. + * + * @see DefragmentationFileUtils#defragmentedPartTmpFile(File, int) + */ + public static File defragmentedPartFile(File workDir, int partId) { + return new File(workDir, String.format(DFRG_PARTITION_FILE_TEMPLATE, partId)); + } + + /** + * Rename temporary partition defragmentation file to a finalized one. + * + * @param workDir Cache group working directory. + * @param partId Partition index. + * @throws IgniteException If {@link IOException} occurred. 
+ * + * @see DefragmentationFileUtils#defragmentedPartTmpFile(File, int) + * @see DefragmentationFileUtils#defragmentedPartFile(File, int) + */ + public static void renameTempPartitionFile(File workDir, int partId) throws IgniteException { + File defragmentedPartTmpFile = defragmentedPartTmpFile(workDir, partId); + File defragmentedPartFile = defragmentedPartFile(workDir, partId); + + assert !defragmentedPartFile.exists() : defragmentedPartFile; + + try { + Files.move(defragmentedPartTmpFile.toPath(), defragmentedPartFile.toPath(), ATOMIC_MOVE); + } + catch (IOException e) { + throw new IgniteException(e); + } + } + + /** + * Return file named {@code part-map-%d.bin} in given folder. It will be used for storing defragmentation links + * mapping for given partition during and after defragmentation process. No temporary counterpart is required here. + * + * @param workDir Cache group working directory. + * @param partId Partition index, will be substituted into file name. + * @return File. + * + * @see LinkMap + */ + public static File defragmentedPartMappingFile(File workDir, int partId) { + return new File(workDir, String.format(DFRG_LINK_MAPPING_FILE_TEMPLATE, partId)); + } + + /** + * Return defragmentation completion marker file. This file can only be created when all partitions and index are + * defragmented and renamed from their original {@code *.tmp} versions. Presence of this file signals that no data + * will be lost if original partitions are deleted and batch rename process can be safely initiated. + * + * @param workDir Cache group working directory. + * @return File. 
+ * + * @see DefragmentationFileUtils#writeDefragmentationCompletionMarker(FileIOFactory, File, IgniteLogger) + * @see DefragmentationFileUtils#batchRenameDefragmentedCacheGroupPartitions(File, IgniteLogger) + */ + public static File defragmentationCompletionMarkerFile(File workDir) { + return new File(workDir, DFRG_COMPLETION_MARKER_FILE_NAME); + } + + /** + * Creates empty completion marker file in given directory. + * + * @param ioFactory File IO factory. + * @param workDir Cache group working directory. + * @param log Logger to write messages. + * @throws IgniteException If {@link IOException} occurred. + * + * @see DefragmentationFileUtils#defragmentationCompletionMarkerFile(File) + */ + public static void writeDefragmentationCompletionMarker( + FileIOFactory ioFactory, + File workDir, + IgniteLogger log + ) throws IgniteException { + File completionMarker = defragmentationCompletionMarkerFile(workDir); + + try (FileIO io = ioFactory.create(completionMarker, CREATE_NEW, WRITE)) { + io.force(true); + } + catch (IOException e) { + throw new IgniteException(e); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/DefragmentationPageReadWriteManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/DefragmentationPageReadWriteManager.java new file mode 100644 index 00000000000000..2ed7c91893cb8a --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/DefragmentationPageReadWriteManager.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation; + +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageReadWriteManagerImpl; + +/** */ +public class DefragmentationPageReadWriteManager extends PageReadWriteManagerImpl { + /** + * @param ctx Kernal context. + * @param name name. + */ + public DefragmentationPageReadWriteManager(GridKernalContext ctx, String name) { + super(ctx, new PageStoreMap(), name); + } + + /** */ + public PageStoreMap pageStoreMap() { + return (PageStoreMap)pageStores; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/LinkMap.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/LinkMap.java new file mode 100644 index 00000000000000..a796ab90d844db --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/LinkMap.java @@ -0,0 +1,276 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation; + +import java.util.concurrent.atomic.AtomicLong; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.pagemem.PageIdUtils; +import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.pagemem.PageUtils; +import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusInnerIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusLeafIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; +import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageLockListener; +import org.apache.ignite.internal.processors.failure.FailureProcessor; +import org.jetbrains.annotations.Nullable; + +import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_AUX; +import static 
org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_DATA; + +/** + * Class that holds mappings of old links to new links. + */ +public class LinkMap { + /** Tree meta page index. */ + public static final int META_PAGE_IDX = 2; + + /** */ + public static final IOVersions> LEAF_IO_VERSIONS = new IOVersions<>( + new LinkMappingLeafIO() + ); + + /** */ + public static final IOVersions> INNER_IO_VERSIONS = new IOVersions<>( + new LinkMappingInnerIO() + ); + + /** Mapping tree. */ + private final LinkTree tree; + + /** + * @param ctx Cache group context. + * @param pageMem Page memory. + * @param metaPageId Meta page id. + * @param initNew If tree should be (re)created. + */ + public LinkMap( + CacheGroupContext ctx, + PageMemory pageMem, + long metaPageId, + boolean initNew + ) throws IgniteCheckedException { + this(ctx.groupId(), ctx.name(), pageMem, metaPageId, initNew); + } + + /** + * @param grpId Cache group id. + * @param grpName Cache group name. + * @param pageMem Page memory. + * @param metaPageId Meta page id. + * @param initNew If tree should be (re)created. + */ + public LinkMap( + int grpId, + String grpName, + PageMemory pageMem, + long metaPageId, + boolean initNew + ) throws IgniteCheckedException { + tree = new LinkTree( + "link-map", + grpId, + grpName, + pageMem, + null, + new AtomicLong(), + metaPageId, + null, + (IOVersions)INNER_IO_VERSIONS, + (IOVersions)LEAF_IO_VERSIONS, + null, + null, + initNew + ); + } + + /** + * Add link mapping. + * + * @param oldLink Old link. + * @param newLink New link. + */ + public void put(long oldLink, long newLink) throws IgniteCheckedException { + tree.put(new LinkMapping(oldLink, newLink)); + } + + /** + * Get new link by old link. + * + * @param oldLink Old link. 
+ */ + public long get(long oldLink) throws IgniteCheckedException { + LinkMapping get = new LinkMapping(oldLink, 0); + LinkMapping found = tree.findOne(get); + + return found.getNewLink(); + } + + /** */ + private static class LinkTree extends BPlusTree { + /** + * @param name Tree name. + * @param cacheGrpId Cache group ID. + * @param cacheGrpName Cache group name. + * @param pageMem Page memory. + * @param wal Write ahead log manager. + * @param globalRmvId Remove ID. + * @param metaPageId Meta page ID. + * @param reuseList Reuse list. + * @param innerIos Inner IO versions. + * @param leafIos Leaf IO versions. + * @param failureProcessor if the tree is corrupted. + * @param initNew If tree should be (re)created. + * + * @throws IgniteCheckedException If failed. + */ + protected LinkTree( + String name, + int cacheGrpId, + String cacheGrpName, + PageMemory pageMem, + IgniteWriteAheadLogManager wal, + AtomicLong globalRmvId, + long metaPageId, + ReuseList reuseList, + IOVersions> innerIos, + IOVersions> leafIos, + @Nullable FailureProcessor failureProcessor, + @Nullable PageLockListener lockLsnr, + boolean initNew + ) throws IgniteCheckedException { + super(name, cacheGrpId, cacheGrpName, pageMem, wal, globalRmvId, metaPageId, reuseList, innerIos, leafIos, FLAG_AUX, failureProcessor, lockLsnr); + + PageIO.registerTest(latestInnerIO(), latestLeafIO()); + + initTree(initNew); + } + + /** {@inheritDoc} */ + @Override protected int compare(BPlusIO io, long pageAddr, int idx, LinkMapping row) throws IgniteCheckedException { + LinkMapping lookupRow = io.getLookupRow(this, pageAddr, idx); + + return Long.compare(lookupRow.getOldLink(), row.getOldLink()); + } + + /** {@inheritDoc} */ + @Override public LinkMapping getRow(BPlusIO io, long pageAddr, int idx, Object x) throws IgniteCheckedException { + return io.getLookupRow(this, pageAddr, idx); + } + + /** {@inheritDoc} */ + @Override protected long allocatePageNoReuse() throws IgniteCheckedException { + return 
pageMem.allocatePage(grpId, PageIdUtils.partId(metaPageId), FLAG_DATA); + } + } + + /** + * Class holding mapping from old link to new link. + */ + private static class LinkMapping { + /** Old link. */ + private final long oldLink; + + /** New link. */ + private final long newLink; + + /** + * @param oldLink Old link. + * @param newLink New link. + */ + public LinkMapping(long oldLink, long newLink) { + this.oldLink = oldLink; + this.newLink = newLink; + } + + /** */ + public long getOldLink() { + return oldLink; + } + + /** */ + public long getNewLink() { + return newLink; + } + } + + /** */ + private static class LinkMappingInnerIO extends BPlusInnerIO { + /** */ + protected LinkMappingInnerIO() { + super(PageIO.T_DEFRAG_LINK_MAPPING_INNER, 1, true, Long.BYTES * 2); + } + + /** {@inheritDoc} */ + @Override public void storeByOffset(long pageAddr, int off, LinkMapping row) { + PageUtils.putLong(pageAddr, off, row.getOldLink()); + PageUtils.putLong(pageAddr, off + Long.BYTES, row.getNewLink()); + } + + /** {@inheritDoc} */ + @Override public void store(long dst, int dstIdx, BPlusIO srcIo, long src, int srcIdx) + throws IgniteCheckedException { + assert srcIo == this; + + storeByOffset(dst, offset(dstIdx), srcIo.getLookupRow(null, src, srcIdx)); + } + + /** {@inheritDoc} */ + @Override public LinkMapping getLookupRow(BPlusTree tree, long pageAddr, int idx) { + long oldLink = PageUtils.getLong(pageAddr, offset(idx)); + long newLink = PageUtils.getLong(pageAddr, offset(idx) + Long.BYTES); + + return new LinkMapping(oldLink, newLink); + } + } + + /** */ + private static class LinkMappingLeafIO extends BPlusLeafIO { + /** */ + protected LinkMappingLeafIO() { + super(PageIO.T_DEFRAG_LINK_MAPPING_LEAF, 1, Long.BYTES * 2); + } + + /** {@inheritDoc} */ + @Override public void storeByOffset(long pageAddr, int off, LinkMapping row) { + PageUtils.putLong(pageAddr, off, row.getOldLink()); + PageUtils.putLong(pageAddr, off + Long.BYTES, row.getNewLink()); + } + + /** 
{@inheritDoc} */ + @Override public void store(long dst, int dstIdx, BPlusIO srcIo, long src, int srcIdx) + throws IgniteCheckedException { + assert srcIo == this; + + storeByOffset(dst, offset(dstIdx), srcIo.getLookupRow(null, src, srcIdx)); + } + + /** {@inheritDoc} */ + @Override public LinkMapping getLookupRow(BPlusTree tree, long pageAddr, int idx) { + long oldLink = PageUtils.getLong(pageAddr, offset(idx)); + long newLink = PageUtils.getLong(pageAddr, offset(idx) + Long.BYTES); + + return new LinkMapping(oldLink, newLink); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/PageStoreMap.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/PageStoreMap.java new file mode 100644 index 00000000000000..946fea16c06684 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/PageStoreMap.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation; + +import java.util.Arrays; +import java.util.Collection; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.pagemem.store.PageStore; +import org.apache.ignite.internal.pagemem.store.PageStoreCollection; +import org.apache.ignite.internal.util.collection.IntMap; +import org.apache.ignite.internal.util.collection.IntRWHashMap; +import org.apache.ignite.internal.util.typedef.internal.S; + +/** */ +class PageStoreMap implements PageStoreCollection { + /** GroupId -> PartId -> PageStore */ + private final IntMap> grpPageStoresMap = new IntRWHashMap<>(); + + /** */ + public void addPageStore( + int grpId, + int partId, + PageStore pageStore + ) { + IntMap pageStoresMap = grpPageStoresMap.get(grpId); + + //This code cannot be used concurrently. If we decide to parallelize defragmentation then we should correct the current class. + if (pageStoresMap == null) + grpPageStoresMap.put(grpId, pageStoresMap = new IntRWHashMap<>()); + + pageStoresMap.put(partId, pageStore); + } + + /** */ + public void removePageStore( + int grpId, + int partId + ) { + IntMap pageStoresMap = grpPageStoresMap.get(grpId); + + if (pageStoresMap != null) + pageStoresMap.remove(partId); + } + + /** */ + public void clear(int grpId) { + grpPageStoresMap.remove(grpId); + } + + /** {@inheritDoc} */ + @Override public PageStore getStore(int grpId, int partId) throws IgniteCheckedException { + IntMap partPageStoresMap = grpPageStoresMap.get(grpId); + + if (partPageStoresMap == null) { + throw new IgniteCheckedException(S.toString("Page store map not found. ", + "grpId", grpId, false, + "partId", partId, false, + "keys", Arrays.toString(grpPageStoresMap.keys()), false, + "this", hashCode(), false + )); + } + + PageStore pageStore = partPageStoresMap.get(partId); + + if (pageStore == null) { + throw new IgniteCheckedException(S.toString("Page store not found. 
", + "grpId", grpId, false, + "partId", partId, false, + "keys", Arrays.toString(partPageStoresMap.keys()), false, + "this", hashCode(), false + )); + } + + return pageStore; + } + + /** {@inheritDoc} */ + @Override public Collection getStores(int grpId) throws IgniteCheckedException { + IntMap partPageStoresMap = grpPageStoresMap.get(grpId); + + if (partPageStoresMap == null) { + throw new IgniteCheckedException(S.toString("Page store map not found. ", + "grpId", grpId, false, + "keys", Arrays.toString(grpPageStoresMap.keys()), false, + "this", hashCode(), false + )); + } + + return Arrays.asList(partPageStoresMap.values()); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/TreeIterator.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/TreeIterator.java new file mode 100644 index 00000000000000..90e47c97439a94 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/TreeIterator.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation; + +import java.nio.ByteBuffer; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; +import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusLeafIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusMetaIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; +import org.apache.ignite.internal.util.GridUnsafe; + +/** */ +public class TreeIterator { + /** Direct memory buffer with a size of one page. */ + private final ByteBuffer pageBuf; + + /** Offheap page size. */ + private final int pageSize; + + /** */ + public TreeIterator(int size) { + pageSize = size; + + pageBuf = ByteBuffer.allocateDirect(pageSize); + } + + /** */ + public void iterate( + BPlusTree tree, + PageMemoryEx pageMemory, + BPlusTree.TreeRowClosure c + ) throws IgniteCheckedException { + int grpId = tree.groupId(); + + long leafId = findFirstLeafId(grpId, tree.getMetaPageId(), pageMemory); + + long bufAddr = GridUnsafe.bufferAddress(pageBuf); + + while (leafId != 0L) { + long leafPage = pageMemory.acquirePage(grpId, leafId); + + BPlusIO io; + + try { + long leafPageAddr = pageMemory.readLock(grpId, leafId, leafPage); + + try { + io = PageIO.getBPlusIO(leafPageAddr); + + assert io instanceof BPlusLeafIO : io; + + GridUnsafe.copyMemory(leafPageAddr, bufAddr, pageSize); + } + finally { + pageMemory.readUnlock(grpId, leafId, leafPage); + } + } + finally { + pageMemory.releasePage(grpId, leafId, leafPage); + } + + int cnt = io.getCount(bufAddr); + + for (int idx = 0; idx < cnt; idx++) + c.apply(tree, io, bufAddr, idx); + + leafId = io.getForward(bufAddr); + } + } + + /** */ + private long 
findFirstLeafId(int grpId, long metaPageId, PageMemoryEx partPageMemory) throws IgniteCheckedException { + long metaPage = partPageMemory.acquirePage(grpId, metaPageId); + + try { + long metaPageAddr = partPageMemory.readLock(grpId, metaPageId, metaPage); + + try { + BPlusMetaIO metaIO = PageIO.getPageIO(metaPageAddr); + + return metaIO.getFirstPageId(metaPageAddr, 0); + } + finally { + partPageMemory.readUnlock(grpId, metaPageId, metaPage); + } + } + finally { + partPageMemory.releasePage(grpId, metaPageId, metaPage); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/maintenance/DefragmentationParameters.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/maintenance/DefragmentationParameters.java new file mode 100644 index 00000000000000..6bc3ddcd21cc18 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/maintenance/DefragmentationParameters.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation.maintenance; + +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; +import org.apache.ignite.maintenance.MaintenanceTask; + +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.CachePartitionDefragmentationManager.DEFRAGMENTATION_MNTC_TASK_NAME; + +/** + * Maintenance task for defragmentation. + */ +public class DefragmentationParameters { + /** */ + public static final String CACHE_GROUP_ID_SEPARATOR = ","; + + /** */ + private final List cacheGrpIds; + + /** + * @param cacheGrpIds Id of cache group for defragmentations. + */ + private DefragmentationParameters(List cacheGrpIds) { + this.cacheGrpIds = cacheGrpIds; + } + + /** + * Convert parameter to maintenance storage. + * + * @param cacheGroupIds Cache group ids for defragmentation. + * @return Maintenance task. + */ + public static MaintenanceTask toStore(List cacheGroupIds) { + return new MaintenanceTask( + DEFRAGMENTATION_MNTC_TASK_NAME, + "Cache group defragmentation", + cacheGroupIds.stream() + .map(String::valueOf) + .collect(Collectors.joining(CACHE_GROUP_ID_SEPARATOR)) + ); + } + + /** + * @param rawTask Task from maintenance storage. + * @return Defragmentation parameters. + */ + public static DefragmentationParameters fromStore(MaintenanceTask rawTask) { + return new DefragmentationParameters(Arrays.stream(rawTask.parameters() + .split(CACHE_GROUP_ID_SEPARATOR)) + .map(Integer::valueOf) + .collect(Collectors.toList()) + ); + } + + /** + * @return Cache groups ids. 
+ */ + public List cacheGroupIds() { + return cacheGrpIds; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/maintenance/DefragmentationWorkflowCallback.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/maintenance/DefragmentationWorkflowCallback.java new file mode 100644 index 00000000000000..a809579d14d344 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/maintenance/DefragmentationWorkflowCallback.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation.maintenance; + +import java.util.Collections; +import java.util.List; +import java.util.function.Function; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.CachePartitionDefragmentationManager; +import org.apache.ignite.maintenance.MaintenanceAction; +import org.apache.ignite.maintenance.MaintenanceWorkflowCallback; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +/** + * Defragmentation specific callback for maintenance mode. + */ +public class DefragmentationWorkflowCallback implements MaintenanceWorkflowCallback { + /** Defragmentation manager. */ + private final CachePartitionDefragmentationManager defrgMgr; + + /** Logger provider. */ + private final Function, IgniteLogger> logProvider; + + /** + * @param logProvider Logger provider. + * @param defrgMgr Defragmentation manager. + */ + public DefragmentationWorkflowCallback( + Function, IgniteLogger> logProvider, + CachePartitionDefragmentationManager defrgMgr + ) { + this.defrgMgr = defrgMgr; + this.logProvider = logProvider; + } + + /** {@inheritDoc} */ + @Override public boolean shouldProceedWithMaintenance() { + return true; + } + + /** {@inheritDoc} */ + @Override public @NotNull List> allActions() { + return Collections.singletonList(automaticAction()); + } + + /** {@inheritDoc} */ + @Override public @Nullable MaintenanceAction automaticAction() { + return new ExecuteDefragmentationAction(logProvider, defrgMgr); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/maintenance/ExecuteDefragmentationAction.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/maintenance/ExecuteDefragmentationAction.java new file mode 100644 index 00000000000000..42b2de7945bad3 --- /dev/null +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/maintenance/ExecuteDefragmentationAction.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation.maintenance; + +import java.util.function.Function; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.CachePartitionDefragmentationManager; +import org.apache.ignite.maintenance.MaintenanceAction; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +/** + * Action which allows to start the defragmentation process. + */ +class ExecuteDefragmentationAction implements MaintenanceAction { + /** Logger. */ + private final IgniteLogger log; + + /** Defragmentation manager. */ + private final CachePartitionDefragmentationManager defrgMgr; + + /** + * @param logFunction Logger provider. + * @param defrgMgr Defragmentation manager. 
+ */ + public ExecuteDefragmentationAction( + Function, IgniteLogger> logFunction, + CachePartitionDefragmentationManager defrgMgr + ) { + this.log = logFunction.apply(ExecuteDefragmentationAction.class); + this.defrgMgr = defrgMgr; + } + + /** {@inheritDoc} */ + @Override public Boolean execute() { + try { + defrgMgr.executeDefragmentation(); + } + catch (IgniteCheckedException | IgniteException e) { + log.error("Defragmentation is failed", e); + + return false; + } + + return true; + } + + /** {@inheritDoc} */ + @Override public @NotNull String name() { + return "execute"; + } + + /** {@inheritDoc} */ + @Override public @Nullable String description() { + return "Starting the process of defragmentation."; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/EncryptedFileIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/EncryptedFileIO.java index 60f5017f2f6fa1..88b619ebc0d93d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/EncryptedFileIO.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/EncryptedFileIO.java @@ -18,10 +18,10 @@ package org.apache.ignite.internal.processors.cache.persistence.file; import java.io.IOException; -import java.io.Serializable; import java.nio.ByteBuffer; import java.nio.MappedByteBuffer; import org.apache.ignite.internal.managers.encryption.GridEncryptionManager; +import org.apache.ignite.internal.managers.encryption.GroupKey; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.processors.cache.persistence.wal.crc.FastCrc; import org.apache.ignite.internal.util.typedef.internal.CU; @@ -63,11 +63,6 @@ public class EncryptedFileIO implements FileIO { */ private final EncryptionSpi encSpi; - /** - * Encryption key. 
- */ - private Serializable encKey; - /** * Extra bytes added by encryption. */ @@ -242,12 +237,16 @@ private void encrypt(ByteBuffer srcBuf, ByteBuffer res) throws IOException { srcBuf.limit(srcBuf.position() + plainDataSize()); - encSpi.encryptNoPadding(srcBuf, key(), res); + GroupKey grpKey = encMgr.groupKey(groupId); + + encSpi.encryptNoPadding(srcBuf, grpKey.key(), res); res.rewind(); storeCRC(res); + res.put(grpKey.id()); + srcBuf.limit(srcLimit); srcBuf.position(srcBuf.position() + encryptionOverhead); } @@ -260,11 +259,31 @@ private void decrypt(ByteBuffer encrypted, ByteBuffer destBuf) throws IOExceptio assert encrypted.remaining() >= pageSize; assert encrypted.limit() >= pageSize; - checkCRC(encrypted); + int crc = FastCrc.calcCrc(encrypted, encryptedDataSize()); + + int storedCrc = 0; + + storedCrc |= (int)encrypted.get() << 24; + storedCrc |= ((int)encrypted.get() & 0xff) << 16; + storedCrc |= ((int)encrypted.get() & 0xff) << 8; + storedCrc |= encrypted.get() & 0xff; + + if (crc != storedCrc) { + throw new IOException("Content of encrypted page is broken. [StoredCrc=" + storedCrc + + ", calculatedCrc=" + crc + "]"); + } + + int keyId = encrypted.get() & 0xff; + + encrypted.position(encrypted.position() - (encryptedDataSize() + 4 /* CRC size. */ + 1 /* key identifier. */)); encrypted.limit(encryptedDataSize()); - encSpi.decryptNoPadding(encrypted, key(), destBuf); + GroupKey grpKey = encMgr.groupKey(groupId, keyId); + + assert grpKey != null : keyId; + + encSpi.decryptNoPadding(encrypted, grpKey.key(), destBuf); destBuf.put(zeroes); //Forcibly purge page buffer tail. } @@ -283,29 +302,6 @@ private void storeCRC(ByteBuffer res) { res.put((byte) crc); } - /** - * Checks encrypted data integrity. - * - * @param encrypted Encrypted data buffer. 
- */ - private void checkCRC(ByteBuffer encrypted) throws IOException { - int crc = FastCrc.calcCrc(encrypted, encryptedDataSize()); - - int storedCrc = 0; - - storedCrc |= (int)encrypted.get() << 24; - storedCrc |= ((int)encrypted.get() & 0xff) << 16; - storedCrc |= ((int)encrypted.get() & 0xff) << 8; - storedCrc |= encrypted.get() & 0xff; - - if (crc != storedCrc) { - throw new IOException("Content of encrypted page is broken. [StoredCrc=" + storedCrc + - ", calculatedCrd=" + crc + "]"); - } - - encrypted.position(encrypted.position() - (encryptedDataSize() + 4 /* CRC size. */)); - } - /** * @return Encrypted data size. */ @@ -334,16 +330,6 @@ private boolean tailIsEmpty(ByteBuffer src, int pageType) { return true; } - /** - * @return Encryption key. - */ - private Serializable key() { - if (encKey == null) - return encKey = encMgr.groupKey(groupId); - - return encKey; - } - /** {@inheritDoc} */ @Override public int write(byte[] buf, int off, int len) throws IOException { throw new UnsupportedOperationException("Encrypted File doesn't support this operation"); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileIOFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileIOFactory.java index 29f7e6f891b876..4deb047569194b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileIOFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileIOFactory.java @@ -29,6 +29,7 @@ /** * {@link FileIO} factory definition. */ +@FunctionalInterface public interface FileIOFactory extends Serializable { /** * Creates I/O interface for file with default I/O mode. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java index adb49a35f228c0..6b63d315b42596 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java @@ -30,6 +30,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.LongConsumer; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteSystemProperties; @@ -42,7 +43,6 @@ import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.processors.cache.persistence.wal.crc.FastCrc; import org.apache.ignite.internal.processors.cache.persistence.wal.crc.IgniteDataIntegrityViolationException; -import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteOutClosure; @@ -101,7 +101,7 @@ public class FilePageStore implements PageStore { private final AtomicLong allocated; /** Region metrics updater. */ - private final LongAdderMetric allocatedTracker; + private final LongConsumer allocatedTracker; /** List of listeners for current page store to handle. 
*/ private final List lsnrs = new CopyOnWriteArrayList<>(); @@ -130,7 +130,7 @@ public FilePageStore( IgniteOutClosure pathProvider, FileIOFactory factory, DataStorageConfiguration cfg, - LongAdderMetric allocatedTracker + LongConsumer allocatedTracker ) { this.type = type; this.pathProvider = pathProvider; @@ -357,7 +357,7 @@ private void stop0(boolean delete) throws IOException { } } finally { - allocatedTracker.add(-1L * allocated.getAndSet(0) / pageSize); + allocatedTracker.accept(-1L * allocated.getAndSet(0) / pageSize); inited = false; @@ -406,7 +406,7 @@ private void stop0(boolean delete) throws IOException { throw new StorageException("Failed to truncate partition file [file=" + filePath.toAbsolutePath() + "]", e); } finally { - allocatedTracker.add(-1L * allocated.getAndSet(0) / pageSize); + allocatedTracker.accept(-1L * allocated.getAndSet(0) / pageSize); inited = false; @@ -443,7 +443,7 @@ private void stop0(boolean delete) throws IOException { assert delta % pageSize == 0 : delta; - allocatedTracker.add(delta / pageSize); + allocatedTracker.accept(delta / pageSize); } recover = false; @@ -592,7 +592,7 @@ public void init() throws StorageException { // Order is important, update of total allocated pages must be called after allocated update // and setting inited to true, because it affects pages() returned value. 
- allocatedTracker.add(pages()); + allocatedTracker.accept(pages()); } catch (IOException e) { err = new StorageException( @@ -839,7 +839,7 @@ private long allocPage() { off = allocated.get(); if (allocated.compareAndSet(off, off + pageSize)) { - allocatedTracker.increment(); + allocatedTracker.accept(1); break; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreFactory.java index 6607ebe12002a4..53e9fe9c711972 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreFactory.java @@ -19,10 +19,9 @@ import java.io.File; import java.nio.file.Path; +import java.util.function.LongConsumer; import org.apache.ignite.IgniteCheckedException; -import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.store.PageStore; -import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; import org.apache.ignite.lang.IgniteOutClosure; /** @@ -32,13 +31,13 @@ public interface FilePageStoreFactory { /** * Creates instance of PageStore based on given file. * - * @param type Data type, can be {@link PageIdAllocator#FLAG_IDX} or {@link PageIdAllocator#FLAG_DATA}. + * @param type Data type, can be {@link PageStore#TYPE_IDX} or {@link PageStore#TYPE_DATA}. * @param file File Page store file. * @param allocatedTracker metrics updater. * @return page store * @throws IgniteCheckedException if failed. 
*/ - default PageStore createPageStore(byte type, File file, LongAdderMetric allocatedTracker) + default PageStore createPageStore(byte type, File file, LongConsumer allocatedTracker) throws IgniteCheckedException { return createPageStore(type, file::toPath, allocatedTracker); } @@ -46,12 +45,12 @@ default PageStore createPageStore(byte type, File file, LongAdderMetric allocate /** * Creates instance of PageStore based on file path provider. * - * @param type Data type, can be {@link PageIdAllocator#FLAG_IDX} or {@link PageIdAllocator#FLAG_DATA} + * @param type Data type, can be {@link PageStore#TYPE_IDX} or {@link PageStore#TYPE_DATA} * @param pathProvider File Page store path provider. * @param allocatedTracker metrics updater * @return page store * @throws IgniteCheckedException if failed */ - PageStore createPageStore(byte type, IgniteOutClosure pathProvider, LongAdderMetric allocatedTracker) + PageStore createPageStore(byte type, IgniteOutClosure pathProvider, LongConsumer allocatedTracker) throws IgniteCheckedException; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java index 024f52c85687d6..f8f28d80925023 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java @@ -49,9 +49,9 @@ import java.util.function.BiConsumer; import java.util.function.BiFunction; import java.util.function.Function; +import java.util.function.LongConsumer; import java.util.function.Predicate; import java.util.stream.Collectors; - import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; @@ -64,22 +64,23 @@ import 
org.apache.ignite.internal.client.util.GridConcurrentHashSet; import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageIdUtils; -import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager; import org.apache.ignite.internal.pagemem.store.PageStore; +import org.apache.ignite.internal.pagemem.store.PageStoreCollection; import org.apache.ignite.internal.processors.cache.CacheGroupContext; import org.apache.ignite.internal.processors.cache.CacheGroupDescriptor; -import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.GridCacheSharedManagerAdapter; import org.apache.ignite.internal.processors.cache.StoredCacheData; import org.apache.ignite.internal.processors.cache.persistence.DataRegion; import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; import org.apache.ignite.internal.processors.cache.persistence.StorageException; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils; import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFolderSettings; import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetaStorage; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageReadWriteManager; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageReadWriteManagerImpl; import org.apache.ignite.internal.processors.cache.persistence.snapshot.IgniteCacheSnapshotManager; -import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; import org.apache.ignite.internal.util.GridStripedReadWriteLock; import org.apache.ignite.internal.util.typedef.X; @@ -87,6 +88,7 @@ 
import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.internal.util.worker.GridWorker; import org.apache.ignite.lang.IgniteOutClosure; +import org.apache.ignite.maintenance.MaintenanceRegistry; import org.apache.ignite.maintenance.MaintenanceTask; import org.apache.ignite.marshaller.Marshaller; import org.apache.ignite.marshaller.MarshallerUtils; @@ -104,7 +106,8 @@ /** * File page store manager. */ -public class FilePageStoreManager extends GridCacheSharedManagerAdapter implements IgnitePageStoreManager { +public class FilePageStoreManager extends GridCacheSharedManagerAdapter implements IgnitePageStoreManager, + PageStoreCollection { /** File suffix. */ public static final String FILE_SUFFIX = ".bin"; @@ -118,7 +121,10 @@ public class FilePageStoreManager extends GridCacheSharedManagerAdapter implemen public static final String PART_FILE_PREFIX = "part-"; /** */ - public static final String INDEX_FILE_NAME = "index" + FILE_SUFFIX; + public static final String INDEX_FILE_PREFIX = "index"; + + /** */ + public static final String INDEX_FILE_NAME = INDEX_FILE_PREFIX + FILE_SUFFIX; /** */ public static final String PART_FILE_TEMPLATE = PART_FILE_PREFIX + "%d" + FILE_SUFFIX; @@ -157,6 +163,9 @@ public class FilePageStoreManager extends GridCacheSharedManagerAdapter implemen /** Marshaller. */ private final Marshaller marshaller; + /** Page manager. */ + private final PageReadWriteManager pmPageMgr; + /** * Executor to disallow running code that modifies data in idxCacheStores concurrently with cleanup of file page * store. @@ -187,9 +196,6 @@ public class FilePageStoreManager extends GridCacheSharedManagerAdapter implemen /** Absolute directory for file page store. Includes consistent id based folder. 
*/ private File storeWorkDir; - /** */ - private final long metaPageId = PageIdUtils.pageId(-1, PageMemory.FLAG_IDX, 0); - /** */ private final Set grpsWithoutIdx = Collections.newSetFromMap(new ConcurrentHashMap()); @@ -217,6 +223,8 @@ public FilePageStoreManager(GridKernalContext ctx) { pageStoreV1FileIoFactory = pageStoreFileIoFactory = dsCfg.getFileIOFactory(); marshaller = MarshallerUtils.jdkMarshaller(ctx.igniteInstanceName()); + + pmPageMgr = new PageReadWriteManagerImpl(ctx, this, FilePageStoreManager.class.getSimpleName()); } /** {@inheritDoc} */ @@ -446,7 +454,7 @@ private List checkCachesWithDisabledWal() { } /** {@inheritDoc} */ - @Override public void initialize(int cacheId, int partitions, String workingDir, LongAdderMetric tracker) + @Override public void initialize(int cacheId, int partitions, String workingDir, LongConsumer tracker) throws IgniteCheckedException { assert storeWorkDir != null; @@ -493,7 +501,7 @@ private List checkCachesWithDisabledWal() { new File(storeWorkDir, META_STORAGE_NAME), grpId, PageIdAllocator.METASTORE_PARTITION + 1, - dataRegion.memoryMetrics().totalAllocatedPages(), + dataRegion.memoryMetrics().totalAllocatedPages()::add, false); CacheStoreHolder old = idxCacheStores.put(grpId, holder); @@ -596,34 +604,8 @@ public void removeConfigurationChangeListener(BiConsumer lsnr) { } /** {@inheritDoc} */ - @Override public void read(int grpId, long pageId, ByteBuffer pageBuf) throws IgniteCheckedException { - read(grpId, pageId, pageBuf, false); - } - - /** - * Will preserve crc in buffer if keepCrc is true. - * - * @param grpId Group ID. - * @param pageId Page ID. - * @param pageBuf Page buffer. - * @param keepCrc Keep CRC flag. - * @throws IgniteCheckedException If failed. 
- */ - public void read(int grpId, long pageId, ByteBuffer pageBuf, boolean keepCrc) throws IgniteCheckedException { - PageStore store = getStore(grpId, PageIdUtils.partId(pageId)); - - try { - store.read(pageId, pageBuf, keepCrc); - - assert keepCrc || PageIO.getCrc(pageBuf) == 0 : store.size() - store.pageOffset(pageId); - - cctx.kernalContext().compress().decompressPage(pageBuf, store.getPageSize()); - } - catch (StorageException e) { - cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); - - throw e; - } + @Override public void read(int grpId, long pageId, ByteBuffer pageBuf, boolean keepCrc) throws IgniteCheckedException { + pmPageMgr.read(grpId, pageId, pageBuf, keepCrc); } /** {@inheritDoc} */ @@ -648,8 +630,8 @@ public void read(int grpId, long pageId, ByteBuffer pageBuf, boolean keepCrc) th } /** {@inheritDoc} */ - @Override public void write(int grpId, long pageId, ByteBuffer pageBuf, int tag) throws IgniteCheckedException { - writeInternal(grpId, pageId, pageBuf, tag, true); + @Override public PageStore write(int grpId, long pageId, ByteBuffer pageBuf, int tag, boolean calculateCrc) throws IgniteCheckedException { + return pmPageMgr.write(grpId, pageId, pageBuf, tag, calculateCrc); } /** {@inheritDoc} */ @@ -659,59 +641,6 @@ public void read(int grpId, long pageId, ByteBuffer pageBuf, boolean keepCrc) th return store.pageOffset(pageId); } - /** - * @param cacheId Cache ID to write. - * @param pageId Page ID. - * @param pageBuf Page buffer. - * @param tag Partition tag (growing 1-based partition file version). Used to validate page is not outdated - * @param calculateCrc if {@code False} crc calculation will be forcibly skipped. - * @return PageStore to which the page has been written. - * @throws IgniteCheckedException If IO error occurred. 
- */ - public PageStore writeInternal(int cacheId, long pageId, ByteBuffer pageBuf, int tag, boolean calculateCrc) - throws IgniteCheckedException { - int partId = PageIdUtils.partId(pageId); - - PageStore store = getStore(cacheId, partId); - - try { - int pageSize = store.getPageSize(); - int compressedPageSize = pageSize; - - GridCacheContext cctx0 = cctx.cacheContext(cacheId); - - if (cctx0 != null) { - assert pageBuf.position() == 0 && pageBuf.limit() == pageSize : pageBuf; - - ByteBuffer compressedPageBuf = cctx0.compress().compressPage(pageBuf, store); - - if (compressedPageBuf != pageBuf) { - compressedPageSize = PageIO.getCompressedSize(compressedPageBuf); - - if (!calculateCrc) { - calculateCrc = true; - PageIO.setCrc(compressedPageBuf, 0); // It will be recalculated over compressed data further. - } - - PageIO.setCrc(pageBuf, 0); // It is expected to be reset to 0 after each write. - pageBuf = compressedPageBuf; - } - } - - store.write(pageId, pageBuf, tag, calculateCrc); - - if (pageSize > compressedPageSize) - store.punchHole(pageId, compressedPageSize); // TODO maybe add async punch mode? 
- } - catch (StorageException e) { - cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); - - throw e; - } - - return store; - } - /** * */ @@ -741,7 +670,7 @@ private CacheStoreHolder initForCache(CacheGroupDescriptor grpDesc, CacheConfigu cacheWorkDir, grpDesc.groupId(), grpDesc.config().getAffinity().partitions(), - allocatedTracker, + allocatedTracker::add, ccfg.isEncryptionEnabled() ); } @@ -799,11 +728,18 @@ public FilePageStoreFactory getPageStoreFactory(int grpId, boolean encrypted) { private CacheStoreHolder initDir(File cacheWorkDir, int grpId, int partitions, - LongAdderMetric allocatedTracker, + LongConsumer allocatedTracker, boolean encrypted) throws IgniteCheckedException { try { boolean dirExisted = checkAndInitCacheWorkDir(cacheWorkDir); + if (dirExisted) { + MaintenanceRegistry mntcReg = cctx.kernalContext().maintenanceRegistry(); + + if (!mntcReg.isMaintenanceMode()) + DefragmentationFileUtils.beforeInitPageStores(cacheWorkDir, log); + } + File idxFile = new File(cacheWorkDir, INDEX_FILE_NAME); if (dirExisted && !idxFile.exists()) @@ -813,7 +749,7 @@ private CacheStoreHolder initDir(File cacheWorkDir, PageStore idxStore = pageStoreFactory.createPageStore( - PageMemory.FLAG_IDX, + PageStore.TYPE_IDX, idxFile, allocatedTracker); @@ -824,12 +760,12 @@ private CacheStoreHolder initDir(File cacheWorkDir, PageStore partStore = pageStoreFactory.createPageStore( - PageMemory.FLAG_DATA, + PageStore.TYPE_DATA, () -> getPartitionFilePath(cacheWorkDir, p), allocatedTracker); partStores[partId] = partStore; - } + } return new CacheStoreHolder(idxStore, partStores); } @@ -983,25 +919,7 @@ else if (lockF.exists()) { /** {@inheritDoc} */ @Override public long allocatePage(int grpId, int partId, byte flags) throws IgniteCheckedException { - assert partId <= MAX_PARTITION_ID || partId == INDEX_PARTITION; - - PageStore store = getStore(grpId, partId); - - try { - long pageIdx = store.allocatePage(); - - return 
PageIdUtils.pageId(partId, flags, (int)pageIdx); - } - catch (StorageException e) { - cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); - - throw e; - } - } - - /** {@inheritDoc} */ - @Override public long metaPageId(final int grpId) { - return metaPageId; + return pmPageMgr.allocatePage(grpId, partId, flags); } /** {@inheritDoc} */ @@ -1341,7 +1259,7 @@ private CacheStoreHolder getHolder(int grpId) throws IgniteCheckedException { * @return Collection of related page stores. * @throws IgniteCheckedException If failed. */ - public Collection getStores(int grpId) throws IgniteCheckedException { + @Override public Collection getStores(int grpId) throws IgniteCheckedException { return getHolder(grpId); } @@ -1353,7 +1271,7 @@ public Collection getStores(int grpId) throws IgniteCheckedException * * Note: visible for testing. */ - public PageStore getStore(int grpId, int partId) throws IgniteCheckedException { + @Override public PageStore getStore(int grpId, int partId) throws IgniteCheckedException { CacheStoreHolder holder = getHolder(grpId); if (holder == null) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreV2.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreV2.java index 8109dbbfa8c330..84609c9cb09c88 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreV2.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreV2.java @@ -17,8 +17,8 @@ package org.apache.ignite.internal.processors.cache.persistence.file; import java.nio.file.Path; +import java.util.function.LongConsumer; import org.apache.ignite.configuration.DataStorageConfiguration; -import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; import org.apache.ignite.lang.IgniteOutClosure; /** @@ -45,7 +45,7 @@ public 
FilePageStoreV2( IgniteOutClosure pathProvider, FileIOFactory factory, DataStorageConfiguration cfg, - LongAdderMetric allocatedTracker) { + LongConsumer allocatedTracker) { super(type, pathProvider, factory, cfg, allocatedTracker); hdrSize = cfg.getPageSize(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileVersionCheckingFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileVersionCheckingFactory.java index e54712247ab85a..e6502426f82f03 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileVersionCheckingFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileVersionCheckingFactory.java @@ -22,10 +22,10 @@ import java.nio.ByteOrder; import java.nio.file.Files; import java.nio.file.Path; +import java.util.function.LongConsumer; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.internal.pagemem.store.PageStore; -import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; import org.apache.ignite.lang.IgniteOutClosure; /** @@ -69,7 +69,7 @@ public FileVersionCheckingFactory( @Override public PageStore createPageStore( byte type, IgniteOutClosure pathProvider, - LongAdderMetric allocatedTracker) throws IgniteCheckedException { + LongConsumer allocatedTracker) throws IgniteCheckedException { Path filePath = pathProvider.apply(); if (!Files.exists(filePath)) @@ -124,7 +124,7 @@ private FilePageStore createPageStore( byte type, IgniteOutClosure pathProvider, int ver, - LongAdderMetric allocatedTracker) { + LongConsumer allocatedTracker) { switch (ver) { case FilePageStore.VERSION: diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java index b46417bb0ab48a..7ccaf378874ec7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java @@ -50,6 +50,8 @@ import org.apache.ignite.internal.util.lang.GridCursor; import org.apache.ignite.internal.util.typedef.internal.U; +import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_DATA; + /** */ public abstract class AbstractFreeList extends PagesList implements FreeList, ReuseList { @@ -425,6 +427,7 @@ else if (putIsNeeded) * @param wal Write ahead log manager. * @param metaPageId Metadata page ID. * @param initNew {@code True} if new metadata should be initialized. + * @param pageFlag Default flag value for allocated pages. * @throws IgniteCheckedException If failed. */ public AbstractFreeList( @@ -438,9 +441,10 @@ public AbstractFreeList( boolean initNew, PageLockListener lockLsnr, GridKernalContext ctx, - AtomicLong pageListCacheLimit + AtomicLong pageListCacheLimit, + byte pageFlag ) throws IgniteCheckedException { - super(cacheId, name, memPlc.pageMemory(), BUCKETS, wal, metaPageId, lockLsnr, ctx); + super(cacheId, name, memPlc.pageMemory(), BUCKETS, wal, metaPageId, lockLsnr, ctx, pageFlag); rmvRow = new RemoveRowHandler(cacheId == 0); @@ -565,9 +569,8 @@ private int bucket(int freeSpace, boolean allowReuse) { */ private long allocateDataPage(int part) throws IgniteCheckedException { assert part <= PageIdAllocator.MAX_PARTITION_ID; - assert part != PageIdAllocator.INDEX_PARTITION; - return pageMem.allocatePage(grpId, part, PageIdAllocator.FLAG_DATA); + return pageMem.allocatePage(grpId, part, FLAG_DATA); } /** {@inheritDoc} */ @@ -719,17 +722,23 @@ private long takePage(int size, T row, IoStatisticsHolder statHolder) throws Ign } if (pageId 
== 0L) { // Handle reuse bucket. - pageId = reuseList == this ? - takeEmptyPage(REUSE_BUCKET, row.ioVersions(), statHolder) : reuseList.takeRecycledPage(); + if (reuseList == this) + pageId = takeEmptyPage(REUSE_BUCKET, row.ioVersions(), statHolder); + else { + pageId = reuseList.takeRecycledPage(); + + if (pageId != 0) + pageId = reuseList.initRecycledPage(pageId, FLAG_DATA, row.ioVersions().latest()); + } } if (pageId == 0L) return 0; - if (PageIdUtils.tag(pageId) != PageIdAllocator.FLAG_DATA) // Page is taken from reuse bucket. - return initReusedPage(row, pageId, statHolder); - else // Page is taken from free space bucket. For in-memory mode partition must be changed. - return PageIdUtils.changePartitionId(pageId, row.partition()); + assert PageIdUtils.flag(pageId) == FLAG_DATA + : "rowVersions=" + row.ioVersions() + ", pageId=" + PageIdUtils.toDetailString(pageId); + + return PageIdUtils.changePartitionId(pageId, row.partition()); } /** @@ -911,6 +920,11 @@ public int emptyDataPages() { } } + /** {@inheritDoc} */ + @Override public long initRecycledPage(long pageId, byte flag, PageIO initIO) throws IgniteCheckedException { + return initRecycledPage0(pageId, flag, initIO); + } + /** {@inheritDoc} */ @Override public long recycledPagesCount() throws IgniteCheckedException { assert reuseList == this : "not allowed to be a reuse list"; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeList.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeList.java index a4a4363597391c..fdf50c9a028c78 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeList.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeList.java @@ -41,6 +41,7 @@ public class CacheFreeList extends AbstractFreeList { * @param wal Wal. * @param metaPageId Meta page id. 
* @param initNew Initialize new. + * @param pageFlag Default flag value for allocated pages. */ public CacheFreeList( int cacheId, @@ -52,7 +53,8 @@ public CacheFreeList( boolean initNew, PageLockListener lockLsnr, GridKernalContext ctx, - AtomicLong pageListCacheLimit + AtomicLong pageListCacheLimit, + byte pageFlag ) throws IgniteCheckedException { super( cacheId, @@ -65,7 +67,8 @@ public CacheFreeList( initNew, lockLsnr, ctx, - pageListCacheLimit + pageListCacheLimit, + pageFlag ); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/PagesList.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/PagesList.java index e944b1ec0310b2..bf6650066d5786 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/PagesList.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/PagesList.java @@ -32,6 +32,7 @@ import org.apache.ignite.internal.managers.communication.GridIoPolicy; import org.apache.ignite.internal.metric.IoStatisticsHolder; import org.apache.ignite.internal.metric.IoStatisticsHolderNoOp; +import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; @@ -43,6 +44,7 @@ import org.apache.ignite.internal.pagemem.wal.record.delta.PagesListRemovePageRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.PagesListSetNextRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.PagesListSetPreviousRecord; +import org.apache.ignite.internal.pagemem.wal.record.delta.RecycleRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.RotatedIdPartRecord; import org.apache.ignite.internal.processors.cache.persistence.DataStructure; import 
org.apache.ignite.internal.processors.cache.persistence.freelist.io.PagesListMetaIO; @@ -65,8 +67,14 @@ import static java.lang.Boolean.TRUE; import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_DATA; import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_IDX; +import static org.apache.ignite.internal.pagemem.PageIdAllocator.INDEX_PARTITION; import static org.apache.ignite.internal.pagemem.PageIdUtils.MAX_ITEMID_NUM; +import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.T_DATA; +import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.T_DATA_METASTORAGE; +import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.T_DATA_PART; +import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.T_META; import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.getPageId; +import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver.DEFAULT_PAGE_IO_RESOLVER; /** * Striped doubly-linked list of page IDs optionally organized in buckets. @@ -200,6 +208,7 @@ private final class PutBucket extends PageHandler { * @param buckets Number of buckets. * @param wal Write ahead log manager. * @param metaPageId Metadata page ID. + * @param pageFlag Default flag value for allocated pages. 
*/ protected PagesList( int cacheId, @@ -209,9 +218,10 @@ protected PagesList( IgniteWriteAheadLogManager wal, long metaPageId, PageLockListener lockLsnr, - GridKernalContext ctx + GridKernalContext ctx, + byte pageFlag ) { - super(cacheId, null, pageMem, wal, lockLsnr); + super(cacheId, null, pageMem, wal, lockLsnr, DEFAULT_PAGE_IO_RESOLVER, pageFlag); this.name = name; this.buckets = buckets; @@ -1304,6 +1314,8 @@ protected long takeEmptyPage(int bucket, @Nullable IOVersions initIoVers, ", pageId=" + pageId + ']'); } + assert !isReuseBucket(bucket) : "reuse bucket detected"; + return pageId; } @@ -1371,7 +1383,15 @@ protected long takeEmptyPage(int bucket, @Nullable IOVersions initIoVers, PageIdUtils.itemId(pageId) > 0 && PageIdUtils.itemId(pageId) <= MAX_ITEMID_NUM : "Incorrectly recycled pageId in reuse bucket: " + U.hexLong(pageId); - dataPageId = pageId; + if (isReuseBucket(bucket)) { + byte flag = getFlag(initIoVers); + + PageIO initIO = initIoVers == null ? null : initIoVers.latest(); + + dataPageId = initRecycledPage0(pageId, flag, initIO); + } + else + dataPageId = pageId; if (io.isEmpty(tailAddr)) { long prevId = io.getPreviousId(tailAddr); @@ -1409,12 +1429,11 @@ protected long takeEmptyPage(int bucket, @Nullable IOVersions initIoVers, decrementBucketSize(bucket); - if (initIoVers != null) { - int partId = PageIdUtils.partId(tailId); + byte flag = getFlag(initIoVers); - dataPageId = initReusedPage(tailId, tailPage, tailAddr, partId, FLAG_DATA, initIoVers.latest()); - } else - dataPageId = recyclePage(tailId, tailPage, tailAddr, null); + PageIO pageIO = initIoVers != null ? 
initIoVers.latest() : null; + + dataPageId = initReusedPage(tailId, tailPage, tailAddr, PageIdUtils.partId(tailId), flag, pageIO); dirty = true; } @@ -1449,7 +1468,56 @@ protected long takeEmptyPage(int bucket, @Nullable IOVersions initIoVers, } /** - * Reused page must obtain correctly assembled page id, then initialized by proper {@link PageIO} instance and + * @param initIoVers Optional IO versions list that will be used later to init the page. + * @return {@link PageIdAllocator#FLAG_DATA} for cache group metas and data pages, + * {@link #pageFlag} otherwise. + */ + private byte getFlag(IOVersions initIoVers) { + if (initIoVers != null) { + PageIO pageIO = initIoVers.latest(); + + switch (pageIO.getType()) { + case T_META: + case T_DATA: + case T_DATA_PART: + case T_DATA_METASTORAGE: + return FLAG_DATA; + } + } + + return pageFlag; + } + + /** + * Create new page id and update page content accordingly if it's necessary. + * + * @param pageId Id of the recycled page from reuse bucket. + * @param flag New flag for the page. + * @return New page id. + * @throws IgniteCheckedException If failed. + * + * @see PagesList#initReusedPage(long, long, long, int, byte, PageIO) + */ + protected long initRecycledPage0(long pageId, byte flag, PageIO initIO) throws IgniteCheckedException { + long page = pageMem.acquirePage(grpId, pageId); + + try { + long pageAddr = pageMem.writeLock(grpId, pageId, page); + + try { + return initReusedPage(pageId, page, pageAddr, PageIdUtils.partId(pageId), flag, initIO); + } + finally { + pageMem.writeUnlock(grpId, pageId, page, null, true); + } + } + finally { + pageMem.releasePage(grpId, pageId, page); + } + } + + /** + * Reused page must obtain correctly assaembled page id, then initialized by proper {@link PageIO} instance and * non-zero {@code itemId} of reused page id must be saved into special place. * * @param reusedPageId Reused page id. 
@@ -1463,30 +1531,47 @@ protected long takeEmptyPage(int bucket, @Nullable IOVersions initIoVers, */ protected final long initReusedPage(long reusedPageId, long reusedPage, long reusedPageAddr, int partId, byte flag, PageIO initIo) throws IgniteCheckedException { + if (flag == FLAG_IDX) + partId = INDEX_PARTITION; long newPageId = PageIdUtils.pageId(partId, flag, PageIdUtils.pageIndex(reusedPageId)); - initIo.initNewPage(reusedPageAddr, newPageId, pageSize()); - boolean needWalDeltaRecord = needWalDeltaRecord(reusedPageId, reusedPage, null); - if (needWalDeltaRecord) { - assert PageIdUtils.partId(reusedPageId) == PageIdUtils.partId(newPageId) : - "Partition consistency failure: " + - "newPageId=" + Long.toHexString(newPageId) + " (newPartId: " + PageIdUtils.partId(newPageId) + ") " + - "reusedPageId=" + Long.toHexString(reusedPageId) + " (partId: " + PageIdUtils.partId(reusedPageId) + ")"; + if (initIo != null) { + initIo.initNewPage(reusedPageAddr, newPageId, pageSize()); + + if (needWalDeltaRecord) { + assert PageIdUtils.partId(reusedPageId) == PageIdUtils.partId(newPageId) : + "Partition consistency failure: " + + "newPageId=" + Long.toHexString(newPageId) + " (newPartId: " + PageIdUtils.partId(newPageId) + ") " + + "reusedPageId=" + Long.toHexString(reusedPageId) + " (partId: " + PageIdUtils.partId(reusedPageId) + ")"; - wal.log(new InitNewPageRecord(grpId, reusedPageId, initIo.getType(), - initIo.getVersion(), newPageId)); + wal.log(new InitNewPageRecord(grpId, reusedPageId, initIo.getType(), + initIo.getVersion(), newPageId)); + } } int itemId = PageIdUtils.itemId(reusedPageId); if (itemId != 0) { - PageIO.setRotatedIdPart(reusedPageAddr, itemId); + if (flag == FLAG_DATA) { + PageIO.setRotatedIdPart(reusedPageAddr, itemId); + + if (needWalDeltaRecord) + wal.log(new RotatedIdPartRecord(grpId, newPageId, itemId)); + } + else + newPageId = PageIdUtils.link(newPageId, itemId); + } + + long storedPageId = getPageId(reusedPageAddr); + + if (storedPageId != 
newPageId) { + PageIO.setPageId(reusedPageAddr, newPageId); if (needWalDeltaRecord) - wal.log(new RotatedIdPartRecord(grpId, newPageId, itemId)); + wal.log(new RecycleRecord(grpId, storedPageId, newPageId)); } return newPageId; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java index 7dd1e80e80b3de..2c20a026487330 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java @@ -72,7 +72,7 @@ import org.apache.ignite.marshaller.jdk.JdkMarshaller; import org.jetbrains.annotations.NotNull; -import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_DATA; +import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_AUX; import static org.apache.ignite.internal.pagemem.PageIdAllocator.OLD_METASTORE_PARTITION; /** @@ -196,7 +196,7 @@ else if (db.temporaryMetaStorage() != null) { /** {@inheritDoc} */ @Override public void beforeCheckpointBegin(Context ctx) { } - }); + }, dataRegion); } } } @@ -266,10 +266,11 @@ else if (!readOnly || getOrAllocateMetas(partId = PageIdAllocator.OLD_METASTORE_ reuseListRoot.isAllocated(), diagnosticMgr.pageLockTracker().createPageLockTracker(freeListName), cctx.kernalContext(), - null + null, + FLAG_AUX ) { @Override protected long allocatePageNoReuse() throws IgniteCheckedException { - return pageMem.allocatePage(grpId, partId, FLAG_DATA); + return pageMem.allocatePage(grpId, partId, FLAG_AUX); } }; @@ -291,7 +292,7 @@ else if (!readOnly || getOrAllocateMetas(partId = PageIdAllocator.OLD_METASTORE_ ); if (!readOnly) - ((GridCacheDatabaseSharedManager)db).addCheckpointListener(this); + ((GridCacheDatabaseSharedManager)db).addCheckpointListener(this, 
dataRegion); } } @@ -487,11 +488,13 @@ public void removeData(String key) throws IgniteCheckedException { /** */ private void checkRootsPageIdFlag(long treeRoot, long reuseListRoot) throws StorageException { - if (PageIdUtils.flag(treeRoot) != PageMemory.FLAG_DATA) + if (PageIdUtils.flag(treeRoot) != PageMemory.FLAG_AUX && + PageIdUtils.flag(treeRoot) != PageMemory.FLAG_DATA) throw new StorageException("Wrong tree root page id flag: treeRoot=" + U.hexLong(treeRoot) + ", METASTORAGE_CACHE_ID=" + METASTORAGE_CACHE_ID); - if (PageIdUtils.flag(reuseListRoot) != PageMemory.FLAG_DATA) + if (PageIdUtils.flag(reuseListRoot) != PageMemory.FLAG_AUX && + PageIdUtils.flag(reuseListRoot) != PageMemory.FLAG_DATA) throw new StorageException("Wrong reuse list root page id flag: reuseListRoot=" + U.hexLong(reuseListRoot) + ", METASTORAGE_CACHE_ID=" + METASTORAGE_CACHE_ID); } @@ -550,11 +553,11 @@ private boolean getOrAllocateMetas(int partId) throws IgniteCheckedException { //MetaStorage never encrypted so realPageSize == pageSize. 
io.initNewPage(pageAddr, partMetaId, pageMem.pageSize()); - treeRoot = pageMem.allocatePage(METASTORAGE_CACHE_ID, partId, PageMemory.FLAG_DATA); - reuseListRoot = pageMem.allocatePage(METASTORAGE_CACHE_ID, partId, PageMemory.FLAG_DATA); + treeRoot = pageMem.allocatePage(METASTORAGE_CACHE_ID, partId, PageMemory.FLAG_AUX); + reuseListRoot = pageMem.allocatePage(METASTORAGE_CACHE_ID, partId, PageMemory.FLAG_AUX); - assert PageIdUtils.flag(treeRoot) == PageMemory.FLAG_DATA; - assert PageIdUtils.flag(reuseListRoot) == PageMemory.FLAG_DATA; + assert PageIdUtils.flag(treeRoot) == PageMemory.FLAG_AUX; + assert PageIdUtils.flag(reuseListRoot) == PageMemory.FLAG_AUX; io.setTreeRoot(pageAddr, treeRoot); io.setReuseListRoot(pageAddr, reuseListRoot); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetastorageTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetastorageTree.java index afce4e21507a8f..7b1bb6e88b6769 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetastorageTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetastorageTree.java @@ -28,7 +28,7 @@ import org.apache.ignite.internal.processors.failure.FailureProcessor; import org.jetbrains.annotations.Nullable; -import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_DATA; +import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_AUX; /** * @@ -79,6 +79,7 @@ public MetastorageTree( reuseList, MetastorageBPlusIO.INNER_IO_VERSIONS, MetastorageBPlusIO.LEAF_IO_VERSIONS, + FLAG_AUX, failureProcessor, lockLsnr ); @@ -115,6 +116,6 @@ public MetastorageRowStore rowStore() { /** {@inheritDoc} */ @Override protected long allocatePageNoReuse() throws IgniteCheckedException { - return pageMem.allocatePage(grpId, partId, FLAG_DATA); + return pageMem.allocatePage(grpId, 
partId, FLAG_AUX); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/migration/UpgradePendingTreeToPerPartitionTask.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/migration/UpgradePendingTreeToPerPartitionTask.java index 4499980a384613..6d06673a340677 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/migration/UpgradePendingTreeToPerPartitionTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/migration/UpgradePendingTreeToPerPartitionTask.java @@ -22,6 +22,7 @@ import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.processors.cache.CacheGroupContext; @@ -150,7 +151,8 @@ private void processCacheGroup(CacheGroupContext grp) throws IgniteCheckedExcept pendingRootPage.pageId().pageId(), ((GridCacheOffheapManager)grp.offheap()).reuseListForIndex(null), false, - null + null, + PageIdAllocator.FLAG_IDX ); } finally { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryEx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryEx.java index 8bba4e1cf5506a..4ddcb941318998 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryEx.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryEx.java @@ -66,23 +66,13 @@ void writeUnlock(int grpId, long pageId, long page, Boolean walPlc, boolean dirtyFlag, boolean restore); /** - * Gets or allocates metadata page for specified grpId. 
- * - * @param grpId Group ID. - * @return Meta page for grpId. - * @throws IgniteCheckedException If failed. - */ - public long metaPageId(int grpId) throws IgniteCheckedException; - - /** - * Gets or allocates partition metadata page for specified grpId and partId. + * Gets partition metadata page for specified grpId and partId. * * @param grpId Group ID. * @param partId Partition ID. * @return Meta page for grpId and partId. - * @throws IgniteCheckedException If failed. */ - public long partitionMetaPageId(int grpId, int partId) throws IgniteCheckedException; + public long partitionMetaPageId(int grpId, int partId); /** * @see #acquirePage(int, long) @@ -154,6 +144,9 @@ public void checkpointWritePage( CheckpointMetricsTracker tracker ) throws IgniteCheckedException; + /** */ + public PageReadWriteManager pageManager(); + /** * Marks partition as invalid / outdated. * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java index f349cad46a11b0..b0b28bee9d800b 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java @@ -57,7 +57,6 @@ import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.PageUtils; -import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.pagemem.wal.WALIterator; import org.apache.ignite.internal.pagemem.wal.record.CheckpointRecord; @@ -196,8 +195,8 @@ public class PageMemoryImpl implements PageMemoryEx { /** */ private final ExecutorService asyncRunner; - /** Page 
store manager. */ - private IgnitePageStoreManager storeMgr; + /** Page manager. */ + private final PageReadWriteManager pmPageMgr; /** */ private IgniteWriteAheadLogManager walMgr; @@ -276,6 +275,7 @@ public class PageMemoryImpl implements PageMemoryEx { * @param directMemoryProvider Memory allocator to use. * @param sizes segments sizes, last is checkpoint pool size. * @param ctx Cache shared context. + * @param pmPageMgr Page store manager. * @param pageSize Page size. * @param flushDirtyPage write callback invoked when a dirty page is removed for replacement. * @param changeTracker Callback invoked to track changes in pages. @@ -288,6 +288,7 @@ public PageMemoryImpl( DirectMemoryProvider directMemoryProvider, long[] sizes, GridCacheSharedContext ctx, + PageReadWriteManager pmPageMgr, int pageSize, PageStoreWriter flushDirtyPage, @Nullable GridInClosure3X changeTracker, @@ -315,12 +316,12 @@ public PageMemoryImpl( this.throttlingPlc = throttlingPlc != null ? throttlingPlc : ThrottlingPolicy.CHECKPOINT_BUFFER_ONLY; this.cpProgressProvider = cpProgressProvider; - storeMgr = ctx.pageStore(); + this.pmPageMgr = pmPageMgr; walMgr = ctx.wal(); encMgr = ctx.kernalContext().encryption(); encryptionDisabled = ctx.gridConfig().getEncryptionSpi() instanceof NoopEncryptionSpi; - assert storeMgr != null; + assert pmPageMgr != null; assert walMgr != null; assert encMgr != null; @@ -513,7 +514,7 @@ else if (throttlingPlc == ThrottlingPolicy.CHECKPOINT_BUFFER_ONLY) /** {@inheritDoc} */ @Override public long allocatePage(int grpId, int partId, byte flags) throws IgniteCheckedException { - assert flags == PageIdAllocator.FLAG_DATA && partId <= PageIdAllocator.MAX_PARTITION_ID || + assert flags != PageIdAllocator.FLAG_IDX && partId <= PageIdAllocator.MAX_PARTITION_ID || flags == PageIdAllocator.FLAG_IDX && partId == PageIdAllocator.INDEX_PARTITION : "flags = " + flags + ", partId = " + partId; @@ -523,7 +524,7 @@ else if (throttlingPlc == ThrottlingPolicy.CHECKPOINT_BUFFER_ONLY) 
if (isThrottlingEnabled()) writeThrottle.onMarkDirty(false); - long pageId = storeMgr.allocatePage(grpId, partId, flags); + long pageId = pmPageMgr.allocatePage(grpId, partId, flags); assert PageIdUtils.pageIndex(pageId) > 0; //it's crucial for tracking pages (zero page is super one) @@ -535,12 +536,15 @@ else if (throttlingPlc == ThrottlingPolicy.CHECKPOINT_BUFFER_ONLY) DelayedDirtyPageStoreWrite delayedWriter = delayedPageReplacementTracker != null ? delayedPageReplacementTracker.delayedPageWrite() : null; - FullPageId fullId = new FullPageId(pageId, grpId); - seg.writeLock().lock(); - boolean isTrackingPage = - changeTracker != null && trackingIO.trackingPageFor(pageId, realPageSize(grpId)) == pageId; + boolean isTrackingPage = changeTracker != null && + PageIdUtils.pageIndex(trackingIO.trackingPageFor(pageId, realPageSize(grpId))) == PageIdUtils.pageIndex(pageId); + + if (isTrackingPage && PageIdUtils.flag(pageId) == PageIdAllocator.FLAG_AUX) + pageId = PageIdUtils.pageId(PageIdUtils.partId(pageId), PageIdAllocator.FLAG_DATA, PageIdUtils.pageIndex(pageId)); + + FullPageId fullId = new FullPageId(pageId, grpId); try { long relPtr = seg.loadedPages.get( @@ -672,13 +676,6 @@ private DataRegionConfiguration getDataRegionConfiguration() { return false; } - /** {@inheritDoc} */ - @Override public long metaPageId(int grpId) { - assert started; - - return storeMgr.metaPageId(grpId); - } - /** {@inheritDoc} */ @Override public long partitionMetaPageId(int grpId, int partId) { assert started; @@ -881,7 +878,7 @@ else if (relPtr == OUTDATED_REL_PTR) { long actualPageId = 0; try { - storeMgr.read(grpId, pageId, buf); + pmPageMgr.read(grpId, pageId, buf, false); statHolder.trackPhysicalAndLogicalRead(pageAddr); @@ -1187,6 +1184,11 @@ private boolean isThrottlingEnabled() { writeThrottle.onFinishCheckpoint(); } + /** {@inheritDoc} */ + @Override public PageReadWriteManager pageManager() { + return pmPageMgr; + } + /** {@inheritDoc} */ @Override public void 
checkpointWritePage( FullPageId fullId, @@ -1827,14 +1829,14 @@ public int activePagesCount() { /** {@inheritDoc} */ @Override public int checkpointBufferPagesCount() { - return checkpointPool.size(); + return checkpointPool == null ? 0 : checkpointPool.size(); } /** * Number of used pages in checkpoint buffer. */ public int checkpointBufferPagesSize() { - return checkpointPool.pages(); + return checkpointPool == null ? 0 : checkpointPool.pages(); } /** @@ -2148,7 +2150,7 @@ private boolean preparePageRemoval(FullPageId fullPageId, long absPtr, PageStore assert writeLock().isHeldByCurrentThread(); // Do not evict cache meta pages. - if (fullPageId.pageId() == storeMgr.metaPageId(fullPageId.groupId())) + if (fullPageId.pageId() == META_PAGE_ID) return false; if (PageHeader.isAcquired(absPtr)) @@ -2161,7 +2163,7 @@ private boolean preparePageRemoval(FullPageId fullPageId, long absPtr, PageStore // Can evict a dirty page only if should be written by a checkpoint. // These pages does not have tmp buffer. 
if (checkpointPages != null && checkpointPages.allowToSave(fullPageId)) { - assert storeMgr != null; + assert pmPageMgr != null; memMetrics.updatePageReplaceRate(U.currentTimeMillis() - PageHeader.readTimestamp(absPtr)); @@ -2334,7 +2336,7 @@ private long removePageForReplacement(PageStoreWriter saveDirtyPage) throws Igni CheckpointPages checkpointPages = this.checkpointPages; if (relRmvAddr == rndAddr || pinned || skip || - fullId.pageId() == storeMgr.metaPageId(fullId.groupId()) || + fullId.pageId() == META_PAGE_ID || (dirty && (checkpointPages == null || !checkpointPages.contains(fullId))) ) { i--; @@ -2487,10 +2489,14 @@ private long tryToFindSequentially(int cap, PageStoreWriter saveDirtyPage) throw ", pinnedInSegment=" + pinnedCnt + ", failedToPrepare=" + failToPrepare + ']' + U.nl() + "Out of memory in data region [" + - "name=" + dataRegionCfg.getName() + - ", initSize=" + U.readableSize(dataRegionCfg.getInitialSize(), false) + - ", maxSize=" + U.readableSize(dataRegionCfg.getMaxSize(), false) + - ", persistenceEnabled=" + dataRegionCfg.isPersistenceEnabled() + "] Try the following:" + U.nl() + + (dataRegionCfg == null ? 
"NULL" : ( + "name=" + dataRegionCfg.getName() + + ", initSize=" + U.readableSize(dataRegionCfg.getInitialSize(), false) + + ", maxSize=" + U.readableSize(dataRegionCfg.getMaxSize(), false) + + ", persistenceEnabled=" + dataRegionCfg.isPersistenceEnabled() + )) + + "]" + + " Try the following:" + U.nl() + " ^-- Increase maximum off-heap memory size (DataRegionConfiguration.maxSize)" + U.nl() + " ^-- Enable Ignite persistence (DataRegionConfiguration.persistenceEnabled)" + U.nl() + " ^-- Enable eviction or expiration policies" diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageReadWriteManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageReadWriteManager.java new file mode 100644 index 00000000000000..531da8bbef817c --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageReadWriteManager.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.pagemem; + +import java.nio.ByteBuffer; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.pagemem.store.PageStore; + +/** */ +public interface PageReadWriteManager { + /** + * Reads a page for the given cache ID. Cache ID may be {@code 0} if the page is a meta page. + * + * @param grpId Cache group ID. + * @param pageId PageID to read. + * @param pageBuf Page buffer to write to. + * @param keepCrc Keep CRC flag. + * @throws IgniteCheckedException If failed to read the page. + */ + public void read(int grpId, long pageId, ByteBuffer pageBuf, boolean keepCrc) throws IgniteCheckedException; + + /** + * Writes the page for the given cache ID. Cache ID may be {@code 0} if the page is a meta page. + * + * @param grpId Cache group ID. + * @param pageId Page ID. + * @param pageBuf Page buffer to write. + * @throws IgniteCheckedException If failed to write page. + */ + public PageStore write(int grpId, long pageId, ByteBuffer pageBuf, int tag, boolean calculateCrc) throws IgniteCheckedException; + + /** + * Allocates a page for the given page space. + * + * @param grpId Cache group ID. + * @param partId Partition ID. Used only if {@code flags} is equal to {@link PageMemory#FLAG_DATA}. + * @param flags Page allocation flags. + * @return Allocated page ID. + * @throws IgniteCheckedException If IO exception occurred while allocating a page ID. 
+ */ + public long allocatePage(int grpId, int partId, byte flags) throws IgniteCheckedException; +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageReadWriteManagerImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageReadWriteManagerImpl.java new file mode 100644 index 00000000000000..5fbd7c15354c81 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageReadWriteManagerImpl.java @@ -0,0 +1,146 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.pagemem; + +import java.nio.ByteBuffer; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.failure.FailureContext; +import org.apache.ignite.failure.FailureType; +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.pagemem.PageIdUtils; +import org.apache.ignite.internal.pagemem.store.PageStore; +import org.apache.ignite.internal.pagemem.store.PageStoreCollection; +import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.processors.cache.persistence.StorageException; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; +import org.apache.ignite.internal.util.tostring.GridToStringExclude; +import org.apache.ignite.internal.util.typedef.internal.S; + +import static org.apache.ignite.internal.pagemem.PageIdAllocator.INDEX_PARTITION; +import static org.apache.ignite.internal.pagemem.PageIdAllocator.MAX_PARTITION_ID; + +/** */ +public class PageReadWriteManagerImpl implements PageReadWriteManager { + /** */ + private final GridKernalContext ctx; + + /** */ + @GridToStringExclude + protected final PageStoreCollection pageStores; + + /** */ + @SuppressWarnings("unused") + private final String name; + + /** + * @param ctx Kernal context. + * @param pageStores Page stores. 
+ */ + public PageReadWriteManagerImpl( + GridKernalContext ctx, + PageStoreCollection pageStores, + String name + ) { + this.ctx = ctx; + this.pageStores = pageStores; + this.name = name; + } + + /** {@inheritDoc} */ + @Override public void read(int grpId, long pageId, ByteBuffer pageBuf, boolean keepCrc) throws IgniteCheckedException { + PageStore store = pageStores.getStore(grpId, PageIdUtils.partId(pageId)); + + try { + store.read(pageId, pageBuf, keepCrc); + + ctx.compress().decompressPage(pageBuf, store.getPageSize()); + } + catch (StorageException e) { + ctx.failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); + + throw e; + } + } + + /** {@inheritDoc} */ + @Override public PageStore write(int grpId, long pageId, ByteBuffer pageBuf, int tag, boolean calculateCrc) throws IgniteCheckedException { + int partId = PageIdUtils.partId(pageId); + + PageStore store = pageStores.getStore(grpId, partId); + + try { + int pageSize = store.getPageSize(); + int compressedPageSize = pageSize; + + GridCacheContext cctx0 = ctx.cache().context().cacheContext(grpId); + + if (cctx0 != null) { + assert pageBuf.position() == 0 && pageBuf.limit() == pageSize : pageBuf; + + ByteBuffer compressedPageBuf = cctx0.compress().compressPage(pageBuf, store); + + if (compressedPageBuf != pageBuf) { + compressedPageSize = PageIO.getCompressedSize(compressedPageBuf); + + if (!calculateCrc) { + calculateCrc = true; + PageIO.setCrc(compressedPageBuf, 0); // It will be recalculated over compressed data further. + } + + PageIO.setCrc(pageBuf, 0); // It is expected to be reset to 0 after each write. + pageBuf = compressedPageBuf; + } + } + + store.write(pageId, pageBuf, tag, calculateCrc); + + if (pageSize > compressedPageSize) + store.punchHole(pageId, compressedPageSize); // TODO maybe add async punch mode? 
+ } + catch (StorageException e) { + ctx.failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); + + throw e; + } + + return store; + } + + /** {@inheritDoc} */ + @Override public long allocatePage(int grpId, int partId, byte flags) throws IgniteCheckedException { + assert partId <= MAX_PARTITION_ID || partId == INDEX_PARTITION; + + PageStore store = pageStores.getStore(grpId, partId); + + try { + long pageIdx = store.allocatePage(); + + return PageIdUtils.pageId(partId, flags, (int)pageIdx); + } + catch (StorageException e) { + ctx.failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); + + throw e; + } + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(PageReadWriteManagerImpl.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/partstate/GroupPartitionId.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/partstate/GroupPartitionId.java index c2368276349667..275fb551d6e02c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/partstate/GroupPartitionId.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/partstate/GroupPartitionId.java @@ -20,7 +20,7 @@ import org.apache.ignite.internal.pagemem.FullPageId; import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageIdUtils; -import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.pagemem.store.PageStore; import org.apache.ignite.internal.util.typedef.internal.S; import org.jetbrains.annotations.NotNull; @@ -53,7 +53,15 @@ public GroupPartitionId(final int grpId, final int partId) { * @return flag to be used for partition */ public static byte getFlagByPartId(final int partId) { - return partId == PageIdAllocator.INDEX_PARTITION ? 
PageMemory.FLAG_IDX : PageMemory.FLAG_DATA; + return partId == PageIdAllocator.INDEX_PARTITION ? PageIdAllocator.FLAG_IDX : PageIdAllocator.FLAG_DATA; + } + + /** + * @param partId Partition ID. + * @return page store type to be used for partition + */ + public static byte getTypeByPartId(final int partId) { + return partId == PageIdAllocator.INDEX_PARTITION ? PageStore.TYPE_IDX : PageStore.TYPE_DATA; } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/partstorage/PartitionMetaStorageImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/partstorage/PartitionMetaStorageImpl.java index 0e9062a09b8951..acf83342212b45 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/partstorage/PartitionMetaStorageImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/partstorage/PartitionMetaStorageImpl.java @@ -49,6 +49,7 @@ public class PartitionMetaStorageImpl extends AbstractFreeLi * @param wal Wal. * @param metaPageId Meta page id. * @param initNew Initialize new. + * @param pageFlag Default flag value for allocated pages. 
*/ public PartitionMetaStorageImpl( int cacheId, String name, @@ -60,9 +61,10 @@ public PartitionMetaStorageImpl( boolean initNew, PageLockListener lsnr, GridKernalContext ctx, - AtomicLong pageListCacheLimit + AtomicLong pageListCacheLimit, + byte pageFlag ) throws IgniteCheckedException { - super(cacheId, name, memMetrics, memPlc, reuseList, wal, metaPageId, initNew, lsnr, ctx, pageListCacheLimit); + super(cacheId, name, memMetrics, memPlc, reuseList, wal, metaPageId, initNew, lsnr, ctx, pageListCacheLimit, pageFlag); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/snapshot/IgniteSnapshotManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/snapshot/IgniteSnapshotManager.java index 03e7e1c336687a..1d9f385fec5363 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/snapshot/IgniteSnapshotManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/snapshot/IgniteSnapshotManager.java @@ -87,7 +87,6 @@ import org.apache.ignite.internal.processors.cluster.DiscoveryDataClusterState; import org.apache.ignite.internal.processors.marshaller.MappedName; import org.apache.ignite.internal.processors.metric.MetricRegistry; -import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; import org.apache.ignite.internal.processors.task.GridInternal; import org.apache.ignite.internal.util.GridBusyLock; import org.apache.ignite.internal.util.distributed.DistributedProcess; @@ -133,7 +132,7 @@ import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.PART_FILE_TEMPLATE; import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.getPartitionFile; import static org.apache.ignite.internal.processors.cache.persistence.filename.PdsConsistentIdProcessor.DB_DEFAULT_FOLDER; -import static 
org.apache.ignite.internal.processors.cache.persistence.partstate.GroupPartitionId.getFlagByPartId; +import static org.apache.ignite.internal.processors.cache.persistence.partstate.GroupPartitionId.getTypeByPartId; import static org.apache.ignite.internal.util.IgniteUtils.isLocalNodeCoordinator; import static org.apache.ignite.internal.util.distributed.DistributedProcess.DistributedProcessType.END_SNAPSHOT; import static org.apache.ignite.internal.util.distributed.DistributedProcess.DistributedProcessType.START_SNAPSHOT; @@ -1207,9 +1206,9 @@ public LocalSnapshotSender(String snpName) { try (FileIO fileIo = ioFactory.create(delta, READ); FilePageStore pageStore = (FilePageStore)storeFactory .apply(pair.getGroupId(), false) - .createPageStore(getFlagByPartId(pair.getPartitionId()), + .createPageStore(getTypeByPartId(pair.getPartitionId()), snpPart::toPath, - new LongAdderMetric("NO_OP", null)) + val -> {}) ) { ByteBuffer pageBuf = ByteBuffer.allocate(pageSize) .order(ByteOrder.nativeOrder()); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java index a46b3a8a7dcf3f..ca92a71c0800b2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java @@ -59,9 +59,11 @@ import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusMetaIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.LongListReuseBag; import 
org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseBag; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; +import org.apache.ignite.internal.processors.cache.persistence.tree.util.InsertLast; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandlerWrapper; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageLockListener; @@ -90,6 +92,7 @@ import static org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree.Result.NOT_FOUND; import static org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree.Result.RETRY; import static org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree.Result.RETRY_ROOT; +import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver.DEFAULT_PAGE_IO_RESOLVER; /** * Abstract B+Tree. @@ -751,6 +754,7 @@ private class InitRoot extends PageHandler { * @param reuseList Reuse list. * @param innerIos Inner IO versions. * @param leafIos Leaf IO versions. + * @param pageFlag Default flag value for allocated pages. * @param failureProcessor if the tree is corrupted. * @throws IgniteCheckedException If failed. */ @@ -765,6 +769,7 @@ protected BPlusTree( ReuseList reuseList, IOVersions> innerIos, IOVersions> leafIos, + byte pageFlag, @Nullable FailureProcessor failureProcessor, @Nullable PageLockListener lockLsnr ) throws IgniteCheckedException { @@ -777,8 +782,10 @@ protected BPlusTree( globalRmvId, metaPageId, reuseList, + pageFlag, failureProcessor, - lockLsnr + lockLsnr, + DEFAULT_PAGE_IO_RESOLVER ); setIos(innerIos, leafIos); @@ -793,6 +800,7 @@ protected BPlusTree( * @param globalRmvId Remove ID. * @param metaPageId Meta page ID. * @param reuseList Reuse list. + * @param pageFlag Default flag value for allocated pages. * @param failureProcessor if the tree is corrupted. 
* @throws IgniteCheckedException If failed. */ @@ -805,10 +813,12 @@ protected BPlusTree( AtomicLong globalRmvId, long metaPageId, ReuseList reuseList, + byte pageFlag, @Nullable FailureProcessor failureProcessor, - @Nullable PageLockListener lsnr + @Nullable PageLockListener lsnr, + PageIoResolver pageIoRslvr ) throws IgniteCheckedException { - super(cacheGrpId, grpName, pageMem, wal, lsnr); + super(cacheGrpId, grpName, pageMem, wal, lsnr, pageIoRslvr, pageFlag); assert !F.isEmpty(name); @@ -5289,6 +5299,9 @@ private int findInsertionPoint(int lvl, BPlusIO io, long buf, int low, int cn throws IgniteCheckedException { assert row != null; + if (row instanceof InsertLast) + return -cnt - 1; + int high = cnt - 1; while (low <= high) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIO.java index 070d426813eab9..eb90b2f01fbe9c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIO.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIO.java @@ -30,6 +30,7 @@ import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxLogInnerIO; import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxLogLeafIO; import org.apache.ignite.internal.processors.cache.persistence.IndexStorageImpl; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.LinkMap; import org.apache.ignite.internal.processors.cache.persistence.freelist.io.PagesListMetaIO; import org.apache.ignite.internal.processors.cache.persistence.freelist.io.PagesListNodeIO; import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetastorageBPlusIO; @@ -258,6 +259,12 @@ public abstract class PageIO { /** */ public static final short T_MARKER_PAGE = 33; + /** */ + public static final short 
T_DEFRAG_LINK_MAPPING_INNER = 34; + + /** */ + public static final short T_DEFRAG_LINK_MAPPING_LEAF = 35; + /** Index for payload == 1. */ public static final short T_H2_EX_REF_LEAF_START = 10_000; @@ -799,6 +806,12 @@ public static > Q getBPlusIO(int type, int ver) throws Igni case T_DATA_REF_METASTORAGE_LEAF: return (Q)MetastorageBPlusIO.LEAF_IO_VERSIONS.forVersion(ver); + case T_DEFRAG_LINK_MAPPING_INNER: + return (Q) LinkMap.INNER_IO_VERSIONS.forVersion(ver); + + case T_DEFRAG_LINK_MAPPING_LEAF: + return (Q) LinkMap.LEAF_IO_VERSIONS.forVersion(ver); + default: // For tests. if (innerTestIO != null && innerTestIO.getType() == type && innerTestIO.getVersion() == ver) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIoResolver.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIoResolver.java new file mode 100644 index 00000000000000..2354c09bc65b63 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIoResolver.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.tree.io; + +import org.apache.ignite.IgniteCheckedException; + +/** */ +public interface PageIoResolver { + /** */ + public static final PageIoResolver DEFAULT_PAGE_IO_RESOLVER = PageIO::getPageIO; + + /** */ + PageIO resolve(long pageAddr) throws IgniteCheckedException; +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageMetaIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageMetaIO.java index b25bae4ec8e93b..84735c2bef6c7f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageMetaIO.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageMetaIO.java @@ -56,7 +56,8 @@ public class PageMetaIO extends PageIO { /** */ public static final IOVersions VERSIONS = new IOVersions<>( - new PageMetaIO(1) + new PageMetaIO(1), + new PageMetaIOV2(2) ); /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageMetaIOV2.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageMetaIOV2.java new file mode 100644 index 00000000000000..f9f956d45082fa --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageMetaIOV2.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.tree.io; + +import org.apache.ignite.internal.pagemem.PageUtils; + +/** + * IO for index partition metadata page. + */ +public class PageMetaIOV2 extends PageMetaIO { + /** Last reencrypted page index offset. */ + private static final int ENCRYPT_PAGE_IDX_OFF = END_OF_PAGE_META; + + /** Total pages for reencryption offset. */ + private static final int ENCRYPT_PAGE_MAX_OFF = ENCRYPT_PAGE_IDX_OFF + 4; + + /** + * @param ver Version. + */ + public PageMetaIOV2(int ver) { + super(ver); + } + + /** + * @param pageAddr Page address. + * @return Index of the last reencrypted page. + */ + public int getEncryptedPageIndex(long pageAddr) { + return PageUtils.getInt(pageAddr, ENCRYPT_PAGE_IDX_OFF); + } + + /** + * @param pageAddr Page address. + * @param pageIdx Index of the last reencrypted page. + * + * @return {@code true} if value has changed as a result of this method's invocation. + */ + public boolean setEncryptedPageIndex(long pageAddr, int pageIdx) { + if (getEncryptedPageIndex(pageAddr) == pageIdx) + return false; + + PageUtils.putInt(pageAddr, ENCRYPT_PAGE_IDX_OFF, pageIdx); + + return true; + } + + /** + * @param pageAddr Page address. + * @return Total pages to be reencrypted. + */ + public int getEncryptedPageCount(long pageAddr) { + return PageUtils.getInt(pageAddr, ENCRYPT_PAGE_MAX_OFF); + } + + /** + * @param pageAddr Page address. + * @param pagesCnt Total pages to be reencrypted. + * + * @return {@code true} if value has changed as a result of this method's invocation.
+ */ + public boolean setEncryptedPageCount(long pageAddr, int pagesCnt) { + if (getEncryptedPageCount(pageAddr) == pagesCnt) + return false; + + PageUtils.putInt(pageAddr, ENCRYPT_PAGE_MAX_OFF, pagesCnt); + + return true; + } + + /** {@inheritDoc} */ + @Override public void initNewPage(long pageAddr, long pageId, int pageSize) { + super.initNewPage(pageAddr, pageId, pageSize); + + setEncryptedPageCount(pageAddr, 0); + setEncryptedPageIndex(pageAddr, 0); + } + + /** + * Upgrade page to PageMetaIOV2. + * + * @param pageAddr Page address. + */ + public void upgradePage(long pageAddr) { + assert PageIO.getType(pageAddr) == getType(); + + PageIO.setVersion(pageAddr, getVersion()); + + setEncryptedPageIndex(pageAddr, 0); + setEncryptedPageCount(pageAddr, 0); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PagePartitionMetaIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PagePartitionMetaIO.java index b58b5c0ab1a055..bb0634011e5ace 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PagePartitionMetaIO.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PagePartitionMetaIO.java @@ -48,7 +48,8 @@ public class PagePartitionMetaIO extends PageMetaIO { /** */ public static final IOVersions VERSIONS = new IOVersions<>( new PagePartitionMetaIO(1), - new PagePartitionMetaIOV2(2) + new PagePartitionMetaIOV2(2), + new PagePartitionMetaIOV3(3) ); /** {@inheritDoc} */ @@ -242,13 +243,24 @@ public boolean setGapsLink(long pageAddr, long link) { @Override protected void printPage(long pageAddr, int pageSize, GridStringBuilder sb) throws IgniteCheckedException { super.printPage(pageAddr, pageSize, sb); + sb.a(",\nPagePartitionMeta[\n"); + + printFields(pageAddr, sb); + + sb.a("\n]"); + } + + /** + * @param pageAddr Address. + * @param sb String builder. 
+ */ + protected void printFields(long pageAddr, GridStringBuilder sb) { byte state = getPartitionState(pageAddr); - sb.a(",\nPagePartitionMeta[\n\tsize=").a(getSize(pageAddr)) + sb.a("\tsize=").a(getSize(pageAddr)) .a(",\n\tupdateCounter=").a(getUpdateCounter(pageAddr)) .a(",\n\tglobalRemoveId=").a(getGlobalRemoveId(pageAddr)) .a(",\n\tpartitionState=").a(state).a("(").a(GridDhtPartitionState.fromOrdinal(state)).a(")") - .a(",\n\tcountersPageId=").a(getCountersPageId(pageAddr)) - .a("\n]"); + .a(",\n\tcountersPageId=").a(getCountersPageId(pageAddr)).toString(); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PagePartitionMetaIOV2.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PagePartitionMetaIOV2.java index efdfecd699ff1f..6c307754097246 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PagePartitionMetaIOV2.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PagePartitionMetaIOV2.java @@ -18,9 +18,7 @@ package org.apache.ignite.internal.processors.cache.persistence.tree.io; -import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.pagemem.PageUtils; -import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState; import org.apache.ignite.internal.util.GridStringBuilder; /** @@ -37,6 +35,9 @@ public class PagePartitionMetaIOV2 extends PagePartitionMetaIO { /** */ private static final int GAPS_LINK = PART_META_REUSE_LIST_ROOT_OFF + 8; + /** */ + public static final int END_OF_PARTITION_PAGE_META_V2 = GAPS_LINK + 8; + /** * @param ver Version. 
*/ @@ -102,10 +103,10 @@ public boolean setGapsLink(long pageAddr, long link) { } /** {@inheritDoc} */ - @Override protected void printPage(long pageAddr, int pageSize, GridStringBuilder sb) throws IgniteCheckedException { - byte state = getPartitionState(pageAddr); + @Override protected void printFields(long pageAddr, GridStringBuilder sb) { + super.printFields(pageAddr, sb); - sb.a("PagePartitionMeta[\n\ttreeRoot=").a(getReuseListRoot(pageAddr)); + sb.a("\ttreeRoot=").a(getReuseListRoot(pageAddr)); sb.a(",\n\tpendingTreeRoot=").a(getLastSuccessfulFullSnapshotId(pageAddr)); sb.a(",\n\tlastSuccessfulFullSnapshotId=").a(getLastSuccessfulFullSnapshotId(pageAddr)); sb.a(",\n\tlastSuccessfulSnapshotId=").a(getLastSuccessfulSnapshotId(pageAddr)); @@ -113,13 +114,7 @@ public boolean setGapsLink(long pageAddr, long link) { sb.a(",\n\tlastSuccessfulSnapshotTag=").a(getLastSuccessfulSnapshotTag(pageAddr)); sb.a(",\n\tlastAllocatedPageCount=").a(getLastAllocatedPageCount(pageAddr)); sb.a(",\n\tcandidatePageCount=").a(getCandidatePageCount(pageAddr)); - sb.a(",\n\tsize=").a(getSize(pageAddr)); - sb.a(",\n\tupdateCounter=").a(getUpdateCounter(pageAddr)); - sb.a(",\n\tglobalRemoveId=").a(getGlobalRemoveId(pageAddr)); - sb.a(",\n\tpartitionState=").a(state).a("(").a(GridDhtPartitionState.fromOrdinal(state)).a(")"); - sb.a(",\n\tcountersPageId=").a(getCountersPageId(pageAddr)); sb.a(",\n\tcntrUpdDataPageId=").a(getGapsLink(pageAddr)); - sb.a("\n]"); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PagePartitionMetaIOV3.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PagePartitionMetaIOV3.java new file mode 100644 index 00000000000000..1c81d2ec84d805 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PagePartitionMetaIOV3.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or 
more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.ignite.internal.processors.cache.persistence.tree.io; + +import org.apache.ignite.internal.pagemem.PageUtils; +import org.apache.ignite.internal.util.GridStringBuilder; + +/** + * IO for partition metadata pages. + */ +public class PagePartitionMetaIOV3 extends PagePartitionMetaIOV2 { + /** Last reencrypted page index offset. */ + private static final int ENCRYPT_PAGE_IDX_OFF = END_OF_PARTITION_PAGE_META_V2; + + /** Total pages to be reencrypted offset. */ + private static final int ENCRYPT_PAGE_MAX_OFF = ENCRYPT_PAGE_IDX_OFF + 4; + + /** + * @param ver Version. + */ + public PagePartitionMetaIOV3(int ver) { + super(ver); + } + + /** {@inheritDoc} */ + @Override public void initNewPage(long pageAddr, long pageId, int pageSize) { + super.initNewPage(pageAddr, pageId, pageSize); + + setEncryptedPageIndex(pageAddr, 0); + setEncryptedPageCount(pageAddr, 0); + } + + /** + * @param pageAddr Page address. + * @return Index of the last reencrypted page. + */ + public int getEncryptedPageIndex(long pageAddr) { + return PageUtils.getInt(pageAddr, ENCRYPT_PAGE_IDX_OFF); + } + + /** + * @param pageAddr Page address. + * @param pageIdx Index of the last reencrypted page. 
+ * + * @return {@code true} if value has changed as a result of this method's invocation. + */ + public boolean setEncryptedPageIndex(long pageAddr, int pageIdx) { + if (getEncryptedPageIndex(pageAddr) == pageIdx) + return false; + + PageUtils.putInt(pageAddr, ENCRYPT_PAGE_IDX_OFF, pageIdx); + + return true; + } + + /** + * @param pageAddr Page address. + * @return Total pages to be reencrypted. + */ + public int getEncryptedPageCount(long pageAddr) { + return PageUtils.getInt(pageAddr, ENCRYPT_PAGE_MAX_OFF); + } + + /** + * @param pageAddr Page address. + * @param pagesCnt Total pages to be reencrypted. + * + * @return {@code true} if value has changed as a result of this method's invocation. + */ + public boolean setEncryptedPageCount(long pageAddr, int pagesCnt) { + if (getEncryptedPageCount(pageAddr) == pagesCnt) + return false; + + PageUtils.putInt(pageAddr, ENCRYPT_PAGE_MAX_OFF, pagesCnt); + + return true; + } + + /** {@inheritDoc} */ + @Override protected void printFields(long pageAddr, GridStringBuilder sb) { + super.printFields(pageAddr, sb); + + sb.a(",\n\tencryptedPageIndex=").a(getEncryptedPageIndex(pageAddr)); + sb.a(",\n\tencryptedPageCount=").a(getEncryptedPageCount(pageAddr)); + } + + /** + * Upgrade page to PagePartitionMetaIOV3. + * + * @param pageAddr Page address.
+ */ + @Override public void upgradePage(long pageAddr) { + assert PageIO.getType(pageAddr) == getType(); + + int ver = PageIO.getVersion(pageAddr); + + assert ver < getVersion(); + + if (ver < 2) + super.upgradePage(pageAddr); + + PageIO.setVersion(pageAddr, getVersion()); + + setEncryptedPageIndex(pageAddr, 0); + setEncryptedPageCount(pageAddr, 0); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/TrackingPageIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/TrackingPageIO.java index b2f52a5860a2b3..5fa1cddd7b7528 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/TrackingPageIO.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/TrackingPageIO.java @@ -19,6 +19,7 @@ import java.nio.ByteBuffer; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.processors.cache.persistence.snapshot.TrackingPageIsCorruptedException; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler; @@ -356,7 +357,11 @@ public long trackingPageFor(long pageId, int pageSize) { int pageIdx = ((PageIdUtils.pageIndex(pageId) - COUNT_OF_EXTRA_PAGE) / countOfPageToTrack(pageSize)) * countOfPageToTrack(pageSize) + COUNT_OF_EXTRA_PAGE; - long trackingPageId = PageIdUtils.pageId(PageIdUtils.partId(pageId), PageIdUtils.flag(pageId), pageIdx); + byte flag = PageIdUtils.partId(pageId) == PageIdAllocator.INDEX_PARTITION ? 
+ PageIdAllocator.FLAG_IDX : + PageIdAllocator.FLAG_DATA; + + long trackingPageId = PageIdUtils.pageId(PageIdUtils.partId(pageId), flag, pageIdx); assert PageIdUtils.pageIndex(trackingPageId) <= PageIdUtils.pageIndex(pageId); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/reuse/ReuseList.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/reuse/ReuseList.java index aaab186618bc23..d2a1ba057b6140 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/reuse/ReuseList.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/reuse/ReuseList.java @@ -18,6 +18,10 @@ package org.apache.ignite.internal.processors.cache.persistence.tree.reuse; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.pagemem.FullPageId; +import org.apache.ignite.internal.pagemem.PageIdAllocator; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; +import org.jetbrains.annotations.Nullable; /** * Reuse list. @@ -40,4 +44,18 @@ public interface ReuseList { * @throws IgniteCheckedException If failed. */ public long recycledPagesCount() throws IgniteCheckedException; + + /** + * Converts recycled page id back to a usable id. Might modify page content as well if flag is changing. + * + * @param pageId Id of the recycled page. + * @param flag Flag value for the page. One of {@link PageIdAllocator#FLAG_DATA}, {@link PageIdAllocator#FLAG_IDX} + * or {@link PageIdAllocator#FLAG_AUX}. + * @param initIO Page IO to reinit reused page. + * @return Updated page id. + * @throws IgniteCheckedException If failed. 
+ * + * @see FullPageId + */ + long initRecycledPage(long pageId, byte flag, @Nullable PageIO initIO) throws IgniteCheckedException; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/reuse/ReuseListImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/reuse/ReuseListImpl.java index cf3897f4703360..5d2789be305c30 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/reuse/ReuseListImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/reuse/ReuseListImpl.java @@ -25,6 +25,7 @@ import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.processors.cache.persistence.freelist.PagesList; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageLockListener; /** @@ -48,6 +49,7 @@ public class ReuseListImpl extends PagesList implements ReuseList { * @param wal Write ahead log manager. * @param metaPageId Metadata page ID. * @param initNew {@code True} if new metadata should be initialized. + * @param pageFlag Default flag value for allocated pages. * @throws IgniteCheckedException If failed. 
*/ public ReuseListImpl( @@ -59,7 +61,8 @@ public ReuseListImpl( boolean initNew, PageLockListener lockLsnr, GridKernalContext ctx, - AtomicLong pageListCacheLimit + AtomicLong pageListCacheLimit, + byte pageFlag ) throws IgniteCheckedException { super( cacheId, @@ -69,7 +72,8 @@ public ReuseListImpl( wal, metaPageId, lockLsnr, - ctx + ctx, + pageFlag ); bucketCache = new PagesCache(pageListCacheLimit); @@ -96,6 +100,11 @@ public ReuseListImpl( return takeEmptyPage(0, null, IoStatisticsHolderNoOp.INSTANCE); } + /** {@inheritDoc} */ + @Override public long initRecycledPage(long pageId, byte flag, PageIO initIO) throws IgniteCheckedException { + return initRecycledPage0(pageId, flag, initIO); + } + /** {@inheritDoc} */ @Override public long recycledPagesCount() throws IgniteCheckedException { return storedPagesCount(0); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/InsertLast.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/InsertLast.java new file mode 100644 index 00000000000000..e7cfd411f37552 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/InsertLast.java @@ -0,0 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.tree.util; + +/** + * Rows with this marker interface will always be inserted in the very end of the tree. + */ +public interface InsertLast { +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/PageHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/PageHandler.java index 4d05e5e7ec1daa..9461cca81e60d8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/PageHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/PageHandler.java @@ -25,11 +25,13 @@ import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.pagemem.wal.record.delta.InitNewPageRecord; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver; import org.apache.ignite.internal.util.GridUnsafe; import org.jetbrains.annotations.Nullable; import static java.lang.Boolean.FALSE; import static java.lang.Boolean.TRUE; +import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver.DEFAULT_PAGE_IO_RESOLVER; /** * Page handler. 
@@ -112,7 +114,8 @@ public static R readPage( X arg, int intArg, R lockFailed, - IoStatisticsHolder statHolder + IoStatisticsHolder statHolder, + PageIoResolver pageIoRslvr ) throws IgniteCheckedException { long page = pageMem.acquirePage(cacheId, pageId, statHolder); try { @@ -121,7 +124,7 @@ public static R readPage( if (pageAddr == 0L) return lockFailed; try { - PageIO io = PageIO.getPageIO(pageAddr); + PageIO io = pageIoRslvr.resolve(pageAddr); return h.run(cacheId, pageId, page, pageAddr, io, null, arg, intArg, statHolder); } finally { @@ -157,7 +160,8 @@ public static R readPage( X arg, int intArg, R lockFailed, - IoStatisticsHolder statHolder + IoStatisticsHolder statHolder, + PageIoResolver pageIoRslvr ) throws IgniteCheckedException { long pageAddr = 0L; @@ -165,7 +169,7 @@ public static R readPage( if ((pageAddr = readLock(pageMem, cacheId, pageId, page, lsnr)) == 0L) return lockFailed; - PageIO io = PageIO.getPageIO(pageAddr); + PageIO io = pageIoRslvr.resolve(pageAddr); return h.run(cacheId, pageId, page, pageAddr, io, null, arg, intArg, statHolder); } finally { @@ -236,7 +240,7 @@ public static void initPage( PageLockListener lsnr, IoStatisticsHolder statHolder ) throws IgniteCheckedException { - Boolean res = writePage(pageMem, grpId, pageId, lsnr, PageHandler.NO_OP, init, wal, null, null, 0, FALSE, statHolder); + Boolean res = writePage(pageMem, grpId, pageId, lsnr, PageHandler.NO_OP, init, wal, null, null, 0, FALSE, statHolder, DEFAULT_PAGE_IO_RESOLVER); assert res != FALSE; } @@ -269,7 +273,8 @@ public static R writePage( X arg, int intArg, R lockFailed, - IoStatisticsHolder statHolder + IoStatisticsHolder statHolder, + PageIoResolver pageIoRslvr ) throws IgniteCheckedException { boolean releaseAfterWrite = true; long page = pageMem.acquirePage(grpId, pageId, statHolder); @@ -288,7 +293,7 @@ public static R writePage( walPlc = FALSE; } else - init = PageIO.getPageIO(pageAddr); + init = pageIoRslvr.resolve(pageAddr); R res = h.run(grpId, pageId, 
page, pageAddr, init, walPlc, arg, intArg, statHolder); @@ -339,7 +344,8 @@ public static R writePage( X arg, int intArg, R lockFailed, - IoStatisticsHolder statHolder + IoStatisticsHolder statHolder, + PageIoResolver pageIoRslvr ) throws IgniteCheckedException { long pageAddr = writeLock(pageMem, grpId, pageId, page, lsnr, false); @@ -355,7 +361,7 @@ public static R writePage( walPlc = FALSE; } else - init = PageIO.getPageIO(pageAddr); + init = pageIoRslvr.resolve(pageAddr); R res = h.run(grpId, pageId, page, pageAddr, init, walPlc, arg, intArg, statHolder); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileDescriptor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileDescriptor.java index f2653765cb1e54..2f088d19f6979f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileDescriptor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileDescriptor.java @@ -25,14 +25,12 @@ import org.apache.ignite.internal.processors.cache.persistence.file.UnzipFileIO; import org.apache.ignite.internal.processors.cache.persistence.wal.io.SegmentIO; import org.apache.ignite.internal.util.typedef.internal.SB; -import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; /** * WAL file descriptor. */ public class FileDescriptor implements Comparable, AbstractWalRecordsIterator.AbstractFileDescriptor { - /** file extension of WAL segment. */ private static final String WAL_SEGMENT_FILE_EXT = ".wal"; @@ -50,15 +48,17 @@ public class FileDescriptor implements Comparable, AbstractWalRe * * @param file WAL segment file. */ - public FileDescriptor(@NotNull File file) { + public FileDescriptor(File file) { this(file, null); } /** + * Creates file descriptor. + * * @param file WAL segment file. * @param idx Absolute WAL segment file index. 
For null value index is restored from file name. */ - public FileDescriptor(@NotNull File file, @Nullable Long idx) { + public FileDescriptor(File file, @Nullable Long idx) { this.file = file; String fileName = file.getName(); @@ -69,13 +69,15 @@ public FileDescriptor(@NotNull File file, @Nullable Long idx) { } /** - * @param segment Segment index. + * Getting segment file name. + * + * @param idx Segment index. * @return Segment file name. */ - public static String fileName(long segment) { + public static String fileName(long idx) { SB b = new SB(); - String segmentStr = Long.toString(segment); + String segmentStr = Long.toString(idx); for (int i = segmentStr.length(); i < WAL_SEGMENT_FILE_NAME_LENGTH; i++) b.a('0'); @@ -86,7 +88,7 @@ public static String fileName(long segment) { } /** {@inheritDoc} */ - @Override public int compareTo(@NotNull FileDescriptor o) { + @Override public int compareTo(FileDescriptor o) { return Long.compare(idx, o.idx); } @@ -109,14 +111,18 @@ public static String fileName(long segment) { } /** - * @return Absolute WAL segment file index + * Return absolute WAL segment file index. + * + * @return Absolute WAL segment file index. */ public long getIdx() { return idx; } /** - * @return absolute pathname string of this file descriptor pathname. + * Return absolute pathname string of this file descriptor pathname. + * + * @return Absolute pathname string of this file descriptor pathname. 
*/ public String getAbsolutePath() { return file.getAbsolutePath(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java index b79d637c115f60..ff64f5b034156c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java @@ -44,6 +44,7 @@ import java.util.Objects; import java.util.Set; import java.util.TreeMap; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.PriorityBlockingQueue; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicLongArray; @@ -86,7 +87,6 @@ import org.apache.ignite.internal.processors.cache.persistence.StorageException; import org.apache.ignite.internal.processors.cache.persistence.file.FileIO; import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory; -import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; import org.apache.ignite.internal.processors.cache.persistence.file.RandomAccessFileIOFactory; import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFolderSettings; import org.apache.ignite.internal.processors.cache.persistence.wal.aware.SegmentAware; @@ -106,7 +106,6 @@ import org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordSerializerFactory; import org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordSerializerFactoryImpl; import org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordV1Serializer; -import org.apache.ignite.internal.processors.compress.CompressionProcessor; import org.apache.ignite.internal.processors.failure.FailureProcessor; 
import org.apache.ignite.internal.processors.timeout.GridTimeoutObject; import org.apache.ignite.internal.processors.timeout.GridTimeoutProcessor; @@ -127,7 +126,6 @@ import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.thread.IgniteThread; -import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import static java.nio.file.StandardOpenOption.CREATE; @@ -144,11 +142,14 @@ import static org.apache.ignite.failure.FailureType.CRITICAL_ERROR; import static org.apache.ignite.failure.FailureType.SYSTEM_WORKER_TERMINATION; import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.TMP_SUFFIX; +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.ZIP_SUFFIX; import static org.apache.ignite.internal.processors.cache.persistence.wal.FileDescriptor.fileName; import static org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordSerializerFactory.LATEST_SERIALIZER_VERSION; import static org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordV1Serializer.HEADER_RECORD_SIZE; import static org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordV1Serializer.readPosition; import static org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordV1Serializer.readSegmentHeader; +import static org.apache.ignite.internal.processors.compress.CompressionProcessor.checkCompressionLevelBounds; +import static org.apache.ignite.internal.processors.compress.CompressionProcessor.getDefaultCompressionLevel; /** * File WAL manager. @@ -262,7 +263,7 @@ public class FileWriteAheadLogManager extends GridCacheSharedManagerAdapter impl /** */ private final boolean alwaysWriteFullPages; - /** WAL segment size in bytes. . This is maximum value, actual segments may be shorter. */ + /** WAL segment size in bytes. 
This is maximum value, actual segments may be shorter. */ private final long maxWalSegmentSize; /** @@ -295,10 +296,10 @@ public class FileWriteAheadLogManager extends GridCacheSharedManagerAdapter impl /** Persistence metrics tracker. */ private DataStorageMetricsImpl metrics; - /** */ + /** WAL work directory (including consistent ID as subfolder). */ private File walWorkDir; - /** WAL archive directory (including consistent ID as subfolder) */ + /** WAL archive directory (including consistent ID as subfolder). */ private File walArchiveDir; /** Serializer of latest version, used to read header record and for write records */ @@ -317,7 +318,7 @@ public class FileWriteAheadLogManager extends GridCacheSharedManagerAdapter impl /** Holder of actual information of latest manipulation on WAL segments. */ private volatile SegmentAware segmentAware; - /** Updater for {@link #currHnd}, used for verify there are no concurrent update for current log segment handle */ + /** Updater for {@link #currHnd}, used for verify there are no concurrent update for current log segment handle. */ private static final AtomicReferenceFieldUpdater CURR_HND_UPD = AtomicReferenceFieldUpdater.newUpdater(FileWriteAheadLogManager.class, FileWriteHandle.class, "currHnd"); @@ -328,10 +329,10 @@ public class FileWriteAheadLogManager extends GridCacheSharedManagerAdapter impl @Nullable private FileArchiver archiver; /** Compressor. */ - private FileCompressor compressor; + @Nullable private FileCompressor compressor; /** Decompressor. */ - private FileDecompressor decompressor; + @Nullable private FileDecompressor decompressor; /** Current log segment handle. */ private volatile FileWriteHandle currHnd; @@ -384,7 +385,7 @@ public class FileWriteAheadLogManager extends GridCacheSharedManagerAdapter impl private final FileHandleManagerFactory fileHandleManagerFactory; /** Switch segment record offset. 
*/ - private final AtomicLongArray switchSegmentRecordOffset; + @Nullable private final AtomicLongArray switchSegmentRecordOffset; /** Page snapshot records compression algorithm. */ private DiskPageCompression pageCompression; @@ -392,10 +393,17 @@ public class FileWriteAheadLogManager extends GridCacheSharedManagerAdapter impl /** Page snapshot records compression level. */ private int pageCompressionLevel; + /** + * Local segment sizes: absolute segment index -> size in bytes. + * For segments from {@link #walWorkDir} and {@link #walArchiveDir}. + * If there is a raw and compressed segment, compressed size is getting. + */ + private final Map segmentSize = new ConcurrentHashMap<>(); + /** * @param ctx Kernal context. */ - public FileWriteAheadLogManager(@NotNull final GridKernalContext ctx) { + public FileWriteAheadLogManager(final GridKernalContext ctx) { igCfg = ctx.config(); DataStorageConfiguration dsCfg = igCfg.getDataStorageConfiguration(); @@ -467,8 +475,9 @@ public void setFileIOFactory(FileIOFactory ioFactory) { checkOrPrepareFiles(); - if (metrics != null) + if (metrics != null) { metrics.setWalSizeProvider(new CO() { + /** {@inheritDoc} */ @Override public Long apply() { long size = 0; @@ -481,6 +490,7 @@ public void setFileIOFactory(FileIOFactory ioFactory) { return size; } }); + } segmentAware = new SegmentAware(dsCfg.getWalSegments(), dsCfg.isWalCompactionEnabled()); @@ -520,8 +530,8 @@ public void setFileIOFactory(FileIOFactory ioFactory) { cctx.kernalContext().compress().checkPageCompressionSupported(); pageCompressionLevel = dsCfg.getWalPageCompressionLevel() != null ? 
- CompressionProcessor.checkCompressionLevelBounds(dsCfg.getWalPageCompressionLevel(), pageCompression) : - CompressionProcessor.getDefaultCompressionLevel(pageCompression); + checkCompressionLevelBounds(dsCfg.getWalPageCompressionLevel(), pageCompression) : + getDefaultCompressionLevel(pageCompression); } } } @@ -585,10 +595,10 @@ public Collection getAndReserveWalFiles(WALPointer low, WALPointer high) t List res = new ArrayList<>(); for (long i = low.index(); i < high.index(); i++) { - String segmentName = FileDescriptor.fileName(i); + String segmentName = fileName(i); File file = new File(walArchiveDir, segmentName); - File fileZip = new File(walArchiveDir, segmentName + FilePageStoreManager.ZIP_SUFFIX); + File fileZip = new File(walArchiveDir, segmentName + ZIP_SUFFIX); if (file.exists()) res.add(file); @@ -640,7 +650,7 @@ private void checkWalConfiguration() throws IgniteCheckedException { fileHandleManager.onDeactivate(); } catch (Exception e) { - U.error(log, "Failed to gracefully close WAL segment: " + this.currHnd, e); + U.error(log, "Failed to gracefully close WAL segment: " + currHnd, e); } segmentAware.interrupt(); @@ -691,13 +701,12 @@ private void checkWalConfiguration() throws IgniteCheckedException { /** {@inheritDoc} */ @Override public void resumeLogging(WALPointer filePtr) throws IgniteCheckedException { - if (log.isDebugEnabled()) + if (log.isDebugEnabled()) { log.debug("File write ahead log manager resuming logging [nodeId=" + cctx.localNodeId() + " topVer=" + cctx.discovery().topologyVersionEx() + " ]"); + } - /* - walDisableContext is started after FileWriteAheadLogManager, so we obtain actual walDisableContext ref here. - */ + // walDisableContext is started after FileWriteAheadLogManager, so we obtain actual walDisableContext ref here. 
synchronized (this) { walDisableContext = cctx.walState().walDisableContext(); } @@ -711,17 +720,18 @@ private void checkWalConfiguration() throws IgniteCheckedException { fileHandleManager.resumeLogging(); - currHnd = restoreWriteHandle(filePtr); + updateCurrentHandle(restoreWriteHandle(filePtr), null); // For new handle write serializer version to it. if (filePtr == null) currHnd.writeHeader(); if (currHnd.serializerVersion() != serializer.version()) { - if (log.isInfoEnabled()) + if (log.isInfoEnabled()) { log.info("Record serializer version change detected, will start logging with a new WAL record " + "serializer to a new WAL segment [curFile=" + currHnd + ", newVer=" + serializer.version() + ", oldVer=" + currHnd.serializerVersion() + ']'); + } rollOver(currHnd, null); } @@ -1010,9 +1020,9 @@ private FileWriteHandle closeBufAndRollover( * @return {@code true} if has this index. */ private boolean hasIndex(long absIdx) { - String segmentName = FileDescriptor.fileName(absIdx); + String segmentName = fileName(absIdx); - String zipSegmentName = FileDescriptor.fileName(absIdx) + FilePageStoreManager.ZIP_SUFFIX; + String zipSegmentName = segmentName + ZIP_SUFFIX; boolean inArchive = new File(walArchiveDir, segmentName).exists() || new File(walArchiveDir, zipSegmentName).exists(); @@ -1053,15 +1063,21 @@ private boolean hasIndex(long absIdx) { // We need to leave at least one archived segment to correctly determine the archive index. if (desc.idx < high.index() && desc.idx < lastArchived) { - if (!desc.file.delete()) + if (!desc.file.delete()) { U.warn(log, "Failed to remove obsolete WAL segment (make sure the process has enough rights): " + desc.file.getAbsolutePath()); - else + } + else { deleted++; + segmentSize.remove(desc.idx()); + } + // Bump up the oldest archive segment index. 
if (segmentAware.lastTruncatedArchiveIdx() < desc.idx) segmentAware.lastTruncatedArchiveIdx(desc.idx); + + cctx.kernalContext().encryption().onWalSegmentRemoved(desc.idx); } } @@ -1087,6 +1103,11 @@ private boolean segmentReservedOrLocked(long absIdx) { segmentAware.keepUncompressedIdxFrom(ptr.index()); } + /** {@inheritDoc} */ + @Override public long currentSegment() { + return segmentAware.curAbsWalIdx(); + } + /** {@inheritDoc} */ @Override public int walArchiveSegments() { long lastTruncated = segmentAware.lastTruncatedArchiveIdx(); @@ -1167,11 +1188,11 @@ private long lastArchivedIndex() { * @param file File to read. * @param ioFactory IO factory. */ - private FileDescriptor readFileDescriptor(File file, FileIOFactory ioFactory) { + @Nullable private FileDescriptor readFileDescriptor(File file, FileIOFactory ioFactory) { FileDescriptor ds = new FileDescriptor(file); try (SegmentIO fileIO = ds.toIO(ioFactory)) { - // File may be empty when LOG_ONLY mode is enabled and mmap is disabled + // File may be empty when LOG_ONLY mode is enabled and mmap is disabled. if (fileIO.size() == 0) return null; @@ -1276,9 +1297,9 @@ private FileWriteHandle rollOver(FileWriteHandle cur, @Nullable WALRecord rec) t if (next.getSegmentId() - lashCheckpointFileIdx() >= maxSegCountWithoutCheckpoint) cctx.database().forceCheckpoint("too big size of WAL without checkpoint"); - boolean swapped = CURR_HND_UPD.compareAndSet(this, hnd, next); + boolean updated = updateCurrentHandle(next, hnd); - assert swapped : "Concurrent updates on rollover are not allowed"; + assert updated : "Concurrent updates on rollover are not allowed"; if (walAutoArchiveAfterInactivity > 0) lastRecordLoggedMs.set(0); @@ -1306,14 +1327,14 @@ private long lashCheckpointFileIdx() { * @return Initialized file write handle. * @throws StorageException If failed to initialize WAL write handle. 
*/ - private FileWriteHandle restoreWriteHandle(WALPointer lastReadPtr) throws StorageException { + private FileWriteHandle restoreWriteHandle(@Nullable WALPointer lastReadPtr) throws StorageException { long absIdx = lastReadPtr == null ? 0 : lastReadPtr.index(); @Nullable FileArchiver archiver0 = archiver; long segNo = archiver0 == null ? absIdx : absIdx % dsCfg.getWalSegments(); - File curFile = new File(walWorkDir, FileDescriptor.fileName(segNo)); + File curFile = new File(walWorkDir, fileName(segNo)); int off = lastReadPtr == null ? 0 : lastReadPtr.fileOffset(); int len = lastReadPtr == null ? 0 : lastReadPtr.length(); @@ -1341,9 +1362,10 @@ private FileWriteHandle restoreWriteHandle(WALPointer lastReadPtr) throws Storag RecordSerializer ser = new RecordSerializerFactoryImpl(cctx).createSerializer(serVer); - if (log.isInfoEnabled()) + if (log.isInfoEnabled()) { log.info("Resuming logging to WAL segment [file=" + curFile.getAbsolutePath() + ", offset=" + off + ", ver=" + serVer + ']'); + } FileWriteHandle hnd = fileHandleManager.initHandle(fileIO, off + len, ser); @@ -1352,6 +1374,24 @@ private FileWriteHandle restoreWriteHandle(WALPointer lastReadPtr) throws Storag else segmentAware.setLastArchivedAbsoluteIndex(absIdx - 1); + // Getting segment sizes. + F.asList(walArchiveDir.listFiles(WAL_SEGMENT_COMPACTED_OR_RAW_FILE_FILTER)).stream() + .map(FileDescriptor::new) + .forEach(fd -> { + if (fd.isCompressed()) + segmentSize.put(fd.idx(), fd.file().length()); + else + segmentSize.putIfAbsent(fd.idx(), fd.file().length()); + }); + + // If walArchiveDir != walWorkDir, then need to get size of all segments that were not in archive. + // For example, absIdx == 8, and there are 0-4 segments in archive, then we need to get sizes of 5-7 segments. + // Size of the 8th segment will be set in #resumeLogging. 
+ if (archiver0 != null) { + for (long i = absIdx - (absIdx % dsCfg.getWalSegments()); i < absIdx; i++) + segmentSize.putIfAbsent(i, maxWalSegmentSize); + } + return hnd; } catch (IgniteCheckedException | IOException e) { @@ -1460,25 +1500,24 @@ private void checkOrPrepareFiles() throws StorageException { if (!F.isEmpty(tmpFiles)) { for (File tmp : tmpFiles) { - boolean deleted = tmp.delete(); - - if (!deleted) + if (!tmp.delete()) { throw new StorageException("Failed to delete previously created temp file " + "(make sure Ignite process has enough rights): " + tmp.getAbsolutePath()); + } } } } File[] allFiles = walWorkDir.listFiles(WAL_SEGMENT_FILE_FILTER); - if (isArchiverEnabled()) - if (allFiles.length != 0 && allFiles.length > dsCfg.getWalSegments()) - throw new StorageException("Failed to initialize wal (work directory contains " + - "incorrect number of segments) [cur=" + allFiles.length + ", expected=" + dsCfg.getWalSegments() + ']'); + if (isArchiverEnabled() && !F.isEmpty(allFiles) && allFiles.length > dsCfg.getWalSegments()) { + throw new StorageException("Failed to initialize wal (work directory contains incorrect " + + "number of segments) [cur=" + allFiles.length + ", expected=" + dsCfg.getWalSegments() + ']'); + } // Allocate the first segment synchronously. All other segments will be allocated by archiver in background. 
- if (allFiles.length == 0) { - File first = new File(walWorkDir, FileDescriptor.fileName(0)); + if (F.isEmpty(allFiles)) { + File first = new File(walWorkDir, fileName(0)); createFile(first); } @@ -1568,7 +1607,7 @@ private File pollNextFile(long curIdx) throws StorageException, IgniteInterrupte if (archiver0 == null) { segmentAware.setLastArchivedAbsoluteIndex(curIdx); - return new File(walWorkDir, FileDescriptor.fileName(curIdx + 1)); + return new File(walWorkDir, fileName(curIdx + 1)); } long absNextIdxStartTime = System.nanoTime(); @@ -1591,7 +1630,7 @@ private File pollNextFile(long curIdx) throws StorageException, IgniteInterrupte long segmentIdx = absNextIdx % dsCfg.getWalSegments(); - return new File(walWorkDir, FileDescriptor.fileName(segmentIdx)); + return new File(walWorkDir, fileName(segmentIdx)); } /** @@ -1631,7 +1670,7 @@ private FileDescriptor[] walArchiveFiles() { /** * @return Sorted WAL files descriptors. */ - public static FileDescriptor[] scan(File[] allFiles) { + public static FileDescriptor[] scan(@Nullable File[] allFiles) { if (allFiles == null) return EMPTY_DESCRIPTORS; @@ -1694,7 +1733,10 @@ private class FileArchiver extends GridWorker { private int formatted; /** + * Constructor. * + * @param segmentAware Segment aware. + * @param log Logger. */ private FileArchiver(SegmentAware segmentAware, IgniteLogger log) throws IgniteCheckedException { super(cctx.igniteInstanceName(), "wal-file-archiver%" + cctx.igniteInstanceName(), log, @@ -1704,6 +1746,8 @@ private FileArchiver(SegmentAware segmentAware, IgniteLogger log) throws IgniteC } /** + * Initialization. + * * @param segmentAware Segment aware. * @throws IgniteCheckedException If initialization failed. 
*/ @@ -1730,13 +1774,13 @@ private IgniteBiTuple scanMinMaxArchiveIndices() throws IgniteChecke for (File file : walArchiveDir.listFiles(WAL_SEGMENT_COMPACTED_OR_RAW_FILE_FILTER)) { try { - long idx = Long.parseLong(file.getName().substring(0, 16)); + long idx = new FileDescriptor(file).idx(); FileDescriptor desc = readFileDescriptor(file, ioFactory); if (desc != null) { if (desc.idx() == idx) - archiveIndices.put(desc.idx(), desc); + archiveIndices.put(idx, desc); } else log.warning("Skip file, failed read file header " + file); @@ -1755,7 +1799,7 @@ private IgniteBiTuple scanMinMaxArchiveIndices() throws IgniteChecke // Try to find min and max if we have skipped range semgnets in archive. Find firs gap. for (Long idx : archiveIndices.descendingKeySet()) { - if (!archiveIndices.keySet().contains(idx - 1)) + if (!archiveIndices.containsKey(idx - 1)) return F.t(idx, max); } @@ -1957,41 +2001,41 @@ public void releaseWorkSegment(long absIdx) { } /** - * Moves WAL segment from work folder to archive folder. Temp file is used to do movement + * Moves WAL segment from work folder to archive folder. Temp file is used to do movement. * * @param absIdx Absolute index to archive. + * @throws StorageException If failed. 
*/ public SegmentArchiveResult archiveSegment(long absIdx) throws StorageException { long segIdx = absIdx % dsCfg.getWalSegments(); - File origFile = new File(walWorkDir, FileDescriptor.fileName(segIdx)); + File origFile = new File(walWorkDir, fileName(segIdx)); - String name = FileDescriptor.fileName(absIdx); + String name = fileName(absIdx); File dstTmpFile = new File(walArchiveDir, name + TMP_SUFFIX); File dstFile = new File(walArchiveDir, name); - if (log.isInfoEnabled()) + if (log.isInfoEnabled()) { log.info("Starting to copy WAL segment [absIdx=" + absIdx + ", segIdx=" + segIdx + ", origFile=" + origFile.getAbsolutePath() + ", dstFile=" + dstFile.getAbsolutePath() + ']'); + } try { Files.deleteIfExists(dstTmpFile.toPath()); boolean copied = false; - if (switchSegmentRecordOffset != null) { - long offs = switchSegmentRecordOffset.get((int)segIdx); + long offs = switchSegmentRecordOffset.get((int)segIdx); - if (offs > 0) { - switchSegmentRecordOffset.set((int)segIdx, 0); + if (offs > 0) { + switchSegmentRecordOffset.set((int)segIdx, 0); - if (offs < origFile.length()) { - GridFileUtils.copy(ioFactory, origFile, ioFactory, dstTmpFile, offs); + if (offs < origFile.length()) { + GridFileUtils.copy(ioFactory, origFile, ioFactory, dstTmpFile, offs); - copied = true; - } + copied = true; } } @@ -2005,6 +2049,8 @@ public SegmentArchiveResult archiveSegment(long absIdx) throws StorageException f0.force(); } } + + segmentSize.put(absIdx, dstFile.length()); } catch (IOException e) { throw new StorageException("Failed to archive WAL segment [" + @@ -2012,9 +2058,10 @@ public SegmentArchiveResult archiveSegment(long absIdx) throws StorageException ", dstFile=" + dstTmpFile.getAbsolutePath() + ']', e); } - if (log.isInfoEnabled()) + if (log.isInfoEnabled()) { log.info("Copied file [src=" + origFile.getAbsolutePath() + ", dst=" + dstFile.getAbsolutePath() + ']'); + } return new SegmentArchiveResult(absIdx, origFile, dstFile); } @@ -2071,7 +2118,11 @@ private class 
FileCompressor extends FileCompressorWorker { /** Workers queue. */ private final List workers = new ArrayList<>(); - /** */ + /** + * Constructor. + * + * @param log Logger. + */ FileCompressor(IgniteLogger log) { super(0, log); @@ -2208,12 +2259,13 @@ private void body0() { deleteObsoleteRawSegments(); - File tmpZip = new File(walArchiveDir, FileDescriptor.fileName(segIdx) - + FilePageStoreManager.ZIP_SUFFIX + TMP_SUFFIX); + String segmentFileName = fileName(segIdx); + + File tmpZip = new File(walArchiveDir, segmentFileName + ZIP_SUFFIX + TMP_SUFFIX); - File zip = new File(walArchiveDir, FileDescriptor.fileName(segIdx) + FilePageStoreManager.ZIP_SUFFIX); + File zip = new File(walArchiveDir, segmentFileName + ZIP_SUFFIX); - File raw = new File(walArchiveDir, FileDescriptor.fileName(segIdx)); + File raw = new File(walArchiveDir, segmentFileName); if (!Files.exists(raw.toPath())) throw new IgniteCheckedException("WAL archive segment is missing: " + raw); @@ -2228,13 +2280,8 @@ private void body0() { segmentAware.onSegmentCompressed(segIdx); - if (evt.isRecordable(EVT_WAL_SEGMENT_COMPACTED) && !cctx.kernalContext().recoveryMode()) { - evt.record(new WalSegmentCompactedEvent( - cctx.localNode(), - segIdx, - zip.getAbsoluteFile()) - ); - } + if (evt.isRecordable(EVT_WAL_SEGMENT_COMPACTED) && !cctx.kernalContext().recoveryMode()) + evt.record(new WalSegmentCompactedEvent(cctx.localNode(), segIdx, zip.getAbsoluteFile())); } catch (IgniteInterruptedCheckedException ignore) { Thread.currentThread().interrupt(); @@ -2243,7 +2290,7 @@ private void body0() { lastCompressionError = e; U.error(log, "Compression of WAL segment [idx=" + segIdx + - "] was skipped due to unexpected error", lastCompressionError); + "] was skipped due to unexpected error", lastCompressionError); segmentAware.onSegmentCompressed(segIdx); } @@ -2255,26 +2302,30 @@ private void body0() { } /** - * @param nextSegment Next segment absolute idx. - * @param raw Raw file. - * @param zip Zip file. 
+ * Segment compression. + * + * @param idx Segment absolute index. + * @param raw Raw segment file. + * @param zip Zip file to writing. + * @throws IOException If failed. + * @throws IgniteCheckedException If failed. */ - private void compressSegmentToFile(long nextSegment, File raw, File zip) - throws IOException, IgniteCheckedException { - int segmentSerializerVer; + private void compressSegmentToFile(long idx, File raw, File zip) throws IOException, IgniteCheckedException { + int serializerVer; try (FileIO fileIO = ioFactory.create(raw)) { - segmentSerializerVer = readSegmentHeader(new SegmentIO(nextSegment, fileIO), segmentFileInputFactory).getSerializerVersion(); + serializerVer = readSegmentHeader(new SegmentIO(idx, fileIO), segmentFileInputFactory) + .getSerializerVersion(); } try (ZipOutputStream zos = new ZipOutputStream(new BufferedOutputStream(new FileOutputStream(zip)))) { zos.setLevel(dsCfg.getWalCompactionLevel()); - zos.putNextEntry(new ZipEntry(nextSegment + ".wal")); + zos.putNextEntry(new ZipEntry(idx + ".wal")); ByteBuffer buf = ByteBuffer.allocate(HEADER_RECORD_SIZE); buf.order(ByteOrder.nativeOrder()); - zos.write(prepareSerializerVersionBuffer(nextSegment, segmentSerializerVer, true, buf).array()); + zos.write(prepareSerializerVersionBuffer(idx, serializerVer, true, buf).array()); final CIX1 appendToZipC = new CIX1() { @Override public void applyx(WALRecord record) throws IgniteCheckedException { @@ -2290,32 +2341,36 @@ private void compressSegmentToFile(long nextSegment, File raw, File zip) }; try (SingleSegmentLogicalRecordsIterator iter = new SingleSegmentLogicalRecordsIterator( - log, cctx, ioFactory, BUF_SIZE, nextSegment, walArchiveDir, appendToZipC)) { + log, cctx, ioFactory, BUF_SIZE, idx, walArchiveDir, appendToZipC)) { while (iter.hasNextX()) iter.nextX(); } - RecordSerializer ser = new RecordSerializerFactoryImpl(cctx).createSerializer(segmentSerializerVer); + RecordSerializer ser = new 
RecordSerializerFactoryImpl(cctx).createSerializer(serializerVer); - ByteBuffer heapBuf = prepareSwitchSegmentRecordBuffer(nextSegment, ser); + ByteBuffer heapBuf = prepareSwitchSegmentRecordBuffer(idx, ser); zos.write(heapBuf.array()); } + + segmentSize.put(idx, zip.length()); } /** - * @param nextSegment Segment index. + * @param idx Segment index. * @param ser Record Serializer. */ - @NotNull private ByteBuffer prepareSwitchSegmentRecordBuffer(long nextSegment, RecordSerializer ser) - throws IgniteCheckedException { + private ByteBuffer prepareSwitchSegmentRecordBuffer( + long idx, + RecordSerializer ser + ) throws IgniteCheckedException { SwitchSegmentRecord switchRecord = new SwitchSegmentRecord(); int switchRecordSize = ser.size(switchRecord); switchRecord.size(switchRecordSize); - switchRecord.position(new WALPointer(nextSegment, 0, switchRecordSize)); + switchRecord.position(new WALPointer(idx, 0, switchRecordSize)); ByteBuffer heapBuf = ByteBuffer.allocate(switchRecordSize); @@ -2346,9 +2401,11 @@ private void deleteObsoleteRawSegments() { return; if (desc.idx < segmentAware.keepUncompressedIdxFrom() && duplicateIndices.contains(desc.idx)) { - if (desc.file.exists() && !desc.file.delete()) - U.warn(log, "Failed to remove obsolete WAL segment (make sure the process has enough rights): " + - desc.file.getAbsolutePath() + ", exists: " + desc.file.exists()); + if (desc.file.exists() && !desc.file.delete()) { + U.warn(log, "Failed to remove obsolete WAL segment " + + "(make sure the process has enough rights): " + desc.file.getAbsolutePath() + + ", exists: " + desc.file.exists()); + } } } } @@ -2396,11 +2453,11 @@ private class FileDecompressor extends GridWorker { if (isCancelled()) break; - File zip = new File(walArchiveDir, FileDescriptor.fileName(segmentToDecompress) - + FilePageStoreManager.ZIP_SUFFIX); - File unzipTmp = new File(walArchiveDir, FileDescriptor.fileName(segmentToDecompress) - + TMP_SUFFIX); - File unzip = new File(walArchiveDir, 
FileDescriptor.fileName(segmentToDecompress)); + String segmentFileName = fileName(segmentToDecompress); + + File zip = new File(walArchiveDir, segmentFileName + ZIP_SUFFIX); + File unzipTmp = new File(walArchiveDir, segmentFileName + TMP_SUFFIX); + File unzip = new File(walArchiveDir, segmentFileName); try (ZipInputStream zis = new ZipInputStream(new BufferedInputStream(new FileInputStream(zip))); FileIO io = ioFactory.create(unzipTmp)) { @@ -2468,7 +2525,7 @@ synchronized IgniteInternalFuture decompressFile(long idx) { if (decompressionFutures.containsKey(idx)) return decompressionFutures.get(idx); - File f = new File(walArchiveDir, FileDescriptor.fileName(idx)); + File f = new File(walArchiveDir, fileName(idx)); if (f.exists()) return new GridFinishedFuture<>(); @@ -2511,33 +2568,36 @@ void restart() { * @param startWith Start with. * @param create Flag create file. * @param p Predicate Exit condition. + * @param completionCb Callback after verification segment. * @throws StorageException if validation or create file fail. 
*/ private void checkFiles( int startWith, boolean create, @Nullable IgnitePredicate p, - @Nullable IgniteInClosure completionCallback + @Nullable IgniteInClosure completionCb ) throws StorageException { for (int i = startWith; i < dsCfg.getWalSegments() && (p == null || p.apply(i)); i++) { - File checkFile = new File(walWorkDir, FileDescriptor.fileName(i)); + File checkFile = new File(walWorkDir, fileName(i)); if (checkFile.exists()) { - if (checkFile.isDirectory()) + if (checkFile.isDirectory()) { throw new StorageException("Failed to initialize WAL log segment (a directory with " + "the same name already exists): " + checkFile.getAbsolutePath()); - else if (checkFile.length() != dsCfg.getWalSegmentSize() && mode == WALMode.FSYNC) + } + else if (checkFile.length() != dsCfg.getWalSegmentSize() && mode == WALMode.FSYNC) { throw new StorageException("Failed to initialize WAL log segment " + "(WAL segment size change is not supported in 'DEFAULT' WAL mode) " + "[filePath=" + checkFile.getAbsolutePath() + ", fileSize=" + checkFile.length() + ", configSize=" + dsCfg.getWalSegmentSize() + ']'); + } } else if (create) createFile(checkFile); - if (completionCallback != null) - completionCallback.apply(i); + if (completionCb != null) + completionCb.apply(i); } } @@ -2548,7 +2608,7 @@ else if (create) * @param ver Version. * @param compacted Compacted flag. */ - @NotNull public static ByteBuffer prepareSerializerVersionBuffer(long idx, int ver, boolean compacted, ByteBuffer buf) { + public static ByteBuffer prepareSerializerVersionBuffer(long idx, int ver, boolean compacted, ByteBuffer buf) { // Write record type. 
buf.put((byte) (WALRecord.RecordType.HEADER_RECORD.ordinal() + 1)); @@ -2705,7 +2765,7 @@ private RecordsIterator( @Nullable WALPointer start, @Nullable WALPointer end, DataStorageConfiguration dsCfg, - @NotNull RecordSerializerFactory serializerFactory, + RecordSerializerFactory serializerFactory, FileIOFactory ioFactory, @Nullable FileArchiver archiver, FileDecompressor decompressor, @@ -2735,15 +2795,14 @@ private RecordsIterator( /** {@inheritDoc} */ @Override protected ReadFileHandle initReadHandle( - @NotNull AbstractFileDescriptor desc, + AbstractFileDescriptor desc, @Nullable WALPointer start ) throws IgniteCheckedException, FileNotFoundException { AbstractFileDescriptor currDesc = desc; if (!desc.file().exists()) { FileDescriptor zipFile = new FileDescriptor( - new File(walArchiveDir, FileDescriptor.fileName(desc.idx()) - + FilePageStoreManager.ZIP_SUFFIX)); + new File(walArchiveDir, fileName(desc.idx()) + ZIP_SUFFIX)); if (!zipFile.file.exists()) { throw new FileNotFoundException("Both compressed and raw segment files are missing in archive " + @@ -2895,10 +2954,7 @@ private static List listFileNames(File dir) { } /** {@inheritDoc} */ - @Override protected IgniteCheckedException handleRecordException( - @NotNull Exception e, - @Nullable WALPointer ptr) { - + @Override protected IgniteCheckedException handleRecordException(Exception e, @Nullable WALPointer ptr) { if (e instanceof IgniteCheckedException) if (X.hasCause(e, IgniteDataIntegrityViolationException.class)) // This means that there is no explicit last sengment, so we iterate unil the very end. 
@@ -2964,12 +3020,10 @@ private boolean isArchiverEnabled() { private boolean canIgnoreCrcError( long workIdx, long walSegmentIdx, - @NotNull Exception e, - @Nullable WALPointer ptr) { - FileDescriptor fd = new FileDescriptor( - new File(walWorkDir, FileDescriptor.fileName(workIdx)), - walSegmentIdx - ); + Exception e, + @Nullable WALPointer ptr + ) { + FileDescriptor fd = new FileDescriptor(new File(walWorkDir, fileName(workIdx)), walSegmentIdx); try { if (!fd.file().exists()) @@ -3016,7 +3070,7 @@ private void doFlush() { * @param walFilesDir directory to scan * @return found WAL file descriptors */ - public static FileDescriptor[] loadFileDescriptors(@NotNull final File walFilesDir) throws IgniteCheckedException { + public static FileDescriptor[] loadFileDescriptors(final File walFilesDir) throws IgniteCheckedException { final File[] files = walFilesDir.listFiles(WAL_SEGMENT_COMPACTED_OR_RAW_FILE_FILTER); if (files == null) { @@ -3025,4 +3079,45 @@ public static FileDescriptor[] loadFileDescriptors(@NotNull final File walFilesD } return scan(files); } + + /** {@inheritDoc} */ + @Override public long segmentSize(long idx) { + return segmentSize.getOrDefault(idx, 0L); + } + + /** {@inheritDoc} */ + @Override public WALPointer lastWritePointer() { + return currHnd.position(); + } + + /** + * Concurrent {@link #currHnd} update. + * + * @param n New handle. + * @param c Current handle, if not {@code null} CAS will be used. + * @return {@code True} if updated. + */ + private boolean updateCurrentHandle(FileWriteHandle n, @Nullable FileWriteHandle c) { + boolean res = true; + + if (c == null) + currHnd = n; + else + res = CURR_HND_UPD.compareAndSet(this, c, n); + + segmentSize.put(n.getSegmentId(), maxWalSegmentSize); + + return res; + } + + /** + * Check that file name matches segment name. + * + * @param name File name. + * @return {@code True} if file name matches segment name. 
+ */ + public static boolean isSegmentFileName(@Nullable String name) { + return name != null && (WAL_NAME_PATTERN.matcher(name).matches() || + WAL_SEGMENT_FILE_COMPACTED_PATTERN.matcher(name).matches()); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV1Serializer.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV1Serializer.java index 46fbd7780ba412..2da912824a1a3b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV1Serializer.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV1Serializer.java @@ -20,7 +20,6 @@ import java.io.DataInput; import java.io.EOFException; import java.io.IOException; -import java.io.Serializable; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; @@ -32,6 +31,8 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.internal.managers.encryption.GridEncryptionManager; +import org.apache.ignite.internal.managers.encryption.GroupKey; +import org.apache.ignite.internal.managers.encryption.GroupKeyEncrypted; import org.apache.ignite.internal.pagemem.FullPageId; import org.apache.ignite.internal.pagemem.wal.record.CacheState; import org.apache.ignite.internal.pagemem.wal.record.CheckpointRecord; @@ -39,10 +40,11 @@ import org.apache.ignite.internal.pagemem.wal.record.DataRecord; import org.apache.ignite.internal.pagemem.wal.record.EncryptedRecord; import org.apache.ignite.internal.pagemem.wal.record.LazyDataEntry; -import org.apache.ignite.internal.pagemem.wal.record.MasterKeyChangeRecord; +import org.apache.ignite.internal.pagemem.wal.record.MasterKeyChangeRecordV2; import org.apache.ignite.internal.pagemem.wal.record.MemoryRecoveryRecord; import 
org.apache.ignite.internal.pagemem.wal.record.MetastoreDataRecord; import org.apache.ignite.internal.pagemem.wal.record.PageSnapshot; +import org.apache.ignite.internal.pagemem.wal.record.ReencryptionStartRecord; import org.apache.ignite.internal.pagemem.wal.record.TxRecord; import org.apache.ignite.internal.pagemem.wal.record.WALRecord; import org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType; @@ -68,12 +70,14 @@ import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRootInlineFlagsCreatedVersionRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRootInlineRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRootRecord; +import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateIndexDataRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateLastAllocatedIndex; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateLastSuccessfulFullSnapshotId; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateLastSuccessfulSnapshotId; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateNextSnapshotId; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdatePartitionDataRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdatePartitionDataRecordV2; +import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdatePartitionDataRecordV3; import org.apache.ignite.internal.pagemem.wal.record.delta.NewRootInitRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.PageListMetaResetCountRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.PagesListAddPageRecord; @@ -117,8 +121,10 @@ import org.jetbrains.annotations.Nullable; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.DATA_RECORD; -import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.ENCRYPTED_DATA_RECORD; +import 
static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.ENCRYPTED_DATA_RECORD_V2; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.ENCRYPTED_RECORD; +import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.ENCRYPTED_RECORD_V2; +import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.MASTER_KEY_CHANGE_RECORD_V2; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.READ; import static org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordV1Serializer.REC_TYPE_SIZE; import static org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordV1Serializer.putRecordType; @@ -190,7 +196,7 @@ public RecordDataV1Serializer(GridCacheSharedContext cctx) { int clSz = plainSize(record); if (needEncryption(record)) - return encSpi.encryptedSize(clSz) + 4 /* groupId */ + 4 /* data size */ + REC_TYPE_SIZE; + return encSpi.encryptedSize(clSz) + 4 /* groupId */ + 4 /* data size */ + 1 /* key ID */ + REC_TYPE_SIZE; return clSz; } @@ -198,7 +204,7 @@ public RecordDataV1Serializer(GridCacheSharedContext cctx) { /** {@inheritDoc} */ @Override public WALRecord readRecord(RecordType type, ByteBufferBackedDataInput in, int size) throws IOException, IgniteCheckedException { - if (type == ENCRYPTED_RECORD) { + if (type == ENCRYPTED_RECORD || type == ENCRYPTED_RECORD_V2) { if (encSpi == null) { T2 knownData = skipEncryptedRecord(in, true); @@ -206,7 +212,8 @@ public RecordDataV1Serializer(GridCacheSharedContext cctx) { return new EncryptedRecord(knownData.get1(), knownData.get2()); } - T3 clData = readEncryptedData(in, true); + T3 clData = + readEncryptedData(in, true, type == ENCRYPTED_RECORD_V2); //This happen during startup. On first WAL iteration we restore only metastore. //So, no encryption keys available. 
See GridCacheDatabaseSharedManager#readMetastore @@ -270,30 +277,36 @@ private boolean needEncryption(int grpId) { * * @param in Input stream. * @param readType If {@code true} plain record type will be read from {@code in}. + * @param readKeyId If {@code true} encryption key identifier will be read from {@code in}. * @return Plain data stream, group id, plain record type, * @throws IOException If failed. * @throws IgniteCheckedException If failed. */ - private T3 readEncryptedData(ByteBufferBackedDataInput in, - boolean readType) - throws IOException, IgniteCheckedException { + private T3 readEncryptedData( + ByteBufferBackedDataInput in, + boolean readType, + boolean readKeyId + ) throws IOException, IgniteCheckedException { int grpId = in.readInt(); int encRecSz = in.readInt(); + RecordType plainRecType = null; if (readType) plainRecType = RecordV1Serializer.readRecordType(in); + int keyId = readKeyId ? in.readUnsignedByte() : GridEncryptionManager.INITIAL_KEY_ID; + byte[] encData = new byte[encRecSz]; in.readFully(encData); - Serializable key = encMgr.groupKey(grpId); + GroupKey grpKey = encMgr.groupKey(grpId, keyId); - if (key == null) + if (grpKey == null) return new T3<>(null, grpId, plainRecType); - byte[] clData = encSpi.decrypt(encData, key); + byte[] clData = encSpi.decrypt(encData, grpKey.key()); return new T3<>(new ByteBufferBackedDataInputImpl().buffer(ByteBuffer.wrap(clData)), grpId, plainRecType); } @@ -339,11 +352,11 @@ private void writeEncryptedData(int grpId, @Nullable RecordType plainRecType, By if (plainRecType != null) putRecordType(dst, plainRecType); - Serializable key = encMgr.groupKey(grpId); + GroupKey grpKey = encMgr.groupKey(grpId); - assert key != null; + dst.put(grpKey.id()); - encSpi.encrypt(clData, key, dst); + encSpi.encrypt(clData, grpKey.key(), dst); } /** @@ -372,6 +385,9 @@ assert record instanceof PageSnapshot; case META_PAGE_INIT: return /*cache ID*/4 + /*page ID*/8 + /*ioType*/2 + /*ioVer*/2 + /*tree root*/8 + /*reuse 
root*/8; + case INDEX_META_PAGE_DELTA_RECORD: + return /*cache ID*/4 + /*page ID*/8 + /*encrypt page index*/ 4 + /*encrypt pages count*/4; + case PARTITION_META_PAGE_UPDATE_COUNTERS: return /*cache ID*/4 + /*page ID*/8 + /*upd cntr*/8 + /*rmv id*/8 + /*part size*/4 + /*counters page id*/8 + /*state*/ 1 + /*allocatedIdxCandidate*/ 4; @@ -380,6 +396,10 @@ assert record instanceof PageSnapshot; return /*cache ID*/4 + /*page ID*/8 + /*upd cntr*/8 + /*rmv id*/8 + /*part size*/4 + /*counters page id*/8 + /*state*/ 1 + /*allocatedIdxCandidate*/ 4 + /*link*/ 8; + case PARTITION_META_PAGE_DELTA_RECORD_V3: + return /*cache ID*/4 + /*page ID*/8 + /*upd cntr*/8 + /*rmv id*/8 + /*part size*/4 + /*counters page id*/8 + /*state*/ 1 + + /*allocatedIdxCandidate*/ 4 + /*link*/ 8 + /*encrypt page index*/ 4 + /*encrypt pages count*/4; + case MEMORY_RECOVERY: return 8; @@ -536,10 +556,12 @@ assert record instanceof PageSnapshot; case TX_RECORD: return txRecordSerializer.size((TxRecord)record); - case MASTER_KEY_CHANGE_RECORD: - MasterKeyChangeRecord rec = (MasterKeyChangeRecord)record; + case MASTER_KEY_CHANGE_RECORD_V2: + return ((MasterKeyChangeRecordV2)record).dataSize(); + + case REENCRYPTION_START_RECORD: + return ((ReencryptionStartRecord)record).dataSize(); - return rec.dataSize(); default: throw new UnsupportedOperationException("Type: " + record.type()); } @@ -609,6 +631,11 @@ WALRecord readPlainRecord(RecordType type, ByteBufferBackedDataInput in, break; + case INDEX_META_PAGE_DELTA_RECORD: + res = new MetaPageUpdateIndexDataRecord(in); + + break; + case PARTITION_META_PAGE_UPDATE_COUNTERS: res = new MetaPageUpdatePartitionDataRecord(in); @@ -619,6 +646,11 @@ WALRecord readPlainRecord(RecordType type, ByteBufferBackedDataInput in, break; + case PARTITION_META_PAGE_DELTA_RECORD_V3: + res = new MetaPageUpdatePartitionDataRecordV3(in); + + break; + case MEMORY_RECOVERY: long ts = in.readLong(); @@ -647,12 +679,13 @@ WALRecord readPlainRecord(RecordType type, 
ByteBufferBackedDataInput in, break; case ENCRYPTED_DATA_RECORD: + case ENCRYPTED_DATA_RECORD_V2: entryCnt = in.readInt(); entries = new ArrayList<>(entryCnt); for (int i = 0; i < entryCnt; i++) - entries.add(readEncryptedDataEntry(in)); + entries.add(readEncryptedDataEntry(in, type == ENCRYPTED_DATA_RECORD_V2)); res = new DataRecord(entries, 0L); @@ -1184,6 +1217,7 @@ WALRecord readPlainRecord(RecordType type, ByteBufferBackedDataInput in, break; case MASTER_KEY_CHANGE_RECORD: + case MASTER_KEY_CHANGE_RECORD_V2: int keyNameLen = in.readInt(); byte[] keyNameBytes = new byte[keyNameLen]; @@ -1194,21 +1228,39 @@ WALRecord readPlainRecord(RecordType type, ByteBufferBackedDataInput in, int keysCnt = in.readInt(); - HashMap grpKeys = new HashMap<>(keysCnt); + List> grpKeys = new ArrayList<>(keysCnt); + + boolean readKeyId = type == MASTER_KEY_CHANGE_RECORD_V2; for (int i = 0; i < keysCnt; i++) { int grpId = in.readInt(); + int keyId = readKeyId ? in.readByte() & 0xff : 0; int grpKeySize = in.readInt(); - byte[] grpKey = new byte[grpKeySize]; in.readFully(grpKey); - grpKeys.put(grpId, grpKey); + grpKeys.add(new T2<>(grpId, new GroupKeyEncrypted(keyId, grpKey))); } - res = new MasterKeyChangeRecord(masterKeyName, grpKeys); + res = new MasterKeyChangeRecordV2(masterKeyName, grpKeys); + + break; + + case REENCRYPTION_START_RECORD: + int grpsCnt = in.readInt(); + + Map map = U.newHashMap(grpsCnt); + + for (int i = 0; i < grpsCnt; i++) { + int grpId = in.readInt(); + byte keyId = in.readByte(); + + map.put(grpId, keyId); + } + + res = new ReencryptionStartRecord(map); break; @@ -1265,8 +1317,14 @@ void writePlainRecord(WALRecord rec, ByteBuffer buf) throws IgniteCheckedExcepti break; + case INDEX_META_PAGE_DELTA_RECORD: + ((MetaPageUpdateIndexDataRecord)rec).toBytes(buf); + + break; + case PARTITION_META_PAGE_UPDATE_COUNTERS: case PARTITION_META_PAGE_UPDATE_COUNTERS_V2: + case PARTITION_META_PAGE_DELTA_RECORD_V3: ((MetaPageUpdatePartitionDataRecord)rec).toBytes(buf); break; 
@@ -1795,23 +1853,40 @@ void writePlainRecord(WALRecord rec, ByteBuffer buf) throws IgniteCheckedExcepti case SWITCH_SEGMENT_RECORD: break; - case MASTER_KEY_CHANGE_RECORD: - MasterKeyChangeRecord mkChangeRec = (MasterKeyChangeRecord)rec; + case MASTER_KEY_CHANGE_RECORD_V2: + MasterKeyChangeRecordV2 mkChangeRec = (MasterKeyChangeRecordV2)rec; byte[] keyIdBytes = mkChangeRec.getMasterKeyName().getBytes(); buf.putInt(keyIdBytes.length); buf.put(keyIdBytes); - Map grpKeys = mkChangeRec.getGrpKeys(); + List> grpKeys = mkChangeRec.getGrpKeys(); buf.putInt(grpKeys.size()); - for (Entry entry : grpKeys.entrySet()) { - buf.putInt(entry.getKey()); + for (T2 entry : grpKeys) { + GroupKeyEncrypted grpKey = entry.get2(); + + buf.putInt(entry.get1()); + buf.put((byte)grpKey.id()); + + buf.putInt(grpKey.key().length); + buf.put(grpKey.key()); + } + + break; + + case REENCRYPTION_START_RECORD: + ReencryptionStartRecord statusRecord = (ReencryptionStartRecord)rec; + + Map grps = statusRecord.groups(); + + buf.putInt(grps.size()); - buf.putInt(entry.getValue().length); - buf.put(entry.getValue()); + for (Map.Entry e : grps.entrySet()) { + buf.putInt(e.getKey()); + buf.put(e.getValue()); } break; @@ -1927,11 +2002,12 @@ private static void putRow(ByteBuffer buf, byte[] rowBytes) { /** * @param in Input to read from. + * @param readKeyId If {@code true} encryption key identifier will be read from {@code in}. * @return Read entry. * @throws IOException If failed. * @throws IgniteCheckedException If failed. 
*/ - DataEntry readEncryptedDataEntry(ByteBufferBackedDataInput in) throws IOException, IgniteCheckedException { + DataEntry readEncryptedDataEntry(ByteBufferBackedDataInput in, boolean readKeyId) throws IOException, IgniteCheckedException { boolean needDecryption = in.readByte() == ENCRYPTED; if (needDecryption) { @@ -1941,7 +2017,7 @@ DataEntry readEncryptedDataEntry(ByteBufferBackedDataInput in) throws IOExceptio return new EncryptedDataEntry(); } - T3 clData = readEncryptedData(in, false); + T3 clData = readEncryptedData(in, false, readKeyId); if (clData.get1() == null) return null; @@ -2035,12 +2111,12 @@ RecordType recordType(WALRecord rec) { return rec.type(); if (needEncryption(rec)) - return ENCRYPTED_RECORD; + return ENCRYPTED_RECORD_V2; if (rec.type() != DATA_RECORD) return rec.type(); - return isDataRecordEncrypted((DataRecord)rec) ? ENCRYPTED_DATA_RECORD : DATA_RECORD; + return isDataRecordEncrypted((DataRecord)rec) ? ENCRYPTED_DATA_RECORD_V2 : DATA_RECORD; } /** @@ -2129,7 +2205,7 @@ protected int dataSize(DataRecord dataRec) throws IgniteCheckedException { int clSz = entrySize(entry); if (!encryptionDisabled && needEncryption(cctx.cacheContext(entry.cacheId()).groupId())) - sz += encSpi.encryptedSize(clSz) + 1 /* encrypted flag */ + 4 /* groupId */ + 4 /* data size */; + sz += encSpi.encryptedSize(clSz) + 1 /*encrypted flag*/ + 4 /*groupId*/ + 4 /*data size*/ + 1 /*key ID*/; else { sz += clSz; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV2Serializer.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV2Serializer.java index 6e70ee60f0542c..8622629497b199 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV2Serializer.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV2Serializer.java @@ -56,6 +56,8 @@ import org.apache.ignite.internal.processors.cache.persistence.wal.record.HeaderRecord; import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; +import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.ENCRYPTED_DATA_RECORD_V2; + /** * Record data V2 serializer. */ @@ -180,13 +182,14 @@ public RecordDataV2Serializer(GridCacheSharedContext cctx) { return new MvccDataRecord(entries, timeStamp); case ENCRYPTED_DATA_RECORD: + case ENCRYPTED_DATA_RECORD_V2: entryCnt = in.readInt(); timeStamp = in.readLong(); entries = new ArrayList<>(entryCnt); for (int i = 0; i < entryCnt; i++) - entries.add(readEncryptedDataEntry(in)); + entries.add(readEncryptedDataEntry(in, type == ENCRYPTED_DATA_RECORD_V2)); return new DataRecord(entries, timeStamp); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java index 5462ed20e054a7..f36ebb2caf33da 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java @@ -1239,8 +1239,8 @@ protected void runQuery(GridCacheQueryInfo qryInfo) { V val0 = null; if (readEvt && cctx.gridEvents().hasListener(EVT_CACHE_QUERY_OBJECT_READ)) { - key0 = (K)CacheObjectUtils.unwrapBinaryIfNeeded(objCtx, key, qry.keepBinary(), false); - val0 = (V)CacheObjectUtils.unwrapBinaryIfNeeded(objCtx, val, qry.keepBinary(), false); + key0 = (K)CacheObjectUtils.unwrapBinaryIfNeeded(objCtx, key, qry.keepBinary(), false, null); + val0 = (V)CacheObjectUtils.unwrapBinaryIfNeeded(objCtx, val, qry.keepBinary(), false, null); switch (type) { case SQL: @@ -1289,9 
+1289,9 @@ protected void runQuery(GridCacheQueryInfo qryInfo) { if (rdc != null) { if (key0 == null) - key0 = (K)CacheObjectUtils.unwrapBinaryIfNeeded(objCtx, key, qry.keepBinary(), false); + key0 = (K)CacheObjectUtils.unwrapBinaryIfNeeded(objCtx, key, qry.keepBinary(), false, null); if (val0 == null) - val0 = (V)CacheObjectUtils.unwrapBinaryIfNeeded(objCtx, val, qry.keepBinary(), false); + val0 = (V)CacheObjectUtils.unwrapBinaryIfNeeded(objCtx, val, qry.keepBinary(), false, null); Cache.Entry entry = new CacheEntryImpl(key0, val0); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryEvent.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryEvent.java index e66455499318a7..58aadb13c28b81 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryEvent.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryEvent.java @@ -65,17 +65,17 @@ public int partitionId() { /** {@inheritDoc} */ @Override public K getKey() { - return (K)cctx.cacheObjectContext().unwrapBinaryIfNeeded(e.key(), e.isKeepBinary(), false); + return (K)cctx.cacheObjectContext().unwrapBinaryIfNeeded(e.key(), e.isKeepBinary(), false, null); } /** {@inheritDoc} */ @Override public V getValue() { - return (V)cctx.cacheObjectContext().unwrapBinaryIfNeeded(e.value(), e.isKeepBinary(), false); + return (V)cctx.cacheObjectContext().unwrapBinaryIfNeeded(e.value(), e.isKeepBinary(), false, null); } /** {@inheritDoc} */ @Override public V getOldValue() { - return (V)cctx.cacheObjectContext().unwrapBinaryIfNeeded(e.oldValue(), e.isKeepBinary(), false); + return (V)cctx.cacheObjectContext().unwrapBinaryIfNeeded(e.oldValue(), e.isKeepBinary(), false, null); } /** {@inheritDoc} */ diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/store/GridCacheStoreManagerAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/store/GridCacheStoreManagerAdapter.java index ad7fa076e48c97..24ac8dc8675cae 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/store/GridCacheStoreManagerAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/store/GridCacheStoreManagerAdapter.java @@ -311,7 +311,7 @@ private CacheStore cacheStoreWrapper(GridKernalContext ctx, // Never load internal keys from store as they are never persisted. return null; - Object storeKey = cctx.unwrapBinaryIfNeeded(key, !convertBinary()); + Object storeKey = cctx.unwrapBinaryIfNeeded(key, !convertBinary(), null); if (log.isDebugEnabled()) log.debug(S.toString("Loading value from store for key", @@ -446,7 +446,7 @@ private void loadAllFromStore(@Nullable IgniteInternalTx tx, Collection keys0 = F.viewReadOnly(keys, new C1() { @Override public Object apply(KeyCacheObject key) { - return cctx.unwrapBinaryIfNeeded(key, !convertBinary()); + return cctx.unwrapBinaryIfNeeded(key, !convertBinary(), null); } }); @@ -568,8 +568,8 @@ private void loadAllFromStore(@Nullable IgniteInternalTx tx, if (key instanceof GridCacheInternal) return true; - Object key0 = cctx.unwrapBinaryIfNeeded(key, !convertBinary()); - Object val0 = cctx.unwrapBinaryIfNeeded(val, !convertBinary()); + Object key0 = cctx.unwrapBinaryIfNeeded(key, !convertBinary(), null); + Object val0 = cctx.unwrapBinaryIfNeeded(val, !convertBinary(), null); if (log.isDebugEnabled()) { log.debug(S.toString("Storing value in cache store", @@ -680,7 +680,7 @@ private void loadAllFromStore(@Nullable IgniteInternalTx tx, if (key instanceof GridCacheInternal) return false; - Object key0 = cctx.unwrapBinaryIfNeeded(key, !convertBinary()); + Object key0 = cctx.unwrapBinaryIfNeeded(key, !convertBinary(), null); if (log.isDebugEnabled()) 
log.debug(S.toString("Removing value from cache store", "key", key0, true)); @@ -1200,8 +1200,8 @@ private void checkNext() { Object v = locStore ? e.getValue() : e.getValue().get1(); - k = cctx.unwrapBinaryIfNeeded(k, !convertBinary()); - v = cctx.unwrapBinaryIfNeeded(v, !convertBinary()); + k = cctx.unwrapBinaryIfNeeded(k, !convertBinary(), null); + v = cctx.unwrapBinaryIfNeeded(v, !convertBinary(), null); if (rmvd != null && rmvd.contains(k)) continue; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxAdapter.java index a69f17ee9cad50..5a552a92d93c9b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxAdapter.java @@ -186,6 +186,10 @@ public abstract class IgniteTxAdapter extends GridMetadataAwareAdapter implement @GridToStringInclude protected long timeout; + /** Deployment class loader id which will be used for deserialization of entries on a distributed task. */ + @GridToStringExclude + protected IgniteUuid deploymentLdrId; + /** Invalidate flag. 
*/ protected volatile boolean invalidate; @@ -323,6 +327,7 @@ protected IgniteTxAdapter( this.txSize = txSize; this.subjId = subjId; this.taskNameHash = taskNameHash; + this.deploymentLdrId = U.contextDeploymentClassLoaderId(cctx.kernalContext()); nodeId = cctx.discovery().localNode().id(); @@ -1486,7 +1491,7 @@ protected final void batchStoreCommit(Iterable writeEntries) thro key, e.cached().rawGet(), e.keepBinary()), - cacheCtx.cacheObjectContext().unwrapBinaryIfNeeded(val, e.keepBinary(), false)); + cacheCtx.cacheObjectContext().unwrapBinaryIfNeeded(val, e.keepBinary(), false, null)); if (interceptorVal == null) continue; @@ -1624,8 +1629,14 @@ protected IgniteBiTuple applyTransformClosures( return F.t(cacheCtx.writeThrough() ? RELOAD : DELETE, null); if (F.isEmpty(txEntry.entryProcessors())) { - if (ret != null) - ret.value(cacheCtx, txEntry.value(), txEntry.keepBinary()); + if (ret != null) { + ret.value( + cacheCtx, + txEntry.value(), + txEntry.keepBinary(), + U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId) + ); + } return F.t(txEntry.op(), txEntry.value()); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalAdapter.java index bd9ec342dc2b7b..7b6cd7f10dc98f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalAdapter.java @@ -1240,8 +1240,14 @@ protected final void postLockWrite( addInvokeResult(txEntry, v, ret, ver); } } - else - ret.value(cacheCtx, v, txEntry.keepBinary()); + else { + ret.value( + cacheCtx, + v, + txEntry.keepBinary(), + U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId) + ); + } } boolean pass = F.isEmpty(filter) || cacheCtx.isAll(cached, filter); @@ -1312,6 +1318,9 @@ 
protected final void addInvokeResult(IgniteTxEntry txEntry, IgniteThread.onEntryProcessorEntered(true); + if (cctx.kernalContext().deploy().enabled() && deploymentLdrId != null) + U.restoreDeploymentContext(cctx.kernalContext(), deploymentLdrId); + try { Object res = null; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataRowStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataRowStore.java index 103280131729d0..f06677135b0f08 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataRowStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataRowStore.java @@ -31,7 +31,7 @@ */ public class CacheDataRowStore extends RowStore { /** Whether version should be skipped. */ - private static ThreadLocal SKIP_VER = ThreadLocal.withInitial(() -> false); + private static final ThreadLocal SKIP_VER = ThreadLocal.withInitial(() -> false); /** * @return Skip version flag. @@ -74,17 +74,8 @@ public int getPartitionId() { * @param link Link. * @return Search row. */ - CacheSearchRow keySearchRow(int cacheId, int hash, long link) { - DataRow dataRow = new DataRow( - grp, - hash, - link, - partId, - CacheDataRowAdapter.RowData.KEY_ONLY, - SKIP_VER.get() - ); - - return initDataRow(dataRow, cacheId); + protected CacheSearchRow keySearchRow(int cacheId, int hash, long link) { + return dataRow(cacheId, hash, link, CacheDataRowAdapter.RowData.KEY_ONLY); } /** @@ -97,7 +88,7 @@ CacheSearchRow keySearchRow(int cacheId, int hash, long link) { * @param opCntr Mvcc operation counter. * @return Search row. 
*/ - MvccDataRow mvccRow(int cacheId, int hash, long link, CacheDataRowAdapter.RowData rowData, long crdVer, long mvccCntr, int opCntr) { + protected MvccDataRow mvccRow(int cacheId, int hash, long link, CacheDataRowAdapter.RowData rowData, long crdVer, long mvccCntr, int opCntr) { MvccDataRow row = new MvccDataRow( grp, hash, @@ -120,7 +111,7 @@ MvccDataRow mvccRow(int cacheId, int hash, long link, CacheDataRowAdapter.RowDat * @param rowData Required row data. * @return Data row. */ - CacheDataRow dataRow(int cacheId, int hash, long link, CacheDataRowAdapter.RowData rowData) { + protected CacheDataRow dataRow(int cacheId, int hash, long link, CacheDataRowAdapter.RowData rowData) { DataRow dataRow = new DataRow( grp, hash, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java index e9d652c6b1be1c..e9a88bab0eb63e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java @@ -84,6 +84,7 @@ public class CacheDataTree extends BPlusTree { * @param rowStore Row store. * @param metaPageId Meta page ID. * @param initNew Initialize new index. + * @param pageFlag Default flag value for allocated pages. * @throws IgniteCheckedException If failed. 
*/ public CacheDataTree( @@ -93,7 +94,8 @@ public CacheDataTree( CacheDataRowStore rowStore, long metaPageId, boolean initNew, - PageLockListener lockLsnr + PageLockListener lockLsnr, + byte pageFlag ) throws IgniteCheckedException { super( name, @@ -106,6 +108,7 @@ public CacheDataTree( reuseList, innerIO(grp), leafIO(grp), + pageFlag, grp.shared().kernalContext().failure(), lockLsnr ); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingEntriesTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingEntriesTree.java index 9cfb2c6c5032d1..6070aca145b4e1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingEntriesTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingEntriesTree.java @@ -43,6 +43,7 @@ public class PendingEntriesTree extends BPlusTree { * @param metaPageId Meta page ID. * @param reuseList Reuse list. * @param initNew Initialize new index. + * @param pageFlag Default flag value for allocated pages. * @throws IgniteCheckedException If failed. */ public PendingEntriesTree( @@ -52,7 +53,8 @@ public PendingEntriesTree( long metaPageId, ReuseList reuseList, boolean initNew, - PageLockListener lockLsnr + PageLockListener lockLsnr, + byte pageFlag ) throws IgniteCheckedException { super( name, @@ -65,6 +67,7 @@ public PendingEntriesTree( reuseList, grp.sharedGroup() ? CacheIdAwarePendingEntryInnerIO.VERSIONS : PendingEntryInnerIO.VERSIONS, grp.sharedGroup() ? 
CacheIdAwarePendingEntryLeafIO.VERSIONS : PendingEntryLeafIO.VERSIONS, + pageFlag, grp.shared().kernalContext().failure(), lockLsnr ); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/verify/IdleVerifyUtility.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/verify/IdleVerifyUtility.java index b93c12273fc61f..c2ba3b166e4763 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/verify/IdleVerifyUtility.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/verify/IdleVerifyUtility.java @@ -25,7 +25,6 @@ import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.pagemem.PageIdAllocator; @@ -43,6 +42,10 @@ import org.apache.ignite.lang.IgniteInClosure; import org.jetbrains.annotations.Nullable; +import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_AUX; +import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_DATA; +import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_IDX; + /** * Utility class for idle verify command. */ @@ -75,7 +78,8 @@ public static void checkPartitionsPageCrcSum( * @param pageStore Page store. * @param grpCtx Passed cache group context. * @param partId Partition id. - * @param pageType Page type. Possible types {@link PageIdAllocator#FLAG_DATA}, {@link PageIdAllocator#FLAG_IDX}. + * @param pageType Page type. Possible types {@link PageIdAllocator#FLAG_DATA}, {@link PageIdAllocator#FLAG_IDX} + * and {@link PageIdAllocator#FLAG_AUX}. * @throws IgniteCheckedException If reading page failed. * @throws GridNotIdleException If cluster not idle. 
*/ @@ -83,11 +87,11 @@ public static void checkPartitionsPageCrcSum( FilePageStore pageStore, CacheGroupContext grpCtx, int partId, - byte pageType + @Deprecated byte pageType ) throws IgniteCheckedException, GridNotIdleException { - assert pageType == PageIdAllocator.FLAG_DATA || pageType == PageIdAllocator.FLAG_IDX : pageType; + assert pageType == FLAG_DATA || pageType == FLAG_IDX || pageType == FLAG_AUX : pageType; - long pageId = PageIdUtils.pageId(partId, pageType, 0); + long pageId = PageIdUtils.pageId(partId, (byte)0, 0); ByteBuffer buf = ByteBuffer.allocateDirect(grpCtx.dataRegion().pageMemory().pageSize()); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/version/GridCacheLazyPlainVersionedEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/version/GridCacheLazyPlainVersionedEntry.java index 84dfae15d497e3..7cd779318f3b3b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/version/GridCacheLazyPlainVersionedEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/version/GridCacheLazyPlainVersionedEntry.java @@ -82,7 +82,7 @@ public GridCacheLazyPlainVersionedEntry(GridCacheContext cctx, /** {@inheritDoc} */ @Override public K key() { if (key == null) - key = (K)cctx.unwrapBinaryIfNeeded(keyObj, keepBinary); + key = (K)cctx.unwrapBinaryIfNeeded(keyObj, keepBinary, null); return key; } @@ -100,7 +100,7 @@ public GridCacheLazyPlainVersionedEntry(GridCacheContext cctx, */ public V value(boolean keepBinary) { if (val == null) - val = (V)cctx.unwrapBinaryIfNeeded(valObj, keepBinary, true); + val = (V)cctx.unwrapBinaryIfNeeded(valObj, keepBinary, true, null); return val; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cacheobject/UserCacheObjectByteArrayImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cacheobject/UserCacheObjectByteArrayImpl.java index 
aa4d5f573f2e69..dcb72b72dc4374 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cacheobject/UserCacheObjectByteArrayImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cacheobject/UserCacheObjectByteArrayImpl.java @@ -47,7 +47,12 @@ public UserCacheObjectByteArrayImpl(byte[] val) { /** {@inheritDoc} */ @Nullable @Override public T value(CacheObjectValueContext ctx, boolean cpy) { - return super.value(ctx, false); // Do not need copy since user value is not in cache. + return value(ctx, cpy, null); + } + + /** {@inheritDoc} */ + @Nullable @Override public T value(CacheObjectValueContext ctx, boolean cpy, ClassLoader ldr) { + return super.value(ctx, false, ldr); // Do not need copy since user value is not in cache. } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridClusterStateProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridClusterStateProcessor.java index faacc71716fc5c..01ded979951892 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridClusterStateProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridClusterStateProcessor.java @@ -1049,6 +1049,12 @@ public IgniteInternalFuture changeGlobalState( boolean forceChangeBaselineTopology, boolean isAutoAdjust ) { + if (ctx.maintenanceRegistry().isMaintenanceMode()) { + return new GridFinishedFuture<>( + new IgniteCheckedException("Failed to " + prettyStr(state) + " (node is in maintenance mode).") + ); + } + BaselineTopology blt = (compatibilityMode && !forceChangeBaselineTopology) ? 
null : calculateNewBaselineTopology(state, baselineNodes, forceChangeBaselineTopology); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerEntry.java index 27a4520f908a46..f3fdc35481f354 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerEntry.java @@ -85,7 +85,7 @@ public DataStreamerEntry(KeyCacheObject key, CacheObject val) { public Map.Entry toEntry(final GridCacheContext ctx, final boolean keepBinary) { return new Map.Entry() { @Override public K getKey() { - return (K)ctx.cacheObjectContext().unwrapBinaryIfNeeded(key, keepBinary, false); + return (K)ctx.cacheObjectContext().unwrapBinaryIfNeeded(key, keepBinary, false, null); } @Override public V setValue(V val) { @@ -93,7 +93,7 @@ public Map.Entry toEntry(final GridCacheContext ctx, final boolean } @Override public V getValue() { - return (V)ctx.cacheObjectContext().unwrapBinaryIfNeeded(val, keepBinary, false); + return (V)ctx.cacheObjectContext().unwrapBinaryIfNeeded(val, keepBinary, false, null); } }; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/DataStructuresProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/DataStructuresProcessor.java index e72692d6de3140..232f5fc54e1e22 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/DataStructuresProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/DataStructuresProcessor.java @@ -105,6 +105,9 @@ * Manager of data structures. 
*/ public final class DataStructuresProcessor extends GridProcessorAdapter implements IgniteChangeGlobalStateSupport { + /** DataRegionConfiguration name reserved for volatile caches. */ + public static final String VOLATILE_DATA_REGION_NAME = "volatileDsMemPlc"; + /** */ public static final String DEFAULT_VOLATILE_DS_GROUP_NAME = "default-volatile-ds-group"; @@ -359,8 +362,9 @@ public static boolean isDataStructureCache(String cacheName) { * @return {@code True} if group name is reserved to store data structures. */ public static boolean isReservedGroup(@Nullable String grpName) { - return DEFAULT_DS_GROUP_NAME.equals(grpName) || - DEFAULT_VOLATILE_DS_GROUP_NAME.equals(grpName); + return grpName != null && + (DEFAULT_DS_GROUP_NAME.equals(grpName) || + grpName.startsWith(DEFAULT_VOLATILE_DS_GROUP_NAME)); } /** @@ -511,11 +515,18 @@ public final IgniteAtomicLong atomicLong(final String name, cfg = dfltAtomicCfg; } + String dataRegionName = null; final String grpName; - if (type.isVolatile()) - grpName = DEFAULT_VOLATILE_DS_GROUP_NAME; - else if (cfg.getGroupName() != null) + if (type.isVolatile()) { + String volatileGrpName = DEFAULT_VOLATILE_DS_GROUP_NAME; + + dataRegionName = VOLATILE_DATA_REGION_NAME; + + volatileGrpName += "@" + dataRegionName; + + grpName = volatileGrpName; + } else if (cfg.getGroupName() != null) grpName = cfg.getGroupName(); else grpName = DEFAULT_DS_GROUP_NAME; @@ -528,7 +539,7 @@ else if (cfg.getGroupName() != null) if (!create && ctx.cache().cacheDescriptor(cacheName) == null) return null; - ctx.cache().dynamicStartCache(cacheConfiguration(cfg, cacheName, grpName), + ctx.cache().dynamicStartCache(cacheConfiguration(cfg, cacheName, grpName, dataRegionName), cacheName, null, CacheType.DATA_STRUCTURES, @@ -888,9 +899,12 @@ private boolean isCollocated(CollectionConfiguration cfg) { * @param cfg Atomic configuration. * @param name Cache name. * @param grpName Group name. + * @param dataRegionName Name of data region for this cache. 
+ * * @return Cache configuration. */ - private CacheConfiguration cacheConfiguration(AtomicConfiguration cfg, String name, String grpName) { + private CacheConfiguration cacheConfiguration(AtomicConfiguration cfg, String name, String grpName, + String dataRegionName) { CacheConfiguration ccfg = new CacheConfiguration(); ccfg.setName(name); @@ -901,6 +915,7 @@ private CacheConfiguration cacheConfiguration(AtomicConfiguration cfg, String na ccfg.setCacheMode(cfg.getCacheMode()); ccfg.setNodeFilter(CacheConfiguration.ALL_NODES); ccfg.setAffinity(cfg.getAffinity()); + ccfg.setDataRegionName(dataRegionName); if (cfg.getCacheMode() == PARTITIONED) ccfg.setBackups(cfg.getBackups()); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/failure/FailureProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/failure/FailureProcessor.java index 9dfdd1c1a80f37..2eab2c552f0ac6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/failure/FailureProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/failure/FailureProcessor.java @@ -48,10 +48,11 @@ public class FailureProcessor extends GridProcessorAdapter { public static final int DFLT_FAILURE_HANDLER_RESERVE_BUFFER_SIZE = 64 * 1024; /** Value of the system property that enables threads dumping on failure. */ - private final boolean igniteDumpThreadsOnFailure = IgniteSystemProperties.getBoolean(IGNITE_DUMP_THREADS_ON_FAILURE); + private final boolean igniteDumpThreadsOnFailure = + IgniteSystemProperties.getBoolean(IGNITE_DUMP_THREADS_ON_FAILURE, true); /** Timeout for throttling of thread dumps generation. */ - long dumpThreadsTrottlingTimeout; + private long dumpThreadsTrottlingTimeout; /** Ignored failure log message. 
*/ static final String IGNORED_FAILURE_LOG_MSG = "Possible failure suppressed accordingly to a configured handler "; @@ -61,7 +62,7 @@ public class FailureProcessor extends GridProcessorAdapter { "Will be handled accordingly to configured handler "; /** Thread dump per failure type timestamps. */ - private Map threadDumpPerFailureTypeTime; + private final Map threadDumpPerFailureTypeTs; /** Ignite. */ private final Ignite ignite; @@ -83,6 +84,8 @@ public FailureProcessor(GridKernalContext ctx) { ignite = ctx.grid(); + Map threadDumpPerFailureTypeTs = null; + if (igniteDumpThreadsOnFailure) { dumpThreadsTrottlingTimeout = IgniteSystemProperties.getLong( @@ -91,12 +94,14 @@ public FailureProcessor(GridKernalContext ctx) { ); if (dumpThreadsTrottlingTimeout > 0) { - threadDumpPerFailureTypeTime = new EnumMap<>(FailureType.class); + threadDumpPerFailureTypeTs = new EnumMap<>(FailureType.class); for (FailureType type : FailureType.values()) - threadDumpPerFailureTypeTime.put(type, 0L); + threadDumpPerFailureTypeTs.put(type, 0L); } } + + this.threadDumpPerFailureTypeTs = threadDumpPerFailureTypeTs; } /** {@inheritDoc} */ @@ -124,7 +129,8 @@ public boolean nodeStopping() { } /** - * This method is used to initialize local failure handler if {@link IgniteConfiguration} don't contain configured one. + * This method is used to initialize local failure handler if {@link IgniteConfiguration} + * doesn't contain configured one. * * @return Default {@link FailureHandler} implementation. */ @@ -202,7 +208,16 @@ public synchronized boolean process(FailureContext failureCtx, FailureHandler hn } /** - * Defines whether thread dump should be throttled for givn failure type or not. + * Returns timeout for throttling of thread dumps generation. + * + * @return Timeout for throttling of thread dumps generation. + */ + long dumpThreadsTrottlingTimeout() { + return dumpThreadsTrottlingTimeout; + } + + /** + * Defines whether thread dump should be throttled for given failure type or not. 
* * @param type Failure type. * @return {@code True} if thread dump generation should be throttled fro given failure type. @@ -213,14 +228,14 @@ private boolean throttleThreadDump(FailureType type) { long curr = U.currentTimeMillis(); - Long last = threadDumpPerFailureTypeTime.get(type); + Long last = threadDumpPerFailureTypeTs.get(type); assert last != null : "Unknown failure type " + type; boolean throttle = curr - last < dumpThreadsTrottlingTimeout; if (!throttle) - threadDumpPerFailureTypeTime.put(type, curr); + threadDumpPerFailureTypeTs.put(type, curr); else { if (log.isInfoEnabled()) { log.info("Thread dump is hidden due to throttling settings. " + diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobProcessor.java index 3d52487ba7c8b7..2833f38fa8fbfe 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobProcessor.java @@ -291,7 +291,7 @@ public class GridJobProcessor extends GridProcessorAdapter { }; /** Current session. */ - private final ThreadLocal currSess = new ThreadLocal<>(); + private final ThreadLocal currSess = new ThreadLocal<>(); /** * @param ctx Kernal context. @@ -1377,7 +1377,7 @@ else if (jobAlwaysActivate) { * * @param ses Session. */ - public void currentTaskSession(ComputeTaskSession ses) { + public void currentTaskSession(GridJobSessionImpl ses) { currSess.set(ses); } @@ -1409,6 +1409,20 @@ public String currentTaskName() { return ses.getTaskName(); } + /** + * Returns current deployment. + * + * @return Deployment. + */ + public GridDeployment currentDeployment() { + GridJobSessionImpl session = currSess.get(); + + if (session == null || session.deployment() == null) + return null; + + return session.deployment(); + } + /** * @param jobWorker Worker. 
* @return {@code True} if job has not been cancelled and should be activated. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobWorker.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobWorker.java index b66c65eb35a76c..1ff0daad0c7ad5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobWorker.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobWorker.java @@ -38,7 +38,6 @@ import org.apache.ignite.events.JobEvent; import org.apache.ignite.failure.FailureContext; import org.apache.ignite.failure.FailureType; -import org.apache.ignite.internal.GridInternalException; import org.apache.ignite.internal.GridJobContextImpl; import org.apache.ignite.internal.GridJobExecuteResponse; import org.apache.ignite.internal.GridJobSessionImpl; @@ -484,7 +483,8 @@ boolean initialize(GridDeployment dep, Class taskCls) { job = SecurityUtils.sandboxedProxy(ctx, ComputeJob.class, job); } catch (IgniteCheckedException e) { - U.error(log, "Failed to initialize job [jobId=" + ses.getJobId() + ", ses=" + ses + ']', e); + if (log.isDebugEnabled()) + U.error(log, "Failed to initialize job [jobId=" + ses.getJobId() + ", ses=" + ses + ']', e); ex = new IgniteException(e); } @@ -618,28 +618,29 @@ private void execute0(boolean skipNtf) { assert ex != null; } else { - if (X.hasCause(e, GridInternalException.class)) { - // Print exception for internal errors only if debug is enabled. 
- if (log.isDebugEnabled()) - U.error(log, "Failed to execute job [jobId=" + ses.getJobId() + ", ses=" + ses + ']', e); - } - else if (X.hasCause(e, InterruptedException.class)) { - String msg = "Job was cancelled [jobId=" + ses.getJobId() + ", ses=" + ses + ']'; - - if (log.isDebugEnabled()) - U.error(log, msg, e); - else - U.warn(log, msg); + if (X.hasCause(e, InterruptedException.class)) { + if (log.isDebugEnabled()) { + U.error(log, + "Job was cancelled [jobId=" + ses.getJobId() + ", ses=" + ses + ']', e); + } } else if (X.hasCause(e, GridServiceNotFoundException.class) || - X.hasCause(e, ClusterTopologyCheckedException.class)) - // Should be throttled, because GridServiceProxy continuously retry getting service. - LT.error(log, e, "Failed to execute job [jobId=" + ses.getJobId() + ", ses=" + ses + ']'); + X.hasCause(e, ClusterTopologyCheckedException.class)) { + if (log.isDebugEnabled()) { + // Should be throttled, because GridServiceProxy continuously retry getting service. + LT.error(log, e, "Failed to execute job [jobId=" + ses.getJobId() + + ", ses=" + ses + ']'); + } + } else { - U.error(log, "Failed to execute job [jobId=" + ses.getJobId() + ", ses=" + ses + ']', e); + String msg = "Failed to execute job [jobId=" + ses.getJobId() + ", ses=" + ses + ']'; + + if (X.hasCause(e, OutOfMemoryError.class)) { + U.error(log, msg, e); - if (X.hasCause(e, OutOfMemoryError.class)) ctx.failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); + } else if (log.isDebugEnabled()) + U.error(log, msg, e); } ex = e; @@ -722,7 +723,8 @@ else if (sysStopping && X.hasCause(e, InterruptedException.class, IgniteInterrup assert msg != null; assert ex != null; - U.error(log, msg, e); + if (log.isDebugEnabled()) + U.error(log, msg, e); return ex; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/metastorage/persistence/DistributedMetaStorageUtil.java 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/metastorage/persistence/DistributedMetaStorageUtil.java index 01be7424218955..309b7a09ddcb3c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/metastorage/persistence/DistributedMetaStorageUtil.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/metastorage/persistence/DistributedMetaStorageUtil.java @@ -24,7 +24,14 @@ import org.apache.ignite.marshaller.jdk.JdkMarshaller; /** */ -class DistributedMetaStorageUtil { +final class DistributedMetaStorageUtil { + /** + * + */ + private DistributedMetaStorageUtil() { + // No-op. + } + /** * Common prefix for everything that is going to be written into {@link MetaStorage}. Something that has minimal * chance of collision with the existing keys. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/metric/GridMetricManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/metric/GridMetricManager.java index 17e69678ccb707..fddf7ff91fab2a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/metric/GridMetricManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/metric/GridMetricManager.java @@ -410,28 +410,32 @@ private T readFromMetastorage(String key) { * @param regName Metric registry name. 
*/ public void remove(String regName) { - ReadOnlyMetricRegistry mreg = registries.remove(regName); + GridCompoundFuture opsFut = new GridCompoundFuture<>(); - if (mreg == null) - return; - - notifyListeners(mreg, metricRegRemoveLsnrs, log); + registries.computeIfPresent(regName, (key, mreg) -> { + notifyListeners(mreg, metricRegRemoveLsnrs, log); - DistributedMetaStorage metastorage0 = metastorage; + DistributedMetaStorage metastorage0 = metastorage; - if (metastorage0 == null) - return; + if (metastorage0 == null) + return null; - try { - GridCompoundFuture opsFut = new GridCompoundFuture<>(); - - for (Metric m : mreg) { - if (m instanceof HitRateMetric) - opsFut.add(metastorage0.removeAsync(metricName(HITRATE_CFG_PREFIX, m.name()))); - else if (m instanceof HistogramMetric) - opsFut.add(metastorage0.removeAsync(metricName(HISTOGRAM_CFG_PREFIX, m.name()))); + try { + for (Metric m : mreg) { + if (m instanceof HitRateMetric) + opsFut.add(metastorage0.removeAsync(metricName(HITRATE_CFG_PREFIX, m.name()))); + else if (m instanceof HistogramMetric) + opsFut.add(metastorage0.removeAsync(metricName(HISTOGRAM_CFG_PREFIX, m.name()))); + } + } + catch (IgniteCheckedException e) { + throw new IgniteException(e); } + return null; + }); + + try { opsFut.markInitialized(); opsFut.get(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcColumnMeta.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcColumnMeta.java index 61e7370391a956..a8f749e548c617 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcColumnMeta.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcColumnMeta.java @@ -19,6 +19,7 @@ import org.apache.ignite.binary.BinaryRawWriter; import org.apache.ignite.internal.binary.BinaryUtils; +import org.apache.ignite.internal.binary.GridBinaryMarshaller; import 
org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion; import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata; import org.apache.ignite.internal.util.typedef.internal.S; @@ -128,7 +129,7 @@ public void write(BinaryRawWriter writer) { writer.writeString(tableName); writer.writeString(columnName); - byte typeId = BinaryUtils.typeByClass(dataType); + byte typeId = getTypeId(dataType); writer.writeByte(typeId); @@ -138,6 +139,18 @@ public void write(BinaryRawWriter writer) { } } + /** + * Get ODBC type ID for the type. + * @param dataType Data type class. + * @return Type ID. + */ + private static byte getTypeId(Class dataType) { + if (dataType.equals(java.sql.Date.class)) + return GridBinaryMarshaller.DATE; + + return BinaryUtils.typeByClass(dataType); + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(OdbcColumnMeta.class, this); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java index c9b779f237bc32..33f1c4b464d1bb 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java @@ -211,6 +211,15 @@ public OdbcMessageParser(GridKernalContext ctx, ClientListenerProtocolVersion ve break; } + case OdbcRequest.META_RESULTSET: { + String schema = reader.readString(); + String sqlQuery = reader.readString(); + + res = new OdbcQueryGetResultsetMetaRequest(schema, sqlQuery); + + break; + } + case OdbcRequest.MORE_RESULTS: { long queryId = reader.readLong(); int pageSize = reader.readInt(); @@ -281,12 +290,7 @@ else if (res0 instanceof OdbcQueryExecuteResult) { Collection metas = res.columnsMetadata(); - assert metas != null; - - writer.writeInt(metas.size()); - - for (OdbcColumnMeta meta : metas) - 
meta.write(writer); + writeResultsetMeta(writer, metas); writeAffectedRows(writer, res.affectedRows()); } @@ -378,12 +382,7 @@ else if (res0 instanceof OdbcQueryGetColumnsMetaResult) { Collection columnsMeta = res.meta(); - assert columnsMeta != null; - - writer.writeInt(columnsMeta.size()); - - for (OdbcColumnMeta columnMeta : columnsMeta) - columnMeta.write(writer); + writeResultsetMeta(writer, columnsMeta); } else if (res0 instanceof OdbcQueryGetTablesMetaResult) { OdbcQueryGetTablesMetaResult res = (OdbcQueryGetTablesMetaResult) res0; @@ -404,12 +403,31 @@ else if (res0 instanceof OdbcQueryGetParamsMetaResult) { SqlListenerUtils.writeObject(writer, typeIds, true); } + else if (res0 instanceof OdbcQueryGetResultsetMetaResult) { + OdbcQueryGetResultsetMetaResult res = (OdbcQueryGetResultsetMetaResult) res0; + + writeResultsetMeta(writer, res.columnsMetadata()); + } else assert false : "Should not reach here."; return new ClientMessage(writer.array()); } + /** + * Write resultset columns metadata in a unified way. + * @param writer Writer. 
+ * @param meta Metadata + */ + private static void writeResultsetMeta(BinaryWriterExImpl writer, Collection meta) { + assert meta != null; + + writer.writeInt(meta.size()); + + for (OdbcColumnMeta columnMeta : meta) + columnMeta.write(writer); + } + /** {@inheritDoc} */ @Override public int decodeCommandType(ClientMessage msg) { assert msg != null; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetParamsMetaRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetParamsMetaRequest.java index 0ae29161b38468..072b4fc51c1204 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetParamsMetaRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetParamsMetaRequest.java @@ -17,45 +17,15 @@ package org.apache.ignite.internal.processors.odbc.odbc; -import org.apache.ignite.internal.util.typedef.internal.S; - /** * ODBC query get params meta request. */ -public class OdbcQueryGetParamsMetaRequest extends OdbcRequest { - /** Schema. */ - private final String schema; - - /** Query. */ - private final String query; - +public class OdbcQueryGetParamsMetaRequest extends OdbcQueryGetQueryMetaRequest { /** * @param schema Schema. * @param query SQL Query. */ public OdbcQueryGetParamsMetaRequest(String schema, String query) { - super(META_PARAMS); - - this.schema = schema; - this.query = query; - } - - /** - * @return SQL Query. - */ - public String query() { - return query; - } - - /** - * @return Schema name. 
- */ - public String schema() { - return schema; - } - - /** {@inheritDoc} */ - @Override public String toString() { - return S.toString(OdbcQueryGetParamsMetaRequest.class, this); + super(META_PARAMS, schema, query); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetQueryMetaRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetQueryMetaRequest.java new file mode 100644 index 00000000000000..d606b14a1ab9bc --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetQueryMetaRequest.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.odbc.odbc; + +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * ODBC query get query meta request. + */ +public class OdbcQueryGetQueryMetaRequest extends OdbcRequest { + /** Schema. */ + protected final String schema; + + /** Query. */ + protected final String query; + + /** + * @param cmd Command code. + * @param schema Schema. + * @param query SQL Query. 
+ */ + public OdbcQueryGetQueryMetaRequest(byte cmd, String schema, String query) { + super(cmd); + + this.schema = schema; + this.query = query; + } + + /** + * @return SQL Query. + */ + public String query() { + return query; + } + + /** + * @return Schema name. + */ + public String schema() { + return schema; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(OdbcQueryGetQueryMetaRequest.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetResultsetMetaRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetResultsetMetaRequest.java new file mode 100644 index 00000000000000..99a3263e19f3c9 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetResultsetMetaRequest.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.odbc.odbc; + +/** + * ODBC query get resultset meta request. + */ +public class OdbcQueryGetResultsetMetaRequest extends OdbcQueryGetQueryMetaRequest { + /** + * @param schema Schema. + * @param query SQL Query. 
+ */ + public OdbcQueryGetResultsetMetaRequest(String schema, String query) { + super(META_RESULTSET, schema, query); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetResultsetMetaResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetResultsetMetaResult.java new file mode 100644 index 00000000000000..0cf0b24059f161 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetResultsetMetaResult.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.odbc.odbc; + +import java.util.Collection; + +/** + * SQL listener query resultset metadata result. + */ +public class OdbcQueryGetResultsetMetaResult { + /** Resultset columns metadata. */ + private final Collection columnsMetadata; + + /** + * @param columnsMetadata Columns metadata. + */ + public OdbcQueryGetResultsetMetaResult(Collection columnsMetadata) { + this.columnsMetadata = columnsMetadata; + } + + /** + * @return Columns metadata. 
+ */ + public Collection columnsMetadata() { + return columnsMetadata; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequest.java index 5b02cfe9924b52..bc4d179d8b6c30 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequest.java @@ -50,6 +50,9 @@ public class OdbcRequest extends ClientListenerRequestNoId { /** Process ordered streaming batch. */ public static final byte STREAMING_BATCH = 10; + /** Get resultset columns meta. */ + public static final byte META_RESULTSET = 11; + /** Command. */ private final byte cmd; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java index 9bf2c12f8801a1..a21caebf7e1795 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java @@ -28,6 +28,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import javax.cache.configuration.Factory; + import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; @@ -48,6 +49,7 @@ import org.apache.ignite.internal.processors.odbc.ClientListenerResponseSender; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcParameterMeta; import org.apache.ignite.internal.processors.odbc.odbc.escape.OdbcEscapeUtils; +import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata; import org.apache.ignite.internal.processors.query.GridQueryProperty; import 
org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor; import org.apache.ignite.internal.processors.query.IgniteSQLException; @@ -69,6 +71,7 @@ import static org.apache.ignite.internal.processors.odbc.odbc.OdbcRequest.META_COLS; import static org.apache.ignite.internal.processors.odbc.odbc.OdbcRequest.META_PARAMS; +import static org.apache.ignite.internal.processors.odbc.odbc.OdbcRequest.META_RESULTSET; import static org.apache.ignite.internal.processors.odbc.odbc.OdbcRequest.META_TBLS; import static org.apache.ignite.internal.processors.odbc.odbc.OdbcRequest.MORE_RESULTS; import static org.apache.ignite.internal.processors.odbc.odbc.OdbcRequest.QRY_CLOSE; @@ -254,6 +257,9 @@ public ClientListenerResponse doHandle(OdbcRequest req) { case META_PARAMS: return getParamsMeta((OdbcQueryGetParamsMetaRequest)req); + case META_RESULTSET: + return getResultMeta((OdbcQueryGetResultsetMetaRequest)req); + case MORE_RESULTS: return moreResults((OdbcQueryMoreResultsRequest)req); } @@ -411,7 +417,7 @@ private ClientListenerResponse executeQuery(OdbcQueryExecuteRequest req) { if (set == null) fieldsMeta = new ArrayList<>(); else { - fieldsMeta = results.currentResultSet().fieldsMeta(); + fieldsMeta = set.fieldsMeta(); if (log.isDebugEnabled()) { for (OdbcColumnMeta meta : fieldsMeta) @@ -747,7 +753,8 @@ private ClientListenerResponse getTablesMeta(OdbcQueryGetTablesMetaRequest req) } /** - * {@link OdbcQueryGetParamsMetaRequest} command handler. + * {@link OdbcQueryGetQueryMetaRequest} command handler. + * Returns metadata for the parameters to be set. * * @param req Get params metadata request. * @return Response. @@ -780,6 +787,34 @@ private ClientListenerResponse getParamsMeta(OdbcQueryGetParamsMetaRequest req) } } + /** + * {@link OdbcQueryGetQueryMetaRequest} command handler. + * Returns metadata for the columns of the result set. + * + * @param req Get resultset metadata request. + * @return Response. 
+ */ + private ClientListenerResponse getResultMeta(OdbcQueryGetResultsetMetaRequest req) { + try { + String sql = OdbcEscapeUtils.parse(req.query()); + String schema = OdbcUtils.prepareSchema(req.schema()); + + SqlFieldsQueryEx qry = makeQuery(schema, sql); + + List columns = ctx.query().getIndexing().resultMetaData(schema, qry); + Collection meta = OdbcUtils.convertMetadata(columns, ver); + + OdbcQueryGetResultsetMetaResult res = new OdbcQueryGetResultsetMetaResult(meta); + + return new OdbcResponse(res); + } + catch (Exception e) { + U.error(log, "Failed to get resultset metadata [reqId=" + req.requestId() + ", req=" + req + ']', e); + + return exceptionToResult(e); + } + } + /** * {@link OdbcQueryMoreResultsRequest} command handler. * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcResultSet.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcResultSet.java index b319366b7071c1..1eba63df1cdc37 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcResultSet.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcResultSet.java @@ -24,7 +24,6 @@ import org.apache.ignite.cache.query.FieldsQueryCursor; import org.apache.ignite.internal.processors.cache.QueryCursorImpl; import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion; -import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata; /** * Represents single result set. @@ -39,6 +38,9 @@ public class OdbcResultSet { /** Client version. */ private ClientListenerProtocolVersion ver; + /** Result columns metadata. */ + private Collection meta; + /** * Constructor. * @param cursor Result set cursor. 
@@ -50,10 +52,14 @@ public class OdbcResultSet { this.cursor = (QueryCursorImpl>)cursor; this.ver = ver; - if (this.cursor.isQuery()) + if (this.cursor.isQuery()) { iter = this.cursor.iterator(); - else + meta = OdbcUtils.convertMetadata(this.cursor.fieldsMeta(), ver); + } + else { iter = null; + meta = new ArrayList<>(); + } } /** @@ -67,10 +73,7 @@ public boolean hasUnfetchedRows() { * @return Fields metadata of the current result set. */ public Collection fieldsMeta() { - if (!cursor.isQuery()) - return new ArrayList<>(); - - return convertMetadata(cursor.fieldsMeta(), ver); + return meta; } /** @@ -89,24 +92,4 @@ public List fetch(int maxSize) { return items; } - - /** - * Convert metadata in collection from {@link GridQueryFieldMetadata} to - * {@link OdbcColumnMeta}. - * - * @param meta Internal query field metadata. - * @param ver Client version. - * @return Odbc query field metadata. - */ - private static Collection convertMetadata(Collection meta, - ClientListenerProtocolVersion ver) { - List res = new ArrayList<>(); - - if (meta != null) { - for (GridQueryFieldMetadata info : meta) - res.add(new OdbcColumnMeta(info, ver)); - } - - return res; - } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcUtils.java index a687b9697997af..110f71c0dba74a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcUtils.java @@ -17,14 +17,18 @@ package org.apache.ignite.internal.processors.odbc.odbc; +import java.util.ArrayList; +import java.util.Collection; import java.util.Iterator; import java.util.List; import org.apache.ignite.IgniteException; import org.apache.ignite.cache.query.QueryCursor; import org.apache.ignite.internal.processors.cache.QueryCursorImpl; import 
org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; +import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion; import org.apache.ignite.internal.processors.odbc.SqlListenerDataTypes; import org.apache.ignite.internal.processors.odbc.SqlListenerUtils; +import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata; import org.apache.ignite.internal.processors.query.IgniteSQLException; import org.apache.ignite.internal.processors.query.QueryUtils; import org.apache.ignite.internal.util.typedef.F; @@ -242,4 +246,24 @@ public static long rowsAffected(QueryCursor> qryCur) { return 0; } + + /** + * Convert metadata in collection from {@link GridQueryFieldMetadata} to + * {@link OdbcColumnMeta}. + * + * @param meta Internal query field metadata. + * @param ver Client version. + * @return Odbc query field metadata. + */ + public static Collection convertMetadata(Collection meta, + ClientListenerProtocolVersion ver) { + List res = new ArrayList<>(); + + if (meta != null) { + for (GridQueryFieldMetadata info : meta) + res.add(new OdbcColumnMeta(info, ver)); + } + + return res; + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/PlatformProcessorImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/PlatformProcessorImpl.java index 61dec3a4768b4d..266b9de026183e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/PlatformProcessorImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/PlatformProcessorImpl.java @@ -52,6 +52,7 @@ import org.apache.ignite.internal.processors.platform.cache.PlatformCacheExtension; import org.apache.ignite.internal.processors.platform.cache.PlatformCacheManager; import org.apache.ignite.internal.processors.platform.cache.affinity.PlatformAffinity; +import org.apache.ignite.internal.processors.platform.cache.affinity.PlatformAffinityManager; import 
org.apache.ignite.internal.processors.platform.cache.store.PlatformCacheStore; import org.apache.ignite.internal.processors.platform.cluster.PlatformClusterGroup; import org.apache.ignite.internal.processors.platform.datastreamer.PlatformDataStreamer; @@ -193,6 +194,9 @@ public class PlatformProcessorImpl extends GridProcessorAdapter implements Platf /** */ private static final int OP_GET_OR_CREATE_LOCK = 38; + /** */ + private static final int OP_GET_AFFINITY_MANAGER = 39; + /** Start latch. */ private final CountDownLatch startLatch = new CountDownLatch(1); @@ -637,7 +641,7 @@ private void loggerLog(int level, String message, String category, String errorI } case OP_GET_AFFINITY: { - return new PlatformAffinity(platformCtx, ctx, reader.readString()); + return new PlatformAffinity(platformCtx, reader.readString()); } case OP_GET_DATA_STREAMER: { @@ -739,6 +743,12 @@ private void loggerLog(int level, String message, String category, String errorI return lock == null ? null : new PlatformLock(platformCtx, lock); } + + case OP_GET_AFFINITY_MANAGER: { + int cacheId = reader.readInt(); + + return new PlatformAffinityManager(platformCtx, cacheId); + } } return PlatformAbstractTarget.throwUnsupported(type); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/PlatformCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/PlatformCache.java index c67d8e01e561ab..d5e53e1bb4801b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/PlatformCache.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/PlatformCache.java @@ -977,6 +977,7 @@ private IgniteFuture loadCacheAsync0(BinaryRawReaderEx reader, boolean loc case OP_QRY_CONTINUOUS: { long ptr = reader.readLong(); boolean loc = reader.readBoolean(); + boolean includeExpired = reader.readBoolean(); boolean hasFilter = reader.readBoolean(); Object filter = 
reader.readObjectDetached(); int bufSize = reader.readInt(); @@ -986,7 +987,7 @@ private IgniteFuture loadCacheAsync0(BinaryRawReaderEx reader, boolean loc PlatformContinuousQuery qry = platformCtx.createContinuousQuery(ptr, hasFilter, filter); - qry.start(cache, loc, bufSize, timeInterval, autoUnsubscribe, initQry); + qry.start(cache, loc, bufSize, timeInterval, autoUnsubscribe, initQry, includeExpired); return new PlatformContinuousQueryProxy(platformCtx, qry); } @@ -1455,6 +1456,8 @@ private Query readFieldsQuery(BinaryRawReaderEx reader) { boolean replicated = reader.readBoolean(); boolean collocated = reader.readBoolean(); String schema = reader.readString(); + int[] partitions = reader.readIntArray(); + int updateBatchSize = reader.readInt(); SqlFieldsQuery qry = QueryUtils.withQueryTimeout(new SqlFieldsQuery(sql), timeout, TimeUnit.MILLISECONDS) .setPageSize(pageSize) @@ -1465,7 +1468,9 @@ private Query readFieldsQuery(BinaryRawReaderEx reader) { .setLazy(lazy) .setReplicatedOnly(replicated) .setCollocated(collocated) - .setSchema(schema); + .setSchema(schema) + .setPartitions(partitions) + .setUpdateBatchSize(updateBatchSize); return qry; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/affinity/PlatformAffinity.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/affinity/PlatformAffinity.java index e18be6485b97c8..a0d79f24f55352 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/affinity/PlatformAffinity.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/affinity/PlatformAffinity.java @@ -23,22 +23,17 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.cache.affinity.Affinity; import org.apache.ignite.cluster.ClusterNode; -import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.binary.BinaryRawReaderEx; import 
org.apache.ignite.internal.binary.BinaryRawWriterEx; import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; -import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; -import org.apache.ignite.internal.processors.cache.GridCacheAffinityManager; -import org.apache.ignite.internal.processors.cache.GridCacheUtils; import org.apache.ignite.internal.processors.platform.PlatformAbstractTarget; import org.apache.ignite.internal.processors.platform.PlatformContext; import org.apache.ignite.internal.processors.platform.utils.PlatformUtils; -import org.apache.ignite.internal.util.typedef.C1; import org.apache.ignite.internal.util.typedef.internal.U; import org.jetbrains.annotations.Nullable; /** - * Native cache wrapper implementation. + * Affinity wrapper for platforms. */ public class PlatformAffinity extends PlatformAbstractTarget { /** */ @@ -86,45 +81,28 @@ public class PlatformAffinity extends PlatformAbstractTarget { /** */ public static final int OP_PARTITIONS = 15; - /** */ - public static final int OP_IS_ASSIGNMENT_VALID = 16; - - /** */ - private static final C1 TO_NODE_ID = new C1() { - @Nullable @Override public UUID apply(ClusterNode node) { - return node != null ? node.id() : null; - } - }; - /** Underlying cache affinity. */ private final Affinity aff; /** Discovery manager */ private final GridDiscoveryManager discovery; - /** Affinity manager. */ - private final GridCacheAffinityManager affMgr; - /** * Constructor. * * @param platformCtx Context. - * @param igniteCtx Ignite context. * @param name Cache name. 
*/ - public PlatformAffinity(PlatformContext platformCtx, GridKernalContext igniteCtx, @Nullable String name) + public PlatformAffinity(PlatformContext platformCtx, @Nullable String name) throws IgniteCheckedException { super(platformCtx); - this.aff = igniteCtx.grid().affinity(name); + aff = platformCtx.kernalContext().grid().affinity(name); if (aff == null) throw new IgniteCheckedException("Cache with the given name doesn't exist: " + name); - this.affMgr = this.platformCtx.kernalContext().cache().context().cacheContext(GridCacheUtils.cacheId(name)) - .affinity(); - - discovery = igniteCtx.discovery(); + discovery = platformCtx.kernalContext().discovery(); } /** {@inheritDoc} */ @@ -172,24 +150,6 @@ public PlatformAffinity(PlatformContext platformCtx, GridKernalContext igniteCtx return aff.isPrimaryOrBackup(node, key) ? TRUE : FALSE; } - case OP_IS_ASSIGNMENT_VALID: { - AffinityTopologyVersion ver = new AffinityTopologyVersion(reader.readLong(), reader.readInt()); - int part = reader.readInt(); - AffinityTopologyVersion endVer = affMgr.affinityTopologyVersion(); - - if (!affMgr.primaryChanged(part, ver, endVer)) { - return TRUE; - } - - if (!affMgr.partitionLocalNode(part, endVer)) { - return FALSE; - } - - // Special case: late affinity assignment when primary changes to local node due to a node join. - // Specified partition is local, and near cache entries are valid for primary keys. - return ver.topologyVersion() == endVer.topologyVersion() ? 
TRUE : FALSE; - } - default: return super.processInStreamOutLong(type, reader); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/affinity/PlatformAffinityManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/affinity/PlatformAffinityManager.java new file mode 100644 index 00000000000000..92306a6326fd08 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/affinity/PlatformAffinityManager.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.platform.cache.affinity; + +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.binary.BinaryRawReaderEx; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; +import org.apache.ignite.internal.processors.cache.GridCacheAffinityManager; +import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.processors.platform.PlatformAbstractTarget; +import org.apache.ignite.internal.processors.platform.PlatformContext; + +/** + * AffinityManager wrapper for platforms. + */ +public class PlatformAffinityManager extends PlatformAbstractTarget { + /** */ + public static final int OP_IS_ASSIGNMENT_VALID = 1; + + /** Affinity manager. */ + private final GridCacheAffinityManager affMgr; + + /** + * Constructor. + * + * @param platformCtx Context. + */ + public PlatformAffinityManager(PlatformContext platformCtx, int cacheId) { + super(platformCtx); + + GridCacheContext ctx = platformCtx.kernalContext().cache().context().cacheContext(cacheId); + + if (ctx == null) + throw new IgniteException("Cache doesn't exist: " + cacheId); + + affMgr = ctx.affinity(); + } + + /** {@inheritDoc} */ + @Override public long processInStreamOutLong(int type, BinaryRawReaderEx reader) throws IgniteCheckedException { + if (type == OP_IS_ASSIGNMENT_VALID) + { + AffinityTopologyVersion ver = new AffinityTopologyVersion(reader.readLong(), reader.readInt()); + int part = reader.readInt(); + AffinityTopologyVersion endVer = affMgr.affinityTopologyVersion(); + + if (!affMgr.primaryChanged(part, ver, endVer)) { + return TRUE; + } + + if (!affMgr.partitionLocalNode(part, endVer)) { + return FALSE; + } + + // Special case: late affinity assignment when primary changes to local node due to a node join. + // Specified partition is local, and near cache entries are valid for primary keys. 
+ return ver.topologyVersion() == endVer.topologyVersion() ? TRUE : FALSE; + } + + return super.processInStreamOutLong(type, reader); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/query/PlatformContinuousQuery.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/query/PlatformContinuousQuery.java index 2916da2e49cc24..4714760994d31a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/query/PlatformContinuousQuery.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/query/PlatformContinuousQuery.java @@ -36,10 +36,11 @@ public interface PlatformContinuousQuery extends CacheEntryUpdatedListener, Plat * @param timeInterval Time interval. * @param autoUnsubscribe Auto-unsubscribe flag. * @param initialQry Initial query. + * @param includeExpired Whether to include expired events. * @throws org.apache.ignite.IgniteCheckedException If failed. */ public void start(IgniteCacheProxy cache, boolean loc, int bufSize, long timeInterval, boolean autoUnsubscribe, - Query initialQry) throws IgniteCheckedException; + Query initialQry, boolean includeExpired) throws IgniteCheckedException; /** * Close continuous query. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/query/PlatformContinuousQueryImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/query/PlatformContinuousQueryImpl.java index f9269a4f7957a8..3adc296457e116 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/query/PlatformContinuousQueryImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/query/PlatformContinuousQueryImpl.java @@ -128,11 +128,12 @@ private static CacheEntryEventFilter getJavaFilter(Object filter, GridKernalCont * @param bufSize Buffer size. 
* @param timeInterval Time interval. * @param autoUnsubscribe Auto-unsubscribe flag. + * @param includeExpired Whether to include expired events. * @param initialQry Initial query. */ @SuppressWarnings("unchecked") @Override public void start(IgniteCacheProxy cache, boolean loc, int bufSize, long timeInterval, - boolean autoUnsubscribe, Query initialQry) throws IgniteCheckedException { + boolean autoUnsubscribe, Query initialQry, boolean includeExpired) throws IgniteCheckedException { lock.writeLock().lock(); try { @@ -148,6 +149,7 @@ private static CacheEntryEventFilter getJavaFilter(Object filter, GridKernalCont qry.setTimeInterval(timeInterval); qry.setAutoUnsubscribe(autoUnsubscribe); qry.setInitialQuery(initialQry); + qry.setIncludeExpired(includeExpired); cursor = cache.query(qry.setLocal(loc)); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientBitmaskFeature.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientBitmaskFeature.java index f53c0a9b452e57..91a4fda44b961a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientBitmaskFeature.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientBitmaskFeature.java @@ -39,11 +39,14 @@ public enum ClientBitmaskFeature implements ThinProtocolFeature { /** Cluster groups. */ CLUSTER_GROUPS(4), - /** Service invocation. */ + /** Service invocation. This flag is not necessary and exists for legacy reasons. */ SERVICE_INVOKE(5), /** Feature for use default query timeout if the qry timeout isn't set explicitly. 
*/ - DEFAULT_QRY_TIMEOUT(6); + DEFAULT_QRY_TIMEOUT(6), + + /** Additional SqlFieldsQuery properties: partitions, updateBatchSize */ + QRY_PARTITIONS_BATCH_SIZE(7); /** */ private static final EnumSet ALL_FEATURES_AS_ENUM_SET = diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheSqlFieldsQueryRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheSqlFieldsQueryRequest.java index 81391c53b14ffa..505499e015f1c7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheSqlFieldsQueryRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheSqlFieldsQueryRequest.java @@ -48,6 +48,12 @@ public class ClientCacheSqlFieldsQueryRequest extends ClientCacheDataRequest imp /** Include field names flag. */ private final boolean includeFieldNames; + /** Partitions. */ + private final int[] partitions; + + /** Update batch size. */ + private final Integer updateBatchSize; + /** * Ctor. * @@ -95,10 +101,33 @@ public ClientCacheSqlFieldsQueryRequest(BinaryRawReaderEx reader, QueryUtils.withQueryTimeout(qry, timeout, TimeUnit.MILLISECONDS); this.qry = qry; + + if (protocolCtx.isFeatureSupported(ClientBitmaskFeature.QRY_PARTITIONS_BATCH_SIZE)) { + // Set qry values in process method so that validation errors are reported to the client. 
+ int partCnt = reader.readInt(); + + if (partCnt >= 0) { + partitions = new int[partCnt]; + + for (int i = 0; i < partCnt; i++) + partitions[i] = reader.readInt(); + } else + partitions = null; + + updateBatchSize = reader.readInt(); + } else { + partitions = null; + updateBatchSize = null; + } } /** {@inheritDoc} */ @Override public ClientResponse process(ClientConnectionContext ctx) { + qry.setPartitions(partitions); + + if (updateBatchSize != null) + qry.setUpdateBatchSize(updateBatchSize); + ctx.incrementCursors(); try { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java index 9cbef1264f559b..750c3511a223e3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java @@ -111,6 +111,7 @@ import org.apache.ignite.ssl.SslContextFactory; import org.apache.ignite.transactions.TransactionConcurrency; import org.apache.ignite.transactions.TransactionIsolation; +import org.apache.ignite.util.AttributeNodeFilter; /** * Configuration utils. @@ -246,6 +247,8 @@ public static CacheConfiguration readCacheConfiguration(BinaryRawReaderEx in) { ccfg.setAffinity(readAffinityFunction(in)); ccfg.setExpiryPolicyFactory(readExpiryPolicyFactory(in)); + ccfg.setNodeFilter(readAttributeNodeFilter(in)); + int keyCnt = in.readInt(); if (keyCnt > 0) { @@ -346,6 +349,48 @@ public static PlatformCacheConfiguration readPlatformCacheConfiguration(BinaryRa .setKeepBinary(in.readBoolean()); } + /** + * Reads the node filter config. + * + * @param in Stream. + * @return AttributeNodeFilter. 
+ */ + public static AttributeNodeFilter readAttributeNodeFilter(BinaryRawReader in) { + if (!in.readBoolean()) + return null; + + int cnt = in.readInt(); + + Map attrs = new HashMap<>(cnt); + for (int i = 0; i < cnt; i++) + attrs.put(in.readString(), in.readObject()); + + return new AttributeNodeFilter(attrs); + } + + /** + * Writes the node filter. + * @param out Stream. + * @param nodeFilter IgnitePredicate. + */ + private static void writeAttributeNodeFilter(BinaryRawWriter out, IgnitePredicate nodeFilter) { + if (!(nodeFilter instanceof AttributeNodeFilter)) { + out.writeBoolean(false); + return; + } + + out.writeBoolean(true); + + Map attrs = ((AttributeNodeFilter) nodeFilter).getAttrs(); + + out.writeInt(attrs.size()); + + for (Map.Entry entry : attrs.entrySet()) { + out.writeString(entry.getKey()); + out.writeObject(entry.getValue()); + } + } + /** * Reads the eviction policy. * @@ -417,7 +462,7 @@ public static PlatformAffinityFunction readAffinityFunction(BinaryRawReaderEx in } /** - * Reads the near config. + * Writes the near config. * * @param out Stream. * @param cfg NearCacheConfiguration. 
@@ -1084,6 +1129,8 @@ public static void writeCacheConfiguration(BinaryRawWriter writer, CacheConfigur writeAffinityFunction(writer, ccfg.getAffinity()); writeExpiryPolicyFactory(writer, ccfg.getExpiryPolicyFactory()); + writeAttributeNodeFilter(writer, ccfg.getNodeFilter()); + CacheKeyConfiguration[] keys = ccfg.getKeyConfiguration(); if (keys != null) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java index d282f4d8b4d1f7..236de0c4e620f5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java @@ -33,16 +33,21 @@ import org.apache.ignite.internal.managers.IgniteMBeansManager; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.GridCacheContextInfo; import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot; import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.persistence.RootPage; +import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointTimeoutLock; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.LinkMap; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcParameterMeta; import org.apache.ignite.internal.processors.query.schema.SchemaIndexCacheVisitor; import 
org.apache.ignite.internal.util.GridAtomicLong; import org.apache.ignite.internal.util.GridSpinBusyLock; +import org.apache.ignite.internal.util.collection.IntMap; import org.apache.ignite.internal.util.lang.GridCloseableIterator; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.lang.IgniteFuture; @@ -487,4 +492,23 @@ default long indexSize(String schemaName, String tblName, String idxName) throws default Map secondaryIndexesInlineSize() { return Collections.emptyMap(); } + + /** + * Defragment index partition. + * + * @param grpCtx Old group context. + * @param newCtx New group context. + * @param partPageMem Partition page memory. + * @param mappingByPart Mapping page memory. + * @param cpLock Defragmentation checkpoint read lock. + * + * @throws IgniteCheckedException If failed. + */ + void defragment( + CacheGroupContext grpCtx, + CacheGroupContext newCtx, + PageMemoryEx partPageMem, + IntMap mappingByPart, + CheckpointTimeoutLock cpLock + ) throws IgniteCheckedException; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestCommand.java index c97c26a4223fb0..16dc5f03972010 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestCommand.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestCommand.java @@ -223,7 +223,10 @@ public enum GridRestCommand { NODE_STATE_BEFORE_START("nodestatebeforestart"), /** Warm-up. */ - WARM_UP("warmup"); + WARM_UP("warmup"), + + /** probe. */ + PROBE("probe"); /** Enum values. 
*/ private static final GridRestCommand[] VALS = values(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestProcessor.java index 358f75b23e5f37..21c5eb35505ea1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestProcessor.java @@ -56,6 +56,7 @@ import org.apache.ignite.internal.processors.rest.handlers.datastructures.DataStructuresCommandHandler; import org.apache.ignite.internal.processors.rest.handlers.log.GridLogCommandHandler; import org.apache.ignite.internal.processors.rest.handlers.memory.MemoryMetricsCommandHandler; +import org.apache.ignite.internal.processors.rest.handlers.probe.GridProbeCommandHandler; import org.apache.ignite.internal.processors.rest.handlers.query.QueryCommandHandler; import org.apache.ignite.internal.processors.rest.handlers.task.GridTaskCommandHandler; import org.apache.ignite.internal.processors.rest.handlers.top.GridTopologyCommandHandler; @@ -557,6 +558,7 @@ public GridRestProcessor(GridKernalContext ctx) { addHandler(new GridBaselineCommandHandler(ctx)); addHandler(new MemoryMetricsCommandHandler(ctx)); addHandler(new NodeStateBeforeStartCommandHandler(ctx)); + addHandler(new GridProbeCommandHandler(ctx)); // Start protocols. 
startTcpProtocol(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestResponse.java index 0c3ac0499c954c..adefd9e641b320 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestResponse.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestResponse.java @@ -51,6 +51,9 @@ public class GridRestResponse implements Externalizable { /** Success status. */ private int successStatus = STATUS_SUCCESS; + /** HTTP 503 Service Unavailable status code. */ + public static final int SERVICE_UNAVAILABLE = 503; + /** Session token. */ private byte[] sesTokBytes; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/handlers/probe/GridProbeCommandHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/handlers/probe/GridProbeCommandHandler.java new file mode 100644 index 00000000000000..844dd5b9ffa30f --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/handlers/probe/GridProbeCommandHandler.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.ignite.internal.processors.rest.handlers.probe; + +import java.util.Collection; +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.IgnitionEx; +import org.apache.ignite.internal.processors.rest.GridRestCommand; +import org.apache.ignite.internal.processors.rest.GridRestResponse; +import org.apache.ignite.internal.processors.rest.handlers.GridRestCommandHandlerAdapter; +import org.apache.ignite.internal.processors.rest.request.GridRestRequest; +import org.apache.ignite.internal.util.future.GridFinishedFuture; +import org.apache.ignite.internal.util.typedef.internal.U; + +import static org.apache.ignite.internal.processors.rest.GridRestCommand.PROBE; + +/** + * Handler for {@link GridRestCommand#PROBE}. + */ +public class GridProbeCommandHandler extends GridRestCommandHandlerAdapter { + /** + * @param ctx Context. + */ + public GridProbeCommandHandler(GridKernalContext ctx) { + super(ctx); + } + + /** Supported commands. */ + private static final Collection SUPPORTED_COMMANDS = U.sealList(PROBE); + + /** {@inheritDoc} */ + @Override public Collection supportedCommands() { + return SUPPORTED_COMMANDS; + } + + /** {@inheritDoc} */ + @Override public IgniteInternalFuture handleAsync(GridRestRequest req) { + assert req != null; + + assert SUPPORTED_COMMANDS.contains(req.command()); + + switch (req.command()) { + case PROBE: { + if (log.isDebugEnabled()) + log.debug("probe command handler invoked."); + + return new GridFinishedFuture<>(IgnitionEx.hasKernalStarted(ctx.igniteInstanceName()) ? 
new GridRestResponse("grid has started") : new GridRestResponse(GridRestResponse.SERVICE_UNAVAILABLE, "grid has not started")); + + } + } + + return new GridFinishedFuture<>(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskWorker.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskWorker.java index ecb48e9b8e6525..1b74977c161ed5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskWorker.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskWorker.java @@ -52,7 +52,6 @@ import org.apache.ignite.events.JobEvent; import org.apache.ignite.events.TaskEvent; import org.apache.ignite.internal.ComputeTaskInternalFuture; -import org.apache.ignite.internal.GridInternalException; import org.apache.ignite.internal.GridJobCancelRequest; import org.apache.ignite.internal.GridJobExecuteRequest; import org.apache.ignite.internal.GridJobExecuteResponse; @@ -79,7 +78,6 @@ import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.internal.util.worker.GridWorker; -import org.apache.ignite.internal.visor.util.VisorClusterGroupEmptyException; import org.apache.ignite.lang.IgniteInClosure; import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.marshaller.Marshaller; @@ -423,7 +421,8 @@ public boolean isInternal() { return; } - U.warn(log, "Task has timed out: " + ses); + if (log.isDebugEnabled()) + U.warn(log, "Task has timed out: " + ses); recordTaskEvent(EVT_TASK_TIMEDOUT, "Task has timed out."); @@ -541,13 +540,14 @@ private void initializeSpis() { processDelayedResponses(); } catch (ClusterGroupEmptyCheckedException e) { - U.warn(log, "Failed to map task jobs to nodes (topology projection is empty): " + ses); + if (log.isDebugEnabled()) + U.warn(log, "Failed to map task jobs to nodes (topology projection is empty): " + ses); 
finishTask(null, e); } catch (IgniteException | IgniteCheckedException e) { if (!fut.isCancelled()) { - if (!(e instanceof VisorClusterGroupEmptyException)) + if (log.isDebugEnabled()) U.error(log, "Failed to map task jobs to nodes: " + ses, e); finishTask(null, e); @@ -560,7 +560,8 @@ else if (log.isDebugEnabled()) String errMsg = "Failed to map task jobs to nodes due to undeclared user exception" + " [cause=" + e.getMessage() + ", ses=" + ses + "]"; - U.error(log, errMsg, e); + if (log.isDebugEnabled()) + U.error(log, errMsg, e); finishTask(null, new ComputeUserUndeclaredException(errMsg, e)); @@ -842,7 +843,8 @@ void onResponse(GridJobExecuteResponse msg) { ctx.resource().invokeAnnotated(dep, jobRes.getJob(), ComputeJobAfterSend.class); } catch (IgniteCheckedException e) { - U.error(log, "Error deserializing job response: " + res, e); + if (log.isDebugEnabled()) + U.error(log, "Error deserializing job response: " + res, e); finishTask(null, e); } @@ -975,7 +977,8 @@ else if (plc != null && !waitForAffTop && !retry) { } } catch (IgniteCheckedException e) { - U.error(log, "Failed to obtain topology [ses=" + ses + ", err=" + e + ']', e); + if (log.isDebugEnabled()) + U.error(log, "Failed to obtain topology [ses=" + ses + ", err=" + e + ']', e); finishTask(null, e); @@ -1033,7 +1036,8 @@ private void sendRetryRequest(final long waitms, final GridJobResultImpl jRes, f sendRequest(jRes); } catch (Exception e) { - U.error(log, "Failed to re-map job or retry request [ses=" + ses + "]", e); + if (log.isDebugEnabled()) + U.error(log, "Failed to re-map job or retry request [ses=" + ses + "]", e); finishTask(null, e); } @@ -1080,13 +1084,7 @@ private void sendRetryRequest(final long waitms, final GridJobResultImpl jRes, f return plc; } catch (IgniteException e) { - if (X.hasCause(e, GridInternalException.class)) { - // Print internal exceptions only if debug is enabled. 
- if (log.isDebugEnabled()) - U.error(log, "Failed to obtain remote job result policy for result from " + - "ComputeTask.result(..) method (will fail the whole task): " + jobRes, e); - } - else if (X.hasCause(e, ComputeJobFailoverException.class)) { + if (X.hasCause(e, ComputeJobFailoverException.class)) { IgniteCheckedException e0 = new IgniteCheckedException(" Job was not failed over because " + "ComputeJobResultPolicy.FAILOVER was not returned from " + "ComputeTask.result(...) method for job result with ComputeJobFailoverException.", e); @@ -1097,13 +1095,16 @@ else if (X.hasCause(e, ComputeJobFailoverException.class)) { } else if (X.hasCause(e, GridServiceNotFoundException.class) || X.hasCause(e, ClusterTopologyCheckedException.class)) { - // Should be throttled, because GridServiceProxy continuously retry getting service. - LT.error(log, e, "Failed to obtain remote job result policy for result from " + - "ComputeTask.result(..) method (will fail the whole task): " + jobRes); + if (log.isDebugEnabled()) { + // Should be throttled, because GridServiceProxy continuously retry getting service. + LT.error(log, e, "Failed to obtain remote job result policy for result from " + + "ComputeTask.result(..) method (will fail the whole task): " + jobRes); + } } - else + else if (log.isDebugEnabled()) { U.error(log, "Failed to obtain remote job result policy for result from " + "ComputeTask.result(..) method (will fail the whole task): " + jobRes, e); + } finishTask(null, e); @@ -1114,7 +1115,8 @@ else if (X.hasCause(e, GridServiceNotFoundException.class) || "ComputeTask.result(..) 
method due to undeclared user exception " + "(will fail the whole task): " + jobRes; - U.error(log, errMsg, e); + if (log.isDebugEnabled()) + U.error(log, errMsg, e); Throwable tmp = new ComputeUserUndeclaredException(errMsg, e); @@ -1163,12 +1165,14 @@ private void reduce(final List results) { recordTaskEvent(EVT_TASK_REDUCED, "Task reduced."); } catch (ClusterTopologyCheckedException e) { - U.warn(log, "Failed to reduce job results for task (any nodes from task topology left grid?): " + task); + if (log.isDebugEnabled()) + U.warn(log, "Failed to reduce job results for task (any nodes from task topology left grid?): " + task); userE = e; } catch (IgniteCheckedException e) { - U.error(log, "Failed to reduce job results for task: " + task, e); + if (log.isDebugEnabled()) + U.error(log, "Failed to reduce job results for task: " + task, e); userE = e; } @@ -1177,7 +1181,8 @@ private void reduce(final List results) { String errMsg = "Failed to reduce job results due to undeclared user exception [task=" + task + ", err=" + e + ']'; - U.error(log, errMsg, e); + if (log.isDebugEnabled()) + U.error(log, errMsg, e); userE = new ComputeUserUndeclaredException(errMsg, e); @@ -1215,7 +1220,8 @@ private boolean failover( String errMsg = "Failed to failover job due to undeclared user exception [job=" + jobRes.getJob() + ", err=" + e + ']'; - U.error(log, errMsg, e); + if (log.isDebugEnabled()) + U.error(log, errMsg, e); finishTask(null, new ComputeUserUndeclaredException(errMsg, e)); @@ -1356,9 +1362,11 @@ private void sendRequest(ComputeJobResult res) { // that we make this check because we cannot count on exception being // thrown in case of send failure. 
if (curNode == null) { - U.warn(log, "Failed to send job request because remote node left grid (if fail-over is enabled, " + - "will attempt fail-over to another node) [node=" + node + ", taskName=" + ses.getTaskName() + - ", taskSesId=" + ses.getId() + ", jobSesId=" + res.getJobContext().getJobId() + ']'); + if (log.isDebugEnabled()) { + U.warn(log, "Failed to send job request because remote node left grid (if fail-over is enabled, " + + "will attempt fail-over to another node) [node=" + node + ", taskName=" + ses.getTaskName() + + ", taskSesId=" + ses.getId() + ", jobSesId=" + res.getJobContext().getJobId() + ']'); + } ctx.resource().invokeAnnotated(dep, res.getJob(), ComputeJobAfterSend.class); @@ -1461,13 +1469,15 @@ private void sendRequest(ComputeJobResult res) { // Avoid stack trace if node has left grid. if (deadNode) { - U.warn(log, "Failed to send job request because remote node left grid (if failover is enabled, " + - "will attempt fail-over to another node) [node=" + node + ", taskName=" + ses.getTaskName() + - ", taskSesId=" + ses.getId() + ", jobSesId=" + res.getJobContext().getJobId() + ']'); + if (log.isDebugEnabled()) { + U.warn(log, "Failed to send job request because remote node left grid (if failover is enabled, " + + "will attempt fail-over to another node) [node=" + node + ", taskName=" + ses.getTaskName() + + ", taskSesId=" + ses.getId() + ", jobSesId=" + res.getJobContext().getJobId() + ']'); + } fakeErr = new ClusterTopologyException("Failed to send job due to node failure: " + node, e); } - else + else if (log.isDebugEnabled()) U.error(log, "Failed to send job request: " + req, e); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/BasicRateLimiter.java b/modules/core/src/main/java/org/apache/ignite/internal/util/BasicRateLimiter.java new file mode 100644 index 00000000000000..195264e6b1b713 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/util/BasicRateLimiter.java @@ -0,0 +1,167 @@ +/* + * 
Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.util; + +import org.apache.ignite.internal.IgniteInterruptedCheckedException; +import org.apache.ignite.internal.util.typedef.internal.A; + +import static java.lang.Math.max; +import static java.util.concurrent.TimeUnit.MICROSECONDS; +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static java.util.concurrent.TimeUnit.SECONDS; + +/** + * The simplified version of Google Guava smooth rate limiter.

    + * + * The primary feature of a rate limiter is its "stable rate", the maximum rate that it should + * allow at normal conditions. This is enforced by "throttling" incoming requests as needed, i.e. + * compute, for an incoming request, the appropriate throttle time, and make the calling thread + * wait as much.

    + * + * The simplest way to maintain a rate of QPS is to keep the timestamp of the last granted + * request, and ensure that (1/QPS) seconds have elapsed since then. For example, for a rate of + * QPS=5 (5 tokens per second), if we ensure that a request isn't granted earlier than 200ms after + * the last one, then we achieve the intended rate. If a request comes and the last request was + * granted only 100ms ago, then we wait for another 100ms. At this rate, serving 15 fresh permits + * (i.e. for an acquire(15) request) naturally takes 3 seconds.

    + * + * It is important to realize that such a limiter has a very superficial memory of the past: + * it only remembers the last request. if the limiter was unused for a long period of + * time, then a request arrived and was immediately granted? This limiter would immediately + * forget about that past underutilization. + */ +public class BasicRateLimiter { + /** Start timestamp. */ + private final long startTime = System.nanoTime(); + + /** Synchronization mutex. */ + private final Object mux = new Object(); + + /** + * The interval between two unit requests, at our stable rate. E.g., a stable rate of 5 permits + * per second has a stable interval of 200ms. + */ + private double stableIntervalMicros; + + /** + * The time when the next request (no matter its size) will be granted. After granting a request, + * this is pushed further in the future. Large requests push this further than small requests. + */ + private long nextFreeTicketMicros; + + /** + * The flag indicates that the rate is not limited. + */ + private volatile boolean unlimited; + + /** + * @param permitsPerSecond Estimated number of permits per second. + */ + public BasicRateLimiter(double permitsPerSecond) { + setRate(permitsPerSecond); + } + + /** + * Updates the stable rate. + * + * @param permitsPerSecond The new stable rate of this {@code RateLimiter}, set {@code 0} for unlimited rate. + * @throws IllegalArgumentException If {@code permitsPerSecond} is negative or zero. + */ + public void setRate(double permitsPerSecond) { + A.ensure(permitsPerSecond >= 0, "Requested permits (" + permitsPerSecond + ") must be non-negative."); + + if (unlimited = (permitsPerSecond == 0)) + return; + + synchronized (mux) { + resync(); + + stableIntervalMicros = SECONDS.toMicros(1L) / permitsPerSecond; + } + } + + /** + * @return The stable rate as {@code permits per seconds} ({@code 0} means that the rate is unlimited). 
+ */ + public double getRate() { + if (unlimited) + return 0; + + synchronized (mux) { + return SECONDS.toMicros(1L) / stableIntervalMicros; + } + } + + /** + * Acquires the given number of permits from this {@code RateLimiter}, blocking until the request + * can be granted. Tells the amount of time slept, if any. + * + * @param permits The number of permits to acquire. + * @throws IllegalArgumentException If the requested number of permits is negative or zero. + */ + public void acquire(int permits) throws IgniteInterruptedCheckedException { + if (unlimited) + return; + + long microsToWait = reserve(permits); + + try { + MICROSECONDS.sleep(microsToWait); + } + catch (InterruptedException e) { + Thread.currentThread().interrupt(); + + throw new IgniteInterruptedCheckedException(e); + } + } + + /** + * Reserves the given number of permits for future use. + * + * @param permits The number of permits. + * @return Time in microseconds to wait until the resource can be acquired, never negative. + */ + private long reserve(int permits) { + A.ensure(permits > 0, "Requested permits (" + permits + ") must be positive"); + + synchronized (mux) { + long nowMicros = resync(); + + long momentAvailable = nextFreeTicketMicros; + + nextFreeTicketMicros = momentAvailable + (long)(permits * stableIntervalMicros); + + return max(momentAvailable - nowMicros, 0); + } + } + + /** + * Updates {@code nextFreeTicketMicros} based on the current time. + * + * @return Time passed (since start) in microseconds. 
+ */ + private long resync() { + long passed = MICROSECONDS.convert(System.nanoTime() - startTime, NANOSECONDS); + + // if nextFreeTicket is in the past, resync to now + if (passed > nextFreeTicketMicros) + nextFreeTicketMicros = passed; + + return passed; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java index 6bdc6fab34138f..b3b644d6cafe35 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java @@ -212,6 +212,7 @@ import org.apache.ignite.internal.events.DiscoveryCustomEvent; import org.apache.ignite.internal.managers.communication.GridIoManager; import org.apache.ignite.internal.managers.communication.GridIoPolicy; +import org.apache.ignite.internal.managers.deployment.GridDeployment; import org.apache.ignite.internal.managers.deployment.GridDeploymentInfo; import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; import org.apache.ignite.internal.mxbean.IgniteStandardMXBean; @@ -219,6 +220,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheAttributes; import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; +import org.apache.ignite.internal.processors.cache.IgnitePeerToPeerClassLoadingException; import org.apache.ignite.internal.processors.cluster.BaselineTopology; import org.apache.ignite.internal.transactions.IgniteTxAlreadyCompletedCheckedException; import org.apache.ignite.internal.transactions.IgniteTxDuplicateKeyCheckedException; @@ -1503,22 +1505,6 @@ private static void logMessage(@Nullable IgniteLogger log, String msg, boolean i warn(log, msg); } - /** - * Dumps stack trace of the thread to the given log at warning level. - * - * @param t Thread to be dumped. - * @param log Logger. 
- */ - public static void dumpThread(Thread t, @Nullable IgniteLogger log) { - ThreadMXBean mxBean = ManagementFactory.getThreadMXBean(); - - GridStringBuilder sb = new GridStringBuilder(); - - printThreadInfo(mxBean.getThreadInfo(t.getId()), sb, Collections.emptySet()); - - warn(log, sb.toString()); - } - /** * Get deadlocks from the thread bean. * @param mxBean the bean @@ -7611,6 +7597,72 @@ public static boolean p2pLoader(ClassLoader ldr) { return ldr instanceof GridDeploymentInfo; } + /** + * Returns Deployment class loader id if method was invoked in the job context + * (it may be the context of a cache's operation which was triggered by the distributed job) + * or {@code null} if no context was found or Deployment is switched off. + * + * @param ctx Kernal context. + * @return Deployment class loader id or {@code null}. + */ + public static IgniteUuid contextDeploymentClassLoaderId(GridKernalContext ctx) { + if (ctx == null || !ctx.deploy().enabled()) + return null; + + if (ctx.job() != null && ctx.job().currentDeployment() != null) + return ctx.job().currentDeployment().classLoaderId(); + + if (ctx.cache() != null && ctx.cache().context() != null) + return ctx.cache().context().deploy().locLoaderId(); + + return null; + } + + /** + * Gets that deployment class loader matching by the specific id, or {@code null} + * if the class loader was not found. + * + * @param ctx Kernal context. + * @param ldrId Class loader id. + * @return Deployment class loader or {@code null}. + */ + public static ClassLoader deploymentClassLoader(GridKernalContext ctx, IgniteUuid ldrId) { + if (ldrId == null || !ctx.deploy().enabled()) + return null; + + GridDeployment dep = ctx.deploy().getDeployment(ldrId); + + return dep == null ? null : dep.classLoader(); + } + + /** + * Restores a deployment context for cache deployment. + * + * @param ctx Kernal context. + * @param ldrId Class loader id. 
+ */ + public static void restoreDeploymentContext(GridKernalContext ctx, IgniteUuid ldrId) { + if (ctx.deploy().enabled() && ldrId != null) { + GridDeployment dep = ctx.deploy().getDeployment(ldrId); + + if (dep != null) { + try { + ctx.cache().context().deploy().p2pContext( + dep.classLoaderId().globalId(), + dep.classLoaderId(), + dep.userVersion(), + dep.deployMode(), + dep.participants() + ); + } + catch (IgnitePeerToPeerClassLoadingException e) { + ctx.log(ctx.cache().context().deploy().getClass()) + .error("Could not restore P2P context [ldrId=" + ldrId + ']', e); + } + } + } + } + /** * Formats passed date with specified pattern. * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntHashMap.java b/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntHashMap.java index ada52760667f9a..21605907fa36e2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntHashMap.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntHashMap.java @@ -180,6 +180,32 @@ public IntHashMap(int cap) { return size() == 0; } + /** {@inheritDoc} */ + @Override public int[] keys() { + int[] keys = new int[size]; + + int idx = 0; + + for (Entry entry : entries) + if (entry != null) + keys[idx++] = entry.key; + + return keys; + } + + /** {@inheritDoc} */ + @Override public V[] values() { + V[] vals = (V[])new Object[size]; + + int idx = 0; + + for (Entry entry : entries) + if (entry != null) + vals[idx++] = entry.val; + + return vals; + } + /** {@inheritDoc} */ @Override public boolean containsKey(int key) { return find(key) >= 0; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntMap.java b/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntMap.java index f1bbe51e555460..c60600399c7506 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntMap.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntMap.java @@ -82,4 +82,10 @@ public interface EntryConsumer { * Returns true if this map contains no key-value mappings. */ boolean isEmpty(); + + /** Returns array of keys. */ + int[] keys(); + + /** Returns array of values. */ + V[] values(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntRWHashMap.java b/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntRWHashMap.java index 8d379bb95eed20..52cffaa818f807 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntRWHashMap.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntRWHashMap.java @@ -106,6 +106,28 @@ public IntRWHashMap() { return size() == 0; } + + /** {@inheritDoc} */ + @Override public int[] keys() { + lock.readLock().lock(); + try { + return delegate.keys(); + } + finally { + lock.readLock().unlock(); + } + } + + /** {@inheritDoc} */ + @Override public V[] values() { + lock.readLock().lock(); + try { + return delegate.values(); + } + finally { + lock.readLock().unlock(); + } + } + /** {@inheritDoc} */ @Override public boolean containsKey(int key) { lock.readLock().lock(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/distributed/DistributedProcess.java b/modules/core/src/main/java/org/apache/ignite/internal/util/distributed/DistributedProcess.java index fe0fee5a99b112..65394537302b65 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/util/distributed/DistributedProcess.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/util/distributed/DistributedProcess.java @@ -430,6 +430,16 @@ public enum DistributedProcessType { * * @see IgniteSnapshotManager */ - END_SNAPSHOT + END_SNAPSHOT, + + /** + * Cache group encryption key change prepare phase. + */ + CACHE_GROUP_KEY_CHANGE_PREPARE, + + /** + * Cache group encryption key change finish phase.
+ */ + CACHE_GROUP_KEY_CHANGE_FINISH } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/tostring/GridToStringBuilder.java b/modules/core/src/main/java/org/apache/ignite/internal/util/tostring/GridToStringBuilder.java index 358313824cff95..a526764c759b0a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/util/tostring/GridToStringBuilder.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/util/tostring/GridToStringBuilder.java @@ -1678,6 +1678,52 @@ public static String toString(String str, } } + /** + * Produces uniformed output of string with context properties. + * + * @param str Output prefix or {@code null} if empty. + * @param triplets Triplets {@code {name, value, sensitivity}}. + * @return String presentation. + */ + public static String toString(String str, Object... triplets) { + if (triplets.length % 3 != 0) + throw new IllegalArgumentException("Array length must be a multiple of 3"); + + int propCnt = triplets.length / 3; + + Object[] propNames = new Object[propCnt]; + Object[] propVals = new Object[propCnt]; + boolean[] propSens = new boolean[propCnt]; + + for (int i = 0; i < propCnt; i++) { + Object name = triplets[i * 3]; + + assert name != null; + + propNames[i] = name; + + propVals[i] = triplets[i * 3 + 1]; + + Object sens = triplets[i * 3 + 2]; + + assert sens instanceof Boolean; + + propSens[i] = (Boolean)sens; + } + + SBLimitedLength sb = threadLocSB.get(); + + boolean newStr = sb.length() == 0; + + try { + return toStringImpl(str, sb, propNames, propVals, propSens, propCnt); + } + finally { + if (newStr) + sb.reset(); + } + } + /** + * Creates an uniformed string presentation for the binary-like object.
* diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/baseline/VisorBaselineNode.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/baseline/VisorBaselineNode.java index 9f4b39f2db3f02..b4ee5d1a81cfb6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/baseline/VisorBaselineNode.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/baseline/VisorBaselineNode.java @@ -20,12 +20,17 @@ import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; +import java.net.InetAddress; +import java.util.Collection; +import java.util.Collections; import java.util.Map; import org.apache.ignite.cluster.BaselineNode; +import org.apache.ignite.internal.dto.IgniteDataTransferObject; import org.apache.ignite.internal.managers.discovery.IgniteClusterNode; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.internal.visor.VisorDataTransferObject; +import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; /** @@ -44,6 +49,12 @@ public class VisorBaselineNode extends VisorDataTransferObject { /** */ private @Nullable Long order; + /** + * Resolved list of (ip, hostname) pairs + * (if ip has no resolved host, hostname will be the string representation of ip). + */ + private @NotNull Collection addrs = Collections.emptyList(); + /** * Default constructor. */ @@ -55,19 +66,22 @@ public VisorBaselineNode() { * Create data transfer object for baseline node. * * @param node Baseline node. + * @param resolvedInetAddrs List of resolved ip, hostnames pairs. 
*/ - public VisorBaselineNode(BaselineNode node) { + public VisorBaselineNode(BaselineNode node, @NotNull Collection resolvedInetAddrs) { consistentId = String.valueOf(node.consistentId()); attrs = node.attributes(); //Baseline topology returns instances of DetachedClusternode - if (node instanceof IgniteClusterNode) + if (node instanceof IgniteClusterNode) { order = ((IgniteClusterNode)node).order(); + addrs = resolvedInetAddrs; + } } /** {@inheritDoc} */ @Override public byte getProtocolVersion() { - return V2; + return V3; } /** @@ -91,11 +105,20 @@ public Map getAttributes() { return order; } + /** + * + * @return Collection with resolved pairs ip->hostname + */ + public @NotNull Collection getAddrs() { + return addrs; + } + /** {@inheritDoc} */ @Override protected void writeExternalData(ObjectOutput out) throws IOException { U.writeString(out, consistentId); U.writeMap(out, attrs); out.writeObject(order); + U.writeCollection(out, addrs); } /** {@inheritDoc} */ @@ -105,10 +128,72 @@ public Map getAttributes() { if (protoVer >= V2) order = (Long)in.readObject(); + + if (protoVer >= V3) { + Collection inputAddrs = U.readCollection(in); + + if (inputAddrs != null) + addrs = inputAddrs; + } } /** {@inheritDoc} */ @Override public String toString() { return S.toString(VisorBaselineNode.class, this); } + + /** + * Simple data class for storing (hostname, address) pairs + */ + public static class ResolvedAddresses extends IgniteDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** */ + private String hostname; + + /** Textual representation of IP address. */ + private String addr; + + /** + * @param inetAddr Inet address. + */ + ResolvedAddresses(InetAddress inetAddr) { + this.hostname = inetAddr.getHostName(); + this.addr = inetAddr.getHostAddress(); + } + + /** + * Default constructor. 
+ */ + public ResolvedAddresses() { + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + U.writeString(out, hostname); + U.writeString(out, addr); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte protoVer, ObjectInput in) + throws IOException, ClassNotFoundException { + hostname = U.readString(in); + addr = U.readString(in); + } + + /** + * @return Hostname. + */ + public String hostname() { + return hostname; + } + + /** + * @return Textual representation of IP address. + */ + public String address() { + return addr; + } + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/baseline/VisorBaselineTaskResult.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/baseline/VisorBaselineTaskResult.java index 769100828217ab..b283e6bec0b185 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/baseline/VisorBaselineTaskResult.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/baseline/VisorBaselineTaskResult.java @@ -20,10 +20,20 @@ import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; import java.util.Map; +import java.util.Set; import java.util.TreeMap; +import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.cluster.BaselineNode; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.internal.managers.discovery.IgniteClusterNode; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; @@ -75,7 +85,7 @@ private static Map toMap(Collection map = new TreeMap<>(); for (BaselineNode node : nodes) { - VisorBaselineNode dto = 
new VisorBaselineNode(node); + VisorBaselineNode dto = new VisorBaselineNode(node, Collections.emptyList()); map.put(dto.getConsistentId(), dto); } @@ -83,6 +93,80 @@ private static Map toMap(Collectionhostname pairs. + */ + private static Map toMapWithResolvedAddresses(Collection nodes) { + if (F.isEmpty(nodes)) + return null; + + Map map = new TreeMap<>(); + + for (BaselineNode node : nodes) { + Collection addrs = new ArrayList<>(); + + if (node instanceof IgniteClusterNode) { + for (InetAddress inetAddress: resolveInetAddresses((ClusterNode)node)) + addrs.add(new VisorBaselineNode.ResolvedAddresses(inetAddress)); + } + + VisorBaselineNode dto = new VisorBaselineNode(node, addrs); + + map.put(dto.getConsistentId(), dto); + } + + return map; + } + + /** + * @return Resolved inet addresses of node + */ + private static Collection resolveInetAddresses(ClusterNode node) { + Set res = new HashSet<>(node.addresses().size()); + + Iterator hostNamesIt = node.hostNames().iterator(); + + for (String addr : node.addresses()) { + String hostName = hostNamesIt.hasNext() ? 
hostNamesIt.next() : null; + + InetAddress inetAddr = null; + + if (!F.isEmpty(hostName)) { + try { + if (IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_TEST_ENV)) { + // 127.0.0.1.hostname will be resolved to 127.0.0.1 + if (hostName.endsWith(".hostname")) { + String ipStr = hostName.substring(0, hostName.length() - ".hostname".length()); + inetAddr = InetAddress.getByAddress(hostName, InetAddress.getByName(ipStr).getAddress()); + } + } + else + inetAddr = InetAddress.getByName(hostName); + } + catch (UnknownHostException ignored) { + } + } + + if (inetAddr == null || inetAddr.isLoopbackAddress()) { + try { + if (IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_TEST_ENV)) + // 127.0.0.1 will be reverse-resolved to 127.0.0.1.hostname + inetAddr = InetAddress.getByAddress(addr + ".hostname", InetAddress.getByName(addr).getAddress()); + else + inetAddr = InetAddress.getByName(addr); + } + catch (UnknownHostException ignored) { + } + } + + if (inetAddr != null) + res.add(inetAddr); + } + + return res; + } + /** * Constructor. 
* @@ -104,7 +188,7 @@ public VisorBaselineTaskResult( this.active = active; this.topVer = topVer; this.baseline = toMap(baseline); - this.servers = toMap(servers); + this.servers = toMapWithResolvedAddresses(servers); this.autoAdjustSettings = autoAdjustSettings; this.remainingTimeToBaselineAdjust = remainingTimeToBaselineAdjust; this.baselineAdjustInProgress = baselineAdjustInProgress; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTask.java new file mode 100644 index 00000000000000..37d5096ef1c7ef --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTask.java @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.visor.encryption; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.compute.ComputeJobResult; +import org.apache.ignite.internal.dto.IgniteDataTransferObject; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.cache.IgniteInternalCache; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.internal.visor.VisorJob; +import org.apache.ignite.internal.visor.VisorMultiNodeTask; +import org.jetbrains.annotations.Nullable; + +/** + * Visor encrypted cache group multinode task. + * + * @param The type of the task result. + */ +public abstract class VisorCacheGroupEncryptionTask extends VisorMultiNodeTask, VisorCacheGroupEncryptionTask.VisorSingleFieldDto> +{ + /** {@inheritDoc} */ + @Nullable @Override protected VisorCacheGroupEncryptionTaskResult reduce0(List results) { + Map jobResults = new HashMap<>(); + Map exceptions = new HashMap<>(); + + for (ComputeJobResult res : results) { + UUID nodeId = res.getNode().id(); + + if (res.getException() != null) { + exceptions.put(nodeId, res.getException()); + + continue; + } + + VisorSingleFieldDto dtoRes = res.getData(); + + jobResults.put(nodeId, dtoRes.value()); + } + + return new VisorCacheGroupEncryptionTaskResult<>(jobResults, exceptions); + } + + /** */ + protected abstract static class VisorSingleFieldDto extends IgniteDataTransferObject { + /** Object value. */ + private T val; + + /** + * @return Object value. + */ + protected T value() { + return val; + } + + /** + * @param val Data object. + * @return {@code this} for chaining. + */ + protected VisorSingleFieldDto value(T val) { + this.val = val; + + return this; + } + } + + /** + * @param Type of job result. 
+ */ + protected abstract static class VisorReencryptionBaseJob + extends VisorJob> { + /** + * @param arg Job argument. + * @param debug Flag indicating whether debug information should be printed into node log. + */ + protected VisorReencryptionBaseJob(@Nullable VisorCacheGroupEncryptionTaskArg arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override protected VisorSingleFieldDto run(VisorCacheGroupEncryptionTaskArg arg) throws IgniteException { + try { + String grpName = arg.groupName(); + CacheGroupContext grp = ignite.context().cache().cacheGroup(CU.cacheId(grpName)); + + if (grp == null) { + IgniteInternalCache cache = ignite.context().cache().cache(grpName); + + if (cache == null) + throw new IgniteException("Cache group " + grpName + " not found."); + + grp = cache.context().group(); + + if (grp.sharedGroup()) { + throw new IgniteException("Cache or group \"" + grpName + "\" is a part of group \"" + + grp.name() + "\". Provide group name instead of cache name for shared groups."); + } + } + + return run0(grp); + } + catch (IgniteCheckedException e) { + throw new IgniteException(e); + } + } + + /** + * Executes internal logic of the job. + * + * @param grp Cache group. + * @return Result. + * @throws IgniteCheckedException In case of error. + */ + protected abstract VisorSingleFieldDto run0(CacheGroupContext grp) throws IgniteCheckedException; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTaskArg.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTaskArg.java new file mode 100644 index 00000000000000..ecea9ed0907093 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTaskArg.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.encryption; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.apache.ignite.internal.dto.IgniteDataTransferObject; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; + +/** + * Cache group aware task argument. + */ +public class VisorCacheGroupEncryptionTaskArg extends IgniteDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** Cache group name. */ + private String grpName; + + /** Default constructor. */ + public VisorCacheGroupEncryptionTaskArg() { + // No-op. + } + + /** + * @param grpName Cache group name. + */ + public VisorCacheGroupEncryptionTaskArg(String grpName) { + this.grpName = grpName; + } + + /** @return Cache group name. 
*/ + public String groupName() { + return grpName; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + U.writeString(out, grpName); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte ver, ObjectInput in) throws IOException, ClassNotFoundException { + grpName = U.readString(in); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(VisorCacheGroupEncryptionTaskArg.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTaskResult.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTaskResult.java new file mode 100644 index 00000000000000..f29a99b2957422 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTaskResult.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.visor.encryption; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Collections; +import java.util.Map; +import java.util.UUID; +import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.dto.IgniteDataTransferObject; +import org.apache.ignite.internal.util.tostring.GridToStringInclude; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; + +/** + * Multinode cache group encryption task result. + * + * @param Job result type. + */ +@SuppressWarnings("AssignmentOrReturnOfFieldWithMutableType") +public class VisorCacheGroupEncryptionTaskResult extends IgniteDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** Per node job result. */ + @GridToStringInclude + private Map results; + + /** Per node execution problems. */ + @GridToStringInclude + private Map exceptions; + + /** + * @param results Per node job result. + * @param exceptions Per node execution problems. + */ + public VisorCacheGroupEncryptionTaskResult(Map results, Map exceptions) { + this.results = results; + this.exceptions = exceptions; + } + + /** */ + public VisorCacheGroupEncryptionTaskResult() { + // No-op. + } + + /** @return Per node job result. */ + public Map results() { + return results == null ? Collections.emptyMap() : results; + } + + /** @return Per node execution problems. */ + public Map exceptions() { + return exceptions == null ? 
Collections.emptyMap() : exceptions; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + U.writeMap(out, results); + U.writeMap(out, exceptions); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte ver, ObjectInput in) throws IOException, ClassNotFoundException { + results = U.readMap(in); + exceptions = U.readMap(in); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(VisorCacheGroupEncryptionTaskResult.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorChangeCacheGroupKeyTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorChangeCacheGroupKeyTask.java new file mode 100644 index 00000000000000..d6659412f6427c --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorChangeCacheGroupKeyTask.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.visor.encryption; + +import java.util.Collection; +import java.util.Collections; +import org.apache.ignite.IgniteEncryption; +import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.visor.VisorJob; +import org.apache.ignite.internal.visor.VisorOneNodeTask; + +/** + * The task for changing the encryption key of the cache group. + * + * @see IgniteEncryption#changeCacheGroupKey(Collection) + */ +public class VisorChangeCacheGroupKeyTask extends VisorOneNodeTask { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override protected VisorJob job(VisorCacheGroupEncryptionTaskArg arg) { + return new VisorChangeCacheGroupKeyJob(arg, debug); + } + + /** The job for changing the encryption key of the cache group. */ + private static class VisorChangeCacheGroupKeyJob extends VisorJob { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** + * @param arg Job argument. + * @param debug Flag indicating whether debug information should be printed into node log. 
+ */ + protected VisorChangeCacheGroupKeyJob(VisorCacheGroupEncryptionTaskArg arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override protected Void run(VisorCacheGroupEncryptionTaskArg taskArg) throws IgniteException { + ignite.encryption().changeCacheGroupKey(Collections.singleton(taskArg.groupName())).get(); + + return null; + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorEncryptionKeyIdsTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorEncryptionKeyIdsTask.java new file mode 100644 index 00000000000000..ca5a25475fa90a --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorEncryptionKeyIdsTask.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.visor.encryption; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.List; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.task.GridInternal; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.visor.VisorJob; +import org.jetbrains.annotations.Nullable; + +/** + * Get current encryption key IDs of the cache group. + */ +@GridInternal +public class VisorEncryptionKeyIdsTask extends VisorCacheGroupEncryptionTask> { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override protected VisorJob>> job( + VisorCacheGroupEncryptionTaskArg arg) { + return new VisorEncryptionKeyIdsJob(arg, debug); + } + + /** The job for get current encryption key IDs of the cache group. */ + private static class VisorEncryptionKeyIdsJob extends VisorReencryptionBaseJob> { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** + * @param arg Job argument. + * @param debug Flag indicating whether debug information should be printed into node log. + */ + protected VisorEncryptionKeyIdsJob(@Nullable VisorCacheGroupEncryptionTaskArg arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override protected VisorSingleFieldDto> run0(CacheGroupContext grp) { + return new VisorEncryptionKeyIdsResult().value(ignite.context().encryption().groupKeyIds(grp.groupId())); + } + } + + /** */ + protected static class VisorEncryptionKeyIdsResult extends VisorSingleFieldDto> { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** */ + public VisorEncryptionKeyIdsResult() { + // No-op. 
+ } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + U.writeCollection(out, value()); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte ver, ObjectInput in) throws IOException, ClassNotFoundException { + value(U.readList(in)); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionRateTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionRateTask.java new file mode 100644 index 00000000000000..425fab52eb2e27 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionRateTask.java @@ -0,0 +1,133 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.visor.encryption; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import org.apache.ignite.IgniteException; +import org.apache.ignite.compute.ComputeJobResult; +import org.apache.ignite.internal.dto.IgniteDataTransferObject; +import org.apache.ignite.internal.processors.task.GridInternal; +import org.apache.ignite.internal.visor.VisorJob; +import org.apache.ignite.internal.visor.VisorMultiNodeTask; +import org.jetbrains.annotations.Nullable; + +/** + * View/change cache group re-encryption rate limit . + */ +@GridInternal +public class VisorReencryptionRateTask extends VisorMultiNodeTask, VisorReencryptionRateTask.ReencryptionRateJobResult> +{ + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override protected VisorJob job( + VisorReencryptionRateTaskArg arg) { + return new VisorReencryptionRateJob(arg, debug); + } + + /** {@inheritDoc} */ + @Nullable @Override protected VisorCacheGroupEncryptionTaskResult reduce0(List results) { + Map jobResults = new HashMap<>(); + Map exceptions = new HashMap<>(); + + for (ComputeJobResult res : results) { + UUID nodeId = res.getNode().id(); + + if (res.getException() != null) { + exceptions.put(nodeId, res.getException()); + + continue; + } + + ReencryptionRateJobResult dtoRes = res.getData(); + + jobResults.put(nodeId, dtoRes.limit()); + } + + return new VisorCacheGroupEncryptionTaskResult<>(jobResults, exceptions); + } + + /** The job for view/change cache group re-encryption rate limit. */ + private static class VisorReencryptionRateJob + extends VisorJob { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** + * @param arg Job argument. + * @param debug Flag indicating whether debug information should be printed into node log. 
+ */ + protected VisorReencryptionRateJob(VisorReencryptionRateTaskArg arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override protected ReencryptionRateJobResult run(VisorReencryptionRateTaskArg arg) throws IgniteException { + double prevRate = ignite.context().encryption().getReencryptionRate(); + + if (arg.rate() != null) + ignite.context().encryption().setReencryptionRate(arg.rate()); + + return new ReencryptionRateJobResult(prevRate); + } + } + + /** */ + protected static class ReencryptionRateJobResult extends IgniteDataTransferObject { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** Re-encryption rate limit. */ + private Double limit; + + /** */ + public ReencryptionRateJobResult() { + // No-op. + } + + /** */ + public ReencryptionRateJobResult(Double limit) { + this.limit = limit; + } + + /** + * @return Re-encryption rate limit. + */ + public Double limit() { + return limit; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + out.writeDouble(limit); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte ver, ObjectInput in) throws IOException, ClassNotFoundException { + limit = in.readDouble(); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionRateTaskArg.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionRateTaskArg.java new file mode 100644 index 00000000000000..2471296fcc65a9 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionRateTaskArg.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.encryption; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.apache.ignite.internal.dto.IgniteDataTransferObject; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.jetbrains.annotations.Nullable; + +/** + * Re-encryption rate task argument. + */ +public class VisorReencryptionRateTaskArg extends IgniteDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** Re-encryption rate limit in megabytes per second. */ + private Double rate; + + /** Default constructor. */ + public VisorReencryptionRateTaskArg() { + // No-op. + } + + /** + * @param rate Re-encryption rate limit in megabytes per second. + */ + public VisorReencryptionRateTaskArg(Double rate) { + this.rate = rate; + } + + /** + * @return Re-encryption rate limit in megabytes per second. 
+ */ + public @Nullable Double rate() { + return rate; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + out.writeBoolean(rate != null); + + if (rate != null) + out.writeDouble(rate); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte ver, ObjectInput in) throws IOException, ClassNotFoundException { + if (in.readBoolean()) + rate = in.readDouble(); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(VisorReencryptionRateTaskArg.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionResumeTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionResumeTask.java new file mode 100644 index 00000000000000..171130c26d6dfa --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionResumeTask.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.visor.encryption; + +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.task.GridInternal; +import org.apache.ignite.internal.visor.VisorJob; +import org.apache.ignite.internal.visor.encryption.VisorReencryptionSuspendTask.VisorReencryptionSuspendResumeJobResult; +import org.jetbrains.annotations.Nullable; + +/** + * Resume re-encryption of the cache group. + */ +@GridInternal +public class VisorReencryptionResumeTask extends VisorCacheGroupEncryptionTask { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override protected VisorJob> job( + VisorCacheGroupEncryptionTaskArg arg) { + return new VisorReencryptionResumeJob(arg, debug); + } + + /** The job to resume re-encryption of the cache group. */ + private static class VisorReencryptionResumeJob extends VisorReencryptionBaseJob { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** + * @param arg Job argument. + * @param debug Flag indicating whether debug information should be printed into node log. 
+ */ + protected VisorReencryptionResumeJob(@Nullable VisorCacheGroupEncryptionTaskArg arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override protected VisorSingleFieldDto run0(CacheGroupContext grp) throws IgniteCheckedException { + return new VisorReencryptionSuspendResumeJobResult().value( + ignite.context().encryption().resumeReencryption(grp.groupId())); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionStatusTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionStatusTask.java new file mode 100644 index 00000000000000..df6004747382b8 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionStatusTask.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.visor.encryption; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.task.GridInternal; +import org.apache.ignite.internal.visor.VisorJob; +import org.jetbrains.annotations.Nullable; + +/** + * Get re-encryption status of the cache group. + */ +@GridInternal +public class VisorReencryptionStatusTask extends VisorCacheGroupEncryptionTask { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override protected VisorJob> job( + VisorCacheGroupEncryptionTaskArg arg) { + return new VisorReencryptionStatusJob(arg, debug); + } + + /** The job to get re-encryption status of the cache group. */ + private static class VisorReencryptionStatusJob extends VisorReencryptionBaseJob { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** + * @param arg Job argument. + * @param debug Flag indicating whether debug information should be printed into node log. + */ + protected VisorReencryptionStatusJob(@Nullable VisorCacheGroupEncryptionTaskArg arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override protected VisorSingleFieldDto run0(CacheGroupContext grp) { + long res; + + if (!ignite.context().encryption().reencryptionInProgress(grp.groupId())) + res = -1; + else + res = ignite.context().encryption().getBytesLeftForReencryption(grp.groupId()); + + return new VisorReencryptionStatusResult().value(res); + } + } + + /** */ + protected static class VisorReencryptionStatusResult extends VisorSingleFieldDto { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** */ + public VisorReencryptionStatusResult() { + // No-op. 
+ } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + out.writeLong(value()); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte ver, ObjectInput in) throws IOException, ClassNotFoundException { + value(in.readLong()); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionSuspendTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionSuspendTask.java new file mode 100644 index 00000000000000..edbfd464893c9f --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionSuspendTask.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.visor.encryption; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.task.GridInternal; +import org.apache.ignite.internal.visor.VisorJob; +import org.jetbrains.annotations.Nullable; + +/** + * Suspend re-encryption of the cache group. + */ +@GridInternal +public class VisorReencryptionSuspendTask extends VisorCacheGroupEncryptionTask { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override protected VisorJob> job( + VisorCacheGroupEncryptionTaskArg arg) { + return new VisorReencryptionSuspendJob(arg, debug); + } + + /** The job to suspend re-encryption of the cache group. */ + private static class VisorReencryptionSuspendJob extends VisorReencryptionBaseJob { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** + * @param arg Job argument. + * @param debug Flag indicating whether debug information should be printed into node log. + */ + protected VisorReencryptionSuspendJob(@Nullable VisorCacheGroupEncryptionTaskArg arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override protected VisorSingleFieldDto run0(CacheGroupContext grp) throws IgniteCheckedException { + return new VisorReencryptionSuspendResumeJobResult().value( + ignite.context().encryption().suspendReencryption(grp.groupId())); + } + } + + /** */ + protected static class VisorReencryptionSuspendResumeJobResult extends VisorSingleFieldDto { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** */ + public VisorReencryptionSuspendResumeJobResult() { + // No-op. 
+ } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + out.writeBoolean(value()); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte ver, ObjectInput in) throws IOException, ClassNotFoundException { + value(in.readBoolean()); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistenceMetrics.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistenceMetrics.java index bcab3186a89d93..dffe8404604265 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistenceMetrics.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistenceMetrics.java @@ -57,6 +57,9 @@ public class VisorPersistenceMetrics extends VisorDataTransferObject { /** */ private long lastCpDuration; + /** */ + private long lastCpStart; + /** */ private long lastCpLockWaitDuration; @@ -136,6 +139,7 @@ public VisorPersistenceMetrics(DataStorageMetrics m) { cpTotalTm = m.getCheckpointTotalTime(); lastCpDuration = m.getLastCheckpointDuration(); + lastCpStart = m.getLastCheckpointStarted(); lastCpLockWaitDuration = m.getLastCheckpointLockWaitDuration(); lastCpMmarkDuration = m.getLastCheckpointMarkDuration(); lastCpPagesWriteDuration = m.getLastCheckpointPagesWriteDuration(); @@ -225,6 +229,15 @@ public long getLastCheckpointingDuration() { return lastCpDuration; } + /** + * Returns time when the last checkpoint was started. + * + * @return Time when the last checkpoint was started. + * */ + public long getLastCheckpointStarted() { + return lastCpStart; + } + /** * @return Checkpoint lock wait time in milliseconds. 
*/ @@ -360,7 +373,7 @@ public long getSparseStorageSize() { /** {@inheritDoc} */ @Override public byte getProtocolVersion() { - return V3; + return V4; } /** {@inheritDoc} */ @@ -397,6 +410,9 @@ public long getSparseStorageSize() { // V3 out.writeLong(storageSize); out.writeLong(sparseStorageSize); + + // V4 + out.writeLong(lastCpStart); } /** {@inheritDoc} */ @@ -435,6 +451,9 @@ public long getSparseStorageSize() { storageSize = in.readLong(); sparseStorageSize = in.readLong(); } + + if (protoVer > V3) + lastCpStart = in.readLong(); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceCleanAndBackupSettings.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceCleanAndBackupSettings.java new file mode 100644 index 00000000000000..a5bf3278c45aab --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceCleanAndBackupSettings.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.visor.persistence; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.List; + +import org.apache.ignite.internal.dto.IgniteDataTransferObject; +import org.apache.ignite.internal.util.typedef.internal.U; + +/** */ +public class PersistenceCleanAndBackupSettings extends IgniteDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** */ + private PersistenceCleanAndBackupType cleanAndBackupType; + + /** */ + private List cacheNames; + + /** */ + public PersistenceCleanAndBackupSettings() { + // No-op. + } + + /** */ + public PersistenceCleanAndBackupSettings(PersistenceCleanAndBackupType cleanAndBackupType, List cacheNames) { + this.cleanAndBackupType = cleanAndBackupType; + this.cacheNames = cacheNames; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + U.writeEnum(out, cleanAndBackupType); + U.writeCollection(out, cacheNames); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte protoVer, ObjectInput in) throws IOException, ClassNotFoundException { + cleanAndBackupType = PersistenceCleanAndBackupType.fromOrdinal(in.readByte()); + cacheNames = U.readList(in); + } + + /** */ + public PersistenceCleanAndBackupType cleanAndBackupType() { + return cleanAndBackupType; + } + + /** */ + public List cacheNames() { + return cacheNames; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceCleanAndBackupType.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceCleanAndBackupType.java new file mode 100644 index 00000000000000..21988516bd8293 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceCleanAndBackupType.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor 
license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.persistence; + +import org.jetbrains.annotations.Nullable; + +/** */ +public enum PersistenceCleanAndBackupType { + /** */ + ALL, + /** */ + CORRUPTED, + /** */ + CACHES; + + /** */ + private static final PersistenceCleanAndBackupType[] VALS = values(); + + /** + * @param ordinal Index of enum value. + * @return Value of {@link PersistenceCleanAndBackupType} enum. + */ + @Nullable public static PersistenceCleanAndBackupType fromOrdinal(int ordinal) { + return ordinal >= 0 && ordinal < VALS.length ? VALS[ordinal] : null; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceOperation.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceOperation.java new file mode 100644 index 00000000000000..2481af85f30952 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceOperation.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.persistence; + +import org.jetbrains.annotations.Nullable; + +/** Persistence cleaning operations. */ +public enum PersistenceOperation { + /** */ + INFO, + + /** */ + CLEAN, + + /** */ + BACKUP; + + /** */ + private static final PersistenceOperation[] VALS = values(); + + /** + * @param ordinal Index of enum value. + * @return Value of {@link PersistenceOperation} enum. + */ + @Nullable public static PersistenceOperation fromOrdinal(int ordinal) { + return ordinal >= 0 && ordinal < VALS.length ? VALS[ordinal] : null; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTask.java new file mode 100644 index 00000000000000..823126d3b5b61d --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTask.java @@ -0,0 +1,402 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.persistence; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.StandardCopyOption; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicReference; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager; +import org.apache.ignite.internal.processors.cache.CacheGroupDescriptor; +import org.apache.ignite.internal.processors.cache.DynamicCacheDescriptor; +import org.apache.ignite.internal.processors.cache.GridCacheProcessor; +import org.apache.ignite.internal.processors.cache.persistence.CheckCorruptedCacheStoresCleanAction; +import org.apache.ignite.internal.processors.cache.persistence.CleanCacheStoresMaintenanceAction; +import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; +import org.apache.ignite.internal.processors.task.GridInternal; +import org.apache.ignite.internal.processors.task.GridVisorManagementTask; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.visor.VisorJob; +import 
org.apache.ignite.internal.visor.VisorOneNodeTask; +import org.apache.ignite.lang.IgniteBiTuple; +import org.apache.ignite.maintenance.MaintenanceAction; +import org.apache.ignite.maintenance.MaintenanceRegistry; +import org.apache.ignite.maintenance.MaintenanceTask; +import org.jetbrains.annotations.Nullable; + +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.CORRUPTED_DATA_FILES_MNTC_TASK_NAME; +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.cacheDirName; + +/** */ +@GridInternal +@GridVisorManagementTask +public class PersistenceTask extends VisorOneNodeTask { + /** */ + private static final long serialVersionUID = 0L; + + /** */ + private static final String BACKUP_FOLDER_PREFIX = "backup_"; + + @Override protected VisorJob job(PersistenceTaskArg arg) { + return new PersistenceJob(arg, debug); + } + + /** */ + private static class PersistenceJob extends VisorJob { + /** */ + private static final long serialVersionUID = 0L; + + /** + * Create job with specified argument. + * + * @param arg Job argument. + * @param debug Flag indicating whether debug information should be printed into node log. 
+ */ + protected PersistenceJob(@Nullable PersistenceTaskArg arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override protected PersistenceTaskResult run(@Nullable PersistenceTaskArg arg) throws IgniteException { + if (!ignite.context().maintenanceRegistry().isMaintenanceMode()) + return new PersistenceTaskResult(false); + + switch (arg.operation()) { + case CLEAN: + return clean(arg); + + case BACKUP: + return backup(arg); + + default: + return info(); + } + } + + /** */ + private PersistenceTaskResult backup(PersistenceTaskArg arg) { + PersistenceCleanAndBackupSettings backupSettings = arg.cleanAndBackupSettings(); + + MaintenanceRegistry mntcReg = ignite.context().maintenanceRegistry(); + MaintenanceTask task = mntcReg.activeMaintenanceTask(CORRUPTED_DATA_FILES_MNTC_TASK_NAME); + + File workDir = ((FilePageStoreManager) ignite.context().cache().context().pageStore()).workDir(); + + switch (backupSettings.cleanAndBackupType()) { + case ALL: + return backupAll(workDir); + + case CORRUPTED: + return backupCaches(workDir, corruptedCacheDirectories(task)); + + default: + return backupCaches(workDir, cacheDirectoriesFromCacheNames(backupSettings.cacheNames())); + } + } + + /** */ + private PersistenceTaskResult backupAll(File workDir) { + GridCacheProcessor cacheProc = ignite.context().cache(); + + List allCacheDirs = cacheProc.cacheDescriptors() + .values() + .stream() + .map(desc -> cacheDirName(desc.cacheConfiguration())) + .distinct() + .collect(Collectors.toList()); + + return backupCaches(workDir, allCacheDirs); + } + + /** */ + private PersistenceTaskResult backupCaches(File workDir, List cacheDirs) { + PersistenceTaskResult res = new PersistenceTaskResult(true); + + List backupCompletedCaches = new ArrayList<>(); + List backupFailedCaches = new ArrayList<>(); + + for (String dir : cacheDirs) { + String backupDirName = BACKUP_FOLDER_PREFIX + dir; + + File backupDir = new File(workDir, backupDirName); + + if (!backupDir.exists()) { + 
try { + U.ensureDirectory(backupDir, backupDirName, null); + + copyCacheFiles(workDir.toPath().resolve(dir).toFile(), backupDir); + + backupCompletedCaches.add(backupDirName); + } catch (IgniteCheckedException | IOException e) { + backupFailedCaches.add(dir); + } + } + } + + res.handledCaches(backupCompletedCaches); + res.failedCaches(backupFailedCaches); + + return res; + } + + /** */ + private void copyCacheFiles(File sourceDir, File backupDir) throws IOException { + for (File f : sourceDir.listFiles()) + Files.copy(f.toPath(), backupDir.toPath().resolve(f.getName()), StandardCopyOption.REPLACE_EXISTING); + } + + /** */ + private PersistenceTaskResult clean(PersistenceTaskArg arg) { + PersistenceTaskResult res = new PersistenceTaskResult(); + + PersistenceCleanAndBackupSettings cleanSettings = arg.cleanAndBackupSettings(); + + GridCacheProcessor cacheProc = ignite.context().cache(); + MaintenanceRegistry mntcReg = ignite.context().maintenanceRegistry(); + + switch (cleanSettings.cleanAndBackupType()) { + case ALL: + return cleanAll(cacheProc, mntcReg); + + case CORRUPTED: + return cleanCorrupted(mntcReg); + + case CACHES: + return cleanCaches(cacheProc, mntcReg, cleanSettings.cacheNames()); + } + + return res; + } + + /** */ + private PersistenceTaskResult cleanCaches( + GridCacheProcessor cacheProc, + MaintenanceRegistry mntcReg, + List cacheNames + ) { + PersistenceTaskResult res = new PersistenceTaskResult(true); + + List cleanedCaches = new ArrayList<>(); + List failedToCleanCaches = new ArrayList<>(); + + DataStorageConfiguration dsCfg = ignite.context().config().getDataStorageConfiguration(); + IgnitePageStoreManager pageStore = cacheProc.context().pageStore(); + + AtomicReference missedCache = new AtomicReference<>(); + + Boolean allExist = cacheNames + .stream() + .map(name -> { + if (cacheProc.cacheDescriptor(name) != null) + return true; + else { + missedCache.set(name); + + return false; + } + }) + .reduce(true, (t, u) -> t && u); + + if (!allExist) + 
throw new IllegalArgumentException("Cache with name " + missedCache.get() + + " not found, no caches will be cleaned."); + + for (String name : cacheNames) { + DynamicCacheDescriptor cacheDescr = cacheProc.cacheDescriptor(name); + + if (CU.isPersistentCache(cacheDescr.cacheConfiguration(), dsCfg)) { + try { + pageStore.cleanupPersistentSpace(cacheDescr.cacheConfiguration()); + + cleanedCaches.add(cacheDirName(cacheDescr.cacheConfiguration())); + } + catch (IgniteCheckedException e) { + failedToCleanCaches.add(name); + } + } + } + + res.handledCaches(cleanedCaches); + + if (!failedToCleanCaches.isEmpty()) + res.failedCaches(failedToCleanCaches); + + List> actions = mntcReg.actionsForMaintenanceTask(CORRUPTED_DATA_FILES_MNTC_TASK_NAME); + + Optional> checkActionOpt = actions.stream() + .filter(a -> a.name().equals(CheckCorruptedCacheStoresCleanAction.ACTION_NAME)) + .findFirst(); + + if (checkActionOpt.isPresent()) { + MaintenanceAction action = (MaintenanceAction)checkActionOpt.get(); + + Boolean mntcTaskCompleted = action.execute(); + + res.maintenanceTaskCompleted(mntcTaskCompleted); + + if (mntcTaskCompleted) + mntcReg.unregisterMaintenanceTask(CORRUPTED_DATA_FILES_MNTC_TASK_NAME); + } + + return res; + } + + /** */ + private PersistenceTaskResult cleanAll(GridCacheProcessor cacheProc, MaintenanceRegistry mntcReg) { + PersistenceTaskResult res = new PersistenceTaskResult(true); + + List allCacheDirs = cacheProc.cacheDescriptors() + .values() + .stream() + .map(desc -> cacheDirName(desc.cacheConfiguration())) + .collect(Collectors.toList()); + + try { + cacheProc.cleanupCachesDirectories(); + } catch (IgniteCheckedException e) { + throw U.convertException(e); + } + + mntcReg.unregisterMaintenanceTask(CORRUPTED_DATA_FILES_MNTC_TASK_NAME); + + res.maintenanceTaskCompleted(true); + res.handledCaches(allCacheDirs); + + return res; + } + + /** */ + private PersistenceTaskResult cleanCorrupted(MaintenanceRegistry mntcReg) { + PersistenceTaskResult res = new 
PersistenceTaskResult(true); + + List> actions = mntcReg + .actionsForMaintenanceTask(CORRUPTED_DATA_FILES_MNTC_TASK_NAME); + + Optional> cleanCorruptedActionOpt = actions + .stream() + .filter(a -> a.name().equals(CleanCacheStoresMaintenanceAction.ACTION_NAME)) + .findFirst(); + + if (cleanCorruptedActionOpt.isPresent()) { + cleanCorruptedActionOpt.get().execute(); + + MaintenanceTask corruptedTask = mntcReg.activeMaintenanceTask(CORRUPTED_DATA_FILES_MNTC_TASK_NAME); + + mntcReg.unregisterMaintenanceTask(CORRUPTED_DATA_FILES_MNTC_TASK_NAME); + + res.handledCaches( + corruptedCacheDirectories(corruptedTask) + ); + + res.maintenanceTaskCompleted(true); + } + + return res; + } + + /** */ + private PersistenceTaskResult info() { + PersistenceTaskResult res = new PersistenceTaskResult(true); + + GridCacheProcessor cacheProc = ignite.context().cache(); + DataStorageConfiguration dsCfg = ignite.context().config().getDataStorageConfiguration(); + + List corruptedCacheNames = corruptedCacheDirectories(ignite.context().maintenanceRegistry() + .activeMaintenanceTask(CORRUPTED_DATA_FILES_MNTC_TASK_NAME)); + + Map> cachesInfo = new HashMap<>(); + + for (DynamicCacheDescriptor desc : cacheProc.cacheDescriptors().values()) { + if (!CU.isPersistentCache(desc.cacheConfiguration(), dsCfg)) + continue; + + CacheGroupDescriptor grpDesc = desc.groupDescriptor(); + + if (grpDesc != null) { + boolean globalWalEnabled = grpDesc.walEnabled(); + boolean localWalEnabled = true; + + if (globalWalEnabled && corruptedCacheNames.contains(desc.cacheName())) + localWalEnabled = false; + + cachesInfo.put(desc.cacheName(), new IgniteBiTuple<>(globalWalEnabled, localWalEnabled)); + } + } + + res.cachesInfo(cachesInfo); + + return res; + } + + /** */ + private List corruptedCacheDirectories(MaintenanceTask task) { + String params = task.parameters(); + + String[] namesArr = params.split(Pattern.quote(File.separator)); + + return Arrays.asList(namesArr); + } + + /** */ + private List 
cacheDirectoriesFromCacheNames(List cacheNames) { + GridCacheProcessor cacheProc = ignite.context().cache(); + + DataStorageConfiguration dsCfg = ignite.configuration().getDataStorageConfiguration(); + + AtomicReference missedCache = new AtomicReference<>(); + + Boolean allExist = cacheNames.stream() + .map(s -> { + if (cacheProc.cacheDescriptor(s) != null) + return true; + else { + missedCache.set(s); + + return false; + } + }) + .reduce(true, (u, v) -> u && v); + + if (!allExist) + throw new IllegalArgumentException("Cache with name " + missedCache.get() + + " not found, no caches will be backed up."); + + return cacheNames.stream() + .filter(s -> cacheProc.cacheDescriptor(s) != null) + .filter(s -> + CU.isPersistentCache(cacheProc.cacheDescriptor(s).cacheConfiguration(), dsCfg)) + .map(s -> cacheProc.cacheDescriptor(s).cacheConfiguration()) + .map(FilePageStoreManager::cacheDirName) + .distinct() + .collect(Collectors.toList()); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTaskArg.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTaskArg.java new file mode 100644 index 00000000000000..c48f936764193e --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTaskArg.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.persistence; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; + +import org.apache.ignite.internal.dto.IgniteDataTransferObject; +import org.apache.ignite.internal.util.typedef.internal.U; + +/** + * + */ +public class PersistenceTaskArg extends IgniteDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** */ + private PersistenceOperation op; + + /** */ + private PersistenceCleanAndBackupSettings cleanAndBackupSettings; + + /** + * Default constructor. + */ + public PersistenceTaskArg() { + // No-op. + } + + /** + * @param op {@link PersistenceOperation} requested for execution. + * @param cleanAndBackupSettings {@link PersistenceCleanAndBackupSettings} specific settings for clean and backup + * commands. + */ + public PersistenceTaskArg(PersistenceOperation op, PersistenceCleanAndBackupSettings cleanAndBackupSettings) { + this.op = op; + this.cleanAndBackupSettings = cleanAndBackupSettings; + } + + /** + * @return {@link PersistenceOperation} operation requested for execution. + */ + public PersistenceOperation operation() { + return op; + } + + /** + * @return {@link PersistenceCleanAndBackupSettings} specific settings for clean and backup commands. 
+ */ + public PersistenceCleanAndBackupSettings cleanAndBackupSettings() { + return cleanAndBackupSettings; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + U.writeEnum(out, op); + out.writeObject(cleanAndBackupSettings); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte protoVer, ObjectInput in) throws IOException, ClassNotFoundException { + op = PersistenceOperation.fromOrdinal(in.readByte()); + cleanAndBackupSettings = (PersistenceCleanAndBackupSettings) in.readObject(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTaskResult.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTaskResult.java new file mode 100644 index 00000000000000..5a0a0fedb19fda --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTaskResult.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.visor.persistence; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.List; +import java.util.Map; + +import org.apache.ignite.internal.dto.IgniteDataTransferObject; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteBiTuple; + +public class PersistenceTaskResult extends IgniteDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** */ + private boolean inMaintenanceMode; + + /** */ + private boolean maintenanceTaskCompleted; + + /** */ + private List handledCaches; + + /** */ + private List failedToHandleCaches; + + /** */ + private Map> cachesInfo; + + /** */ + public PersistenceTaskResult() { + // No-op. + } + + /** + * + */ + public PersistenceTaskResult(boolean inMaintenanceMode) { + this.inMaintenanceMode = inMaintenanceMode; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + out.writeBoolean(inMaintenanceMode); + out.writeBoolean(maintenanceTaskCompleted); + U.writeCollection(out, handledCaches); + U.writeCollection(out, failedToHandleCaches); + U.writeMap(out, cachesInfo); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte protoVer, ObjectInput in) throws IOException, ClassNotFoundException { + inMaintenanceMode = in.readBoolean(); + maintenanceTaskCompleted = in.readBoolean(); + handledCaches = U.readList(in); + failedToHandleCaches = U.readList(in); + cachesInfo = U.readMap(in); + } + + /** */ + public boolean inMaintenanceMode() { + return inMaintenanceMode; + } + + /** */ + public boolean maintenanceTaskCompleted() { + return maintenanceTaskCompleted; + } + + /** */ + public void maintenanceTaskCompleted(boolean maintenanceTaskCompleted) { + this.maintenanceTaskCompleted = maintenanceTaskCompleted; + } + + /** */ + public List handledCaches() { + return handledCaches; + } + + /** */ + 
public void handledCaches(List handledCaches) { + this.handledCaches = handledCaches; + } + + /** */ + public List failedCaches() { + return failedToHandleCaches; + } + + /** */ + public void failedCaches(List failedToHandleCaches) { + this.failedToHandleCaches = failedToHandleCaches; + } + + /** */ + public Map> cachesInfo() { + return cachesInfo; + } + + /** */ + public void cachesInfo(Map> cachesInfo) { + this.cachesInfo = cachesInfo; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/util/VisorTaskUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/util/VisorTaskUtils.java index 03f9ecd986d571..5fecd01189244c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/util/VisorTaskUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/util/VisorTaskUtils.java @@ -1107,7 +1107,7 @@ public static boolean joinTimedOut(String msg) { * IPv4, private IPv4, IPv4 local host, IPv6. * Lower addresses first. */ - private static class SortableAddress implements Comparable { + public static class SortableAddress implements Comparable { /** */ private int type; @@ -1122,7 +1122,7 @@ private static class SortableAddress implements Comparable { * * @param addr Address as string. 
*/ - private SortableAddress(String addr) { + public SortableAddress(String addr) { this.addr = addr; if (addr.indexOf(':') > 0) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/worker/WorkersRegistry.java b/modules/core/src/main/java/org/apache/ignite/internal/worker/WorkersRegistry.java index 5829b3cd97dfae..7af557dc45433d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/worker/WorkersRegistry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/worker/WorkersRegistry.java @@ -229,8 +229,6 @@ public void setSystemWorkerBlockedTimeout(long val) { "[workerName=" + worker.name() + ", threadName=" + runner.getName() + ", blockedFor=" + heartbeatDelay / 1000 + "s]"); - U.dumpThread(worker.runner(), log); - workerFailedHnd.apply(worker, SYSTEM_WORKER_BLOCKED); } diff --git a/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceRegistry.java b/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceRegistry.java index b264700e969e66..9cebef0e09a2d2 100644 --- a/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceRegistry.java +++ b/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceRegistry.java @@ -18,9 +18,9 @@ package org.apache.ignite.maintenance; import java.util.List; - import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.util.lang.IgniteThrowableFunction; import org.apache.ignite.lang.IgniteExperimental; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; @@ -142,7 +142,7 @@ public interface MaintenanceRegistry { * * @throws IgniteException if no Maintenance Tasks are registered for provided name. */ - public List actionsForMaintenanceTask(String maintenanceTaskName); + public List> actionsForMaintenanceTask(String maintenanceTaskName); /** * Examine all components if they need to execute maintenance actions. 
@@ -154,4 +154,22 @@ public interface MaintenanceRegistry { * and their {@link MaintenanceAction maintenance actions} are not executed. */ public void prepareAndExecuteMaintenance(); + + /** + * Call the {@link #registerWorkflowCallback(String, MaintenanceWorkflowCallback)} if the active maintenance task + * with given name exists. + * + * @param maintenanceTaskName name of {@link MaintenanceTask} this callback is registered for. + * @param workflowCalProvider provider of {@link MaintenanceWorkflowCallback} which construct the callback by given + * task. + */ + public default void registerWorkflowCallbackIfTaskExists( + @NotNull String maintenanceTaskName, + @NotNull IgniteThrowableFunction workflowCalProvider + ) throws IgniteCheckedException { + MaintenanceTask task = activeMaintenanceTask(maintenanceTaskName); + + if (task != null) + registerWorkflowCallback(maintenanceTaskName, workflowCalProvider.apply(task)); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceTask.java b/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceTask.java index 49795f1a92cf70..c600952866592f 100644 --- a/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceTask.java +++ b/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceTask.java @@ -24,8 +24,8 @@ /** * Represents request to handle maintenance situation. * - * It can be created automatically or by user request by any component needed maintenance and should be registered - * in Maintenance Registry with the method {@link MaintenanceRegistry#registerMaintenanceTask(MaintenanceTask)}. + * Maintenance request can be created programmatically + * with {@link MaintenanceRegistry#registerMaintenanceTask(MaintenanceTask)} public API call. * * Lifecycle of Maintenance Task is managed by {@link MaintenanceRegistry}. 
* diff --git a/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceWorkflowCallback.java b/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceWorkflowCallback.java index 26ba2a1ff14b35..340bb530fcfbdf 100644 --- a/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceWorkflowCallback.java +++ b/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceWorkflowCallback.java @@ -18,7 +18,6 @@ package org.apache.ignite.maintenance; import java.util.List; - import org.apache.ignite.lang.IgniteExperimental; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; @@ -52,7 +51,7 @@ public interface MaintenanceWorkflowCallback { * * @return Not null and non-empty {@link List} of {@link MaintenanceAction}. */ - @NotNull public List allActions(); + @NotNull public List> allActions(); /** * Component can provide optional {@link MaintenanceAction} that will be executed automatically @@ -64,5 +63,5 @@ public interface MaintenanceWorkflowCallback { * @return {@link MaintenanceAction} for automatic execution or null if maintenance situation * should not be fixed automatically. 
*/ - @Nullable public MaintenanceAction automaticAction(); + @Nullable public MaintenanceAction automaticAction(); } diff --git a/modules/core/src/main/java/org/apache/ignite/mxbean/DataStorageMetricsMXBean.java b/modules/core/src/main/java/org/apache/ignite/mxbean/DataStorageMetricsMXBean.java index f48771f38ecc04..be79ec836e66da 100644 --- a/modules/core/src/main/java/org/apache/ignite/mxbean/DataStorageMetricsMXBean.java +++ b/modules/core/src/main/java/org/apache/ignite/mxbean/DataStorageMetricsMXBean.java @@ -76,6 +76,10 @@ public interface DataStorageMetricsMXBean extends DataStorageMetrics { @MXBeanDescription("Duration of the last checkpoint in milliseconds.") @Override long getLastCheckpointDuration(); + /** {@inheritDoc} */ + @MXBeanDescription("Time when the last checkpoint was started.") + @Override long getLastCheckpointStarted(); + /** {@inheritDoc} */ @MXBeanDescription("Duration of the checkpoint lock wait in milliseconds.") @Override long getLastCheckpointLockWaitDuration(); diff --git a/modules/core/src/main/java/org/apache/ignite/mxbean/EncryptionMXBean.java b/modules/core/src/main/java/org/apache/ignite/mxbean/EncryptionMXBean.java index 9db104971ae558..5aaca05e47c86d 100644 --- a/modules/core/src/main/java/org/apache/ignite/mxbean/EncryptionMXBean.java +++ b/modules/core/src/main/java/org/apache/ignite/mxbean/EncryptionMXBean.java @@ -17,6 +17,7 @@ package org.apache.ignite.mxbean; +import java.util.Collection; import org.apache.ignite.IgniteEncryption; /** @@ -43,4 +44,15 @@ public interface EncryptionMXBean { public void changeMasterKey( @MXBeanParameter(name = "masterKeyName", description = "Master key name.") String masterKeyName ); + + /** + * Starts cache group encryption key change process. + * + * @param cacheOrGrpName Cache or group name. 
+ * @see IgniteEncryption#changeCacheGroupKey(Collection) + */ + @MXBeanDescription("Change cache group key.") + public void changeCacheGroupKey( + @MXBeanParameter(name = "cacheOrGroupName", description = "Cache or group name.") String cacheOrGrpName + ); } diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java index d0c8e8e49d9890..75b55b95bd474f 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java @@ -71,6 +71,7 @@ import org.apache.ignite.cache.CacheMetrics; import org.apache.ignite.cluster.ClusterMetrics; import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.events.NodeValidationFailedEvent; import org.apache.ignite.failure.FailureContext; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.IgniteFeatures; @@ -215,12 +216,12 @@ class ServerImpl extends TcpDiscoveryImpl { /** Maximal interval of connection check to next node in the ring. */ private static final long MAX_CON_CHECK_INTERVAL = 500; - /** Minimal timeout to find connection to some next node in the ring while connection recovering. */ - private static final long MIN_RECOVERY_TIMEOUT = 100; - /** Interval of checking connection to next node in the ring. */ private long connCheckInterval; + /** Fundamental value for connection checking actions. */ + private long connCheckTick; + /** */ private IgniteThreadPoolExecutor utilityPool; @@ -393,9 +394,12 @@ class ServerImpl extends TcpDiscoveryImpl { lastRingMsgSentTime = 0; + + // Fundamental timeout value for actions related to connection check. 
+ connCheckTick = effectiveExchangeTimeout() / 3; + // Since we take in account time of last sent message, the interval should be quite short to give enough piece // of failure detection timeout as send-and-acknowledge timeout of the message to send. - connCheckInterval = Math.min(effectiveExchangeTimeout() / 4, MAX_CON_CHECK_INTERVAL); + connCheckInterval = Math.min(connCheckTick, MAX_CON_CHECK_INTERVAL); utilityPool = new IgniteThreadPoolExecutor("disco-pool", spi.ignite().name(), @@ -3508,12 +3512,19 @@ else if (log.isTraceEnabled()) if (changeTop) hndMsg.changeTopology(ring.previousNodeOf(next).id()); - if (log.isDebugEnabled()) - log.debug("Sending handshake [hndMsg=" + hndMsg + ", sndState=" + sndState + ']'); + if (log.isDebugEnabled()) { + log.debug("Sending handshake [hndMsg=" + hndMsg + ", sndState=" + sndState + + "] with timeout " + timeoutHelper.nextTimeoutChunk(spi.getSocketTimeout())); + } spi.writeToSocket(sock, out, hndMsg, timeoutHelper.nextTimeoutChunk(spi.getSocketTimeout())); + if (log.isDebugEnabled()) { + log.debug("Reading handshake response with timeout " + + timeoutHelper.nextTimeoutChunk(ackTimeout0)); + } + TcpDiscoveryHandshakeResponse res = spi.readMessage(sock, null, timeoutHelper.nextTimeoutChunk(ackTimeout0)); @@ -4403,6 +4414,8 @@ else if (node.clientRouterNodeId() == null && utilityPool.execute( new Runnable() { @Override public void run() { + spi.getSpiContext().recordEvent(new NodeValidationFailedEvent(locNode, node, err0)); + boolean ping = node.id().equals(err0.nodeId()) ? pingNode(node) : pingNode(err0.nodeId()); if (!ping) { @@ -6523,23 +6536,11 @@ private IgniteSpiOperationTimeoutHelper serverOperationTimeoutHelper(@Nullable C long lastOperationNanos) { long absoluteThreshold = -1; - // Active send-state means we lost connection to next node and have to find another. - // We don't know how many nodes failed. May be several failed in a row. 
But we got only one - // connectionRecoveryTimeout to establish new connection to the ring. We can't spend this timeout wholly on one - // or two next nodes. We should slice it and try to travers as many as we can. - if (sndState != null) { - int nodesLeft = ring.serverNodes().size() - 1 - sndState.failedNodes; - - assert nodesLeft > 0; - - long now = System.nanoTime(); - - // In case of large cluster and small connectionRecoveryTimeout we have to provide reasonable minimal - // timeout per one of the next nodes. It should not appear too small like 1, 5 or 10ms. - long perNodeTimeout = Math.max((sndState.failTimeNanos - now) / nodesLeft, MIN_RECOVERY_TIMEOUT); - - absoluteThreshold = Math.min(sndState.failTimeNanos, now + perNodeTimeout); - } + // Active send-state means we lost connection to next node and have to find another. We don't know how many + // nodes failed. May be several failed in a row. But we got only one connectionRecoveryTimeout to establish new + // connection. We should traverse the rest of the cluster with a sliced timeout for each node. + if (sndState != null) + absoluteThreshold = Math.min(sndState.failTimeNanos, System.nanoTime() + U.millisToNanos(connCheckTick)); return new IgniteSpiOperationTimeoutHelper(spi, true, lastOperationNanos, absoluteThreshold); } @@ -6901,13 +6902,22 @@ else if (req.changeTopology()) { (req.checkPreviousNodeId() == null || previous.id().equals(req.checkPreviousNodeId()))) { Collection nodeAddrs = spi.getNodeAddresses(previous, false); - liveAddr = checkConnection(new ArrayList<>(nodeAddrs), - (int)U.nanosToMillis(timeThreshold - now)); + // The connection recovery timeout for one node is connCheckTick. + // We need to allow for network delays. So we use half of this time. + int backwardCheckTimeout = (int)(connCheckTick / 2); + + if (log.isDebugEnabled()) { + log.debug("Remote node requests topology change. 
Checking connection to " + + "previous [" + previous + "] with timeout " + backwardCheckTimeout); + } + + liveAddr = checkConnection(new ArrayList<>(nodeAddrs), backwardCheckTimeout); - if (log.isInfoEnabled()) - log.info("Connection check done [liveAddr=" + liveAddr - + ", previousNode=" + previous + ", addressesToCheck=" + nodeAddrs - + ", connectingNodeId=" + nodeId + ']'); + if (log.isInfoEnabled()) { + log.info("Connection check to previous node done: [liveAddr=" + liveAddr + + ", previousNode=" + U.toShortString(previous) + ", addressesToCheck=" + + nodeAddrs + ", connectingNodeId=" + nodeId + ']'); + } } // If local node was able to connect to previous, confirm that it's alive. @@ -6926,6 +6936,11 @@ else if (req.changeTopology()) { } } + if (log.isDebugEnabled()) { + log.debug("Sending handshake response [" + res + "] with timeout " + + spi.getEffectiveSocketTimeout(srvSock) + " to " + rmtAddr + ":" + sock.getPort()); + } + spi.writeToSocket(sock, res, spi.getEffectiveSocketTimeout(srvSock)); // It can happen if a remote node is stopped and it has a loopback address in the list of addresses, diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java index f1e8406a9afe97..543634268829c2 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java @@ -120,7 +120,7 @@ import static org.apache.ignite.IgniteSystemProperties.IGNITE_CONSISTENT_ID_BY_HOST_WITHOUT_PORT; import static org.apache.ignite.IgniteSystemProperties.getBoolean; import static org.apache.ignite.failure.FailureType.CRITICAL_ERROR; -import static org.apache.ignite.internal.processors.metric.impl.MetricUtils.metricName; +import static org.apache.ignite.internal.managers.discovery.GridDiscoveryManager.DISCO_METRICS; /** * Discovery SPI implementation 
that uses TCP/IP for node discovery. @@ -287,7 +287,7 @@ public class TcpDiscoverySpi extends IgniteSpiAdapter implements IgniteDiscovery public static final long DFLT_MAX_ACK_TIMEOUT = 10 * 60 * 1000; /** Default SO_LINGER to set for socket, 0 means enabled with 0 timeout. */ - public static final int DFLT_SO_LINGER = 5; + public static final int DFLT_SO_LINGER = 0; /** Default connection recovery timeout in ms. */ public static final long DFLT_CONNECTION_RECOVERY_TIMEOUT = IgniteConfiguration.DFLT_FAILURE_DETECTION_TIMEOUT; @@ -307,9 +307,6 @@ public class TcpDiscoverySpi extends IgniteSpiAdapter implements IgniteDiscovery /** @see IgniteSystemProperties#IGNITE_DISCOVERY_METRICS_QNT_WARN */ public static final int DFLT_DISCOVERY_METRICS_QNT_WARN = 500; - /** Name of the discovery metrics registry. */ - public static final String DISCO_METRICS = metricName("io", "discovery"); - /** Ssl message pattern for StreamCorruptedException. */ private static Pattern sslMsgPattern = Pattern.compile("invalid stream header: 150\\d0\\d00"); @@ -631,14 +628,24 @@ public void setClientReconnectDisabled(boolean clientReconnectDisabled) { } /** - * Sets local host IP address that discovery SPI uses. + * Sets network addresses for the Discovery SPI. + *

+ * If not provided, the value is resolved from {@link IgniteConfiguration#getLocalHost()}. If the latter is not set + as well, the node binds to all available IP addresses of an environment it's running on. + * If there is no non-loopback address, then {@link InetAddress#getLocalHost()} is used. *

- * If not provided, by default a first found non-loopback address - * will be used. If there is no non-loopback address available, - * then {@link InetAddress#getLocalHost()} will be used. + * NOTE: You should initialize the {@link IgniteConfiguration#getLocalHost()} or + * {@link TcpDiscoverySpi#getLocalAddress()} parameter with the network + * interface that will be used for inter-node communication. Otherwise, the node can listen on multiple network + * addresses available in the environment and this can prolong node failure detection if some of the addresses are + * not reachable from other cluster nodes. For instance, if the node is bound to 3 network interfaces, + * it can take up to + * '{@link IgniteConfiguration#getFailureDetectionTimeout()} * 3 + {@link TcpDiscoverySpi#getConnectionRecoveryTimeout()}' + * milliseconds for another node to detect a disconnect of the given node. * * @param locAddr IP address. * @return {@code this} for chaining. + * @see IgniteConfiguration#setLocalHost(String). @IgniteSpiConfiguration(optional = true) public TcpDiscoverySpi setLocalAddress(String locAddr) { @@ -897,15 +904,6 @@ public TcpDiscoveryIpFinder getIpFinder() { *

    * If not provided {@link org.apache.ignite.spi.discovery.tcp.ipfinder.multicast.TcpDiscoveryMulticastIpFinder} will * be used by default. - *

    - * NOTE: You should assing multiple addresses to a node only if they represent some real physical connections - * which can give more reliability. Providing several addresses can prolong failure detection of current node. - * The timeouts and settings on network operations ({@link #failureDetectionTimeout()}, {@link #sockTimeout}, - * {@link #ackTimeout}, {@link #maxAckTimeout}, {@link #reconCnt}) work per connection/address. The exception is - * {@link #connRecoveryTimeout}. And node addresses are sorted out sequentially. - *

    - * Example: if you use {@code failureDetectionTimeout} and have set 3 ip addresses for this node, previous node in - * the ring can take up to 'failureDetectionTimeout * 3' to detect failure of current node. * * @param ipFinder IP finder. * @return {@code this} for chaining. diff --git a/modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryFilterImpl.java b/modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryFilterImpl.java index 07366328e3f3b4..c1e03cd47d2243 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryFilterImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryFilterImpl.java @@ -68,10 +68,6 @@ public IndexingQueryFilterImpl(GridKernalContext ctx, @Nullable AffinityTopology if (cache.context().isReplicated()) return null; - // No backups and explicit partitions -> nothing to filter. - if (cache.configuration().getBackups() == 0 && parts == null) - return null; - return new IndexingQueryCacheFilter(cache.context().affinity(), parts, topVer, ctx.discovery().localNode()); } diff --git a/modules/core/src/main/java/org/apache/ignite/spi/metric/jmx/JmxMetricExporterSpi.java b/modules/core/src/main/java/org/apache/ignite/spi/metric/jmx/JmxMetricExporterSpi.java index 7671a81d0f3cec..fe560073fec1fb 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/metric/jmx/JmxMetricExporterSpi.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/metric/jmx/JmxMetricExporterSpi.java @@ -18,6 +18,7 @@ package org.apache.ignite.spi.metric.jmx; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.function.Predicate; import javax.management.JMException; @@ -46,7 +47,7 @@ public class JmxMetricExporterSpi extends IgniteSpiAdapter implements MetricExpo private @Nullable Predicate filter; /** Registered beans. 
*/ - private final List mBeans = new ArrayList<>(); + private final List mBeans = Collections.synchronizedList(new ArrayList<>()); /** {@inheritDoc} */ @Override public void spiStart(@Nullable String igniteInstanceName) throws IgniteSpiException { @@ -127,6 +128,10 @@ private void unregister(ReadOnlyMetricRegistry mreg) { unregBean(ignite, bean); } + /** + * @param ignite Ignite instance. + * @param bean Bean name to unregister. + */ private void unregBean(Ignite ignite, ObjectName bean) { MBeanServer jmx = ignite.configuration().getMBeanServer(); @@ -143,7 +148,7 @@ private void unregBean(Ignite ignite, ObjectName bean) { /** {@inheritDoc} */ @Override public void setMetricRegistry(ReadOnlyMetricManager reg) { - this.mreg = reg; + mreg = reg; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/ssl/SslContextFactory.java b/modules/core/src/main/java/org/apache/ignite/ssl/SslContextFactory.java index 7fe7c6fbea7ce2..4fef6636b3d4bc 100644 --- a/modules/core/src/main/java/org/apache/ignite/ssl/SslContextFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/ssl/SslContextFactory.java @@ -60,14 +60,25 @@ public class SslContextFactory implements Factory { /** */ private static final long serialVersionUID = 0L; - /** Default key store type. */ - public static final String DFLT_STORE_TYPE = "JKS"; + /** Default key / trust store type. */ + public static final String DFLT_STORE_TYPE = System.getProperty("javax.net.ssl.keyStoreType", "JKS"); /** Default SSL protocol. */ public static final String DFLT_SSL_PROTOCOL = "TLS"; - /** Default key manager algorithm. */ - public static final String DFLT_KEY_ALGORITHM = "SunX509"; + /** + * Property name to specify default key/trust manager algorithm. + * + * @deprecated Use {@code "ssl.KeyManagerFactory.algorithm"} instead as per JSSE standard. + * + * Should be considered for deletion in 9.0. 
+ */ + @Deprecated + public static final String IGNITE_KEY_ALGORITHM_PROPERTY = "ssl.key.algorithm"; + + /** Default key manager / trust manager algorithm. Specifying different trust manager algorithm is not supported. */ + public static final String DFLT_KEY_ALGORITHM = System.getProperty("ssl.KeyManagerFactory.algorithm", + System.getProperty(IGNITE_KEY_ALGORITHM_PROPERTY, "SunX509")); /** SSL protocol. */ private String proto = DFLT_SSL_PROTOCOL; @@ -178,8 +189,7 @@ public String getKeyAlgorithm() { } /** - * Sets key manager algorithm that will be used to create a key manager. Notice that in most cased default value - * suites well, however, on Android platform this value need to be set to X509. + * Sets key manager algorithm that will be used to create a key manager. * * @param keyAlgorithm Key algorithm name. */ diff --git a/modules/core/src/main/java/org/apache/ignite/util/AttributeNodeFilter.java b/modules/core/src/main/java/org/apache/ignite/util/AttributeNodeFilter.java index fed0d43f260198..70c5a2998e599c 100644 --- a/modules/core/src/main/java/org/apache/ignite/util/AttributeNodeFilter.java +++ b/modules/core/src/main/java/org/apache/ignite/util/AttributeNodeFilter.java @@ -18,6 +18,7 @@ package org.apache.ignite.util; import java.util.Collections; +import java.util.HashMap; import java.util.Map; import org.apache.ignite.cluster.ClusterGroup; import org.apache.ignite.cluster.ClusterNode; @@ -42,7 +43,7 @@ * attribute set to value {@code data}: *
      * <property name="nodeFilter">
    - *     <bean class="org.apache.ignite.util.ClusterAttributeNodeFilter">
    + *     <bean class="org.apache.ignite.util.AttributeNodeFilter">
      *         <constructor-arg value="group"/>
      *         <constructor-arg value="data"/>
      *     </bean>
    @@ -51,7 +52,7 @@
      * You can also specify multiple attributes for the filter:
      * 
      * <property name="nodeFilter">
    - *     <bean class="org.apache.ignite.util.ClusterAttributeNodeFilter">
    + *     <bean class="org.apache.ignite.util.AttributeNodeFilter">
      *         <constructor-arg>
      *             <map>
      *                 <entry key="cpu-group" value="high"/>
    @@ -105,4 +106,13 @@ public AttributeNodeFilter(Map attrs) {
     
             return true;
         }
    +
    +    /**
    +     * Gets attributes.
    +     *
    +     * @return Attributes collection.
    +     */
    +    public Map getAttrs() {
    +        return new HashMap<>(attrs);
    +    }
     }
    diff --git a/modules/core/src/main/resources/META-INF/classnames.properties b/modules/core/src/main/resources/META-INF/classnames.properties
    index deac9f8e57689b..3ec37dd7bd8ffa 100644
    --- a/modules/core/src/main/resources/META-INF/classnames.properties
    +++ b/modules/core/src/main/resources/META-INF/classnames.properties
    @@ -350,20 +350,7 @@ org.apache.ignite.internal.commandline.cache.argument.IdleVerifyCommandArg
     org.apache.ignite.internal.commandline.cache.argument.ListCommandArg
     org.apache.ignite.internal.commandline.cache.argument.PartitionReconciliationCommandArg
     org.apache.ignite.internal.commandline.cache.argument.ValidateIndexesCommandArg
    -org.apache.ignite.internal.commandline.cache.check_indexes_inline_size.CheckIndexInlineSizesResult
    -org.apache.ignite.internal.commandline.cache.check_indexes_inline_size.CheckIndexInlineSizesTask
     org.apache.ignite.internal.commandline.cache.check_indexes_inline_size.CheckIndexInlineSizesTask$CheckIndexInlineSizesJob
    -org.apache.ignite.internal.commandline.cache.distribution.CacheDistributionGroup
    -org.apache.ignite.internal.commandline.cache.distribution.CacheDistributionNode
    -org.apache.ignite.internal.commandline.cache.distribution.CacheDistributionPartition
    -org.apache.ignite.internal.commandline.cache.distribution.CacheDistributionTask
    -org.apache.ignite.internal.commandline.cache.distribution.CacheDistributionTask$CacheDistributionJob
    -org.apache.ignite.internal.commandline.cache.distribution.CacheDistributionTaskArg
    -org.apache.ignite.internal.commandline.cache.distribution.CacheDistributionTaskResult
    -org.apache.ignite.internal.commandline.cache.reset_lost_partitions.CacheResetLostPartitionsTask
    -org.apache.ignite.internal.commandline.cache.reset_lost_partitions.CacheResetLostPartitionsTask$CacheResetLostPartitionsJob
    -org.apache.ignite.internal.commandline.cache.reset_lost_partitions.CacheResetLostPartitionsTaskArg
    -org.apache.ignite.internal.commandline.cache.reset_lost_partitions.CacheResetLostPartitionsTaskResult
     org.apache.ignite.internal.commandline.diagnostic.DiagnosticSubCommand
     org.apache.ignite.internal.commandline.diagnostic.PageLocksCommand$PageLocksCommandArg
     org.apache.ignite.internal.commandline.dr.DrSubCommandsList
    @@ -373,16 +360,8 @@ org.apache.ignite.internal.commandline.dr.subcommands.DrCacheCommand$SenderGroup
     org.apache.ignite.internal.commandline.management.ManagementCommandList
     org.apache.ignite.internal.commandline.management.ManagementURLCommandArg
     org.apache.ignite.internal.commandline.meta.subcommands.MetadataAbstractSubCommand.VoidDto
    -org.apache.ignite.internal.commandline.meta.tasks.MetadataListResult
    -org.apache.ignite.internal.commandline.meta.tasks.MetadataInfoTask
    -org.apache.ignite.internal.commandline.meta.tasks.MetadataInfoTask.MetadataListJob
    -org.apache.ignite.internal.commandline.meta.tasks.MetadataMarshalled
    -org.apache.ignite.internal.commandline.meta.tasks.MetadataRemoveTask
     org.apache.ignite.internal.commandline.meta.tasks.MetadataRemoveTask$MetadataRemoveJob
     org.apache.ignite.internal.commandline.meta.tasks.MetadataRemoveTask$DropAllThinSessionsJob
    -org.apache.ignite.internal.commandline.meta.tasks.MetadataTypeArgs
    -org.apache.ignite.internal.commandline.meta.tasks.MetadataUpdateTask
    -org.apache.ignite.internal.commandline.meta.tasks.MetadataUpdateTask.MetadataUpdateJob
     org.apache.ignite.internal.commandline.property.tasks.PropertiesListResult
     org.apache.ignite.internal.commandline.property.tasks.PropertiesListTask
     org.apache.ignite.internal.commandline.property.tasks.PropertyOperationResult
    @@ -2285,6 +2264,7 @@ org.apache.ignite.internal.visor.query.VisorRunningQuery
     org.apache.ignite.internal.visor.query.VisorScanQueryTask
     org.apache.ignite.internal.visor.query.VisorScanQueryTask$VisorScanQueryJob
     org.apache.ignite.internal.visor.query.VisorScanQueryTaskArg
    +org.apache.ignite.internal.visor.persistence.PersistenceTaskResult
     org.apache.ignite.internal.visor.service.VisorCancelServiceTask
     org.apache.ignite.internal.visor.service.VisorCancelServiceTask$VisorCancelServiceJob
     org.apache.ignite.internal.visor.service.VisorCancelServiceTaskArg
    @@ -2329,6 +2309,25 @@ org.apache.ignite.internal.visor.encryption.VisorGetMasterKeyNameTask
     org.apache.ignite.internal.visor.encryption.VisorGetMasterKeyNameTask$VisorGetMasterKeyNameJob
     org.apache.ignite.internal.visor.encryption.VisorChangeMasterKeyTask
     org.apache.ignite.internal.visor.encryption.VisorChangeMasterKeyTask$VisorChangeMasterKeyJob
    +org.apache.ignite.internal.visor.encryption.VisorCacheGroupEncryptionTaskArg
    +org.apache.ignite.internal.visor.encryption.VisorCacheGroupEncryptionTaskResult
    +org.apache.ignite.internal.visor.encryption.VisorChangeCacheGroupKeyTask
    +org.apache.ignite.internal.visor.encryption.VisorChangeCacheGroupKeyTask$VisorChangeCacheGroupKeyJob
    +org.apache.ignite.internal.visor.encryption.VisorEncryptionKeyIdsTask
    +org.apache.ignite.internal.visor.encryption.VisorEncryptionKeyIdsTask$VisorEncryptionKeyIdsJob
    +org.apache.ignite.internal.visor.encryption.VisorEncryptionKeyIdsTask$VisorEncryptionKeyIdsResult
    +org.apache.ignite.internal.visor.encryption.VisorReencryptionRateTask
    +org.apache.ignite.internal.visor.encryption.VisorReencryptionRateTask$VisorReencryptionRateJob
    +org.apache.ignite.internal.visor.encryption.VisorReencryptionRateTask$ReencryptionRateJobResult
    +org.apache.ignite.internal.visor.encryption.VisorReencryptionRateTaskArg
    +org.apache.ignite.internal.visor.encryption.VisorReencryptionResumeTask
    +org.apache.ignite.internal.visor.encryption.VisorReencryptionResumeTask$VisorReencryptionResumeJob
    +org.apache.ignite.internal.visor.encryption.VisorReencryptionStatusTask
    +org.apache.ignite.internal.visor.encryption.VisorReencryptionStatusTask$VisorReencryptionStatusJob
    +org.apache.ignite.internal.visor.encryption.VisorReencryptionStatusTask$VisorReencryptionStatusResult
    +org.apache.ignite.internal.visor.encryption.VisorReencryptionSuspendTask
    +org.apache.ignite.internal.visor.encryption.VisorReencryptionSuspendTask$VisorReencryptionSuspendJob
    +org.apache.ignite.internal.visor.encryption.VisorReencryptionSuspendTask$VisorReencryptionSuspendResumeJobResult
     org.apache.ignite.internal.visor.util.VisorClusterGroupEmptyException
     org.apache.ignite.internal.visor.util.VisorEventMapper
     org.apache.ignite.internal.visor.util.VisorExceptionWrapper
    diff --git a/modules/core/src/test/java/org/apache/ignite/client/ClientConfigurationTest.java b/modules/core/src/test/java/org/apache/ignite/client/ClientConfigurationTest.java
    index 287c6ec6793bcb..dcb78efa5bdcbd 100644
    --- a/modules/core/src/test/java/org/apache/ignite/client/ClientConfigurationTest.java
    +++ b/modules/core/src/test/java/org/apache/ignite/client/ClientConfigurationTest.java
    @@ -42,6 +42,8 @@
     import org.junit.Test;
     import org.junit.rules.Timeout;
     
    +import static org.apache.ignite.ssl.SslContextFactory.DFLT_KEY_ALGORITHM;
    +import static org.apache.ignite.ssl.SslContextFactory.DFLT_STORE_TYPE;
     import static org.junit.Assert.assertTrue;
     
     /**
    @@ -63,12 +65,12 @@ public void testSerialization() throws IOException, ClassNotFoundException {
                 )
                 .setSslMode(SslMode.REQUIRED)
                 .setSslClientCertificateKeyStorePath("client.jks")
    -            .setSslClientCertificateKeyStoreType("JKS")
    +            .setSslClientCertificateKeyStoreType(DFLT_STORE_TYPE)
                 .setSslClientCertificateKeyStorePassword("123456")
                 .setSslTrustCertificateKeyStorePath("trust.jks")
    -            .setSslTrustCertificateKeyStoreType("JKS")
    +            .setSslTrustCertificateKeyStoreType(DFLT_STORE_TYPE)
                 .setSslTrustCertificateKeyStorePassword("123456")
    -            .setSslKeyAlgorithm("SunX509");
    +            .setSslKeyAlgorithm(DFLT_KEY_ALGORITHM);
     
             ByteArrayOutputStream outBytes = new ByteArrayOutputStream();
     
    diff --git a/modules/core/src/test/java/org/apache/ignite/client/ConnectToStartingNodeTest.java b/modules/core/src/test/java/org/apache/ignite/client/ConnectToStartingNodeTest.java
    index bbb2c87ceafea0..2d75d5c5266f1e 100644
    --- a/modules/core/src/test/java/org/apache/ignite/client/ConnectToStartingNodeTest.java
    +++ b/modules/core/src/test/java/org/apache/ignite/client/ConnectToStartingNodeTest.java
    @@ -71,14 +71,20 @@ public void testClientConnectBeforeDiscoveryStart() throws Exception {
             IgniteInternalFuture futStartClient = GridTestUtils.runAsync(
                 () -> startClient(grid()));
     
    -        // Server doesn't accept connection before discovery SPI started.
    -        assertFalse(GridTestUtils.waitForCondition(futStartClient::isDone, 500L));
    +        try {
    +            // Server doesn't accept connection before discovery SPI started.
    +            assertFalse(GridTestUtils.waitForCondition(futStartClient::isDone, 500L));
     
    -        barrier.await();
    +            barrier.await();
     
    -        futStartGrid.get();
    +            futStartGrid.get();
     
    -        // Server accept connection after discovery SPI started.
    -        assertTrue(GridTestUtils.waitForCondition(futStartClient::isDone, 500L));
    +            // Server accept connection after discovery SPI started.
    +            assertTrue(GridTestUtils.waitForCondition(futStartClient::isDone, 500L));
    +        }
    +        finally {
    +            if (futStartClient.isDone())
    +                futStartClient.get().close();
    +        }
         }
     }
    diff --git a/modules/core/src/test/java/org/apache/ignite/client/FunctionalTest.java b/modules/core/src/test/java/org/apache/ignite/client/FunctionalTest.java
    index caa39e963f0d3b..5291baca6f949b 100644
    --- a/modules/core/src/test/java/org/apache/ignite/client/FunctionalTest.java
    +++ b/modules/core/src/test/java/org/apache/ignite/client/FunctionalTest.java
    @@ -34,8 +34,6 @@
     import java.util.concurrent.BrokenBarrierException;
     import java.util.concurrent.CountDownLatch;
     import java.util.concurrent.CyclicBarrier;
    -import java.util.concurrent.ForkJoinPool;
    -import java.util.concurrent.Future;
     import java.util.concurrent.TimeUnit;
     import java.util.stream.Collectors;
     import java.util.stream.IntStream;
    @@ -612,14 +610,14 @@ private void testPessimisticTxLocking(TransactionIsolation isolation) throws Exc
                 );
                 cache.put(0, "value0");
     
    -            Future fut;
    +            IgniteInternalFuture fut;
     
                 try (ClientTransaction tx = client.transactions().txStart(PESSIMISTIC, isolation)) {
                     assertEquals("value0", cache.get(0));
     
                     CyclicBarrier barrier = new CyclicBarrier(2);
     
    -                fut = ForkJoinPool.commonPool().submit(() -> {
    +                fut = GridTestUtils.runAsync(() -> {
                         try (ClientTransaction tx2 = client.transactions().txStart(OPTIMISTIC, REPEATABLE_READ, 500)) {
                             cache.put(0, "value2");
                             tx2.commit();
    @@ -663,7 +661,7 @@ public void testOptimitsticSerializableTransactionHoldsLock() throws Exception {
                 try (ClientTransaction tx = client.transactions().txStart(OPTIMISTIC, SERIALIZABLE)) {
                     assertEquals("value0", cache.get(0));
     
    -                Future fut = ForkJoinPool.commonPool().submit(() -> {
    +                IgniteInternalFuture fut = GridTestUtils.runAsync(() -> {
                         try (ClientTransaction tx2 = client.transactions().txStart(OPTIMISTIC, REPEATABLE_READ)) {
                             cache.put(0, "value2");
                             tx2.commit();
    @@ -708,15 +706,13 @@ public void testOptimitsticRepeatableReadUpdatesValue() throws Exception {
     
                     cache.put(0, "value1");
     
    -                Future f = ForkJoinPool.commonPool().submit(() -> {
    +                GridTestUtils.runAsync(() -> {
                         assertEquals("value0", cache.get(0));
     
                         cache.put(0, "value2");
     
                         assertEquals("value2", cache.get(0));
    -                });
    -
    -                f.get();
    +                }).get();
     
                     tx.commit();
                 }
    @@ -962,7 +958,7 @@ public void testTransactions() throws Exception {
     
                     cache.put(0, "value18");
     
    -                Future fut = ForkJoinPool.commonPool().submit(() -> {
    +                IgniteInternalFuture fut = GridTestUtils.runAsync(() -> {
                         try (ClientTransaction tx1 = client.transactions().txStart(PESSIMISTIC, READ_COMMITTED)) {
                             cache.put(1, "value19");
     
    @@ -1002,7 +998,7 @@ public void testTransactions() throws Exception {
                 try (ClientTransaction tx = client.transactions().txStart(PESSIMISTIC, READ_COMMITTED)) {
                     cache.put(0, "value20");
     
    -                ForkJoinPool.commonPool().submit(() -> {
    +                GridTestUtils.runAsync(() -> {
                         // Implicit transaction started here.
                         cache.put(1, "value21");
     
    @@ -1041,7 +1037,7 @@ public void testTransactions() throws Exception {
                     // Start implicit transaction after explicit transaction has been closed by another thread.
                     cache.put(0, "value22");
     
    -                ForkJoinPool.commonPool().submit(() -> assertEquals("value22", cache.get(0))).get();
    +                GridTestUtils.runAsync(() -> assertEquals("value22", cache.get(0))).get();
     
                     // New explicit transaction can be started after current transaction has been closed by another thread.
                     try (ClientTransaction tx1 = client.transactions().txStart(PESSIMISTIC, READ_COMMITTED)) {
    @@ -1092,7 +1088,7 @@ public void testTransactions() throws Exception {
                 // Test that implicit transaction started after commit of previous one without closing.
                 cache.put(0, "value24");
     
    -            ForkJoinPool.commonPool().submit(() -> assertEquals("value24", cache.get(0))).get();
    +            GridTestUtils.runAsync(() -> assertEquals("value24", cache.get(0))).get();
             }
         }
     
    diff --git a/modules/core/src/test/java/org/apache/ignite/client/SslParametersTest.java b/modules/core/src/test/java/org/apache/ignite/client/SslParametersTest.java
    index 0f0791b8cb8267..c6def06f3f559a 100644
    --- a/modules/core/src/test/java/org/apache/ignite/client/SslParametersTest.java
    +++ b/modules/core/src/test/java/org/apache/ignite/client/SslParametersTest.java
    @@ -288,7 +288,7 @@ private void checkClientStartFailure(String[] cipherSuites, String[] protocols)
                 cipherSuites,
                 protocols,
                 ClientConnectionException.class,
    -            "Ignite cluster is unavailable"
    +            "SSL handshake failed"
             );
         }
     
    @@ -307,7 +307,7 @@ private void checkClientStartFailure(
             this.cipherSuites = F.isEmpty(cipherSuites) ? null : cipherSuites;
             this.protocols = F.isEmpty(protocols) ? null : protocols;
     
    -        GridTestUtils.assertThrows(
    +        GridTestUtils.assertThrowsAnyCause(
                 null,
                 new Callable() {
                     @Override public Object call() {
    diff --git a/modules/core/src/test/java/org/apache/ignite/failure/SystemWorkersBlockingTest.java b/modules/core/src/test/java/org/apache/ignite/failure/SystemWorkersBlockingTest.java
    index 8455f878ef6a5e..ccfc50750d6b76 100644
    --- a/modules/core/src/test/java/org/apache/ignite/failure/SystemWorkersBlockingTest.java
    +++ b/modules/core/src/test/java/org/apache/ignite/failure/SystemWorkersBlockingTest.java
    @@ -17,9 +17,12 @@
     
     package org.apache.ignite.failure;
     
    +import java.util.Arrays;
     import java.util.HashSet;
     import java.util.Set;
     import java.util.concurrent.CountDownLatch;
    +import java.util.concurrent.ExecutorService;
    +import java.util.concurrent.Executors;
     import java.util.concurrent.TimeUnit;
     import java.util.concurrent.atomic.AtomicReference;
     import java.util.concurrent.locks.LockSupport;
    @@ -30,8 +33,8 @@
     import org.apache.ignite.internal.util.worker.GridWorker;
     import org.apache.ignite.internal.worker.WorkersRegistry;
     import org.apache.ignite.testframework.GridTestUtils;
    +import org.apache.ignite.testframework.junits.GridAbstractTest;
     import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
    -import org.apache.ignite.thread.IgniteThread;
     import org.junit.Test;
     
     /**
    @@ -44,6 +47,12 @@ public class SystemWorkersBlockingTest extends GridCommonAbstractTest {
         /** Handler latch. */
         private final CountDownLatch hndLatch = new CountDownLatch(1);
     
    +    /** Blocking thread latch. */
    +    private final CountDownLatch blockLatch = new CountDownLatch(1);
    +
    +    /** Worker executor. */
    +    private final ExecutorService workerExecutor = Executors.newSingleThreadExecutor();
    +
         /** Reference to failure error. */
         private final AtomicReference failureError = new AtomicReference<>();
     
    @@ -81,6 +90,13 @@ public class SystemWorkersBlockingTest extends GridCommonAbstractTest {
         @Override protected void afterTest() throws Exception {
             super.afterTest();
     
    +        blockLatch.countDown();
    +
    +        if (!workerExecutor.isTerminated()) {
    +            workerExecutor.shutdownNow();
    +            workerExecutor.awaitTermination(2 * SYSTEM_WORKER_BLOCKED_TIMEOUT, TimeUnit.MILLISECONDS);
    +        }
    +
             stopAllGrids();
         }
     
    @@ -91,34 +107,23 @@ public class SystemWorkersBlockingTest extends GridCommonAbstractTest {
         public void testBlockingWorker() throws Exception {
             IgniteEx ignite = startGrid(0);
     
    -        CountDownLatch blockLatch = new CountDownLatch(1);
    +        GridWorker worker = new LatchingGridWorker(ignite);
     
    -        GridWorker worker = new GridWorker(ignite.name(), "test-worker", log) {
    -            @Override protected void body() throws InterruptedException {
    -                blockLatch.await();
    -            }
    -        };
    +        runWorker(worker);
     
    -        IgniteThread runner = null;
    -        try {
    -            runner = runWorker(worker);
    +        ignite.context().workersRegistry().register(worker);
     
    -            ignite.context().workersRegistry().register(worker);
    +        assertTrue(hndLatch.await(ignite.configuration().getFailureDetectionTimeout() * 2,
    +            TimeUnit.MILLISECONDS));
     
    -            assertTrue(hndLatch.await(SYSTEM_WORKER_BLOCKED_TIMEOUT * 2, TimeUnit.MILLISECONDS));
    +        Throwable blockedException = failureError.get();
    
    -            Throwable err = failureError.get();
    -
    -            assertNotNull(err);
    -            assertTrue(err.getMessage() != null && err.getMessage().contains("test-worker"));
    -        }
    -        finally {
    -            if (runner != null) {
    -                blockLatch.countDown();
    +        assertNotNull(blockedException);
     
    -                runner.join(SYSTEM_WORKER_BLOCKED_TIMEOUT);
    -            }
    -        }
    +        assertTrue(Arrays.stream(blockedException.getStackTrace()).anyMatch(
    +            e -> CountDownLatch.class.getName().equals(e.getClassName())));
    +        assertTrue(Arrays.stream(blockedException.getStackTrace()).anyMatch(
    +            e -> LatchingGridWorker.class.getName().equals(e.getClassName())));
         }
     
         /**
    @@ -145,26 +150,37 @@ public void testSingleWorker_NotInInfiniteLoop() throws Exception {
                 }
             };
     
    -        IgniteThread runner = runWorker(worker);
    +        runWorker(worker);
     
             Thread.sleep(2 * SYSTEM_WORKER_BLOCKED_TIMEOUT);
     
    -        runner.interrupt();
    +        workerExecutor.shutdownNow();
     
    -        assertTrue(finishLatch.await(SYSTEM_WORKER_BLOCKED_TIMEOUT, TimeUnit.MILLISECONDS));
    +        assertTrue(workerExecutor.awaitTermination(SYSTEM_WORKER_BLOCKED_TIMEOUT, TimeUnit.MILLISECONDS));
         }
     
         /**
    -     * @param worker Grid worker to run.
    -     * @return Thread, running worker.
    +     * Run worker and wait for its initialization.
    +     *
    +     * @param worker GridWorker to run.
    +     * @throws IgniteInterruptedCheckedException If wait is interrupted.
          */
    -    private IgniteThread runWorker(GridWorker worker) throws IgniteInterruptedCheckedException {
    -        IgniteThread runner = new IgniteThread(worker);
    -
    -        runner.start();
    +    private void runWorker(GridWorker worker) throws IgniteInterruptedCheckedException {
    +        workerExecutor.execute(worker);
     
             GridTestUtils.waitForCondition(() -> worker.runner() != null, 100);
    +    }
     
    -        return runner;
    +    /** */
    +    private class LatchingGridWorker extends GridWorker {
    +        /** */
    +        public LatchingGridWorker(IgniteEx ignite) {
    +            super(ignite.name(), "test-worker", GridAbstractTest.log);
    +        }
    +
    +        /** */
    +        @Override protected void body() throws InterruptedException {
    +            blockLatch.await();
    +        }
         }
     }
    diff --git a/modules/core/src/test/java/org/apache/ignite/internal/GridAffinityNoCacheSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/GridAffinityNoCacheSelfTest.java
    index 8faa98f6cbe9ba..3208d2e992852d 100644
    --- a/modules/core/src/test/java/org/apache/ignite/internal/GridAffinityNoCacheSelfTest.java
    +++ b/modules/core/src/test/java/org/apache/ignite/internal/GridAffinityNoCacheSelfTest.java
    @@ -235,6 +235,10 @@ private TestCacheObject(Object val) {
     
             /** {@inheritDoc} */
             @Nullable @Override public <T> T value(CacheObjectValueContext ctx, boolean cpy) {
    +            return value(ctx, cpy, null);
    +        }
    +
    +        @Override public <T> @Nullable T value(CacheObjectValueContext ctx, boolean cpy, ClassLoader ldr) {
                 A.notNull(ctx, "ctx");
     
                 return (T)val;
    diff --git a/modules/core/src/test/java/org/apache/ignite/internal/GridCacheHashMapPutAllWarningsTest.java b/modules/core/src/test/java/org/apache/ignite/internal/GridCacheHashMapPutAllWarningsTest.java
    index 1e49d909f7bc36..9b59bd536d0b23 100644
    --- a/modules/core/src/test/java/org/apache/ignite/internal/GridCacheHashMapPutAllWarningsTest.java
    +++ b/modules/core/src/test/java/org/apache/ignite/internal/GridCacheHashMapPutAllWarningsTest.java
    @@ -31,6 +31,7 @@
     import javax.cache.processor.MutableEntry;
     import org.apache.ignite.Ignite;
     import org.apache.ignite.IgniteCache;
    +import org.apache.ignite.IgniteSystemProperties;
     import org.apache.ignite.cache.CacheAtomicityMode;
     import org.apache.ignite.cache.CacheMode;
     import org.apache.ignite.configuration.CacheConfiguration;
    @@ -41,6 +42,7 @@
     import org.apache.ignite.transactions.Transaction;
     import org.apache.ignite.transactions.TransactionConcurrency;
     import org.apache.ignite.transactions.TransactionIsolation;
    +import org.junit.Assume;
     import org.junit.Test;
     
     /**
    @@ -82,7 +84,8 @@ public void testHashMapPutAllExactMessage() throws Exception {
     
             Ignite ignite = startGrid(0);
     
    -        IgniteCache c = ignite.getOrCreateCache(new CacheConfiguration<>("exact"));
    +        IgniteCache c = ignite.getOrCreateCache(new CacheConfiguration("exact")
    +            .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL));
     
             HashMap m = new HashMap<>();
     
    @@ -150,6 +153,9 @@ public void testHashMapPutAllExplicitOptimistic() throws Exception {
          */
         @Test
         public void testHashMapInvokeAllLocal() throws Exception {
    +        Assume.assumeFalse("Local transactional caches not supported by MVCC",
    +            IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_FORCE_MVCC_MODE_IN_TESTS, false));
    +
             List messages = Collections.synchronizedList(new ArrayList<>());
     
             testLog = new ListeningTestLogger(false, log());
    @@ -162,7 +168,7 @@ public void testHashMapInvokeAllLocal() throws Exception {
             Ignite ignite = startGrid(0);
     
             IgniteCache c = ignite.getOrCreateCache(new CacheConfiguration("invoke")
    -            .setCacheMode(CacheMode.LOCAL));
    +            .setCacheMode(CacheMode.LOCAL).setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL));
     
             c.put(1, "foo");
             c.put(2, "bar");
    @@ -381,4 +387,42 @@ public void testHashSetGetAllTx() throws Exception {
     
             assertEquals(1, found);
         }
    +
    +    /**
    +     * @throws Exception If failed.
    +     */
    +    @Test
    +    public void testHashMapAtomic() throws Exception {
    +        List<String> messages = Collections.synchronizedList(new ArrayList<>());
    +
    +        testLog = new ListeningTestLogger(log());
    +
    +        testLog.registerListener((s) -> {
    +            if (s.contains("deadlock"))
    +                messages.add(s);
    +        });
    +
    +        Ignite ignite = startGrid(0);
    +
    +        IgniteCache c = ignite.getOrCreateCache(new CacheConfiguration("atomic")
    +            .setAtomicityMode(CacheAtomicityMode.ATOMIC));
    +
    +        HashMap m = new HashMap<>();
    +
    +        m.put(1, "foo");
    +        m.put(2, "bar");
    +
    +        c.putAll(m);
    +        c.invokeAll(m.keySet(), (k, v) -> v);
    +        c.removeAll(m.keySet());
    +        c.removeAll();
    +
    +        assertEquals(0, c.size());
    +
    +        for (String message : messages) {
    +            assertFalse(message.contains("Unordered "));
    +
    +            assertFalse(message.contains("operation on cache"));
    +        }
    +    }
     }
    diff --git a/modules/core/src/test/java/org/apache/ignite/internal/GridNodeMetricsLogSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/GridNodeMetricsLogSelfTest.java
    index 35579949c37301..57a159941e9330 100644
    --- a/modules/core/src/test/java/org/apache/ignite/internal/GridNodeMetricsLogSelfTest.java
    +++ b/modules/core/src/test/java/org/apache/ignite/internal/GridNodeMetricsLogSelfTest.java
    @@ -25,6 +25,7 @@
     import org.apache.ignite.IgniteCache;
     import org.apache.ignite.configuration.ExecutorConfiguration;
     import org.apache.ignite.configuration.IgniteConfiguration;
    +import org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor;
     import org.apache.ignite.internal.util.typedef.F;
     import org.apache.ignite.testframework.GridStringLogger;
     import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
    @@ -163,11 +164,14 @@ protected void checkDataRegionsMetrics(String logOutput) {
                 } else
                     assertTrue(F.isEmpty(matcher.group("total")));
     
    -            regions.add(matcher.group("name").trim());
    +            String regName = matcher.group("name").trim();
    +
    +            regions.add(regName);
             }
     
             Set expRegions = grid(0).context().cache().context().database().dataRegions().stream()
                 .map(v -> v.config().getName().trim())
    +            .filter(regName -> !DataStructuresProcessor.VOLATILE_DATA_REGION_NAME.equals(regName))
                 .collect(Collectors.toSet());
     
             assertFalse("No data regions in the log.", regions.isEmpty());
    diff --git a/modules/core/src/test/java/org/apache/ignite/internal/IgniteNodeValidationFailedEventTest.java b/modules/core/src/test/java/org/apache/ignite/internal/IgniteNodeValidationFailedEventTest.java
    new file mode 100644
    index 00000000000000..6989144d01027a
    --- /dev/null
    +++ b/modules/core/src/test/java/org/apache/ignite/internal/IgniteNodeValidationFailedEventTest.java
    @@ -0,0 +1,106 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *      http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.ignite.internal;
    +
    +import java.util.concurrent.CountDownLatch;
    +import java.util.concurrent.atomic.AtomicReference;
    +import org.apache.ignite.configuration.IgniteConfiguration;
    +import org.apache.ignite.events.Event;
    +import org.apache.ignite.events.NodeValidationFailedEvent;
    +import org.apache.ignite.internal.processors.security.impl.TestSecurityPluginProvider;
    +import org.apache.ignite.spi.IgniteNodeValidationResult;
    +import org.apache.ignite.spi.IgniteSpiException;
    +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
    +import org.junit.Test;
    +
    +import static org.apache.ignite.events.EventType.EVT_NODE_VALIDATION_FAILED;
    +import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_NODE_CONSISTENT_ID;
    +import static org.apache.ignite.plugin.security.SecurityPermissionSetBuilder.ALLOW_ALL;
    +import static org.apache.ignite.testframework.GridTestUtils.assertThrowsWithCause;
    +
    +/** Tests joining node validation failed event. */
    +public class IgniteNodeValidationFailedEventTest extends GridCommonAbstractTest {
    +    /** {@inheritDoc} */
    +    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
    +        return super.getConfiguration(igniteInstanceName)
    +            .setIncludeEventTypes(EVT_NODE_VALIDATION_FAILED)
    +            .setConsistentId(igniteInstanceName);
    +    }
    +
    +    /** {@inheritDoc} */
    +    @Override protected void afterTest() throws Exception {
    +        super.afterTest();
    +
    +        stopAllGrids();
    +    }
    +
    +    /** */
    +    @Test
    +    public void testNodeValidationFailedEvent() throws Exception {
    +        startGrid(0);
    +
    +        CountDownLatch evtLatch = new CountDownLatch(1);
    +
    +        AtomicReference<Event> listenedEvtRef = new AtomicReference<>();
    +
    +        grid(0).events().localListen(evt -> {
    +            assertTrue(listenedEvtRef.compareAndSet(null, evt));
    +
    +            evtLatch.countDown();
    +
    +            return true;
    +        }, EVT_NODE_VALIDATION_FAILED);
    +
    +        startGrid(1);
    +
    +        String invalidNodeName = getTestIgniteInstanceName(2);
    +
    +        IgniteConfiguration invalidCfg = getConfiguration(invalidNodeName)
    +            .setPluginProviders(new TestSecurityPluginProvider("login", "", ALLOW_ALL, false));
    +
    +        assertThrowsWithCause(() -> startGrid(optimize(invalidCfg)), IgniteSpiException.class);
    +
    +        evtLatch.await();
    +
    +        Event listenedEvt = listenedEvtRef.get();
    +
    +        assertTrue(listenedEvt instanceof NodeValidationFailedEvent);
    +
    +        NodeValidationFailedEvent validationEvt = (NodeValidationFailedEvent)listenedEvt;
    +
    +        assertEquals(invalidNodeName, validationEvt.eventNode().attribute(ATTR_NODE_CONSISTENT_ID));
    +
    +        IgniteNodeValidationResult validationRes = validationEvt.validationResult();
    +
    +        assertNotNull(validationRes);
    +
    +        String errMsg = validationRes.message();
    +
    +        assertNotNull(errMsg);
    +        assertTrue(errMsg.contains(
    +            "Local node's grid security processor class is not equal to remote node's grid security processor class"));
    +    }
    +
    +    /** */
    +    @Test
    +    public void testEventDisabledByDefault() throws Exception {
    +        IgniteEx ignite = startGrid(super.getConfiguration(getTestIgniteInstanceName(0)));
    +
    +        assertFalse(ignite.context().event().isRecordable(EVT_NODE_VALIDATION_FAILED));
    +    }
    +}
    diff --git a/modules/core/src/test/java/org/apache/ignite/internal/client/thin/AbstractThinClientTest.java b/modules/core/src/test/java/org/apache/ignite/internal/client/thin/AbstractThinClientTest.java
    index b985a77d5055aa..2e2f21d491c95c 100644
    --- a/modules/core/src/test/java/org/apache/ignite/internal/client/thin/AbstractThinClientTest.java
    +++ b/modules/core/src/test/java/org/apache/ignite/internal/client/thin/AbstractThinClientTest.java
    @@ -38,6 +38,24 @@ protected ClientConfiguration getClientConfiguration() {
             return new ClientConfiguration();
         }
     
    +    /**
    +     * Return thin client port for given node.
    +     *
    +     * @param node Node.
    +     */
    +    protected int clientPort(ClusterNode node) {
    +        return node.attribute(ClientListenerProcessor.CLIENT_LISTENER_PORT);
    +    }
    +
    +    /**
    +     * Return host for given node.
    +     *
    +     * @param node Node.
    +     */
    +    protected String clientHost(ClusterNode node) {
    +        return F.first(node.addresses());
    +    }
    +
         /**
          * Start thin client with configured endpoints to specified nodes.
          *
    @@ -50,7 +68,7 @@ protected IgniteClient startClient(ClusterNode... nodes) {
             for (int i = 0; i < nodes.length; i++) {
                 ClusterNode node = nodes[i];
     
    -            addrs[i] = F.first(node.addresses()) + ":" + node.attribute(ClientListenerProcessor.CLIENT_LISTENER_PORT);
    +            addrs[i] = clientHost(node) + ":" + clientPort(node);
             }
     
             return Ignition.startClient(getClientConfiguration().setAddresses(addrs));
    diff --git a/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ReliableChannelTest.java b/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ReliableChannelTest.java
    index 61adf6684e5a18..686a193f7cd521 100644
    --- a/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ReliableChannelTest.java
    +++ b/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ReliableChannelTest.java
    @@ -26,6 +26,7 @@
     import java.util.UUID;
     import java.util.concurrent.CompletableFuture;
     import java.util.concurrent.atomic.AtomicInteger;
    +import java.util.function.BiFunction;
     import java.util.function.Consumer;
     import java.util.function.Function;
     import java.util.function.Supplier;
    @@ -35,6 +36,7 @@
     import org.apache.ignite.client.ClientConnectionException;
     import org.apache.ignite.client.ClientException;
     import org.apache.ignite.configuration.ClientConfiguration;
    +import org.apache.ignite.internal.client.thin.io.ClientConnectionMultiplexer;
     import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
     import org.apache.ignite.internal.util.typedef.F;
     import org.apache.ignite.testframework.GridTestUtils;
    @@ -51,7 +53,8 @@
      */
     public class ReliableChannelTest {
         /** Mock factory for creating new channels. */
    -    private final Function chFactory = cfg -> new TestClientChannel();
    +    private final BiFunction chFactory =
    +            (cfg, hnd) -> new TestClientChannel();
     
         /** */
         private final String[] dfltAddrs = new String[]{"127.0.0.1:10800", "127.0.0.1:10801", "127.0.0.1:10802"};
    @@ -259,7 +262,7 @@ public void testFailOnInitIfDefaultChannelFailed() {
                 .setAddresses(dfltAddrs)
                 .setPartitionAwarenessEnabled(true);
     
    -        ReliableChannel rc = new ReliableChannel(cfg -> new TestFailureClientChannel(), ccfg, null);
    +        ReliableChannel rc = new ReliableChannel((cfg, hnd) -> new TestFailureClientChannel(), ccfg, null);
     
             rc.channelsInit();
         }
    @@ -302,7 +305,7 @@ private void checkFailAfterSendOperation(Consumer op, boolean ch
             // Emulate cluster is down after TcpClientChannel#send operation.
             AtomicInteger step = new AtomicInteger();
     
    -        ReliableChannel rc = new ReliableChannel(cfg -> {
    +        ReliableChannel rc = new ReliableChannel((cfg, hnd) -> {
                 if (step.getAndIncrement() == 0)
                     return new TestAsyncServiceFailureClientChannel();
                 else
    diff --git a/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ThinClientAbstractPartitionAwarenessTest.java b/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ThinClientAbstractPartitionAwarenessTest.java
    index dd716d616afeb5..7eda71fd4d5970 100644
    --- a/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ThinClientAbstractPartitionAwarenessTest.java
    +++ b/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ThinClientAbstractPartitionAwarenessTest.java
    @@ -36,6 +36,7 @@
     import org.apache.ignite.configuration.ClientConfiguration;
     import org.apache.ignite.configuration.IgniteConfiguration;
     import org.apache.ignite.internal.IgniteInterruptedCheckedException;
    +import org.apache.ignite.internal.client.thin.io.ClientConnectionMultiplexer;
     import org.apache.ignite.internal.processors.cache.IgniteInternalCache;
     import org.apache.ignite.internal.util.typedef.T2;
     import org.apache.ignite.internal.util.typedef.internal.U;
    @@ -185,11 +186,11 @@ protected ClientConfiguration getClientConfiguration(int... nodeIdxs) {
          * @param chIdxs Channels to wait for initialization.
          */
         protected void initClient(ClientConfiguration clientCfg, int... chIdxs) throws IgniteInterruptedCheckedException {
    -        client = new TcpIgniteClient(cfg -> {
    +        client = new TcpIgniteClient((cfg, hnd) -> {
                 try {
                     log.info("Establishing connection to " + cfg.getAddress());
     
    -                TcpClientChannel ch = new TestTcpClientChannel(cfg);
    +                TcpClientChannel ch = new TestTcpClientChannel(cfg, hnd);
     
                     log.info("Channel initialized: " + ch);
     
    @@ -323,8 +324,8 @@ protected class TestTcpClientChannel extends TcpClientChannel {
             /**
              * @param cfg Config.
              */
    -        public TestTcpClientChannel(ClientChannelConfiguration cfg) {
    -            super(cfg);
    +        public TestTcpClientChannel(ClientChannelConfiguration cfg, ClientConnectionMultiplexer hnd) {
    +            super(cfg, hnd);
     
                 this.cfg = cfg;
     
    diff --git a/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ThinClientPartitionAwarenessResourceReleaseTest.java b/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ThinClientPartitionAwarenessResourceReleaseTest.java
    index 7dc62220034708..2909c4e9d60cfa 100644
    --- a/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ThinClientPartitionAwarenessResourceReleaseTest.java
    +++ b/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ThinClientPartitionAwarenessResourceReleaseTest.java
    @@ -23,13 +23,13 @@
     import org.apache.ignite.testframework.GridTestUtils;
     import org.junit.Test;
     
    -import static org.apache.ignite.internal.client.thin.ReliableChannel.ASYNC_RUNNER_THREAD_NAME;
    -import static org.apache.ignite.internal.client.thin.TcpClientChannel.RECEIVER_THREAD_PREFIX;
    -
     /**
      * Test resource releasing by thin client.
      */
     public class ThinClientPartitionAwarenessResourceReleaseTest extends ThinClientAbstractPartitionAwarenessTest {
    +    /** Worker thread prefix. */
    +    private static final String THREAD_PREFIX = "thin-client-channel";
    +
         /**
          * Test that resources are correctly released after closing client with partition awareness.
          */
    @@ -46,15 +46,13 @@ public void testResourcesReleasedAfterClientClosed() throws Exception {
     
             assertFalse(channels[0].isClosed());
             assertFalse(channels[1].isClosed());
    -        assertEquals(1, threadsCount(ASYNC_RUNNER_THREAD_NAME));
    -        assertEquals(2, threadsCount(RECEIVER_THREAD_PREFIX));
    +        assertEquals(1, threadsCount(THREAD_PREFIX));
     
             client.close();
     
             assertTrue(channels[0].isClosed());
             assertTrue(channels[1].isClosed());
    -        assertTrue(GridTestUtils.waitForCondition(() -> threadsCount(ASYNC_RUNNER_THREAD_NAME) == 0, 1_000L));
    -        assertTrue(GridTestUtils.waitForCondition(() -> threadsCount(RECEIVER_THREAD_PREFIX) == 0, 1_000L));
    +        assertTrue(GridTestUtils.waitForCondition(() -> threadsCount(THREAD_PREFIX) == 0, 1_000L));
         }
     
         /**
    @@ -68,7 +66,7 @@ private static int threadsCount(String name) {
             for (long id : threadIds) {
                 ThreadInfo info = U.getThreadMx().getThreadInfo(id);
     
    -            if (info != null && info.getThreadState() != Thread.State.TERMINATED && info.getThreadName().startsWith(name))
    +            if (info != null && info.getThreadState() != Thread.State.TERMINATED && info.getThreadName().contains(name))
                     cnt++;
             }
     
    diff --git a/modules/core/src/test/java/org/apache/ignite/internal/client/thin/TimeoutTest.java b/modules/core/src/test/java/org/apache/ignite/internal/client/thin/TimeoutTest.java
    new file mode 100644
    index 00000000000000..2c7bf8847aff3f
    --- /dev/null
    +++ b/modules/core/src/test/java/org/apache/ignite/internal/client/thin/TimeoutTest.java
    @@ -0,0 +1,220 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *      http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.ignite.internal.client.thin;
    +
    +import java.io.InputStream;
    +import java.io.OutputStream;
    +import java.net.InetSocketAddress;
    +import java.net.ServerSocket;
    +import java.net.Socket;
    +import java.util.concurrent.CountDownLatch;
    +import java.util.concurrent.CyclicBarrier;
    +import java.util.concurrent.TimeUnit;
    +import java.util.concurrent.atomic.AtomicBoolean;
    +import org.apache.ignite.Ignite;
    +import org.apache.ignite.IgniteException;
    +import org.apache.ignite.Ignition;
    +import org.apache.ignite.cache.CacheAtomicityMode;
    +import org.apache.ignite.client.ClientCache;
    +import org.apache.ignite.client.ClientCacheConfiguration;
    +import org.apache.ignite.client.ClientConnectionException;
    +import org.apache.ignite.client.ClientException;
    +import org.apache.ignite.client.ClientTransaction;
    +import org.apache.ignite.client.IgniteClient;
    +import org.apache.ignite.configuration.ClientConfiguration;
    +import org.apache.ignite.configuration.ClientConnectorConfiguration;
    +import org.apache.ignite.configuration.IgniteConfiguration;
    +import org.apache.ignite.internal.IgniteInternalFuture;
    +import org.apache.ignite.internal.binary.streams.BinaryHeapOutputStream;
    +import org.apache.ignite.internal.binary.streams.BinaryOutputStream;
    +import org.apache.ignite.internal.util.typedef.internal.U;
    +import org.apache.ignite.testframework.GridTestUtils;
    +import org.junit.Test;
    +
    +import static org.apache.ignite.configuration.ClientConnectorConfiguration.DFLT_PORT;
    +import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
    +import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
    +
    +/**
    + * Thin client timeouts tests.
    + */
    +public class TimeoutTest extends AbstractThinClientTest {
    +    /**
    +     * Default timeout value.
    +     */
    +    private static final int TIMEOUT = 500;
    +
    +    /** {@inheritDoc} */
    +    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
    +        return super.getConfiguration(igniteInstanceName).setClientConnectorConfiguration(
    +            new ClientConnectorConfiguration().setHandshakeTimeout(TIMEOUT));
    +    }
    +
    +    /** {@inheritDoc} */
    +    @Override protected ClientConfiguration getClientConfiguration() {
    +        return super.getClientConfiguration().setTimeout(TIMEOUT);
    +    }
    +
    +    /**
    +     * Test that server closes thin client connection in case of handshake timeout.
    +     */
    +    @Test
    +    public void testServerClosesThinClientConnectionOnHandshakeTimeout() {
    +        try (Ignite ignite = startGrid(0)) {
    +            long ts0 = System.currentTimeMillis();
    +
    +            Socket s = new Socket();
    +
    +            s.connect(new InetSocketAddress(clientHost(ignite.cluster().localNode()),
    +                clientPort(ignite.cluster().localNode())), 0);
    +
    +            s.setSoTimeout(TIMEOUT * 2);
    +
    +            OutputStream os = s.getOutputStream();
    +
    +            try (BinaryOutputStream bos = new BinaryHeapOutputStream(32)) {
    +                bos.writeInt(1000); // Size.
    +
    +                os.write(bos.arrayCopy());
    +                os.flush();
    +
    +                InputStream is = s.getInputStream();
    +
    +                assertEquals(-1, is.read()); // Connection and stream closed by server after timeout.
    +
    +                long ts1 = System.currentTimeMillis();
    +
    +                assertTrue("Unexpected timeout [ts0=" + ts0 + ", ts1=" + ts1 + ']',
    +                    ts1 - ts0 >= TIMEOUT && ts1 - ts0 < TIMEOUT * 2);
    +            }
    +            finally {
    +                s.close();
    +            }
    +        }
    +        catch (Exception e) {
    +            fail("Exception while sending message: " + e.getMessage());
    +        }
    +    }
    +
    +    /**
    +     * Test client timeout on handshake.
    +     */
    +    @Test
    +    @SuppressWarnings("ThrowableNotThrown")
    +    public void testClientTimeoutOnHandshake() throws Exception {
    +        ServerSocket sock = new ServerSocket();
    +
    +        sock.bind(new InetSocketAddress("127.0.0.1", DFLT_PORT));
    +
    +        AtomicBoolean connectionAccepted = new AtomicBoolean();
    +
    +        CountDownLatch latch = new CountDownLatch(1);
    +
    +        IgniteInternalFuture fut = GridTestUtils.runAsync(() -> {
    +            try {
    +                Socket accepted = sock.accept();
    +
    +                connectionAccepted.set(true);
    +
    +                latch.await(TIMEOUT * 2, TimeUnit.MILLISECONDS);
    +
    +                U.closeQuiet(accepted);
    +            }
    +            catch (Exception e) {
    +                throw new IgniteException("Accept thread failed: " + e.getMessage(), e);
    +            }
    +        });
    +
    +        long ts0 = System.currentTimeMillis();
    +
    +        try {
    +            GridTestUtils.assertThrowsWithCause(
    +                (Runnable)() -> Ignition.startClient(getClientConfiguration().setAddresses("127.0.0.1:" + DFLT_PORT)),
    +                ClientConnectionException.class);
    +        }
    +        finally {
    +            latch.countDown();
    +        }
    +
    +        U.closeQuiet(sock);
    +
    +        assertTrue(connectionAccepted.get());
    +
    +        long ts1 = System.currentTimeMillis();
    +
    +        assertTrue("Unexpected timeout [ts0=" + ts0 + ", ts1=" + ts1 + ']',
    +            ts1 - ts0 >= TIMEOUT && ts1 - ts0 < TIMEOUT * 2);
    +
    +        fut.get();
    +    }
    +
    +    /**
    +     * Test client timeout on operation.
    +     */
    +    @Test
    +    @SuppressWarnings("ThrowableNotThrown")
    +    public void testClientTimeoutOnOperation() throws Exception {
    +        try (Ignite ignite = startGrid(0)) {
    +            try (IgniteClient client = startClient(0)) {
    +                ClientCache cache = client.getOrCreateCache(new ClientCacheConfiguration()
    +                    .setName("cache").setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL));
    +
    +                doSleep(TIMEOUT * 2);
    +
    +                // Should not fail if connection is idle.
    +                cache.put(0, 0);
    +
    +                CyclicBarrier barrier = new CyclicBarrier(2);
    +
    +                IgniteInternalFuture fut = GridTestUtils.runAsync(() -> {
    +                    try (ClientTransaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
    +                        cache.put(0, 0);
    +
    +                        barrier.await(TIMEOUT * 2, TimeUnit.MILLISECONDS);
    +                        barrier.await(TIMEOUT * 2, TimeUnit.MILLISECONDS);
    +                    }
    +                    catch (Exception e) {
    +                        throw new IgniteException(e);
    +                    }
    +                });
    +
    +                // Wait for the key locked.
    +                barrier.await(TIMEOUT * 2, TimeUnit.MILLISECONDS);
    +
    +                long ts0 = System.currentTimeMillis();
    +
    +                try (ClientTransaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
    +                    try {
    +                        GridTestUtils.assertThrowsWithCause(() -> cache.put(0, 0), ClientException.class);
    +                    }
    +                    finally {
    +                        // To unlock another thread.
    +                        barrier.await(TIMEOUT * 2, TimeUnit.MILLISECONDS);
    +                    }
    +                }
    +
    +                long ts1 = System.currentTimeMillis();
    +
    +                assertTrue("Unexpected timeout [ts0=" + ts0 + ", ts1=" + ts1 + ']',
    +                    ts1 - ts0 >= TIMEOUT && ts1 - ts0 < TIMEOUT * 2);
    +
    +                fut.get();
    +            }
    +        }
    +    }
    +}
    diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/AbstractEncryptionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/AbstractEncryptionTest.java
    index 68b4888141d3df..8c66afe05d54c4 100644
    --- a/modules/core/src/test/java/org/apache/ignite/internal/encryption/AbstractEncryptionTest.java
    +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/AbstractEncryptionTest.java
    @@ -20,21 +20,40 @@
     import java.io.File;
     import java.io.FileOutputStream;
     import java.io.OutputStream;
    +import java.nio.ByteBuffer;
    +import java.nio.channels.FileChannel;
    +import java.nio.file.Path;
    +import java.nio.file.StandardOpenOption;
     import java.security.KeyStore;
     import java.util.HashSet;
    +import java.util.List;
     import java.util.Set;
    +import java.util.stream.Collectors;
    +import java.util.stream.IntStream;
     import javax.crypto.KeyGenerator;
     import javax.crypto.SecretKey;
     import org.apache.ignite.Ignite;
     import org.apache.ignite.IgniteCache;
    +import org.apache.ignite.IgniteCheckedException;
    +import org.apache.ignite.IgniteDataStreamer;
     import org.apache.ignite.configuration.CacheConfiguration;
     import org.apache.ignite.configuration.DataRegionConfiguration;
     import org.apache.ignite.configuration.DataStorageConfiguration;
     import org.apache.ignite.configuration.IgniteConfiguration;
     import org.apache.ignite.internal.IgniteEx;
    +import org.apache.ignite.internal.IgniteInternalFuture;
     import org.apache.ignite.internal.IgniteInterruptedCheckedException;
    +import org.apache.ignite.internal.managers.encryption.GridEncryptionManager;
    +import org.apache.ignite.internal.managers.encryption.GroupKey;
    +import org.apache.ignite.internal.managers.encryption.ReencryptStateUtils;
    +import org.apache.ignite.internal.pagemem.PageIdAllocator;
    +import org.apache.ignite.internal.pagemem.PageIdUtils;
    +import org.apache.ignite.internal.processors.cache.CacheGroupContext;
     import org.apache.ignite.internal.processors.cache.IgniteInternalCache;
    +import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStore;
    +import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager;
     import org.apache.ignite.internal.util.IgniteUtils;
    +import org.apache.ignite.internal.util.future.GridCompoundFuture;
     import org.apache.ignite.internal.util.typedef.G;
     import org.apache.ignite.internal.util.typedef.T2;
     import org.apache.ignite.internal.util.typedef.internal.CU;
    @@ -47,6 +66,7 @@
     
     import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
     import static org.apache.ignite.configuration.WALMode.FSYNC;
    +import static org.apache.ignite.internal.pagemem.PageIdAllocator.INDEX_PARTITION;
     import static org.apache.ignite.spi.encryption.keystore.KeystoreEncryptionSpi.CIPHER_ALGO;
     import static org.apache.ignite.spi.encryption.keystore.KeystoreEncryptionSpi.DEFAULT_MASTER_KEY_NAME;
     
    @@ -115,6 +135,18 @@ protected String keystorePath() {
             return KEYSTORE_PATH;
         }
     
    +    /**
    +     * @param name Cache name.
    +     * @param grp Cache group name.
    +     */
    +    protected  CacheConfiguration cacheConfiguration(String name, String grp) {
    +        CacheConfiguration cfg = new CacheConfiguration<>(name);
    +
    +        return cfg.setWriteSynchronizationMode(FULL_SYNC)
    +            .setGroupName(grp)
    +            .setEncryptionEnabled(true);
    +    }
    +
         /** */
         void checkEncryptedCaches(IgniteEx grid0, IgniteEx grid1) {
             Set cacheNames = new HashSet<>(grid0.cacheNames());
    @@ -139,13 +171,21 @@ void checkEncryptedCaches(IgniteEx grid0, IgniteEx grid1) {
     
                 assertTrue(encrypted1.configuration().isEncryptionEnabled());
     
    -            KeystoreEncryptionKey encKey0 = (KeystoreEncryptionKey)grid0.context().encryption().groupKey(grpId);
    +            GroupKey grpKey0 = grid0.context().encryption().groupKey(grpId);
    +
    +            assertNotNull(grpKey0);
    +
    +            KeystoreEncryptionKey encKey0 = (KeystoreEncryptionKey)grpKey0.key();
     
                 assertNotNull(encKey0);
                 assertNotNull(encKey0.key());
     
                 if (!grid1.configuration().isClientMode()) {
    -                KeystoreEncryptionKey encKey1 = (KeystoreEncryptionKey)grid1.context().encryption().groupKey(grpId);
    +                GroupKey grpKey1 = grid1.context().encryption().groupKey(grpId);
    +
    +                assertNotNull(grpKey1);
    +
    +                KeystoreEncryptionKey encKey1 = (KeystoreEncryptionKey)grpKey1.key();
     
                     assertNotNull(encKey1);
                     assertNotNull(encKey1.key());
    @@ -161,12 +201,21 @@ void checkEncryptedCaches(IgniteEx grid0, IgniteEx grid1) {
     
         /** */
         protected void checkData(IgniteEx grid0) {
    -        IgniteCache cache = grid0.cache(cacheName());
    +        IgniteCache cache = grid0.cache(cacheName());
     
             assertNotNull(cache);
     
    -        for (long i = 0; i < 100; i++)
    -            assertEquals("" + i, cache.get(i));
    +        int size = cache.size();
    +
    +        assertTrue("Cache cannot be empty", size > 0);
    +
    +        for (long i = 0; i < size; i++)
    +            assertEquals(generateValue(i), cache.get(i));
    +    }
    +
    +    /** */
    +    protected Object generateValue(long id) {
    +        return String.valueOf(id);
         }
     
         /** */
    @@ -178,22 +227,17 @@ protected void createEncryptedCache(IgniteEx grid0, @Nullable IgniteEx grid1, St
         /** */
         protected void createEncryptedCache(IgniteEx grid0, @Nullable IgniteEx grid1, String cacheName, String cacheGroup,
             boolean putData) throws IgniteInterruptedCheckedException {
    -        CacheConfiguration ccfg = new CacheConfiguration(cacheName)
    -            .setWriteSynchronizationMode(FULL_SYNC)
    -            .setGroupName(cacheGroup)
    -            .setEncryptionEnabled(true);
    -
    -        IgniteCache cache = grid0.createCache(ccfg);
    +        IgniteCache cache = grid0.createCache(cacheConfiguration(cacheName, cacheGroup));
     
             if (grid1 != null)
                 GridTestUtils.waitForCondition(() -> grid1.cachex(cacheName()) != null, 2_000L);
     
             if (putData) {
                 for (long i = 0; i < 100; i++)
    -                cache.put(i, "" + i);
    +                cache.put(i, generateValue(i));
     
                 for (long i = 0; i < 100; i++)
    -                assertEquals("" + i, cache.get(i));
    +                assertEquals(generateValue(i), cache.get(i));
             }
         }
     
    @@ -271,4 +315,206 @@ protected boolean checkMasterKeyName(String name) {
     
             return true;
         }
    +
    +    /**
    +     * Load data into cache "{@link #cacheName()}" using node "{@link #GRID_0}".
    +     *
    +     * @param cnt Count of entries.
    +     */
    +    protected void loadData(int cnt) {
    +        loadData(cacheName(), cnt);
    +    }
    +
    +    /**
    +     * Load data into cache using node "{@link #GRID_0}".
    +     *
    +     * @param cnt Count of entries.
    +     * @param cacheName Cache name.
    +     */
    +    protected void loadData(String cacheName, int cnt) {
    +        info("Loading " + cnt + " entries into " + cacheName);
    +
    +        int start = grid(GRID_0).cache(cacheName).size();
    +
    +        try (IgniteDataStreamer streamer = grid(GRID_0).dataStreamer(cacheName)) {
    +            for (long i = start; i < (cnt + start); i++)
    +                streamer.addData(i, generateValue(i));
    +        }
    +
    +        info("Load data finished");
    +    }
    +
    +    /**
    +     * Ensures that all pages of page store have expected encryption key identifier.
    +     *
    +     * @param grpId Cache group ID.
    +     * @param expKeyId Encryption key ID.
    +     * @param timeout Timeout to wait for encryption to complete.
    +     * @throws Exception If failed.
    +     */
    +    protected void checkGroupKey(int grpId, int expKeyId, long timeout) throws Exception {
    +        awaitEncryption(G.allGrids(), grpId, timeout);
    +
    +        for (Ignite g : G.allGrids()) {
    +            IgniteEx grid = (IgniteEx)g;
    +
    +            if (grid.context().clientNode())
    +                continue;
    +
    +            info("Validating encryption key [node=" + g.cluster().localNode().id() + ", grp=" + grpId + "]");
    +
    +            CacheGroupContext grp = grid.context().cache().cacheGroup(grpId);
    +
    +            if (grp == null || !grp.affinityNode()) {
    +                info("Context doesn't exits on " + grid.localNode().id());
    +
    +                continue;
    +            }
    +
    +            GridEncryptionManager encryption = grid.context().encryption();
    +
    +            assertEquals(grid.localNode().id().toString(), (byte)expKeyId, encryption.groupKey(grpId).id());
    +
    +            IgniteInternalFuture fut = encryption.reencryptionFuture(grpId);
    +
     +            // The future will be completed after the checkpoint; forceCheckpoint() does nothing
     +            // if a checkpoint has already been scheduled.
    +            GridTestUtils.waitForCondition(() -> {
    +                if (fut.isDone())
    +                    return true;
    +
    +                try {
    +                    forceCheckpoint(g);
    +                }
    +                catch (IgniteCheckedException e) {
    +                    throw new RuntimeException(e);
    +                }
    +
    +                return fut.isDone();
    +            }, timeout);
    +
    +            assertTrue(fut.isDone());
    +
    +            List parts = IntStream.range(0, grp.shared().affinity().affinity(grpId).partitions())
    +                .boxed().collect(Collectors.toList());
    +
    +            parts.add(INDEX_PARTITION);
    +
    +            int realPageSize = grp.dataRegion().pageMemory().realPageSize(grpId);
    +            int encryptionBlockSize = grp.shared().kernalContext().config().getEncryptionSpi().blockSize();
    +
    +            for (int p : parts) {
    +                FilePageStore pageStore =
    +                    (FilePageStore)((FilePageStoreManager)grp.shared().pageStore()).getStore(grpId, p);
    +
    +                if (!pageStore.exists())
    +                    continue;
    +
    +                long state = grid.context().encryption().getEncryptionState(grpId, p);
    +
    +                String msg = String.format("p=%d, off=%d, total=%d",
    +                    p, ReencryptStateUtils.pageIndex(state), ReencryptStateUtils.pageCount(state));
    +
    +                assertEquals(msg, 0, ReencryptStateUtils.pageCount(state));
    +                assertEquals(msg, 0, ReencryptStateUtils.pageIndex(state));
    +
    +                long startPageId = PageIdUtils.pageId(p, PageIdAllocator.FLAG_DATA, 0);
    +
    +                int pagesCnt = pageStore.pages();
    +                int pageSize = pageStore.getPageSize();
    +
    +                ByteBuffer pageBuf = ByteBuffer.allocate(pageSize);
    +
    +                Path path = new File(pageStore.getFileAbsolutePath()).toPath();
    +
    +                try (FileChannel ch = FileChannel.open(path, StandardOpenOption.READ)) {
    +                    for (int n = 0; n < pagesCnt; n++) {
    +                        long pageId = startPageId + n;
    +                        long pageOff = pageStore.pageOffset(pageId);
    +
    +                        pageBuf.position(0);
    +
    +                        ch.position(pageOff);
    +                        ch.read(pageBuf);
    +
    +                        pageBuf.position(realPageSize + encryptionBlockSize);
    +
    +                        int pageCrc = pageBuf.getInt();
    +                        int pageKeyId = pageBuf.get() & 0xff;
    +
    +                        // If this page is empty we can skip it.
    +                        if (pageCrc == 0 && pageKeyId == 0) {
    +                            pageBuf.position(0);
    +
    +                            boolean emptyPage = false;
    +
    +                            while (pageBuf.hasRemaining() && !emptyPage)
    +                                emptyPage = pageBuf.getLong() == 0;
    +
    +                            if (emptyPage)
    +                                continue;
    +                        }
    +
    +                        msg = String.format("File=%s, page=%d", pageStore.getFileAbsolutePath(), n);
    +                        assertEquals(msg, expKeyId, pageKeyId);
    +                    }
    +                }
    +            }
    +        }
    +    }
    +
    +    /**
    +     * @param grids Grids.
    +     * @param grpId Cache group ID.
    +     * @param timeout Timeout to wait for encryption to complete.
    +     * @throws IgniteCheckedException If failed.
    +     */
    +    protected void awaitEncryption(List grids, int grpId, long timeout) throws IgniteCheckedException {
    +        GridCompoundFuture fut = new GridCompoundFuture<>();
    +
    +        for (Ignite node : grids) {
    +            IgniteEx grid = (IgniteEx)node;
    +
    +            if (grid.context().clientNode())
    +                continue;
    +
    +            IgniteInternalFuture fut0 = GridTestUtils.runAsync(() -> {
    +                boolean success =
    +                    GridTestUtils.waitForCondition(() -> !isReencryptionInProgress(grid, grpId), timeout);
    +
    +                assertTrue(success);
    +
    +                return null;
    +            });
    +
    +            fut.add(fut0);
    +        }
    +
    +        fut.markInitialized();
    +
    +        fut.get(timeout);
    +    }
    +
    +    /**
    +     * @param node Node.
    +     * @param grpId Cache group ID.
     +     * @return {@code True} if re-encryption of the specified group is not yet complete.
    +     */
    +    protected boolean isReencryptionInProgress(IgniteEx node, int grpId) {
    +        CacheGroupContext grp = node.context().cache().cacheGroup(grpId);
    +
    +        if (grp == null || !grp.affinityNode())
    +            return false;
    +
    +        for (int p = 0; p < grp.affinity().partitions(); p++) {
    +            long state = node.context().encryption().getEncryptionState(grpId, p);
    +
    +            if (ReencryptStateUtils.pageIndex(state) != ReencryptStateUtils.pageCount(state))
    +                return true;
    +        }
    +
    +        long state = node.context().encryption().getEncryptionState(grpId, INDEX_PARTITION);
    +
    +        return ReencryptStateUtils.pageIndex(state) != ReencryptStateUtils.pageCount(state);
    +    }
     }
    diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/CacheGroupKeyChangeTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/CacheGroupKeyChangeTest.java
    new file mode 100644
    index 00000000000000..810e05d109ace3
    --- /dev/null
    +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/CacheGroupKeyChangeTest.java
    @@ -0,0 +1,1063 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *      http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.ignite.internal.encryption;
    +
    +import java.io.File;
    +import java.io.Serializable;
    +import java.util.Arrays;
    +import java.util.Collections;
    +import java.util.List;
    +import java.util.Random;
    +import java.util.concurrent.CountDownLatch;
    +import java.util.concurrent.ThreadLocalRandom;
    +import java.util.concurrent.TimeUnit;
    +import java.util.concurrent.atomic.AtomicBoolean;
    +import org.apache.ignite.Ignite;
    +import org.apache.ignite.IgniteCache;
    +import org.apache.ignite.IgniteCheckedException;
    +import org.apache.ignite.IgniteDataStreamer;
    +import org.apache.ignite.IgniteException;
    +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
    +import org.apache.ignite.cluster.ClusterState;
    +import org.apache.ignite.configuration.CacheConfiguration;
    +import org.apache.ignite.configuration.DataRegionConfiguration;
    +import org.apache.ignite.configuration.DataStorageConfiguration;
    +import org.apache.ignite.configuration.IgniteConfiguration;
    +import org.apache.ignite.configuration.WALMode;
    +import org.apache.ignite.internal.IgniteEx;
    +import org.apache.ignite.internal.IgniteInternalFuture;
    +import org.apache.ignite.internal.TestRecordingCommunicationSpi;
    +import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage;
    +import org.apache.ignite.internal.managers.encryption.GridEncryptionManager;
    +import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
    +import org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer;
    +import org.apache.ignite.internal.util.distributed.DistributedProcess.DistributedProcessType;
    +import org.apache.ignite.internal.util.distributed.InitMessage;
    +import org.apache.ignite.internal.util.distributed.SingleNodeMessage;
    +import org.apache.ignite.internal.util.future.GridFinishedFuture;
    +import org.apache.ignite.internal.util.typedef.G;
    +import org.apache.ignite.internal.util.typedef.T2;
    +import org.apache.ignite.internal.util.typedef.internal.CU;
    +import org.apache.ignite.internal.util.typedef.internal.U;
    +import org.apache.ignite.lang.IgniteFuture;
    +import org.apache.ignite.spi.IgniteSpiException;
    +import org.apache.ignite.spi.discovery.tcp.TestTcpDiscoverySpi;
    +import org.apache.ignite.testframework.GridTestUtils.DiscoveryHook;
    +import org.junit.Test;
    +
    +import static org.apache.ignite.configuration.WALMode.LOG_ONLY;
    +import static org.apache.ignite.configuration.WALMode.NONE;
    +import static org.apache.ignite.internal.managers.encryption.GridEncryptionManager.INITIAL_KEY_ID;
    +import static org.apache.ignite.spi.encryption.keystore.KeystoreEncryptionSpi.DEFAULT_MASTER_KEY_NAME;
    +import static org.apache.ignite.testframework.GridTestUtils.assertThrowsAnyCause;
    +import static org.apache.ignite.testframework.GridTestUtils.assertThrowsWithCause;
    +import static org.apache.ignite.testframework.GridTestUtils.runAsync;
    +import static org.apache.ignite.testframework.GridTestUtils.waitForCondition;
    +
    +/**
    + * Cache group key change distributed process tests.
    + */
    +public class CacheGroupKeyChangeTest extends AbstractEncryptionTest {
    +    /** Timeout, in milliseconds, used for all waits/future gets in this test. */
    +    private static final long MAX_AWAIT_MILLIS = 15_000;
    +
    +    /** 1 megabyte in bytes. */
    +    private static final int MB = 1024 * 1024;
    +
    +    /** Name of the third test grid (GRID_0 and GRID_1 are inherited). */
    +    private static final String GRID_2 = "grid-2";
    +
    +    /** Discovery hook for distributed process. Installed into the discovery SPI by {@code getConfiguration} when non-null. */
    +    private InitMessageDiscoveryHook discoveryHook;
    +
    +    /** Count of cache backups. Defaults to 0; individual tests override it before starting grids. */
    +    private int backups;
    +
    +    /** Number of WAL segments. */
    +    private int walSegments = 10;
    +
    +    /** WAL mode. */
    +    private WALMode walMode = LOG_ONLY;
    +
    +    /** {@inheritDoc} */
    +    @Override protected IgniteConfiguration getConfiguration(String name) throws Exception {
    +        IgniteConfiguration cfg = super.getConfiguration(name);
    +
    +        // Stable consistent id so a restarted node rejoins the baseline under the same identity.
    +        cfg.setConsistentId(name);
    +        // Recording SPI lets tests block/release SingleNodeMessage to pause the distributed process.
    +        cfg.setCommunicationSpi(new TestRecordingCommunicationSpi());
    +
    +        // Optionally hook discovery to block InitMessage of the key-change distributed process.
    +        if (discoveryHook != null)
    +            ((TestTcpDiscoverySpi)cfg.getDiscoverySpi()).discoveryHook(discoveryHook);
    +
    +        // Persistence is required for encrypted caches; small WAL segments (1 MB) and a bounded
    +        // archive make segment rollover/removal happen quickly under test load.
    +        DataStorageConfiguration memCfg = new DataStorageConfiguration()
    +            .setDefaultDataRegionConfiguration(
    +                new DataRegionConfiguration()
    +                    .setMaxSize(100 * MB)
    +                    .setPersistenceEnabled(true))
    +            .setPageSize(4 * 1024)
    +            .setWalSegmentSize(MB)
    +            .setWalSegments(walSegments)
    +            .setMaxWalArchiveSize(2 * walSegments * MB)
    +            .setCheckpointFrequency(30 * 1000L)
    +            .setWalMode(walMode);
    +
    +        cfg.setDataStorageConfiguration(memCfg);
    +
    +        return cfg;
    +    }
    +
    +    /**
    +     * {@inheritDoc}
    +     * <p>
    +     * Extends the base configuration with a small 8-partition affinity (keeps re-encryption
    +     * fast) and the backup count configured by the individual test via {@link #backups}.
    +     */
    +    @Override protected CacheConfiguration cacheConfiguration(String name, String grp) {
    +        CacheConfiguration cfg = super.cacheConfiguration(name, grp);
    +
    +        return cfg.setAffinity(new RendezvousAffinityFunction(false, 8)).setBackups(backups);
    +    }
    +
    +    /** {@inheritDoc} */
    +    @Override protected void afterTest() throws Exception {
    +        // Stop every node first, then wipe persistence so the next test starts from a clean baseline.
    +        stopAllGrids();
    +
    +        cleanPersistenceDir();
    +    }
    +
    +    /**
    +     * Checks that a new node cannot join the cluster while a cache group key rotation is in flight,
    +     * and that the rotation completes normally once unblocked.
    +     *
    +     * @throws Exception If failed.
    +     */
    +    @Test
    +    @SuppressWarnings("ThrowableNotThrown")
    +    public void testRejectNodeJoinDuringRotation() throws Exception {
    +        // NOTE(review): raw T2 — generic arguments (likely T2<IgniteEx, IgniteEx>) appear stripped
    +        // by extraction; confirm against the repository source.
    +        T2 grids = startTestGrids(true);
    +
    +        createEncryptedCache(grids.get1(), grids.get2(), cacheName(), null);
    +
    +        int grpId = CU.cacheId(cacheName());
    +
    +        // Initial key identifier must be 0 before any rotation.
    +        assertEquals(0, grids.get1().context().encryption().groupKey(grpId).id());
    +
    +        TestRecordingCommunicationSpi commSpi = TestRecordingCommunicationSpi.spi(grids.get2());
    +
    +        // Hold back the per-node result message so the distributed key-change process stays in progress.
    +        commSpi.blockMessages((node, msg) -> msg instanceof SingleNodeMessage);
    +
    +        IgniteFuture fut = grids.get1().encryption().changeCacheGroupKey(Collections.singleton(cacheName()));
    +
    +        commSpi.waitForBlocked();
    +
    +        // A join attempted mid-rotation must be rejected.
    +        assertThrowsWithCause(() -> startGrid(3), IgniteCheckedException.class);
    +
    +        commSpi.stopBlock();
    +
    +        fut.get();
    +
    +        // Rotation completed: key id advanced by one on the group.
    +        checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS);
    +
    +        checkEncryptedCaches(grids.get1(), grids.get2());
    +    }
    +
    +    /** @throws Exception If failed. */
    +    @Test
    +    public void testNotAllBltNodesPresent() throws Exception {
    +        startTestGrids(true);
    +
    +        createEncryptedCache(grid(GRID_0), grid(GRID_1), cacheName(), null);
    +
    +        stopGrid(GRID_1);
    +
    +        grid(GRID_0).encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get();
    +
    +        startGrid(GRID_1);
    +
    +        checkGroupKey(CU.cacheId(cacheName()), INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS);
    +    }
    +
    +    /** Non-coordinator fails while the prepare-phase discovery message is blocked. @throws Exception If failed. */
    +    @Test
    +    public void testNodeFailsBeforePrepare() throws Exception {
    +        checkNodeFailsDuringRotation(false, true, true);
    +    }
    +
    +    /** Non-coordinator fails while the perform-phase discovery message is blocked. @throws Exception If failed. */
    +    @Test
    +    public void testNodeFailsBeforePerform() throws Exception {
    +        checkNodeFailsDuringRotation(false, false, true);
    +    }
    +
    +    /** Non-coordinator fails after prepare, with its communication response blocked. @throws Exception If failed. */
    +    @Test
    +    public void testNodeFailsAfterPrepare() throws Exception {
    +        checkNodeFailsDuringRotation(false, true, false);
    +    }
    +
    +    /** Coordinator fails after prepare, with the peer's communication response blocked. @throws Exception If failed. */
    +    @Test
    +    public void testCrdFailsAfterPrepare() throws Exception {
    +        checkNodeFailsDuringRotation(true, true, false);
    +    }
    +
    +    /** Non-coordinator fails after perform, with its communication response blocked. @throws Exception If failed. */
    +    @Test
    +    public void testNodeFailsAfterPerform() throws Exception {
    +        checkNodeFailsDuringRotation(false, false, false);
    +    }
    +
    +    /** Coordinator fails after perform, with the peer's communication response blocked. @throws Exception If failed. */
    +    @Test
    +    public void testCrdFailsAfterPerform() throws Exception {
    +        checkNodeFailsDuringRotation(true, false, false);
    +    }
    +
    +    /**
    +     * Runs a key rotation on a two-node cluster, kills one node at a chosen phase of the
    +     * distributed process, then verifies the rotation still completes, the killed node can
    +     * rejoin with the new key, and a subsequent rotation succeeds.
    +     *
    +     * @param stopCrd {@code True} to stop coordinator.
    +     * @param prepare {@code True} to stop on the prepare phase. {@code False} to stop on the perform phase.
    +     * @param discoBlock  {@code True} to block discovery, {@code False} to block communication SPI.
    +     * @throws Exception If failed.
    +     */
    +    private void checkNodeFailsDuringRotation(boolean stopCrd, boolean prepare, boolean discoBlock) throws Exception {
    +        cleanPersistenceDir();
    +
    +        DistributedProcessType type = prepare ?
    +            DistributedProcessType.CACHE_GROUP_KEY_CHANGE_PREPARE : DistributedProcessType.CACHE_GROUP_KEY_CHANGE_FINISH;
    +
    +        InitMessageDiscoveryHook locHook = new InitMessageDiscoveryHook(type);
    +
    +        // The hook must be installed on the node that will be stopped; getConfiguration()
    +        // reads 'discoveryHook', so it is set just before starting that node.
    +        if (discoBlock && stopCrd)
    +            discoveryHook = locHook;
    +
    +        IgniteEx grid0 = startGrid(GRID_0);
    +
    +        if (discoBlock && !stopCrd)
    +            discoveryHook = locHook;
    +
    +        IgniteEx grid1 = startGrid(GRID_1);
    +
    +        grid0.cluster().state(ClusterState.ACTIVE);
    +
    +        createEncryptedCache(grid0, grid1, cacheName(), null);
    +
    +        int grpId = CU.cacheId(cacheName());
    +
    +        checkGroupKey(grpId, INITIAL_KEY_ID, MAX_AWAIT_MILLIS);
    +
    +        TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(grid1);
    +
    +        if (!discoBlock) {
    +            AtomicBoolean preparePhase = new AtomicBoolean(true);
    +
    +            // The first SingleNodeMessage belongs to the prepare phase, the second to perform.
    +            // Block the first one when testing prepare, only the second one otherwise.
    +            spi.blockMessages((node, msg) -> {
    +                if (msg instanceof SingleNodeMessage) {
    +                    boolean isPrepare = preparePhase.compareAndSet(true, false);
    +
    +                    return prepare || !isPrepare;
    +                }
    +
    +                return false;
    +            });
    +        }
    +
    +        String alive = stopCrd ? GRID_1 : GRID_0;
    +        String stopped = stopCrd ? GRID_0 : GRID_1;
    +
    +        IgniteFuture changeFut = grid(alive).encryption().changeCacheGroupKey(Collections.singleton(cacheName()));
    +
    +        // Already-finished placeholder so the unconditional get() below is a no-op in the disco branch.
    +        IgniteInternalFuture stopFut = new GridFinishedFuture<>();
    +
    +        if (discoBlock) {
    +            locHook.waitForBlocked(MAX_AWAIT_MILLIS);
    +
    +            // Kill the node while its discovery processing of the process message is parked.
    +            stopGrid(stopped, true);
    +
    +            locHook.stopBlock();
    +        }
    +        else {
    +            spi.waitForBlocked();
    +
    +            stopFut = runAsync(() -> stopGrid(stopped, true));
    +        }
    +
    +        // The rotation must survive the node failure.
    +        changeFut.get(MAX_AWAIT_MILLIS);
    +        stopFut.get(MAX_AWAIT_MILLIS);
    +
    +        checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS);
    +
    +        // The killed node must rejoin and converge on the rotated key.
    +        IgniteEx stoppedNode = startGrid(stopped);
    +
    +        stoppedNode.resetLostPartitions(Collections.singleton(ENCRYPTED_CACHE));
    +
    +        awaitPartitionMapExchange();
    +
    +        checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS);
    +
    +        // A second rotation initiated by the restarted node must also succeed.
    +        stoppedNode.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS);
    +
    +        checkGroupKey(grpId, INITIAL_KEY_ID + 2, MAX_AWAIT_MILLIS);
    +    }
    +
    +    /**
    +     * Ensures that we can rotate the key more than 255 times.
    +     * The key identifier is a single byte, so it must wrap around safely.
    +     *
    +     * @throws Exception If failed.
    +     */
    +    @Test
    +    public void testKeyIdentifierOverflow() throws Exception {
    +        IgniteEx node = startTestGrids(true).get1();
    +
    +        createEncryptedCache(node, null, cacheName(), null, false);
    +
    +        int grpId = CU.cacheId(cacheName());
    +
    +        byte keyId = INITIAL_KEY_ID;
    +
    +        // Rotate until the byte-sized identifier wraps back to its initial value (256 rotations).
    +        do {
    +            node.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get();
    +
    +            // Validates reencryption of index partition.
    +            // '& 0xff' converts the (possibly negative) byte to its unsigned value.
    +            checkGroupKey(grpId, ++keyId & 0xff, MAX_AWAIT_MILLIS);
    +        } while (keyId != INITIAL_KEY_ID);
    +    }
    +
    +    /**
    +     * Fires master key changes and cache group key changes concurrently, in random order,
    +     * and verifies the group key change is either applied or cleanly rejected with a
    +     * "master key has been changed" error (in which case the iteration is retried).
    +     *
    +     * @throws Exception If failed.
    +     */
    +    @Test
    +    public void testMasterAndCacheGroupKeySimultaneousChange() throws Exception {
    +        startTestGrids(true);
    +
    +        IgniteEx node0 = grid(GRID_0);
    +        IgniteEx node1 = grid(GRID_1);
    +
    +        createEncryptedCache(node0, node1, cacheName(), null);
    +
    +        int grpId = CU.cacheId(cacheName());
    +
    +        assertTrue(checkMasterKeyName(DEFAULT_MASTER_KEY_NAME));
    +
    +        Random rnd = ThreadLocalRandom.current();
    +
    +        for (byte keyId = 1; keyId < 50; keyId++) {
    +            // Alternate between the two auxiliary master keys.
    +            String currMkName = node0.context().config().getEncryptionSpi().getMasterKeyName();
    +            String newMkName = currMkName.equals(MASTER_KEY_NAME_2) ? MASTER_KEY_NAME_3 : MASTER_KEY_NAME_2;
    +
    +            // Randomize which operation is initiated first.
    +            boolean changeGrpFirst = rnd.nextBoolean();
    +
    +            IgniteFuture grpKeyFut;
    +            IgniteFuture masterKeyFut;
    +
    +            if (changeGrpFirst) {
    +                grpKeyFut = node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName()));
    +                masterKeyFut = node0.encryption().changeMasterKey(newMkName);
    +            }
    +            else {
    +                masterKeyFut = node0.encryption().changeMasterKey(newMkName);
    +                grpKeyFut = node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName()));
    +            }
    +
    +            // The master key change must always succeed.
    +            masterKeyFut.get(MAX_AWAIT_MILLIS);
    +            assertTrue(checkMasterKeyName(newMkName));
    +
    +            try {
    +                grpKeyFut.get(MAX_AWAIT_MILLIS);
    +                checkGroupKey(grpId, keyId, MAX_AWAIT_MILLIS);
    +            } catch (IgniteException e) {
    +                // Group key change may be rejected when it raced with the master key change.
    +                assertTrue(e.getMessage().contains("Cache group key change was rejected. Master key has been changed."));
    +
    +                // Retry iteration.
    +                keyId -= 1;
    +            }
    +        }
    +    }
    +
    +    /**
    +     * Checks that a brand-new cache (its own group) can be started and used while another
    +     * group's key rotation is in progress; the new group keeps the initial key id.
    +     *
    +     * @throws Exception If failed.
    +     */
    +    @Test
    +    public void testCacheStartDuringRotation() throws Exception {
    +        T2 grids = startTestGrids(true);
    +
    +        createEncryptedCache(grids.get1(), grids.get2(), cacheName(), null);
    +
    +        TestRecordingCommunicationSpi commSpi = TestRecordingCommunicationSpi.spi(grids.get2());
    +
    +        // Stall the distributed key-change process on node 2.
    +        commSpi.blockMessages((node, msg) -> msg instanceof SingleNodeMessage);
    +
    +        IgniteFuture fut = grids.get1().encryption().changeCacheGroupKey(Collections.singleton(cacheName()));
    +
    +        commSpi.waitForBlocked();
    +
    +        // Start an unrelated cache and write to it while the rotation is stalled.
    +        IgniteCache cache = grids.get1().createCache(cacheConfiguration("cache1", null));
    +
    +        for (int i = 0; i < 100; i++)
    +            cache.put(i, i);
    +
    +        commSpi.stopBlock();
    +
    +        fut.get();
    +
    +        // Rotated group advanced to the next key id...
    +        checkGroupKey(CU.cacheId(cacheName()), INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS);
    +
    +        // ...while the newly started group still uses the initial one.
    +        checkGroupKey(CU.cacheId("cache1"), INITIAL_KEY_ID, MAX_AWAIT_MILLIS);
    +    }
    +
    +    /**
    +     * Checks that a new cache can be started in the SAME shared group whose key is being
    +     * rotated; the whole group ends up on the rotated key.
    +     *
    +     * @throws Exception If failed.
    +     */
    +    @Test
    +    public void testCacheStartSameGroupDuringRotation() throws Exception {
    +        T2 grids = startTestGrids(true);
    +
    +        String grpName = "shared";
    +
    +        createEncryptedCache(grids.get1(), grids.get2(), cacheName(), grpName);
    +
    +        TestRecordingCommunicationSpi commSpi = TestRecordingCommunicationSpi.spi(grids.get2());
    +
    +        // Stall the distributed key-change process on node 2.
    +        commSpi.blockMessages((node, msg) -> msg instanceof SingleNodeMessage);
    +
    +        IgniteFuture fut = grids.get1().encryption().changeCacheGroupKey(Collections.singleton(grpName));
    +
    +        commSpi.waitForBlocked();
    +
    +        // Start a second cache in the group under rotation while the process is stalled.
    +        IgniteCache cache =
    +            grids.get1().createCache(cacheConfiguration("cache1", grpName));
    +
    +        commSpi.stopBlock();
    +
    +        for (int i = 0; i < 100; i++)
    +            cache.put(i, i);
    +
    +        fut.get();
    +
    +        checkGroupKey(CU.cacheId(grpName), INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS);
    +    }
    +
    +    /**
    +     * Checks that a key rotation initiated while data is rebalancing to a newly joined
    +     * node completes and converges to the new key cluster-wide.
    +     *
    +     * @throws Exception If failed.
    +     */
    +    @Test
    +    public void testChangeKeyDuringRebalancing() throws Exception {
    +        T2 grids = startTestGrids(true);
    +
    +        IgniteEx node0 = grids.get1();
    +        IgniteEx node1 = grids.get2();
    +
    +        createEncryptedCache(node0, node1, cacheName(), null);
    +
    +        // Enough entries that rebalancing to the new node is still running when the key changes.
    +        loadData(500_000);
    +
    +        IgniteEx node2 = startGrid(GRID_2);
    +
    +        // Adding the node to the baseline triggers rebalancing.
    +        resetBaselineTopology();
    +
    +        int grpId = CU.cacheId(cacheName());
    +
    +        node2.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS);
    +
    +        awaitPartitionMapExchange();
    +
    +        checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS);
    +    }
    +
    +    /**
    +     * Checks the scenario where a node that missed a key rotation (and so only has the old
    +     * key id) restarts first and becomes coordinator: the cluster falls back to the old key,
    +     * a re-rotation is rejected until WAL segments possibly encrypted with the previous key
    +     * are gone, and succeeds afterwards.
    +     *
    +     * @throws Exception If failed.
    +     */
    +    @Test
    +    public void testNodeWithOlderKeyBecameCoordinator() throws Exception {
    +        backups = 1;
    +
    +        startTestGrids(true);
    +
    +        IgniteEx node0 = grid(GRID_0);
    +        IgniteEx node1 = grid(GRID_1);
    +
    +        createEncryptedCache(node0, node1, cacheName(), null);
    +
    +        int grpId = CU.cacheId(cacheName());
    +
    +        stopGrid(GRID_0);
    +
    +        // Changing encryption key on one node.
    +        node1.context().encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS);
    +        checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS);
    +
    +        stopGrid(GRID_1);
    +
    +        // The node with only the old key ID has become the coordinator.
    +        node0 = startGrid(GRID_0);
    +        assertTrue(Collections.singleton(INITIAL_KEY_ID).containsAll(node0.context().encryption().groupKeyIds(grpId)));
    +
    +        node1 = startGrid(GRID_1);
    +        node1.cluster().state(ClusterState.ACTIVE);
    +
    +        // Wait until cache will be reencrypted with the old key.
    +        checkGroupKey(grpId, INITIAL_KEY_ID, MAX_AWAIT_MILLIS);
    +
    +        GridEncryptionManager encrMgr0 = node0.context().encryption();
    +        GridEncryptionManager encrMgr1 = node1.context().encryption();
    +
    +        // Changing the encryption key is not possible until the WAL segment,
    +        // encrypted (probably) with the previous key, is deleted.
    +        assertThrowsAnyCause(log,
    +            () -> encrMgr1.changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS),
    +            IgniteException.class,
    +            "Cache group key change was rejected. Cannot add new key identifier, it's already present.");
    +
    +        long walIdx = node1.context().cache().context().wal().currentSegment();
    +
    +        // Simulate WAL segment deletion.
    +        // NOTE(review): loop variable 'n' is unused — every iteration passes 'walIdx'.
    +        // Looks like 'onWalSegmentRemoved(n)' was intended; confirm whether repeated calls
    +        // with the max index are deliberate (idempotent) or a typo.
    +        for (long n = 0; n <= walIdx; n++)
    +            node1.context().encryption().onWalSegmentRemoved(walIdx);
    +
    +        encrMgr1.changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS);
    +        checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS);
    +        checkEncryptedCaches(node0, node1);
    +
    +        walIdx = Math.max(node0.context().cache().context().wal().currentSegment(),
    +            node1.context().cache().context().wal().currentSegment());
    +
    +        // Simulate WAL segment deletion.
    +        // NOTE(review): same unused-loop-variable pattern as above ('walIdx' instead of 'n').
    +        for (long n = 0; n <= walIdx; n++) {
    +            encrMgr0.onWalSegmentRemoved(walIdx);
    +            encrMgr1.onWalSegmentRemoved(walIdx);
    +        }
    +
    +        // Make sure the previous key has been removed.
    +        assertEquals(1, encrMgr0.groupKeyIds(grpId).size());
    +        assertEquals(encrMgr1.groupKeyIds(grpId), encrMgr0.groupKeyIds(grpId));
    +    }
    +
    +    /**
    +     * Ensures that a node cannot join the cluster if it cannot replace an existing encryption key.
    +     * 

    + * If the joining node has a different encryption key than the coordinator, but with the same identifier, it should + * not perform key rotation to a new key (recevied from coordinator) until the previous key is deleted. + * + * @throws Exception If failed. + */ + @Test + public void testNodeJoinRejectedIfKeyCannotBeReplaced() throws Exception { + backups = 2; + + T2 nodes = startTestGrids(true); + + startGrid(GRID_2); + + resetBaselineTopology(); + + createEncryptedCache(nodes.get1(), nodes.get2(), cacheName(), null); + + forceCheckpoint(); + + stopGrid(GRID_0); + stopGrid(GRID_1); + + grid(GRID_2).encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS); + + int grpId = CU.cacheId(cacheName()); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + + grid(GRID_2).encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS); + + checkGroupKey(grpId, INITIAL_KEY_ID + 2, MAX_AWAIT_MILLIS); + + stopGrid(GRID_2); + + startTestGrids(false); + + checkGroupKey(grpId, INITIAL_KEY_ID, MAX_AWAIT_MILLIS); + + grid(GRID_0).encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS); + + assertThrowsAnyCause(log, + () -> startGrid(GRID_2), + IgniteSpiException.class, + "Cache key differs! Node join is rejected."); + } + + /** + * @throws Exception If failed. 
+ */ + @Test + public void testKeyChangeWithNodeFilter() throws Exception { + startTestGrids(true); + + IgniteEx node0 = grid(GRID_0); + IgniteEx node1 = grid(GRID_1); + + Object nodeId0 = node0.localNode().consistentId(); + Object nodeId1 = node1.localNode().consistentId(); + + String cache1 = cacheName(); + String cache2 = "cache2"; + + node0.createCache(cacheConfiguration(cache1, null) + .setNodeFilter(node -> !node.consistentId().equals(nodeId0))); + + node0.createCache(cacheConfiguration(cache2, null) + .setNodeFilter(node -> !node.consistentId().equals(nodeId1))); + + loadData(10_000); + + forceCheckpoint(); + + int grpId1 = CU.cacheId(cache1); + int grpId2 = CU.cacheId(cache2); + + node0.encryption().changeCacheGroupKey(Arrays.asList(cache1, cache2)).get(); + + List keys0 = node0.context().encryption().groupKeyIds(grpId1); + List keys1 = node1.context().encryption().groupKeyIds(grpId1); + + assertEquals(2, keys0.size()); + assertEquals(2, keys1.size()); + + assertTrue(keys0.containsAll(keys1)); + + keys0 = node0.context().encryption().groupKeyIds(grpId2); + keys1 = node1.context().encryption().groupKeyIds(grpId2); + + assertEquals(2, keys0.size()); + assertEquals(2, keys1.size()); + + assertTrue(keys0.containsAll(keys1)); + + checkGroupKey(grpId1, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + checkGroupKey(grpId2, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + + stopAllGrids(); + + startTestGrids(false); + + node0 = grid(GRID_0); + node1 = grid(GRID_1); + + IgniteCache allNodesCache = node0.createCache("cacheX"); + + // Previous keys must be deleted when the corresponding WAL segment is deleted, so we adding data on all nodes. 
+ long endTime = U.currentTimeMillis() + 30_000; + int cntr = 0; + + do { + allNodesCache.put(cntr, String.valueOf(cntr)); + + if (node0.context().encryption().groupKeyIds(grpId1).size() == 1 && + node1.context().encryption().groupKeyIds(grpId1).size() == 1 && + node0.context().encryption().groupKeyIds(grpId2).size() == 1 && + node1.context().encryption().groupKeyIds(grpId2).size() == 1) + break; + + ++cntr; + } while (U.currentTimeMillis() < endTime); + + assertEquals(1, node0.context().encryption().groupKeyIds(grpId1).size()); + assertEquals(1, node0.context().encryption().groupKeyIds(grpId2).size()); + + assertEquals(node0.context().encryption().groupKeyIds(grpId1), node1.context().encryption().groupKeyIds(grpId1)); + assertEquals(node0.context().encryption().groupKeyIds(grpId2), node1.context().encryption().groupKeyIds(grpId2)); + + checkGroupKey(grpId1, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + checkGroupKey(grpId2, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + + checkEncryptedCaches(node0, node1); + } + + /** + * @throws Exception If failed. 
+ */ + @Test + public void testBasicChangeWithConstantLoad() throws Exception { + walSegments = 20; + + startTestGrids(true); + + IgniteEx node0 = grid(GRID_0); + IgniteEx node1 = grid(GRID_1); + + GridEncryptionManager encrMgr0 = node0.context().encryption(); + GridEncryptionManager encrMgr1 = node1.context().encryption(); + + createEncryptedCache(node0, node1, cacheName(), null); + + forceCheckpoint(); + + int grpId = CU.cacheId(cacheName()); + + IgniteInternalFuture loadFut = loadDataAsync(node0); + + try { + IgniteCache cache = node0.cache(cacheName()); + + boolean success = waitForCondition(() -> cache.size() > 2000, MAX_AWAIT_MILLIS); + assertTrue(success); + + node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS); + + awaitEncryption(G.allGrids(), grpId, MAX_AWAIT_MILLIS); + + waitForCondition(() -> + encrMgr0.groupKeyIds(grpId).size() == 1 && encrMgr1.groupKeyIds(grpId).size() == 1, MAX_AWAIT_MILLIS); + } finally { + loadFut.cancel(); + } + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + + assertEquals(node0.cluster().localNode().id().toString(), 1, encrMgr0.groupKeyIds(grpId).size()); + assertEquals(node1.cluster().localNode().id().toString(), 1, encrMgr1.groupKeyIds(grpId).size()); + } + + /** + * Ensures that unused key will be removed even if user cleaned wal archive folder manually. + * + * @throws Exception If failed. 
+ */ + @Test + public void testWalArchiveCleanup() throws Exception { + cleanPersistenceDir(); + + IgniteEx node = startGrid(GRID_0); + + node.cluster().state(ClusterState.ACTIVE); + + createEncryptedCache(node, null, cacheName(), null); + + node.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + IgniteWriteAheadLogManager walMgr = node.context().cache().context().wal(); + + long reservedIdx = walMgr.currentSegment(); + + boolean reserved = walMgr.reserve(new WALPointer(reservedIdx, 0, 0)); + assertTrue(reserved); + + IgniteInternalFuture loadFut = loadDataAsync(node); + + // Wait until the reserved segment is moved to the archive. + try { + boolean success = waitForCondition(() -> walMgr.lastArchivedSegment() >= reservedIdx, MAX_AWAIT_MILLIS); + assertTrue(success); + } finally { + loadFut.cancel(); + } + + forceCheckpoint(); + + int grpId = CU.cacheId(cacheName()); + + assertEquals(2, node.context().encryption().groupKeyIds(grpId).size()); + + stopAllGrids(); + + // Cleanup WAL arcive folder. + File dbDir = U.resolveWorkDirectory(U.defaultWorkDirectory(), "db", false); + + boolean rmvd = U.delete(new File(dbDir, "wal/archive")); + + assertTrue(rmvd); + + node = startGrid(GRID_0); + + node.cluster().state(ClusterState.ACTIVE); + + loadFut = loadDataAsync(node); + + // Make sure that unused encryption key has been deleted. + try { + GridEncryptionManager encryptMgr = node.context().encryption(); + + boolean success = waitForCondition(() -> encryptMgr.groupKeyIds(grpId).size() == 1, MAX_AWAIT_MILLIS); + assertTrue(success); + } finally { + loadFut.cancel(); + } + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** + * @param grid Grid. + * @return Future for this operation. 
+ */ + private IgniteInternalFuture loadDataAsync(Ignite grid) { + return runAsync(() -> { + long cntr = grid.cache(cacheName()).size(); + + try (IgniteDataStreamer streamer = grid.dataStreamer(cacheName())) { + while (!Thread.currentThread().isInterrupted()) { + streamer.addData(cntr, String.valueOf(cntr)); + + ++cntr; + } + } + }); + } + + /** + * @throws Exception If failed. + */ + @Test + public void testCacheStartOnClientDuringRotation() throws Exception { + T2 nodes = startTestGrids(true); + + IgniteEx node0 = nodes.get1(); + IgniteEx node1 = nodes.get2(); + + IgniteEx client = startClientGrid(getConfiguration("client")); + + node0.cluster().state(ClusterState.ACTIVE); + + String grpName = "shared"; + + createEncryptedCache(client, null, cacheName(), grpName); + + awaitPartitionMapExchange(); + + TestRecordingCommunicationSpi commSpi = TestRecordingCommunicationSpi.spi(node1); + + commSpi.blockMessages((node, message) -> message instanceof SingleNodeMessage); + + IgniteFuture changeKeyFut = node0.encryption().changeCacheGroupKey(Collections.singleton(grpName)); + + commSpi.waitForBlocked(); + + String cacheName = "userCache"; + + IgniteInternalFuture cacheStartFut = runAsync(() -> { + client.getOrCreateCache(cacheConfiguration(cacheName, grpName)); + }); + + commSpi.stopBlock(); + + changeKeyFut.get(MAX_AWAIT_MILLIS); + cacheStartFut.get(MAX_AWAIT_MILLIS); + + IgniteCache cache = client.cache(cacheName); + + for (int i = 0; i < 200; i++) + cache.put(i, String.valueOf(i)); + + checkEncryptedCaches(node0, client); + + checkGroupKey(CU.cacheId(grpName), INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + + checkEncryptedCaches(node0, node1); + } + + /** + * @throws Exception If failed. 
+ */ + @Test + public void testClientJoinDuringRotation() throws Exception { + T2 nodes = startTestGrids(true); + + IgniteEx node0 = nodes.get1(); + IgniteEx node1 = nodes.get2(); + + node0.cluster().state(ClusterState.ACTIVE); + + createEncryptedCache(node0, node1, cacheName(), null); + + awaitPartitionMapExchange(); + + TestRecordingCommunicationSpi commSpi = TestRecordingCommunicationSpi.spi(node1); + + commSpi.blockMessages((node, message) -> message instanceof SingleNodeMessage); + + IgniteFuture changeKeyFut = node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())); + + commSpi.waitForBlocked(); + + IgniteEx client = startClientGrid(getConfiguration("client")); + + assertTrue(!changeKeyFut.isDone()); + + commSpi.stopBlock(); + + changeKeyFut.get(MAX_AWAIT_MILLIS); + + checkEncryptedCaches(node0, client); + + checkGroupKey(CU.cacheId(cacheName()), INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** + * Ensures that node can join after rotation of encryption key. + * + * @throws Exception If failed. 
+ */ + @Test + public void testNodeJoinAfterRotation() throws Exception { + backups = 1; + + T2 nodes = startTestGrids(true); + + createEncryptedCache(nodes.get1(), nodes.get2(), cacheName(), null); + + forceCheckpoint(); + + stopGrid(GRID_1); + resetBaselineTopology(); + + nodes.get1().encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + startGrid(GRID_1); + resetBaselineTopology(); + awaitPartitionMapExchange(); + + int grpId = CU.cacheId(cacheName()); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + checkEncryptedCaches(grid(GRID_0), grid(GRID_1)); + + GridEncryptionManager encrMgr0 = grid(GRID_0).context().encryption(); + GridEncryptionManager encrMgr1 = grid(GRID_1).context().encryption(); + + long maxWalIdx = Math.max(nodes.get1().context().cache().context().wal().currentSegment(), + nodes.get2().context().cache().context().wal().currentSegment()); + + for (long idx = 0; idx <= maxWalIdx; idx++) { + encrMgr0.onWalSegmentRemoved(maxWalIdx); + encrMgr1.onWalSegmentRemoved(maxWalIdx); + } + + assertEquals(1, encrMgr1.groupKeyIds(grpId).size()); + assertEquals(encrMgr0.groupKeyIds(grpId), encrMgr1.groupKeyIds(grpId)); + + startGrid(GRID_2); + + resetBaselineTopology(); + awaitPartitionMapExchange(); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + checkEncryptedCaches(grid(GRID_2), nodes.get1()); + + assertEquals(encrMgr0.groupKeyIds(grpId), grid(GRID_2).context().encryption().groupKeyIds(grpId)); + } + + /** + * @throws Exception If failed. + */ + @Test + public void testWrongCacheGroupSpecified() throws Exception { + T2 grids = startTestGrids(true); + + IgniteEx node0 = grids.get1(); + IgniteEx node1 = grids.get2(); + + assertThrowsAnyCause(log, + () -> node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS), + IgniteException.class, + "Cache group key change was rejected. 
Cache or group \"" + cacheName() + "\" doesn't exists"); + + node0.createCache(new CacheConfiguration<>(cacheName()).setNodeFilter(node -> node.equals(node0.localNode()))); + + assertThrowsAnyCause(log, + () -> node1.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS), + IgniteException.class, + "Cache group key change was rejected. Cache or group \"" + cacheName() + "\" is not encrypted."); + + node0.destroyCache(cacheName()); + + awaitPartitionMapExchange(); + + String grpName = "cacheGroup1"; + + createEncryptedCache(node0, node1, cacheName(), grpName); + + assertThrowsAnyCause(log, + () -> node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS), + IgniteException.class, + "Cache group key change was rejected. Cache or group \"" + cacheName() + "\" is a part of group \"" + + grpName + "\". Provide group name instead of cache name for shared groups."); + } + + /** @throws Exception If failed. */ + @Test + public void testChangeCacheGroupKeyWithoutWAL() throws Exception { + walMode = NONE; + T2 grids = startTestGrids(true); + + createEncryptedCache(grids.get1(), grids.get2(), cacheName(), null); + + IgniteEx node0 = grids.get1(); + + node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + int grpId = CU.cacheId(cacheName()); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + + assertEquals(1, node0.context().encryption().groupKeyIds(grpId).size()); + assertEquals(1, grids.get2().context().encryption().groupKeyIds(grpId).size()); + } + + /** + * Custom discovery hook to block distributed process. + */ + private static class InitMessageDiscoveryHook extends DiscoveryHook { + /** + * Latch to sync execution. + */ + private final CountDownLatch unlockLatch = new CountDownLatch(1); + + /** + * Latch to sync execution. + */ + private final CountDownLatch blockedLatch = new CountDownLatch(1); + + /** + * Distributed process type. 
+ */ + private final DistributedProcessType type; + + /** + * @param type Distributed process type. + */ + private InitMessageDiscoveryHook(DistributedProcessType type) { + this.type = type; + } + + /** {@inheritDoc} */ + @Override public void beforeDiscovery(DiscoveryCustomMessage customMsg) { + if (!(customMsg instanceof InitMessage)) + return; + + InitMessage msg = (InitMessage)customMsg; + + if (msg.type() != type.ordinal()) + return; + + try { + blockedLatch.countDown(); + + unlockLatch.await(MAX_AWAIT_MILLIS, TimeUnit.MILLISECONDS); + } + catch (InterruptedException ignore) { + Thread.currentThread().interrupt(); + } + } + + /** + * @param timeout Timeout in milliseconds. + * @throws InterruptedException If interrupted. + */ + public void waitForBlocked(long timeout) throws InterruptedException { + blockedLatch.await(timeout, TimeUnit.MILLISECONDS); + } + + /** */ + public void stopBlock() { + unlockLatch.countDown(); + } + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/CacheGroupReencryptionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/CacheGroupReencryptionTest.java new file mode 100644 index 00000000000000..19c8351184175c --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/CacheGroupReencryptionTest.java @@ -0,0 +1,871 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.encryption; + +import java.io.File; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.file.OpenOption; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.atomic.AtomicBoolean; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.cache.query.annotations.QuerySqlField; +import org.apache.ignite.cluster.ClusterState; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.EncryptionConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.events.EventType; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.IgniteFutureCancelledCheckedException; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.processors.cache.CacheGroupMetricsImpl; +import org.apache.ignite.internal.processors.cache.persistence.file.FileIO; +import org.apache.ignite.internal.processors.cache.persistence.file.FileIODecorator; +import 
org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory; +import org.apache.ignite.internal.processors.cache.persistence.file.RandomAccessFileIOFactory; +import org.apache.ignite.internal.processors.metric.MetricRegistry; +import org.apache.ignite.internal.util.typedef.G; +import org.apache.ignite.internal.util.typedef.T2; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.spi.metric.BooleanMetric; +import org.apache.ignite.spi.metric.LongMetric; +import org.apache.ignite.testframework.GridTestUtils; +import org.junit.Test; + +import static org.apache.ignite.configuration.EncryptionConfiguration.DFLT_REENCRYPTION_RATE_MBPS; +import static org.apache.ignite.configuration.WALMode.LOG_ONLY; +import static org.apache.ignite.internal.managers.encryption.GridEncryptionManager.INITIAL_KEY_ID; +import static org.apache.ignite.internal.processors.metric.impl.MetricUtils.metricName; +import static org.apache.ignite.testframework.GridTestUtils.assertThrowsAnyCause; + +/** + * Cache re-encryption tests. + */ +public class CacheGroupReencryptionTest extends AbstractEncryptionTest { + /** */ + private static final String GRID_2 = "grid-2"; + + /** */ + private static final String GRID_3 = "grid-3"; + + /** Timeout. */ + private static final long MAX_AWAIT_MILLIS = 15_000; + + /** File IO fail flag. */ + private final AtomicBoolean failFileIO = new AtomicBoolean(); + + /** Count of cache backups. */ + private int backups; + + /** Re-encryption rate limit. */ + private double pageScanRate = DFLT_REENCRYPTION_RATE_MBPS; + + /** The number of pages that is scanned during re-encryption under checkpoint lock. 
*/ + private int pageScanBatchSize = EncryptionConfiguration.DFLT_REENCRYPTION_BATCH_SIZE; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String name) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(name); + + cfg.setConsistentId(name); + + cfg.setIncludeEventTypes(EventType.EVT_CACHE_REBALANCE_STOPPED); + + EncryptionConfiguration encCfg = new EncryptionConfiguration() + .setReencryptionBatchSize(pageScanBatchSize) + .setReencryptionRateLimit(pageScanRate); + + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration() + .setMaxSize(1024 * 1024 * 1024L) + .setPersistenceEnabled(true)) + .setPageSize(4 * 1024) + .setWalSegmentSize(10 * 1024 * 1024) + .setWalSegments(4) + .setMaxWalArchiveSize(100 * 1024 * 1024L) + .setCheckpointFrequency(30 * 1000L) + .setWalMode(LOG_ONLY) + .setFileIOFactory(new FailingFileIOFactory(new RandomAccessFileIOFactory(), failFileIO)) + .setEncryptionConfiguration(encCfg); + + cfg.setDataStorageConfiguration(memCfg); + + return cfg; + } + + /** {@inheritDoc} */ + @Override protected CacheConfiguration cacheConfiguration(String name, String grp) { + CacheConfiguration cfg = super.cacheConfiguration(name, grp); + + cfg.setIndexedTypes(Long.class, IndexedObject.class); + + return cfg.setAffinity(new RendezvousAffinityFunction(false, 16)).setBackups(backups); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + + cleanPersistenceDir(); + } + + /** {@inheritDoc} */ + @Override protected Object generateValue(long id) { + return new IndexedObject(id, "string-" + id); + } + + /** + * Check physical recovery after checkpoint failure during re-encryption. + * + * @throws Exception If failed. 
+ */ + @Test + public void testPhysicalRecovery() throws Exception { + T2 nodes = startTestGrids(true); + + createEncryptedCache(nodes.get1(), nodes.get2(), cacheName(), null); + + IgniteInternalFuture fut = GridTestUtils.runAsync(() -> loadData(50_000)); + + forceCheckpoint(); + + enableCheckpoints(nodes.get1(), false); + enableCheckpoints(nodes.get2(), false); + + int grpId = CU.cacheId(cacheName()); + + failFileIO.set(true); + + nodes.get1().encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + awaitEncryption(G.allGrids(), grpId, MAX_AWAIT_MILLIS); + + fut.get(); + + assertThrowsAnyCause(log, () -> { + enableCheckpoints(grid(GRID_0), true); + enableCheckpoints(grid(GRID_1), true); + + forceCheckpoint(); + + return null; + }, IgniteCheckedException.class, null); + + stopAllGrids(true); + + failFileIO.set(false); + + nodes = startTestGrids(false); + + checkEncryptedCaches(nodes.get1(), nodes.get2()); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** @throws Exception If failed. 
*/ + @Test + public void testPhysicalRecoveryWithUpdates() throws Exception { + pageScanRate = 1.5; + + T2 nodes = startTestGrids(true); + + createEncryptedCache(nodes.get1(), nodes.get2(), cacheName(), null); + + loadData(50_000); + + IgniteInternalFuture addFut = GridTestUtils.runAsync(() -> loadData(100_000)); + + IgniteInternalFuture updateFut = GridTestUtils.runAsync(() -> { + IgniteCache cache = grid(GRID_0).cache(cacheName()); + + while (!Thread.currentThread().isInterrupted()) { + for (long i = 50_000; i > 20_000; i--) { + String val = cache.get(i); + + cache.put(i, val); + } + } + }); + + forceCheckpoint(); + + int grpId = CU.cacheId(cacheName()); + + nodes.get1().encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + forceCheckpoint(); + + failFileIO.set(true); + + awaitEncryption(G.allGrids(), grpId, MAX_AWAIT_MILLIS); + + addFut.get(); + updateFut.cancel(); + + assertThrowsAnyCause(log, () -> { + forceCheckpoint(); + + return null; + }, IgniteCheckedException.class, null); + + stopAllGrids(true); + + failFileIO.set(false); + + nodes = startTestGrids(false); + + checkEncryptedCaches(nodes.get1(), nodes.get2()); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** + * Ensures that re-encryption continues after a restart. + * + * @throws Exception If failed. 
+ */ + @Test + public void testLogicalRecovery() throws Exception { + T2 nodes = startTestGrids(true); + + IgniteEx node0 = nodes.get1(); + IgniteEx node1 = nodes.get2(); + + createEncryptedCache(node0, node1, cacheName(), null, true); + + loadData(100_000); + + forceCheckpoint(); + + enableCheckpoints(G.allGrids(), false); + + int grpId = CU.cacheId(cacheName()); + + node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + awaitEncryption(G.allGrids(), grpId, MAX_AWAIT_MILLIS); + + assertEquals(1, node0.context().encryption().groupKey(grpId).id()); + assertEquals(1, node1.context().encryption().groupKey(grpId).id()); + + stopAllGrids(); + + info(">>> Start grids (iteration 1)"); + + startTestGrids(false); + + enableCheckpoints(G.allGrids(), false); + + stopAllGrids(); + + info(">>> Start grids (iteration 2)"); + + startTestGrids(false); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** @throws Exception If failed. */ + @Test + public void testCacheStopDuringReencryption() throws Exception { + pageScanRate = 1; + + T2 nodes = startTestGrids(true); + + IgniteEx node0 = nodes.get1(); + IgniteEx node1 = nodes.get2(); + + createEncryptedCache(node0, node1, cacheName(), null); + + loadData(100_000); + + IgniteCache cache = node0.cache(cacheName()); + + node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + int grpId = CU.cacheId(cacheName()); + + IgniteInternalFuture fut0 = node0.context().encryption().reencryptionFuture(grpId); + + assertFalse(fut0.isDone()); + + assertTrue(isReencryptionInProgress(node0, grpId)); + + cache.destroy(); + + assertThrowsAnyCause(log, () -> { + fut0.get(); + + return null; + }, IgniteFutureCancelledCheckedException.class, null); + + awaitPartitionMapExchange(); + + assertNull(node0.context().encryption().groupKeyIds(grpId)); + assertNull(node1.context().encryption().groupKeyIds(grpId)); + } + + /** @throws Exception If failed. 
*/ + @Test + public void testPartitionEvictionDuringReencryption() throws Exception { + backups = 1; + pageScanRate = 1; + + CountDownLatch rebalanceFinished = new CountDownLatch(1); + + T2 nodes = startTestGrids(true); + + IgniteEx node0 = nodes.get1(); + IgniteEx node1 = nodes.get2(); + + createEncryptedCache(node0, node1, cacheName(), null); + + loadData(100_000); + + IgniteEx node2 = startGrid(GRID_2); + + node2.events().localListen(evt -> { + rebalanceFinished.countDown(); + + return true; + }, EventType.EVT_CACHE_REBALANCE_STOPPED); + + resetBaselineTopology(); + + rebalanceFinished.await(); + + stopGrid(GRID_2); + + resetBaselineTopology(); + + int grpId = CU.cacheId(cacheName()); + + node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + stopAllGrids(); + + pageScanRate = DFLT_REENCRYPTION_RATE_MBPS; + + startTestGrids(false); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** + * Test that partition files are reused correctly. + * + * @throws Exception If failed. + */ + @Test + public void testPartitionFileDestroy() throws Exception { + backups = 1; + pageScanRate = 0.2; + pageScanBatchSize = 10; + + T2 nodes = startTestGrids(true); + + createEncryptedCache(nodes.get1(), nodes.get2(), cacheName(), null); + + loadData(50_000); + + forceCheckpoint(); + + nodes.get1().encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + startGrid(GRID_2); + + // Trigger partitions eviction. + resetBaselineTopology(); + + awaitPartitionMapExchange(true, true, null); + + forceCheckpoint(); + + assertTrue(isReencryptionInProgress(Collections.singleton(cacheName()))); + + // Set unlimited re-encryption rate. + nodes.get1().context().encryption().setReencryptionRate(0); + nodes.get2().context().encryption().setReencryptionRate(0); + + checkGroupKey(CU.cacheId(cacheName()), INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** + * Test that partition files are reused correctly. 
+ * + * @throws Exception If failed. + */ + @Test + public void testPartitionFileDestroyAndRecreate() throws Exception { + backups = 1; + pageScanRate = 1; + + T2 nodes = startTestGrids(true); + + createEncryptedCache(nodes.get1(), nodes.get2(), cacheName(), null); + + loadData(50_000); + + grid(GRID_0).encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + long walSegment = nodes.get1().context().cache().context().wal().currentSegment(); + + for (long n = 0; n <= walSegment; n++) + nodes.get1().context().encryption().onWalSegmentRemoved(n); + + walSegment = nodes.get2().context().cache().context().wal().currentSegment(); + + for (long n = 0; n <= walSegment; n++) + nodes.get2().context().encryption().onWalSegmentRemoved(n); + + // Force checkpoint to prevent logical recovery after key rotation. + forceCheckpoint(); + + startGrid(GRID_2); + + // Trigger partitions eviction. + resetBaselineTopology(); + + awaitPartitionMapExchange(true, true, null); + + // Trigger partitions re-create. + stopGrid(GRID_2); + + resetBaselineTopology(); + + awaitPartitionMapExchange(true, true, null); + + stopAllGrids(); + + nodes = startTestGrids(false); + + checkEncryptedCaches(nodes.get1(), nodes.get2()); + + checkGroupKey(CU.cacheId(cacheName()), INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** + * @throws Exception If failed. 
+ */ + @Test + public void testNotBltNodeJoin() throws Exception { + backups = 1; + pageScanRate = 1; + pageScanBatchSize = 10; + + T2 nodes = startTestGrids(true); + + createEncryptedCache(nodes.get1(), nodes.get2(), cacheName(), null); + + loadData(50_000); + + forceCheckpoint(); + + long startIdx1 = nodes.get1().context().cache().context().wal().currentSegment(); + long startIdx2 = nodes.get2().context().cache().context().wal().currentSegment(); + + nodes.get1().encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + long endIdx1 = nodes.get1().context().cache().context().wal().currentSegment(); + long endIdx2 = nodes.get2().context().cache().context().wal().currentSegment(); + + stopGrid(GRID_1); + + resetBaselineTopology(); + + int grpId = CU.cacheId(cacheName()); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + + startGrid(GRID_1); + + resetBaselineTopology(); + + awaitPartitionMapExchange(); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + + assertEquals(2, grid(GRID_0).context().encryption().groupKeyIds(grpId).size()); + assertEquals(2, grid(GRID_1).context().encryption().groupKeyIds(grpId).size()); + + // Simulate that wal was removed. + for (long segment = startIdx1; segment <= endIdx1; segment++) + grid(GRID_0).context().encryption().onWalSegmentRemoved(segment); + + assertEquals(1, grid(GRID_0).context().encryption().groupKeyIds(grpId).size()); + + for (long segment = startIdx2; segment <= endIdx2; segment++) + grid(GRID_1).context().encryption().onWalSegmentRemoved(segment); + + assertEquals(1, grid(GRID_1).context().encryption().groupKeyIds(grpId).size()); + } + + /** + * @throws Exception If failed. 
+ */ + @Test + public void testReencryptionStartsAfterNodeRestart() throws Exception { + pageScanRate = 0.000000001; + + T2 nodes = startTestGrids(true); + + IgniteEx node0 = nodes.get1(); + IgniteEx node1 = nodes.get2(); + + createEncryptedCache(node0, node1, cacheName(), null); + + forceCheckpoint(); + + int grpId = CU.cacheId(cacheName()); + + node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + forceCheckpoint(); + + stopAllGrids(); + + nodes = startTestGrids(false); + + node0 = nodes.get1(); + node1 = nodes.get2(); + + assertTrue(isReencryptionInProgress(node0, grpId)); + assertTrue(isReencryptionInProgress(node1, grpId)); + + stopAllGrids(); + + pageScanRate = DFLT_REENCRYPTION_RATE_MBPS; + + startTestGrids(false); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** + * @throws Exception If failed. + */ + @Test + public void testReencryptionOnUnstableTopology() throws Exception { + backups = 1; + pageScanRate = 2; + + T2 nodes = startTestGrids(true); + + IgniteEx node0 = nodes.get1(); + IgniteEx node1 = nodes.get2(); + + startGrid(GRID_2); + startGrid(GRID_3); + + resetBaselineTopology(); + + createEncryptedCache(node0, node1, cacheName(), null); + + String cache2 = "encrypted-2"; + + createEncryptedCache(node0, node1, cache2, null); + + loadData(cacheName(), 100_000); + loadData(cache2, 100_000); + + List cacheGroups = Arrays.asList(cacheName(), cache2); + + node0.encryption().changeCacheGroupKey(cacheGroups).get(); + + while (isReencryptionInProgress(cacheGroups)) { + int rndNode = ThreadLocalRandom.current().nextInt(3); + + String gridName = "grid-" + rndNode; + + stopGrid(gridName); + + startGrid(gridName); + } + + stopAllGrids(); + + startGrid(GRID_0); + startGrid(GRID_1); + startGrid(GRID_2); + startGrid(GRID_3); + + grid(GRID_0).cluster().state(ClusterState.ACTIVE); + + awaitPartitionMapExchange(); + + checkGroupKey(CU.cacheId(cacheName()), INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + 
checkGroupKey(CU.cacheId(cache2), INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** + * @throws Exception If failed. + */ + @Test + public void testChangeBaseline() throws Exception { + backups = 1; + pageScanRate = 2; + + T2 nodes = startTestGrids(true); + + IgniteEx node0 = nodes.get1(); + IgniteEx node1 = nodes.get2(); + + createEncryptedCache(node0, node1, cacheName(), null); + + loadData(100_000); + + node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + assertTrue(isReencryptionInProgress(Collections.singleton(cacheName()))); + + startGrid(GRID_2); + + resetBaselineTopology(); + + startGrid(GRID_3); + + resetBaselineTopology(); + + awaitPartitionMapExchange(); + + checkGroupKey(CU.cacheId(cacheName()), INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + + node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + stopGrid(GRID_2); + + resetBaselineTopology(); + + awaitPartitionMapExchange(); + + checkGroupKey(CU.cacheId(cacheName()), INITIAL_KEY_ID + 2, MAX_AWAIT_MILLIS); + + node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + startGrid(GRID_2); + + resetBaselineTopology(); + + awaitPartitionMapExchange(); + + checkGroupKey(CU.cacheId(cacheName()), INITIAL_KEY_ID + 3, MAX_AWAIT_MILLIS); + } + + /** @throws Exception If failed. 
*/ + @Test + public void testKeyCleanup() throws Exception { + T2 nodes = startTestGrids(true); + + IgniteEx node0 = nodes.get1(); + IgniteEx node1 = nodes.get2(); + + createEncryptedCache(node0, node1, cacheName(), null); + + forceCheckpoint(); + + enableCheckpoints(G.allGrids(), false); + + int grpId = CU.cacheId(cacheName()); + + long startIdx = node1.context().cache().context().wal().currentSegment(); + + node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + long endIdx = node1.context().cache().context().wal().currentSegment(); + + awaitEncryption(G.allGrids(), grpId, MAX_AWAIT_MILLIS); + + // Simulate that wal was removed. + for (long segment = startIdx; segment <= endIdx; segment++) + node1.context().encryption().onWalSegmentRemoved(segment); + + stopGrid(GRID_1); + + node1 = startGrid(GRID_1); + + enableCheckpoints(G.allGrids(), true); + + node1.cluster().state(ClusterState.ACTIVE); + + node1.resetLostPartitions(Collections.singleton(ENCRYPTED_CACHE)); + + checkEncryptedCaches(node0, node1); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** @throws Exception If failed. */ + @Test + public void testReencryptionMetrics() throws Exception { + pageScanRate = 0.000000001; + + T2 nodes = startTestGrids(true); + + IgniteEx node0 = nodes.get1(); + IgniteEx node1 = nodes.get2(); + + createEncryptedCache(node0, node1, cacheName(), null); + + node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + validateMetrics(node0, false); + validateMetrics(node1, false); + + pageScanRate = DFLT_REENCRYPTION_RATE_MBPS; + + stopAllGrids(); + + nodes = startTestGrids(false); + + node0 = nodes.get1(); + node1 = nodes.get2(); + + awaitEncryption(G.allGrids(), CU.cacheId(cacheName()), MAX_AWAIT_MILLIS); + + forceCheckpoint(); + + validateMetrics(node0, true); + validateMetrics(node1, true); + } + + /** + * @param node Grid. + * @param finished Expected reencryption status. 
+ */ + private void validateMetrics(IgniteEx node, boolean finished) { + MetricRegistry registry = + node.context().metric().registry(metricName(CacheGroupMetricsImpl.CACHE_GROUP_METRICS_PREFIX, cacheName())); + + LongMetric bytesLeft = registry.findMetric("ReencryptionBytesLeft"); + + if (finished) + assertEquals(0, bytesLeft.value()); + else + assertTrue(bytesLeft.value() > 0); + + BooleanMetric reencryptionFinished = registry.findMetric("ReencryptionFinished"); + + assertEquals(finished, reencryptionFinished.value()); + } + + /** + * @param cacheGroups Cache group names. + * @return {@code True} If reencryption of the specified groups is not yet complete. + */ + private boolean isReencryptionInProgress(Iterable cacheGroups) { + for (Ignite node : G.allGrids()) { + for (String groupName : cacheGroups) { + if (isReencryptionInProgress((IgniteEx)node, CU.cacheId(groupName))) + return true; + } + } + + return false; + } + + /** */ + private static final class FailingFileIOFactory implements FileIOFactory { + /** */ + private final FileIOFactory delegateFactory; + + /** */ + private final AtomicBoolean failFlag; + + /** + * @param factory Delegate factory. + */ + FailingFileIOFactory(FileIOFactory factory, AtomicBoolean failFlag) { + delegateFactory = factory; + + this.failFlag = failFlag; + } + + /** {@inheritDoc}*/ + @Override public FileIO create(File file, OpenOption... 
modes) throws IOException { + FileIO delegate = delegateFactory.create(file, modes); + + return new FailingFileIO(delegate); + } + + /** */ + final class FailingFileIO extends FileIODecorator { + /** + * @param delegate File I/O delegate + */ + public FailingFileIO(FileIO delegate) { + super(delegate); + } + + /** {@inheritDoc} */ + @Override public int writeFully(ByteBuffer srcBuf, long position) throws IOException { + if (failFlag.get()) + throw new IOException("Test exception."); + + return delegate.writeFully(srcBuf, position); + } + } + } + + /** */ + private static class IndexedObject { + /** Id. */ + @QuerySqlField(index = true) + private final long id; + + /** Name. */ + @QuerySqlField(index = true) + private final String name; + + /** + * @param id Id. + */ + public IndexedObject(long id, String name) { + this.id = id; + this.name = name; + } + + /** {@inheritDoc} */ + @Override public boolean equals(Object o) { + if (this == o) + return true; + + if (o == null || getClass() != o.getClass()) + return false; + + IndexedObject obj = (IndexedObject)o; + + return id == obj.id && Objects.equals(name, obj.name); + } + + /** {@inheritDoc} */ + @Override public int hashCode() { + return Objects.hash(name, id); + } + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheBigEntryTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheBigEntryTest.java index 92202025d45f32..613f376a65c146 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheBigEntryTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheBigEntryTest.java @@ -22,6 +22,7 @@ import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.IgniteInterruptedCheckedException; +import org.apache.ignite.internal.managers.encryption.GroupKey; import 
org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.spi.encryption.keystore.KeystoreEncryptionKey; @@ -59,7 +60,7 @@ public void testCreateEncryptedCacheWithBigEntry() throws Exception { int grpId = CU.cacheGroupId(cacheName(), null); KeystoreEncryptionKey keyBeforeRestart = - (KeystoreEncryptionKey)grids.get1().context().encryption().groupKey(grpId); + (KeystoreEncryptionKey)grids.get1().context().encryption().groupKey(grpId).key(); stopAllGrids(); @@ -67,7 +68,11 @@ public void testCreateEncryptedCacheWithBigEntry() throws Exception { checkEncryptedCaches(grids.get1(), grids.get2()); - KeystoreEncryptionKey keyAfterRestart = (KeystoreEncryptionKey)grids.get1().context().encryption().groupKey(grpId); + GroupKey grpKeyAfterRestart = grids.get1().context().encryption().groupKey(grpId); + + assertNotNull(grpKeyAfterRestart); + + KeystoreEncryptionKey keyAfterRestart = (KeystoreEncryptionKey)grpKeyAfterRestart.key(); assertNotNull(keyAfterRestart); assertNotNull(keyAfterRestart.key()); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheCreateTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheCreateTest.java index aaf880abf7f912..7f5f3ae1954691 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheCreateTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheCreateTest.java @@ -28,6 +28,7 @@ import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.managers.encryption.GroupKey; import org.apache.ignite.internal.processors.cache.IgniteInternalCache; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.U; @@ -89,8 +90,11 @@ public void 
testCreateEncryptedCache() throws Exception { assertNotNull(enc); - KeystoreEncryptionKey key = - (KeystoreEncryptionKey)grid.context().encryption().groupKey(CU.cacheGroupId(ENCRYPTED_CACHE, null)); + GroupKey grpKey = grid.context().encryption().groupKey(CU.cacheGroupId(ENCRYPTED_CACHE, null)); + + assertNotNull(grpKey); + + KeystoreEncryptionKey key = (KeystoreEncryptionKey)grpKey.key(); assertNotNull(key); assertNotNull(key.key()); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheDestroyTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheDestroyTest.java index 4fe9f591e2cc3c..1f5f0cf39723ac 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheDestroyTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheDestroyTest.java @@ -19,13 +19,13 @@ import java.util.Collection; import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.managers.encryption.GroupKey; import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetaStorage; import org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.internal.CU; -import org.apache.ignite.spi.encryption.keystore.KeystoreEncryptionKey; import org.junit.Test; -import static org.apache.ignite.internal.managers.encryption.GridEncryptionManager.ENCRYPTION_KEY_PREFIX; +import static org.apache.ignite.internal.managers.encryption.GridEncryptionManager.ENCRYPTION_KEYS_PREFIX; /** */ @@ -114,17 +114,17 @@ private void checkCacheDestroyed(IgniteEx grid, String encCacheName, String grpN int grpId = CU.cacheGroupId(encCacheName, grpName); - KeystoreEncryptionKey encKey = (KeystoreEncryptionKey)grid.context().encryption().groupKey(grpId); + GroupKey encKey = grid.context().encryption().groupKey(grpId); MetaStorage metaStore = grid.context().cache().context().database().metaStorage(); if (keyShouldBeEmpty) { 
assertNull(encKey); - assertNull(metaStore.readRaw(ENCRYPTION_KEY_PREFIX + grpId)); + assertNull(metaStore.readRaw(ENCRYPTION_KEYS_PREFIX + grpId)); } else { assertNotNull(encKey); - assertNotNull(metaStore.readRaw(ENCRYPTION_KEY_PREFIX + grpId)); + assertNotNull(metaStore.readRaw(ENCRYPTION_KEYS_PREFIX + grpId)); } } } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheGroupCreateTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheGroupCreateTest.java index 12fd8be41b6c58..4caf3033be4a78 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheGroupCreateTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheGroupCreateTest.java @@ -21,6 +21,7 @@ import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.managers.encryption.GridEncryptionManager; +import org.apache.ignite.internal.managers.encryption.GroupKey; import org.apache.ignite.internal.processors.cache.IgniteInternalCache; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.spi.encryption.keystore.KeystoreEncryptionKey; @@ -71,7 +72,11 @@ public void testCreateEncryptedCacheGroup() throws Exception { GridEncryptionManager encMgr = encrypted2.context().kernalContext().encryption(); - KeystoreEncryptionKey key2 = (KeystoreEncryptionKey)encMgr.groupKey(CU.cacheGroupId(ENCRYPTED_CACHE, ENCRYPTED_GROUP)); + GroupKey grpKey2 = encMgr.groupKey(CU.cacheGroupId(ENCRYPTED_CACHE, ENCRYPTED_GROUP)); + + assertNotNull(grpKey2); + + KeystoreEncryptionKey key2 = (KeystoreEncryptionKey)grpKey2.key(); assertNotNull(key2); assertNotNull(key2.key()); @@ -108,8 +113,11 @@ private KeystoreEncryptionKey createEncryptedCache(String cacheName, String grpN assertNotNull(enc); - KeystoreEncryptionKey key = - 
(KeystoreEncryptionKey)grid.context().encryption().groupKey(CU.cacheGroupId(cacheName, grpName)); + GroupKey grpKey = grid.context().encryption().groupKey(CU.cacheGroupId(cacheName, grpName)); + + assertNotNull(grpKey); + + KeystoreEncryptionKey key = (KeystoreEncryptionKey)grpKey.key(); assertNotNull(key); assertNotNull(key.key()); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheNodeJoinTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheNodeJoinTest.java index cdf802bf17f997..072afffa7b9079 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheNodeJoinTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheNodeJoinTest.java @@ -17,12 +17,16 @@ package org.apache.ignite.internal.encryption; +import org.apache.ignite.IgniteCache; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.cluster.ClusterState; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.util.IgniteUtils; +import org.apache.ignite.spi.IgniteSpiException; import org.apache.ignite.spi.encryption.keystore.KeystoreEncryptionSpi; +import org.apache.ignite.testframework.GridTestUtils; import org.junit.Test; import static org.apache.ignite.testframework.GridTestUtils.assertThrowsWithCause; @@ -42,6 +46,12 @@ public class EncryptedCacheNodeJoinTest extends AbstractEncryptionTest { /** */ private static final String GRID_5 = "grid-5"; + /** */ + private static final String GRID_6 = "grid-6"; + + /** */ + private static final String GRID_7 = "grid-7"; + /** */ public static final String CLIENT = "client"; @@ -76,7 +86,9 @@ public class EncryptedCacheNodeJoinTest extends AbstractEncryptionTest { grid.equals(GRID_2) || grid.equals(GRID_3) || grid.equals(GRID_4) || - grid.equals(GRID_5)) { 
+ grid.equals(GRID_5) || + grid.equals(GRID_6) || + grid.equals(GRID_7)) { KeystoreEncryptionSpi encSpi = new KeystoreEncryptionSpi(); encSpi.setKeyStorePath(grid.equals(GRID_2) ? KEYSTORE_PATH_2 : KEYSTORE_PATH); @@ -98,7 +110,12 @@ protected CacheConfiguration cacheConfiguration(String gridName) { CacheConfiguration ccfg = defaultCacheConfiguration(); ccfg.setName(cacheName()); - ccfg.setEncryptionEnabled(gridName.equals(GRID_0)); + + if (gridName.startsWith(CLIENT) || + gridName.equals(GRID_0) || + gridName.equals(GRID_6) || + gridName.equals(GRID_7)) + ccfg.setEncryptionEnabled(true); return ccfg; } @@ -204,6 +221,103 @@ public void testClientNodeJoin() throws Exception { createEncryptedCache(client, grid0, cacheName(), null); } + /** */ + @Test + public void testClientNodeJoinActiveClusterWithNewStaticCacheConfig() throws Exception { + checkNodeJoinWithStaticCacheConfig(true, true, true); + } + + /** */ + @Test + public void testClientNodeJoinActiveClusterWithExistingStaticCacheConfig() throws Exception { + checkNodeJoinWithStaticCacheConfig(true, true, false); + } + + /** */ + @Test + public void testClientNodeJoinInactiveClusterWithNewStaticCacheConfig() throws Exception { + checkNodeJoinWithStaticCacheConfig(true, false, true); + } + + /** */ + @Test + public void testClientNodeJoinInactiveClusterWithExistingStaticCacheConfig() throws Exception { + checkNodeJoinWithStaticCacheConfig(true, false, false); + } + + /** */ + @Test + public void testServerNodeJoinActiveClusterWithNewStaticCacheConfig() throws Exception { + checkNodeJoinWithStaticCacheConfig(false, true, true); + } + + /** */ + @Test + public void testServerNodeJoinInactiveClusterWithNewStaticCacheConfig() throws Exception { + checkNodeJoinWithStaticCacheConfig(false, false, true); + } + + /** + * @param client {@code True} to test client node join, {@code False} to test server node join. + * @param activateBeforeJoin {@code True} to activate the server before joining the client node. 
+ * @param newCfg {@code True} to configure cache on the last joined node. {@code False} to configure on all nodes. + */ + private void checkNodeJoinWithStaticCacheConfig( + boolean client, + boolean activateBeforeJoin, + boolean newCfg + ) throws Exception { + if (!newCfg) + configureCache = true; + + startGrid(GRID_0); + startGrid(GRID_6); + + IgniteEx client1 = startClientGrid("client1"); + + if (newCfg) + configureCache = true; + + if (activateBeforeJoin) + grid(GRID_0).cluster().state(ClusterState.ACTIVE); + + if (client && newCfg) { + String expErrMsg = "Joining node has encrypted caches which are not presented on the cluster, " + + "encrypted caches configured on client node cannot be started when such node joins " + + "the cluster, these caches can be started manually (dynamically) after node joined" + + "[caches=" + cacheName() + ']'; + + GridTestUtils.assertThrowsAnyCause(log, () -> startClientGrid(CLIENT), IgniteSpiException.class, expErrMsg); + + return; + } + + IgniteEx node = client ? 
startClientGrid(CLIENT) : startGrid(GRID_7); + + if (!activateBeforeJoin) + grid(GRID_0).cluster().state(ClusterState.ACTIVE); + + awaitPartitionMapExchange(); + + IgniteCache cache = node.cache(cacheName()); + + assertNotNull(cache); + + for (long i = 0; i < 100; i++) + cache.put(i, String.valueOf(i)); + + checkEncryptedCaches(grid(GRID_0), grid(GRID_6)); + checkEncryptedCaches(grid(GRID_0), client1); + checkData(client1); + + if (client) { + checkEncryptedCaches(grid(GRID_0), grid(CLIENT)); + checkData(grid(CLIENT)); + } + else + checkEncryptedCaches(grid(GRID_7), grid(GRID_0)); + } + /** */ @Test public void testNodeCantJoinWithSameNameButNotEncCache() throws Exception { diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheRestartTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheRestartTest.java index 9107ddf39947cd..dd2a50a5c2caea 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheRestartTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheRestartTest.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.encryption; import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.managers.encryption.GroupKey; import org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.spi.encryption.keystore.KeystoreEncryptionKey; @@ -48,7 +49,8 @@ public void testCreateEncryptedCache() throws Exception { int grpId = CU.cacheGroupId(cacheName(), null); - KeystoreEncryptionKey keyBeforeRestart = (KeystoreEncryptionKey)grids.get1().context().encryption().groupKey(grpId); + KeystoreEncryptionKey keyBeforeRestart = + (KeystoreEncryptionKey)grids.get1().context().encryption().groupKey(grpId).key(); stopAllGrids(); @@ -56,7 +58,11 @@ public void testCreateEncryptedCache() throws Exception { checkEncryptedCaches(grids.get1(), 
grids.get2()); - KeystoreEncryptionKey keyAfterRestart = (KeystoreEncryptionKey)grids.get1().context().encryption().groupKey(grpId); + GroupKey grpKeyAfterRestart = grids.get1().context().encryption().groupKey(grpId); + + assertNotNull(grpKeyAfterRestart); + + KeystoreEncryptionKey keyAfterRestart = (KeystoreEncryptionKey)grpKeyAfterRestart.key(); assertNotNull(keyAfterRestart); assertNotNull(keyAfterRestart.key()); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptionMXBeanTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptionMXBeanTest.java index ab5bf521ae9872..5844b5e481c6fe 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptionMXBeanTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptionMXBeanTest.java @@ -20,10 +20,12 @@ import org.apache.ignite.IgniteException; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.managers.encryption.EncryptionMXBeanImpl; +import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.mxbean.EncryptionMXBean; import org.junit.Test; import static org.apache.ignite.cluster.ClusterState.ACTIVE_READ_ONLY; +import static org.apache.ignite.internal.managers.encryption.GridEncryptionManager.INITIAL_KEY_ID; import static org.apache.ignite.spi.encryption.keystore.KeystoreEncryptionSpi.DEFAULT_MASTER_KEY_NAME; import static org.apache.ignite.testframework.GridTestUtils.assertThrowsWithCause; @@ -110,6 +112,28 @@ public void testMasterKeyChangeOnInactiveAndReadonlyCluster() throws Exception { assertEquals(MASTER_KEY_NAME_2, grid0.encryption().getMasterKeyName()); } + /** @throws Exception If failed. 
*/ + @Test + public void testCacheGroupKeyChange() throws Exception { + IgniteEx ignite = startGrid(GRID_0); + + ignite.cluster().active(true); + + createEncryptedCache(ignite, null, cacheName(), null); + + EncryptionMXBean mBean = getMBean(GRID_0); + + int grpId = CU.cacheId(cacheName()); + + assertEquals(INITIAL_KEY_ID, ignite.context().encryption().groupKey(grpId).id()); + + mBean.changeCacheGroupKey(cacheName()); + + assertEquals(INITIAL_KEY_ID + 1, ignite.context().encryption().groupKey(grpId).id()); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, getTestTimeout()); + } + /** {@inheritDoc} */ @Override protected void beforeTestsStarted() throws Exception { cleanPersistenceDir(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/MasterKeyChangeTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/MasterKeyChangeTest.java index 81d489f6da5278..91dcd052b05d7b 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/encryption/MasterKeyChangeTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/MasterKeyChangeTest.java @@ -42,7 +42,7 @@ import static org.apache.ignite.IgniteSystemProperties.IGNITE_MASTER_KEY_NAME_TO_CHANGE_BEFORE_STARTUP; import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; -import static org.apache.ignite.internal.managers.encryption.GridEncryptionManager.ENCRYPTION_KEY_PREFIX; +import static org.apache.ignite.internal.managers.encryption.GridEncryptionManager.ENCRYPTION_KEYS_PREFIX; import static org.apache.ignite.internal.managers.encryption.GridEncryptionManager.MASTER_KEY_NAME_PREFIX; import static org.apache.ignite.spi.encryption.keystore.KeystoreEncryptionSpi.DEFAULT_MASTER_KEY_NAME; import static org.apache.ignite.testframework.GridTestUtils.assertThrowsWithCause; @@ -59,6 +59,7 @@ public class MasterKeyChangeTest extends AbstractEncryptionTest { IgniteConfiguration cfg = super.getConfiguration(name); cfg.setCommunicationSpi(new 
TestRecordingCommunicationSpi()); + cfg.setConsistentId(name); return cfg; } @@ -315,14 +316,14 @@ public void testRecoveryFromWalWithCacheOperations() throws Exception { DynamicCacheDescriptor desc = grid0.context().cache().cacheDescriptor(cacheName()); - Serializable oldKey = metaStorage.read(ENCRYPTION_KEY_PREFIX + desc.groupId()); + Serializable oldKey = metaStorage.read(ENCRYPTION_KEYS_PREFIX + desc.groupId()); assertNotNull(oldKey); dbMgr.checkpointReadLock(); // 6. Simulate group key write error to MetaStore for one node to check recovery from WAL. - metaStorage.write(ENCRYPTION_KEY_PREFIX + desc.groupId(), new byte[0]); + metaStorage.write(ENCRYPTION_KEYS_PREFIX + desc.groupId(), new byte[0]); dbMgr.checkpointReadUnlock(); @@ -468,7 +469,7 @@ public void testMultiByteMasterKeyNameWalRecovery() throws Exception { } /** {@inheritDoc} */ - @Override protected void afterTest() throws Exception { + @Override protected void beforeTest() throws Exception { stopAllGrids(); cleanPersistenceDir(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/managers/deployment/P2PCacheOperationIntoComputeTest.java b/modules/core/src/test/java/org/apache/ignite/internal/managers/deployment/P2PCacheOperationIntoComputeTest.java new file mode 100644 index 00000000000000..8062e1f60a4f01 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/managers/deployment/P2PCacheOperationIntoComputeTest.java @@ -0,0 +1,146 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.managers.deployment; + +import java.lang.reflect.Constructor; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DeploymentMode; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.lang.IgniteCallable; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; + +/** + * Using cache API in P2P tasks. + */ +public class P2PCacheOperationIntoComputeTest extends GridCommonAbstractTest { + /** Person class name. */ + private static final String PERSON_CLASS_NAME = "org.apache.ignite.tests.p2p.cache.Person"; + + /** Deployment task name. */ + private static final String AVERAGE_PERSON_SALARY_CLOSURE_NAME = "org.apache.ignite.tests.p2p.compute.AveragePersonSalaryCallable"; + + /** Transactional cache name. */ + private static final String DEFAULT_TX_CACHE_NAME = DEFAULT_CACHE_NAME + "_tx"; + + /** Deployment mode for node configuration. 
*/ + public DeploymentMode deplymentMode; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + return super.getConfiguration(igniteInstanceName) + .setConsistentId(igniteInstanceName) + .setPeerClassLoadingEnabled(true) + .setDeploymentMode(deplymentMode) + .setCacheConfiguration(new CacheConfiguration(DEFAULT_CACHE_NAME), + new CacheConfiguration(DEFAULT_TX_CACHE_NAME) + .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL)); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + + super.afterTest(); + } + + /** + * Checks cache API in the deployed tasks with SHARED mode. + * + * @throws Exception If failed. + */ + @Test + public void testShared() throws Exception { + deplymentMode = DeploymentMode.SHARED; + + Ignite ignite0 = startGrids(2); + + awaitPartitionMapExchange(); + + Ignite client = startClientGrid(2); + + calculateAverageSalary(client, DEFAULT_CACHE_NAME); + calculateAverageSalary(client, DEFAULT_TX_CACHE_NAME); + } + + /** + * Checks cache API in the deployed tasks with CONTINUOUS mode. + * + * @throws Exception If failed. + */ + @Test + public void testContinuous() throws Exception { + deplymentMode = DeploymentMode.CONTINUOUS; + + Ignite ignite0 = startGrids(2); + + awaitPartitionMapExchange(); + + Ignite client = startClientGrid(2); + + calculateAverageSalary(client, DEFAULT_CACHE_NAME); + calculateAverageSalary(client, DEFAULT_TX_CACHE_NAME); + } + + /** + * Launches a closure which is initiated in a client node, but is executed in server. The closure are manipulating + * with a data through user's classes. + * + * @param client Client node. + * @param cacheName Cache name. + * @throws Exception If failed. 
+ */ + private void calculateAverageSalary( + Ignite client, + String cacheName + ) throws Exception { + Constructor personCtor = getExternalClassLoader().loadClass(PERSON_CLASS_NAME).getConstructor(String.class); + + IgniteCallable avgSalaryClosure = (IgniteCallable)getExternalClassLoader().loadClass(AVERAGE_PERSON_SALARY_CLOSURE_NAME) + .getConstructor(String.class, int.class, int.class).newInstance(cacheName, 0, 10); + + IgniteCache cache = client.cache(cacheName); + + for (int i = 0; i < 10; i++) + cache.put(i, createPerson(personCtor, i)); + + Double avg = client.compute().call(avgSalaryClosure); + + info("Average salary is " + avg); + } + + /** + * Creates a new person instance. + * + * @param personConst Constructor. + * @param id Person id. + * @return A person instance. + * @throws Exception If failed. + */ + private Object createPerson(Constructor personConst, int id) throws Exception { + Object person = personConst.newInstance("Person" + id); + GridTestUtils.setFieldValue(person, "id", id); + GridTestUtils.setFieldValue(person, "lastName", "Last name " + id); + GridTestUtils.setFieldValue(person, "salary", id * Math.PI); + return person; + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/metric/IoStatisticsMetricsLocalMXBeanImplSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/metric/IoStatisticsMetricsLocalMXBeanImplSelfTest.java index 9f8a32408c9cf3..38a743a4b8a286 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/metric/IoStatisticsMetricsLocalMXBeanImplSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/metric/IoStatisticsMetricsLocalMXBeanImplSelfTest.java @@ -123,7 +123,7 @@ public void testCacheBasic() throws Exception { long cacheLogicalReadsCnt = mreg.findMetric(LOGICAL_READS).value(); - assertEquals(cnt, cacheLogicalReadsCnt); + assertEquals(cnt - 1, cacheLogicalReadsCnt); // 1 is for reuse bucket stripe. 
long cachePhysicalReadsCnt = mreg.findMetric(PHYSICAL_READS).value(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/metric/MetricsSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/metric/MetricsSelfTest.java index 45ac6a2b5f3cef..4ac203dceaa14b 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/metric/MetricsSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/metric/MetricsSelfTest.java @@ -22,7 +22,9 @@ import java.util.List; import java.util.Set; import java.util.Spliterators; +import java.util.concurrent.CountDownLatch; import java.util.stream.StreamSupport; +import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.processors.metric.MetricRegistry; import org.apache.ignite.internal.processors.metric.impl.AtomicLongMetric; @@ -34,14 +36,20 @@ import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; import org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.spi.IgniteSpiException; import org.apache.ignite.spi.metric.BooleanMetric; import org.apache.ignite.spi.metric.DoubleMetric; import org.apache.ignite.spi.metric.IntMetric; import org.apache.ignite.spi.metric.LongMetric; import org.apache.ignite.spi.metric.Metric; +import org.apache.ignite.spi.metric.MetricExporterSpi; import org.apache.ignite.spi.metric.ObjectMetric; +import org.apache.ignite.spi.metric.ReadOnlyMetricManager; +import org.apache.ignite.spi.metric.noop.NoopMetricExporterSpi; import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.GridTestKernalContext; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.jetbrains.annotations.Nullable; import org.junit.Before; import org.junit.Test; @@ -361,6 +369,87 @@ public void testFromFullName() { assertEquals(new 
T2<>("org", "apache"), fromFullName("org.apache")); } + /** */ + @Test + public void testAddBeforeRemoveCompletes() throws Exception { + MetricExporterSpi checkSpi = new NoopMetricExporterSpi() { + private ReadOnlyMetricManager registry; + + private Set names = new HashSet<>(); + + @Override public void spiStart(@Nullable String igniteInstanceName) throws IgniteSpiException { + registry.addMetricRegistryCreationListener(mreg -> { + assertFalse(mreg.name() + " should be unique", names.contains(mreg.name())); + + names.add(mreg.name()); + }); + + registry.addMetricRegistryRemoveListener(mreg -> names.remove(mreg.name())); + } + + @Override public void setMetricRegistry(ReadOnlyMetricManager registry) { + this.registry = registry; + } + }; + + CountDownLatch rmvStarted = new CountDownLatch(1); + CountDownLatch rmvCompleted = new CountDownLatch(1); + + MetricExporterSpi blockingSpi = new NoopMetricExporterSpi() { + private ReadOnlyMetricManager registry; + + @Override public void spiStart(@Nullable String igniteInstanceName) throws IgniteSpiException { + registry.addMetricRegistryRemoveListener(mreg -> { + rmvStarted.countDown(); + try { + rmvCompleted.await(); + } + catch (InterruptedException e) { + throw new RuntimeException(e); + } + }); + } + + @Override public void setMetricRegistry(ReadOnlyMetricManager registry) { + this.registry = registry; + } + }; + + IgniteConfiguration cfg = new IgniteConfiguration().setMetricExporterSpi(blockingSpi, checkSpi); + + GridTestKernalContext ctx = new GridTestKernalContext(log(), cfg); + + ctx.start(); + + // Add metric registry. + ctx.metric().registry("test"); + + // Removes it async, blockingSpi will block remove procedure. 
+ IgniteInternalFuture rmvFut = runAsync(() -> ctx.metric().remove("test")); + + rmvStarted.await(); + + CountDownLatch addStarted = new CountDownLatch(1); + + IgniteInternalFuture addFut = runAsync(() -> { + addStarted.countDown(); + + ctx.metric().registry("test"); + }); + + // Waiting for creation to start. + addStarted.await(); + + Thread.sleep(100); + + // Complete removal. + rmvCompleted.countDown(); + + rmvFut.get(getTestTimeout()); + + addFut.get(getTestTimeout()); + } + /** */ private void run(Runnable r, int cnt) throws org.apache.ignite.IgniteCheckedException { List futs = new ArrayList<>(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/pagemem/impl/PageIdUtilsSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/pagemem/impl/PageIdUtilsSelfTest.java index 825c8678aa825d..087b20cd0d9576 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/pagemem/impl/PageIdUtilsSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/pagemem/impl/PageIdUtilsSelfTest.java @@ -18,7 +18,7 @@ package org.apache.ignite.internal.pagemem.impl; import java.util.Random; - +import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; @@ -99,13 +99,15 @@ public void testOffsetExtraction() throws Exception { @Test public void testPageIdFromLink() throws Exception { assertEquals(0x00FFFFFFFFFFFFFFL, PageIdUtils.pageId(0x00FFFFFFFFFFFFFFL)); - assertEquals(0x00FFFFFFFFFFFFFFL, PageIdUtils.pageId(0x10FFFFFFFFFFFFFFL)); - assertEquals(0x00FFFFFFFFFFFFFFL, PageIdUtils.pageId(0x01FFFFFFFFFFFFFFL)); - assertEquals(0x00FFFFFFFFFFFFFFL, PageIdUtils.pageId(0x11FFFFFFFFFFFFFFL)); - assertEquals(0x00FFFFFFFFFFFFFFL, PageIdUtils.pageId(0x80FFFFFFFFFFFFFFL)); - assertEquals(0x00FFFFFFFFFFFFFFL, PageIdUtils.pageId(0x88FFFFFFFFFFFFFFL)); - 
assertEquals(0x00FFFFFFFFFFFFFFL, PageIdUtils.pageId(0x08FFFFFFFFFFFFFFL)); - assertEquals(0x00FFFFFFFFFFFFFFL, PageIdUtils.pageId(0xFFFFFFFFFFFFFFFFL)); + + assertEquals(0x0001FFFFFFFFFFFFL, PageIdUtils.pageId(0x0001FFFFFFFFFFFFL)); + assertEquals(0x0001FFFFFFFFFFFFL, PageIdUtils.pageId(0x1001FFFFFFFFFFFFL)); + assertEquals(0x0001FFFFFFFFFFFFL, PageIdUtils.pageId(0x0101FFFFFFFFFFFFL)); + assertEquals(0x0001FFFFFFFFFFFFL, PageIdUtils.pageId(0x1101FFFFFFFFFFFFL)); + assertEquals(0x0001FFFFFFFFFFFFL, PageIdUtils.pageId(0x8001FFFFFFFFFFFFL)); + assertEquals(0x0001FFFFFFFFFFFFL, PageIdUtils.pageId(0x8801FFFFFFFFFFFFL)); + assertEquals(0x0001FFFFFFFFFFFFL, PageIdUtils.pageId(0x0801FFFFFFFFFFFFL)); + assertEquals(0x0001FFFFFFFFFFFFL, PageIdUtils.pageId(0xFF01FFFFFFFFFFFFL)); assertEquals(0x0002FFFFFFFFFFFFL, PageIdUtils.pageId(0x0002FFFFFFFFFFFFL)); assertEquals(0x1002FFFFFFFFFFFFL, PageIdUtils.pageId(0x1002FFFFFFFFFFFFL)); @@ -116,12 +118,21 @@ public void testPageIdFromLink() throws Exception { assertEquals(0x0802FFFFFFFFFFFFL, PageIdUtils.pageId(0x0802FFFFFFFFFFFFL)); assertEquals(0xFF02FFFFFFFFFFFFL, PageIdUtils.pageId(0xFF02FFFFFFFFFFFFL)); - assertEquals(0L, PageIdUtils.pageId(0x0000000000000000L)); - assertEquals(0L, PageIdUtils.pageId(0x1000000000000000L)); - assertEquals(0L, PageIdUtils.pageId(0x0100000000000000L)); - assertEquals(0L, PageIdUtils.pageId(0x8000000000000000L)); - assertEquals(0L, PageIdUtils.pageId(0x0800000000000000L)); - assertEquals(0L, PageIdUtils.pageId(0xFF00000000000000L)); + assertEquals(0x0004FFFFFFFFFFFFL, PageIdUtils.pageId(0x0004FFFFFFFFFFFFL)); + assertEquals(0x1004FFFFFFFFFFFFL, PageIdUtils.pageId(0x1004FFFFFFFFFFFFL)); + assertEquals(0x0104FFFFFFFFFFFFL, PageIdUtils.pageId(0x0104FFFFFFFFFFFFL)); + assertEquals(0x1104FFFFFFFFFFFFL, PageIdUtils.pageId(0x1104FFFFFFFFFFFFL)); + assertEquals(0x8004FFFFFFFFFFFFL, PageIdUtils.pageId(0x8004FFFFFFFFFFFFL)); + assertEquals(0x8804FFFFFFFFFFFFL, PageIdUtils.pageId(0x8804FFFFFFFFFFFFL)); + 
assertEquals(0x0804FFFFFFFFFFFFL, PageIdUtils.pageId(0x0804FFFFFFFFFFFFL)); + assertEquals(0xFF04FFFFFFFFFFFFL, PageIdUtils.pageId(0xFF04FFFFFFFFFFFFL)); + + assertEquals(0x0000000000000000L, PageIdUtils.pageId(0x0000000000000000L)); + assertEquals(0x1000000000000000L, PageIdUtils.pageId(0x1000000000000000L)); + assertEquals(0x0100000000000000L, PageIdUtils.pageId(0x0100000000000000L)); + assertEquals(0x8000000000000000L, PageIdUtils.pageId(0x8000000000000000L)); + assertEquals(0x0800000000000000L, PageIdUtils.pageId(0x0800000000000000L)); + assertEquals(0xFF00000000000000L, PageIdUtils.pageId(0xFF00000000000000L)); } /** @@ -136,7 +147,7 @@ public void testRandomIds() throws Exception { int partId = rnd.nextInt(PageIdUtils.MAX_PART_ID + 1); int pageNum = rnd.nextInt(); - long pageId = PageIdUtils.pageId(partId, (byte) 0, pageNum); + long pageId = PageIdUtils.pageId(partId, PageIdAllocator.FLAG_DATA, pageNum); String msg = "For values [offset=" + U.hexLong(off) + ", fileId=" + U.hexLong(partId) + ", pageNum=" + U.hexLong(pageNum) + ']'; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheConfigurationValidationSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheConfigurationValidationSelfTest.java index b57c2080842035..2a084f591fe916 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheConfigurationValidationSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheConfigurationValidationSelfTest.java @@ -63,6 +63,10 @@ public class GridCacheConfigurationValidationSelfTest extends GridCommonAbstract private static final String RESERVED_FOR_DATASTRUCTURES_CACHE_GROUP_NAME_IGNITE_INSTANCE_NAME = "reservedForDsCacheGroupNameCheckFails"; + /** */ + private static final String RESERVED_FOR_VOLATILE_DATASTRUCTURES_CACHE_GROUP_NAME_IGNITE_INSTANCE_NAME = + "reservedForVolatileDsCacheGroupNameCheckFails"; + 
/** */ private static final String CACHE_NAME_WITH_SPECIAL_CHARACTERS_REPLICATED = "--№=+:(replicated)"; @@ -135,6 +139,9 @@ else if (igniteInstanceName.contains(DUP_DFLT_CACHES_IGNITE_INSTANCE_NAME)) if (igniteInstanceName.contains(RESERVED_FOR_DATASTRUCTURES_CACHE_GROUP_NAME_IGNITE_INSTANCE_NAME)) namedCacheCfg.setGroupName("default-ds-group"); + if (igniteInstanceName.contains(RESERVED_FOR_VOLATILE_DATASTRUCTURES_CACHE_GROUP_NAME_IGNITE_INSTANCE_NAME)) + namedCacheCfg.setGroupName("default-volatile-ds-group@volatileDsMemPlc"); + return cfg; } @@ -178,6 +185,9 @@ public void testCacheAttributesValidation() throws Exception { // This grid should not start. startInvalidGrid(RESERVED_FOR_DATASTRUCTURES_CACHE_GROUP_NAME_IGNITE_INSTANCE_NAME); + // This grid should not start. + startInvalidGrid(RESERVED_FOR_VOLATILE_DATASTRUCTURES_CACHE_GROUP_NAME_IGNITE_INSTANCE_NAME); + // This grid will start normally. startGrid(1); } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAtomicConcurrentUnorderedUpdateAllTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAtomicConcurrentUnorderedUpdateAllTest.java new file mode 100644 index 00000000000000..be95c9e63846de --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAtomicConcurrentUnorderedUpdateAllTest.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache; + +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.atomic.AtomicInteger; +import javax.cache.configuration.Factory; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.store.CacheStore; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.NearCacheConfiguration; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC; + +/** Test concurrent putAll/removeAll operations with unordered set of keys on atomic caches. */ +@RunWith(Parameterized.class) +public class IgniteCacheAtomicConcurrentUnorderedUpdateAllTest extends GridCommonAbstractTest { + /** */ + private static final int NODES_CNT = 3; + + /** */ + private static final int THREADS_CNT = 20; + + /** */ + private static final String CACHE_NAME = "test-cache"; + + /** */ + private static final int CACHE_SIZE = 1_000; + + /** Parameters. 
*/ + @Parameterized.Parameters(name = "cacheMode={0}, writeThrough={1}, near={2}") + public static Iterable data() { + return Arrays.asList( + new Object[] {CacheMode.PARTITIONED, Boolean.FALSE, Boolean.FALSE}, + new Object[] {CacheMode.PARTITIONED, Boolean.TRUE, Boolean.FALSE}, + new Object[] {CacheMode.PARTITIONED, Boolean.FALSE, Boolean.TRUE}, + new Object[] {CacheMode.REPLICATED, Boolean.FALSE, Boolean.FALSE}, + new Object[] {CacheMode.REPLICATED, Boolean.TRUE, Boolean.FALSE}, + new Object[] {CacheMode.LOCAL, Boolean.FALSE, Boolean.FALSE}, + new Object[] {CacheMode.LOCAL, Boolean.TRUE, Boolean.FALSE} + ); + } + + /** Cache mode. */ + @Parameterized.Parameter() + public CacheMode cacheMode; + + /** Write through. */ + @Parameterized.Parameter(1) + public Boolean writeThrough; + + /** Near cache. */ + @Parameterized.Parameter(2) + public Boolean near; + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + super.afterTest(); + + stopAllGrids(); + } + + /** + * @throws Exception If failed. + */ + @Test + public void testConcurrentUpdateAll() throws Exception { + Ignite ignite = startGridsMultiThreaded(NODES_CNT); + + Factory> cacheStoreFactory = writeThrough ? + new MapCacheStoreStrategy.MapStoreFactory() : null; + + IgniteCache cache = ignite.createCache(new CacheConfiguration<>(CACHE_NAME) + .setWriteThrough(writeThrough).setCacheStoreFactory(cacheStoreFactory) + .setNearConfiguration(near ? 
new NearCacheConfiguration<>() : null) + .setCacheMode(cacheMode).setAtomicityMode(ATOMIC).setBackups(1)); + + CyclicBarrier barrier = new CyclicBarrier(THREADS_CNT); + + AtomicInteger threadCnt = new AtomicInteger(); + + GridTestUtils.runMultiThreaded(() -> { + int threadIdx = threadCnt.incrementAndGet(); + + IgniteCache cache0 = grid(ThreadLocalRandom.current().nextInt(NODES_CNT)).cache(CACHE_NAME); + + Map map = new LinkedHashMap<>(); + + if (threadIdx % 2 == 0) { + for (int i = 0; i < CACHE_SIZE; i++) + map.put(i, i); + } else { + for (int i = CACHE_SIZE - 1; i >= 0; i--) + map.put(i, i); + } + + for (int i = 0; i < 20; i++) { + try { + barrier.await(); + } catch (Exception e) { + fail(e.getMessage()); + } + + cache0.putAll(map); + + cache0.invokeAll(map.keySet(), (k, v) -> v); + + cache0.removeAll(map.keySet()); + + log.info("Thread " + threadIdx + " iteration " + i + " finished"); + } + }, THREADS_CNT, "update-all-runner"); + + assertEquals(0, cache.size()); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheGroupsTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheGroupsTest.java index cd28022f788d4f..db64131315861c 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheGroupsTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheGroupsTest.java @@ -99,6 +99,7 @@ import org.apache.ignite.testframework.GridTestUtils.SF; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.apache.ignite.transactions.Transaction; +import org.apache.ignite.transactions.TransactionRollbackException; import org.jetbrains.annotations.Nullable; import org.junit.Ignore; import org.junit.Test; @@ -4106,10 +4107,14 @@ public void testRestartsAndCacheCreateDestroy() throws Exception { cacheOperation(rnd, cache); } catch (Exception e) { - if (X.hasCause(e, 
CacheStoppedException.class)) { + if (X.hasCause(e, CacheStoppedException.class) || + (X.hasCause(e, CacheInvalidStateException.class) && + X.hasCause(e, TransactionRollbackException.class)) + ) { // Cache operation can be blocked on // awaiting new topology version and cancelled with CacheStoppedException cause. - + // Cache operation can fail + // if a node was stopped during the transaction. continue; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheStartFailoverTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheStartFailoverTest.java index 41b0c112643556..26264596440278 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheStartFailoverTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheStartFailoverTest.java @@ -28,6 +28,7 @@ import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import javax.cache.CacheException; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.cache.CacheAtomicityMode; @@ -44,12 +45,14 @@ import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology; import org.apache.ignite.internal.util.lang.GridAbsPredicate; +import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiPredicate; import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.plugin.extensions.communication.Message; import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import
org.apache.ignite.transactions.TransactionSerializationException; import org.junit.Ignore; import org.junit.Test; @@ -128,8 +131,6 @@ private void clientStartCoordinatorFails(CacheAtomicityMode atomicityMode) throw } }, "start-cache"); - U.sleep(1000); - assertFalse(fut.isDone()); stopGrid(0); @@ -198,8 +199,6 @@ private void clientStartLastServerFails(CacheAtomicityMode atomicityMode) throws } }, "start-cache"); - U.sleep(1000); - assertFalse(fut.isDone()); stopGrid(1); @@ -365,9 +364,29 @@ public void testRebalanceStateConcurrentStart() throws Exception { Map map0 = cache.getAll(keys); - assertEquals(KEYS, map0.size()); + assertEquals("[cache=" + cacheName + + ", expected=" + KEYS + + ", actual=" + map0.size() + ']', KEYS, map0.size()); + + int key = rnd.nextInt(KEYS); + + try { + cache.put(key, i); + } + catch (CacheException e) { + log.error("It couldn't put a value [cache=" + cacheName + + ", key=" + key + + ", val=" + i + ']', e); + + CacheConfiguration ccfg = cache.getConfiguration(CacheConfiguration.class); + + TransactionSerializationException txEx = X.cause(e, TransactionSerializationException.class); - cache.put(rnd.nextInt(KEYS), i); + if (txEx == null || + ccfg.getAtomicityMode() != TRANSACTIONAL_SNAPSHOT || + !txEx.getMessage().contains("Cannot serialize transaction due to write conflict (transaction is marked for rollback)")) + fail("Assert violated because exception was thrown [e=" + e.getMessage() + ']'); + } } } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteIncompleteCacheObjectSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteIncompleteCacheObjectSelfTest.java index e0507ce6cba0cb..72d9f9ca6a1b4d 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteIncompleteCacheObjectSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteIncompleteCacheObjectSelfTest.java @@ -104,6 +104,11 @@ 
private TestCacheObject(final byte type) { /** {@inheritDoc} */ @Nullable @Override public T value(final CacheObjectValueContext ctx, final boolean cpy) { + return value(ctx, cpy, null); + } + + /** {@inheritDoc} */ + @Nullable @Override public T value(final CacheObjectValueContext ctx, final boolean cpy, ClassLoader ldr) { return null; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/TransactionValidationTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/TransactionValidationTest.java new file mode 100644 index 00000000000000..5813aed1652ac8 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/TransactionValidationTest.java @@ -0,0 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache; + +import javax.cache.CacheException; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.NearCacheConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTransactionalCache; +import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.apache.ignite.transactions.Transaction; +import org.junit.Test; + +import static org.apache.ignite.testframework.MvccFeatureChecker.Feature.NEAR_CACHE; +import static org.apache.ignite.testframework.MvccFeatureChecker.skipIfNotSupported; + +/** + * Tests check that second operation in transaction fail if it doesn't pass validation. + */ +public class TransactionValidationTest extends GridCommonAbstractTest { + /** + * @throws Exception If failed. + */ + @Test + public void validationOnRemoteNode() throws Exception { + validationTest(true, false); + } + + /** + * @throws Exception If failed. + */ + @Test + public void validationOnLocalNode() throws Exception { + validationTest(false, false); + } + + /** + * @throws Exception If failed. + */ + @Test + public void validationOnNearCache() throws Exception { + skipIfNotSupported(NEAR_CACHE); + + validationTest(true, true); + } + + /** + * @throws Exception If failed. 
+ */ + public void validationTest(boolean distributed, boolean nearCache) throws Exception { + IgniteEx txCrd; + + if (distributed && nearCache) + txCrd = startGrids(2); + else if (distributed && !nearCache) { + startGrids(2); + + txCrd = startClientGrid(2); + } + else + txCrd = startGrid(0); + + CacheConfiguration cfgCache0 = new CacheConfiguration<>("cache0") + .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL); + + CacheConfiguration cfgCache1 = new CacheConfiguration<>("cache1") + .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL) + .setTopologyValidator(nodes -> false); + + if (nearCache) { + cfgCache0.setNearConfiguration(new NearCacheConfiguration<>()); + + cfgCache1.setNearConfiguration(new NearCacheConfiguration<>()); + } + + IgniteCache cache0 = txCrd.createCache(cfgCache0); + + IgniteCache cache1 = txCrd.createCache(cfgCache1); + + try (Transaction tx = txCrd.transactions().txStart()) { + cache0.put(1, 1); + + boolean isNearCache = ((GatewayProtectedCacheProxy) cache1).context().cache() instanceof GridNearTransactionalCache; + + if (nearCache) + assertTrue("Must be near cache", isNearCache); + else + assertTrue("Must not be near cache", !isNearCache); + + try { + cache1.put(1, 1); + + fail("Validation broken"); + } + catch (CacheException e) { + assertTrue(X.getFullStackTrace(e), + X.hasCause(e, "cache topology is not valid", CacheInvalidStateException.class)); + } + } + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + + super.afterTest(); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/WalModeChangeAdvancedSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/WalModeChangeAdvancedSelfTest.java index 9eb3b7cea89218..10e1db155abb58 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/WalModeChangeAdvancedSelfTest.java +++ 
b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/WalModeChangeAdvancedSelfTest.java @@ -24,7 +24,6 @@ import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.IgniteCheckedException; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/OutOfMemoryVolatileRegionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/OutOfMemoryVolatileRegionTest.java new file mode 100644 index 00000000000000..58e6290d4fb0c1 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/OutOfMemoryVolatileRegionTest.java @@ -0,0 +1,175 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.datastructures; + +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.failure.AbstractFailureHandler; +import org.apache.ignite.failure.FailureContext; +import org.apache.ignite.internal.mem.IgniteOutOfMemoryException; +import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; + +import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC; +import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; + +/** + * Tests behavior of volatile data structures and regular caches + * when {@link IgniteOutOfMemoryException} is thrown. + */ +public class OutOfMemoryVolatileRegionTest extends GridCommonAbstractTest { + /** Minimal region size. */ + private static final long DATA_REGION_SIZE = 15L * 1024 * 1024; + + /** */ + private static final int ATTEMPTS_NUM = 3; + + /** Failure handler triggered. 
*/ + private static volatile boolean failure; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(gridName); + + cfg.setDataStorageConfiguration(new DataStorageConfiguration() + .setPageSize(4096) + .setSystemRegionInitialSize(DATA_REGION_SIZE) + .setSystemRegionMaxSize(DATA_REGION_SIZE) + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration() + .setPersistenceEnabled(true) + .setMetricsEnabled(true))); + + cfg.setFailureHandler(new AbstractFailureHandler() { + /** {@inheritDoc} */ + @Override protected boolean handle(Ignite ignite, FailureContext failureCtx) { + failure = true; + + // Do not invalidate a node context. + return false; + } + }); + + cfg.setCacheConfiguration(cacheConfiguration(ATOMIC), cacheConfiguration(TRANSACTIONAL)); + + return cfg; + } + + /** + * Creates a new cache configuration with the given cache atomicity mode. + * + * @param mode Cache atomicity mode. + * @return Cache configuration. + */ + private CacheConfiguration cacheConfiguration(CacheAtomicityMode mode) { + return new CacheConfiguration(mode.name()) + .setAtomicityMode(mode) + .setAffinity(new RendezvousAffinityFunction(false, 32)); + } + + /** {@inheritDoc} */ + @Override protected void beforeTestsStarted() throws Exception { + cleanPersistenceDir(); + + startGrid(0); + startGrid(1); + } + + /** {@inheritDoc} */ + @Override protected void afterTestsStopped() throws Exception { + stopAllGrids(); + + cleanPersistenceDir(); + } + + /** + * @throws Exception If failed. + */ + @Test + public void testLoadAndClearAtomicCache() throws Exception { + loadAndClearCache(ATOMIC, ATTEMPTS_NUM); + } + + /** + * @throws Exception If failed. 
+ */ + @Test + public void testLoadAndClearTransactionalCache() throws Exception { + loadAndClearCache(TRANSACTIONAL, ATTEMPTS_NUM); + } + + /** + * Creates a new cache with the given atomicity mode and tries to load & clear it in a loop. + * It is assumed that {@link IgniteOutOfMemoryException} is thrown during loading the cache, + * however {@link IgniteCache#clear()} should return the cache to the operable state. + * + * @param mode Cache atomicity mode. + * @param attempts Number of attempts to load and clear the cache. + */ + private void loadAndClearCache(CacheAtomicityMode mode, int attempts) { + grid(0).cluster().active(true); + + failure = false; + + IgniteCache cache = grid(0).cache(mode.name()); + + for (int i = 0; i < attempts; ++i) { + for (int key = 0; key < 5_000; ++key) + cache.put(key, new byte[40]); + + cache.clear(); + } + + assertFalse("Failure handler should not be notified", failure); + + try { + for (int j = 0; j < 100000; j++) { + grid(0).reentrantLock("l" + getClass().getName() + j, + j % 2 == 0, j % 3 == 0, true); + grid(1).semaphore("s" + getClass().getName() + j, + 1 + (j % 7), j % 3 == 0, true); + grid(0).countDownLatch("c" + getClass().getName() + j, + 1 + (j % 13), j % 2 == 0, true); + } + + fail("OutOfMemoryException hasn't been thrown"); + } + catch (Exception e) { + if (!X.hasCause(e, IgniteOutOfMemoryException.class)) + fail("Unexpected exception" + e); + + log.info("Expected exception, n: " + e); + } + + assertTrue("Failure handler wasn't notified", failure); + + for (int i = 0; i < attempts; ++i) { + for (int key = 0; key < 5_000; ++key) + cache.put(key, new byte[40]); + + cache.clear(); + } + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheDetectLostPartitionsTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheDetectLostPartitionsTest.java new file mode 100644 index 00000000000000..d7143d00dad55c --- /dev/null +++
b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheDetectLostPartitionsTest.java @@ -0,0 +1,180 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.distributed; + +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteException; +import org.apache.ignite.cache.PartitionLossPolicy; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.lang.IgnitePredicate; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; + +/** */ +public class CacheDetectLostPartitionsTest extends GridCommonAbstractTest { + /** */ + private static final String TEST_CACHE_NAME = "testcache"; + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + + super.afterTest(); + } + + /** + * Tests detection of lost partitions on a client node when the cache is initialized after partitions were lost.
+ * @throws Exception + */ + @Test + public void testDetectLostPartitionsOnClient() throws Exception { + IgniteEx ig = startGrids(2); + + awaitPartitionMapExchange(); + + IgniteCache cache1 = ig.createCache(getCacheConfig(TEST_CACHE_NAME + 1)); + + IgniteCache cache2 = ig.createCache(getCacheConfig(TEST_CACHE_NAME + 2)); + + for (int i = 0; i < 1000; i++) { + cache1.put(i, i); + + cache2.put(i, i); + } + + IgniteEx client = startClientGrid(2); + + stopGrid(1); + + cache1 = client.cache(TEST_CACHE_NAME + 1); + checkCache(cache1); + + cache2 = client.cache(TEST_CACHE_NAME + 2); + checkCache(cache2); + + cache1.close(); + cache2.close(); + + checkCache(client.cache(TEST_CACHE_NAME + 1)); + checkCache(client.cache(TEST_CACHE_NAME + 2)); + } + + /** + * Tests detection of lost partitions on a client node when the cache was closed before partitions were lost. + * @throws Exception + */ + @Test + public void testDetectLostPartitionsOnClientWithClosedCache() throws Exception { + IgniteEx ig = startGrids(2); + + awaitPartitionMapExchange(); + + IgniteCache cacheSrv = ig.createCache(getCacheConfig(TEST_CACHE_NAME)); + + for (int i = 0; i < 1000; i++) + cacheSrv.put(i, i); + + IgniteEx client = startClientGrid(2); + + IgniteCache cacheCl = client.cache(TEST_CACHE_NAME); + + cacheCl.close(); + + stopGrid(1); + + cacheCl = client.cache(TEST_CACHE_NAME); + + checkCache(cacheCl); + } + + /** + * Tests detection of lost partitions on a server node which doesn't have partitions when the cache was closed + * before partitions were lost.
+ * @throws Exception + */ + @Test + public void testDetectLostPartitionsOnServerWithClosedCache() throws Exception { + startGrids(3); + + awaitPartitionMapExchange(); + + IgniteCache cacheSrv1 = grid(1).createCache( + getCacheConfig(TEST_CACHE_NAME) + .setNodeFilter(new NodeConsistentIdFilter(grid(2).localNode().consistentId())) + ); + + for (int i = 0; i < 1000; i++) + cacheSrv1.put(i, i); + + IgniteEx ig2 = grid(2); + + IgniteCache cacheSrv2 = ig2.cache(TEST_CACHE_NAME); + + cacheSrv2.close(); + + stopGrid(1); + + cacheSrv2 = ig2.cache(TEST_CACHE_NAME); + + checkCache(cacheSrv2); + } + + /** */ + private CacheConfiguration getCacheConfig(String cacheName) { + return new CacheConfiguration<>(cacheName) + .setPartitionLossPolicy(PartitionLossPolicy.READ_WRITE_SAFE); + } + + /** */ + private void checkCache(IgniteCache cache) { + assertFalse(cache.lostPartitions().isEmpty()); + + GridTestUtils.assertThrows(null, () -> { + for (int i = 0; i < 1000; i++) + cache.get(i); + }, + IgniteException.class, "partition data has been lost"); + + GridTestUtils.assertThrows(null, () -> { + for (int i = 0; i < 1000; i++) + cache.put(i, i); + }, + IgniteException.class, "partition data has been lost"); + } + + /** Filter by consistent id. */ + private static class NodeConsistentIdFilter implements IgnitePredicate { + /** */ + private final Object consistentId; + + /** + * @param consistentId Consistent id where cache should be started. 
+ */ + NodeConsistentIdFilter(Object consistentId) { + this.consistentId = consistentId; + } + + /** {@inheritDoc} */ + @Override public boolean apply(ClusterNode node) { + return !node.consistentId().equals(consistentId); + } + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridExchangeFreeSwitchTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridExchangeFreeSwitchTest.java index 05774ce100f1ba..ef94050792764e 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridExchangeFreeSwitchTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridExchangeFreeSwitchTest.java @@ -61,6 +61,7 @@ import static org.apache.ignite.internal.IgniteFeatures.PME_FREE_SWITCH; import static org.apache.ignite.internal.IgniteFeatures.allNodesSupports; import static org.apache.ignite.internal.IgniteFeatures.nodeSupports; +import static org.apache.ignite.testframework.GridTestUtils.runAsync; /** * @@ -369,8 +370,11 @@ private void testNoTransactionsWaitAtNodeLeft(int backups, PartitionLossPolicy l Ignite candidate; MvccProcessor proc; + int nodeToStop; + do { - candidate = G.allGrids().get(r.nextInt(nodes)); + nodeToStop = r.nextInt(nodes); + candidate = grid(nodeToStop); proc = ((IgniteEx)candidate).context().coordinators(); } @@ -383,11 +387,33 @@ private void testNoTransactionsWaitAtNodeLeft(int backups, PartitionLossPolicy l AtomicInteger key_from = new AtomicInteger(); - CountDownLatch readyLatch = new CountDownLatch((backups > 0 ? 4 : 2) * multiplicator); + CountDownLatch readyLatch = new CountDownLatch((backups > 0 ? 
6 : 3) * multiplicator); CountDownLatch failedLatch = new CountDownLatch(1); IgniteCache failedCache = failed.getOrCreateCache(cacheName); + int nodeToStop0 = nodeToStop; + + IgniteInternalFuture checkRebalanced = runAsync(() -> { + try { + failedLatch.await(); + } + catch (Exception e) { + fail("Should not happen [exception=" + e + "]"); + } + for (int i = 0; i < nodes; i++) { + if (i != nodeToStop0) { + GridDhtPartitionsExchangeFuture lastFinishedFut = + grid(i).cachex(cacheName).context().shared().exchange().lastFinishedFuture(); + + assertTrue(lastFinishedFut.rebalanced()); + + assertTrue(lastFinishedFut.topologyVersion() + .equals(new AffinityTopologyVersion(nodes + 1, 0))); + } + } + }); + IgniteInternalFuture nearThenNearFut = multithreadedAsync(() -> { try { List keys = nearKeys(failedCache, 2, key_from.addAndGet(100)); @@ -406,6 +432,7 @@ private void testNoTransactionsWaitAtNodeLeft(int backups, PartitionLossPolicy l readyLatch.countDown(); failedLatch.await(); + checkRebalanced.get(); primaryCache.put(key1, key1); @@ -438,6 +465,7 @@ private void testNoTransactionsWaitAtNodeLeft(int backups, PartitionLossPolicy l readyLatch.countDown(); failedLatch.await(); + checkRebalanced.get(); try { backupCache.put(key1, key1); @@ -470,6 +498,7 @@ private void testNoTransactionsWaitAtNodeLeft(int backups, PartitionLossPolicy l readyLatch.countDown(); failedLatch.await(); + checkRebalanced.get(); try { primaryCache.put(key1, key1); @@ -502,6 +531,7 @@ private void testNoTransactionsWaitAtNodeLeft(int backups, PartitionLossPolicy l readyLatch.countDown(); failedLatch.await(); + checkRebalanced.get(); primaryCache.put(key1, key1); @@ -516,6 +546,75 @@ private void testNoTransactionsWaitAtNodeLeft(int backups, PartitionLossPolicy l } }, multiplicator) : new GridFinishedFuture<>(); + IgniteInternalFuture primaryThenNearFut = multithreadedAsync(() -> { + try { + Integer key0 = primaryKeys(failedCache, 1, key_from.addAndGet(100)).get(0); + Integer key1 = 
nearKeys(failedCache, 1, key_from.addAndGet(100)).get(0); + + Ignite primary = primaryNode(key1, cacheName); + + assertNotSame(failed, primary); + + IgniteCache primaryCache = primary.getOrCreateCache(cacheName); + + try (Transaction tx = primary.transactions().txStart()) { + primaryCache.put(key0, key0); + + readyLatch.countDown(); + failedLatch.await(); + checkRebalanced.get(); + + primaryCache.put(key1, key1); + + try { + tx.commit(); + + fail("Should not happen"); + } + catch (Exception ignored) { + // Transaction broken because of primary left. + } + } + } + catch (Exception e) { + fail("Should not happen [exception=" + e + "]"); + } + }, multiplicator); + + IgniteInternalFuture primaryThenPrimaryWithSameKeyFut = backups > 0 ? multithreadedAsync(() -> { + try { + List keys = primaryKeys(failedCache, 2, key_from.addAndGet(100)); + + Integer key0 = keys.get(0); + + Ignite backup = backupNode(key0, cacheName); + + assertNotSame(failed, backup); + + IgniteCache backupCache = backup.getOrCreateCache(cacheName); + + try (Transaction tx = backup.transactions().txStart()) { + backupCache.put(key0, key0); + + readyLatch.countDown(); + failedLatch.await(); + checkRebalanced.get(); + + try { + backupCache.put(key0, key0 + 1); + + fail("Should not happen"); + } + catch (Exception ignored) { + // Transaction broken because of primary left. + } + } + } + catch (Exception e) { + fail("Should not happen [exception=" + e + "]"); + } + }, multiplicator) : new GridFinishedFuture<>(); + readyLatch.await(); failed.close(); // Stopping node. 
@@ -528,6 +627,8 @@ private void testNoTransactionsWaitAtNodeLeft(int backups, PartitionLossPolicy l primaryThenPrimaryFut.get(); nearThenPrimaryFut.get(); nearThenBackupFut.get(); + primaryThenNearFut.get(); + primaryThenPrimaryWithSameKeyFut.get(); int pmeFreeCnt = 0; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteTxExceptionNodeFailTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteTxExceptionNodeFailTest.java new file mode 100644 index 00000000000000..f16c6eab561e2e --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteTxExceptionNodeFailTest.java @@ -0,0 +1,197 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.distributed.near; + +import java.util.Arrays; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.apache.ignite.ShutdownPolicy; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheWriteSynchronizationMode; +import org.apache.ignite.cache.affinity.Affinity; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.IgnitionEx; +import org.apache.ignite.internal.TestRecordingCommunicationSpi; +import org.apache.ignite.internal.processors.cache.CacheInvalidStateException; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.apache.ignite.transactions.Transaction; +import org.apache.ignite.transactions.TransactionHeuristicException; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.springframework.util.Assert; + +import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; +import static org.apache.ignite.cache.CacheWriteSynchronizationMode.PRIMARY_SYNC; +import static org.apache.ignite.internal.TestRecordingCommunicationSpi.spi; +import static org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxFinishFuture.ALL_PARTITION_OWNERS_LEFT_GRID_MSG; +import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.mvccEnabled; + +/** + * Tests check the result of a commit when a node fails before + * sending {@link GridNearTxFinishResponse} to the transaction coordinator. + */ +@RunWith(Parameterized.class) +public class IgniteTxExceptionNodeFailTest extends GridCommonAbstractTest { + /** Parameters.
*/ + @Parameterized.Parameters(name = "syncMode={0}") + public static Iterable data() { + return Arrays.asList(new Object[][] { + { PRIMARY_SYNC }, + { FULL_SYNC }, + }); + } + + /** syncMode */ + @Parameterized.Parameter() + public CacheWriteSynchronizationMode syncMode; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + DataStorageConfiguration dsConfig = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration(new DataRegionConfiguration().setMaxSize(100L * 1024 * 1024) + .setPersistenceEnabled(true)); + + cfg.setCommunicationSpi(new TestRecordingCommunicationSpi()); + + return cfg + .setDataStorageConfiguration(dsConfig) + .setCacheConfiguration(new CacheConfiguration("cache") + .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL) + .setWriteSynchronizationMode(syncMode).setBackups(0)); + } + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + cleanPersistenceDir(); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + super.afterTest(); + + stopAllGrids(); + } + + /** + *

      + *
    • Start 2 nodes with transactional cache, without backups, with {@link IgniteTxExceptionNodeFailTest#syncMode} + *
    • Start transaction: + *
        + *
      • put a key to a partition on transaction coordinator + *
      • put a key to a partition on other node + *
      • try to commit the transaction + *
      + *
    • Stop other node when it try to send GridNearTxFinishResponse + *
    • Check that {@link Transaction#commit()} throw {@link TransactionHeuristicException} + *
    + * + * @throws Exception If failed + */ + @Test + public void testNodeFailBeforeSendGridNearTxFinishResponse() throws Exception { + startGrids(2); + + grid(0).cluster().active(true); + + IgniteEx grid0 = grid(0); + IgniteEx grid1 = grid(1); + + int key0 = 0; + int key1 = 0; + + Affinity aff = grid1.affinity("cache"); + + for (int i = 1; i < 1000; i++) { + if (grid0.equals(grid(aff.mapKeyToNode(i)))) { + key0 = i; + + break; + } + } + + for (int i = key0; i < 1000; i++) { + if (grid1.equals(grid(aff.mapKeyToNode(i))) && !aff.mapKeyToNode(key0).equals(aff.mapKeyToNode(i))) { + key1 = i; + + break; + } + } + + assert !aff.mapKeyToNode(key0).equals(aff.mapKeyToNode(key1)); + + try (Transaction tx = grid1.transactions().txStart()) { + grid1.cache("cache").put(key0, 100); + grid1.cache("cache").put(key1, 200); + + spi(grid0).blockMessages((node, msg) -> { + if (msg instanceof GridNearTxFinishResponse) { + new Thread( + new Runnable() { + @Override public void run() { + log().info("Stopping node: [" + grid0.name() + "]"); + + IgnitionEx.stop(grid0.name(), true, ShutdownPolicy.IMMEDIATE, false); + } + }, + "node-stopper" + ).start(); + + return true; + } + + return false; + } + ); + + boolean passed = false; + + try { + tx.commit(); + } + catch (Throwable e) { + String msg = e.getMessage(); + + Assert.isTrue(e.getCause() instanceof CacheInvalidStateException); + + Assert.isTrue(msg.contains(ALL_PARTITION_OWNERS_LEFT_GRID_MSG)); + + if (!mvccEnabled(grid1.context())) { + Pattern msgPtrn = Pattern.compile(" \\[cacheName=cache, partition=\\d+, " + "key=KeyCacheObjectImpl \\[part=\\d+, val=" + key0 + + ", hasValBytes=true\\]\\]"); + + Matcher matcher = msgPtrn.matcher(msg); + + Assert.isTrue(matcher.find()); + } + + passed = true; + } + + Assert.isTrue(passed); + } + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationEncryptionTest.java 
b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationEncryptionTest.java new file mode 100644 index 00000000000000..f1ef929478e46a --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationEncryptionTest.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence; + +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.encryption.AbstractEncryptionTest; +import org.apache.ignite.spi.encryption.keystore.KeystoreEncryptionSpi; + +/** */ +public class IgnitePdsDefragmentationEncryptionTest extends IgnitePdsDefragmentationTest { + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + KeystoreEncryptionSpi encSpi = new KeystoreEncryptionSpi(); + + encSpi.setKeyStorePath(AbstractEncryptionTest.KEYSTORE_PATH); + encSpi.setKeyStorePassword(AbstractEncryptionTest.KEYSTORE_PASSWORD.toCharArray()); + + cfg.setEncryptionSpi(encSpi); + + for (CacheConfiguration ccfg : cfg.getCacheConfiguration()) + ccfg.setEncryptionEnabled(true); + + return cfg; + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationRandomLruEvictionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationRandomLruEvictionTest.java new file mode 100644 index 00000000000000..7709d76f952669 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationRandomLruEvictionTest.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence; + +import org.apache.ignite.configuration.DataPageEvictionMode; +import org.apache.ignite.configuration.IgniteConfiguration; + +/** */ +public class IgnitePdsDefragmentationRandomLruEvictionTest extends IgnitePdsDefragmentationTest { + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + cfg.getDataStorageConfiguration() + .getDefaultDataRegionConfiguration() + .setPageEvictionMode(DataPageEvictionMode.RANDOM_LRU); + + return cfg; + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationTest.java new file mode 100644 index 00000000000000..8f06a4895cd66f --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationTest.java @@ -0,0 +1,541 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence; + +import java.io.File; +import java.io.IOException; +import java.nio.file.FileVisitResult; +import java.nio.file.FileVisitor; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.Collections; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; +import java.util.function.UnaryOperator; +import java.util.stream.IntStream; +import javax.cache.configuration.Factory; +import javax.cache.expiry.Duration; +import javax.cache.expiry.ExpiryPolicy; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteDataStreamer; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.cluster.ClusterState; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.failure.FailureHandler; +import org.apache.ignite.failure.StopNodeFailureHandler; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.maintenance.MaintenanceFileStore; +import 
org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils; +import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory; +import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; +import org.apache.ignite.internal.util.lang.IgniteThrowableConsumer; +import org.apache.ignite.internal.util.typedef.G; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.maintenance.MaintenanceRegistry; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; + +import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentationCompletionMarkerFile; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentedIndexFile; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentedPartFile; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentedPartMappingFile; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.maintenance.DefragmentationParameters.toStore; +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.DFLT_STORE_DIR; + +/** */ +public class IgnitePdsDefragmentationTest extends GridCommonAbstractTest { + /** */ + public static final String CACHE_2_NAME = "cache2"; + + /** */ + public static final int PARTS = 5; + + /** */ + public static final int ADDED_KEYS_COUNT = 150; + + /** */ + protected static final String GRP_NAME = "group"; + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + 
stopAllGrids(true); + + cleanPersistenceDir(); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + super.afterTest(); + + stopAllGrids(true); + + cleanPersistenceDir(); + } + + /** {@inheritDoc} */ + @Override protected FailureHandler getFailureHandler(String igniteInstanceName) { + return new StopNodeFailureHandler(); + } + + /** */ + protected static class PolicyFactory implements Factory { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override public ExpiryPolicy create() { + return new ExpiryPolicy() { + @Override public Duration getExpiryForCreation() { + return new Duration(TimeUnit.MILLISECONDS, 13000); + } + + /** {@inheritDoc} */ + @Override public Duration getExpiryForAccess() { + return new Duration(TimeUnit.MILLISECONDS, 13000); + } + + /** {@inheritDoc} */ + @Override public Duration getExpiryForUpdate() { + return new Duration(TimeUnit.MILLISECONDS, 13000); + } + }; + } + } + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + cfg.setConsistentId(igniteInstanceName); + + DataStorageConfiguration dsCfg = new DataStorageConfiguration(); + dsCfg.setWalSegmentSize(4 * 1024 * 1024); + + dsCfg.setDefaultDataRegionConfiguration( + new DataRegionConfiguration() + .setInitialSize(100L * 1024 * 1024) + .setMaxSize(1024L * 1024 * 1024) + .setPersistenceEnabled(true) + ); + + cfg.setDataStorageConfiguration(dsCfg); + + CacheConfiguration cache1Cfg = new CacheConfiguration<>(DEFAULT_CACHE_NAME) + .setAtomicityMode(TRANSACTIONAL) + .setGroupName(GRP_NAME) + .setAffinity(new RendezvousAffinityFunction(false, PARTS)); + + CacheConfiguration cache2Cfg = new CacheConfiguration<>(CACHE_2_NAME) + .setAtomicityMode(TRANSACTIONAL) + .setGroupName(GRP_NAME) + .setExpiryPolicyFactory(new PolicyFactory()) + .setAffinity(new 
RendezvousAffinityFunction(false, PARTS)); + + cfg.setCacheConfiguration(cache1Cfg, cache2Cfg); + + return cfg; + } + + /** + * Basic test scenario. Does following steps: + * - Start node; + * - Fill cache; + * - Remove part of data; + * - Stop node; + * - Start node in defragmentation mode; + * - Stop node; + * - Start node; + * - Check that partitions became smaller; + * - Check that cache is accessible and works just fine. + * + * @throws Exception If failed. + */ + @Test + public void testSuccessfulDefragmentation() throws Exception { + IgniteEx ig = startGrid(0); + + ig.cluster().state(ClusterState.ACTIVE); + + fillCache(ig.cache(DEFAULT_CACHE_NAME)); + + forceCheckpoint(ig); + + createMaintenanceRecord(); + + stopGrid(0); + + File workDir = resolveCacheWorkDir(ig); + + long[] oldPartLen = partitionSizes(workDir); + + long oldIdxFileLen = new File(workDir, FilePageStoreManager.INDEX_FILE_NAME).length(); + + startGrid(0); + + long[] newPartLen = partitionSizes(workDir); + + for (int p = 0; p < PARTS; p++) + assertTrue(newPartLen[p] < oldPartLen[p]); //TODO Fails. + + long newIdxFileLen = new File(workDir, FilePageStoreManager.INDEX_FILE_NAME).length(); + + assertTrue(newIdxFileLen <= oldIdxFileLen); + + File completionMarkerFile = defragmentationCompletionMarkerFile(workDir); + assertTrue(completionMarkerFile.exists()); + + stopGrid(0); + + IgniteEx ig0 = startGrid(0); + + ig0.cluster().state(ClusterState.ACTIVE); + + assertFalse(completionMarkerFile.exists()); + + validateCache(grid(0).cache(DEFAULT_CACHE_NAME)); + + validateLeftovers(workDir); + } + + /** + * @return Working directory for cache group {@link IgnitePdsDefragmentationTest#GRP_NAME}. + * @throws IgniteCheckedException If failed for some reason, like if it's a file instead of directory. 
+ */ + private File resolveCacheWorkDir(IgniteEx ig) throws IgniteCheckedException { + File dbWorkDir = U.resolveWorkDirectory(U.defaultWorkDirectory(), DFLT_STORE_DIR, false); + + File nodeWorkDir = new File(dbWorkDir, U.maskForFileName(ig.name())); + + return new File(nodeWorkDir, FilePageStoreManager.CACHE_GRP_DIR_PREFIX + GRP_NAME); + } + + /** + * Force checkpoint and wait for it so all partitions will be in their final state after restart if no more data is + * uploaded. + * + * @param ig Ignite node. + * @throws IgniteCheckedException If checkpoint failed for some reason. + */ + private void forceCheckpoint(IgniteEx ig) throws IgniteCheckedException { + ig.context().cache().context().database() + .forceCheckpoint("testDefrag") + .futureFor(CheckpointState.FINISHED) + .get(); + } + + /** */ + protected void createMaintenanceRecord() throws IgniteCheckedException { + IgniteEx grid = grid(0); + MaintenanceRegistry mntcReg = grid.context().maintenanceRegistry(); + + mntcReg.registerMaintenanceTask(toStore(Collections.singletonList(groupIdForCache(grid, DEFAULT_CACHE_NAME)))); + } + + /** + * Returns array that contains sizes of partition files in gived working directories. Assumes that partitions + * {@code 0} to {@code PARTS - 1} exist in that dir. + * + * @param workDir Working directory. + * @return The array. + */ + protected long[] partitionSizes(File workDir) { + return IntStream.range(0, PARTS) + .mapToObj(p -> new File(workDir, String.format(FilePageStoreManager.PART_FILE_TEMPLATE, p))) + .mapToLong(File::length) + .toArray(); + } + + /** + * Checks that plain node start after failed defragmentation will finish batch renaming. + * + * @throws Exception If failed. 
+ */ + @Test + public void testFailoverRestartWithoutDefragmentation() throws Exception { + testFailover(workDir -> { + try { + File mntcRecFile = new File(workDir.getParent(), MaintenanceFileStore.MAINTENANCE_FILE_NAME); + + assertTrue(mntcRecFile.exists()); + + Files.delete(mntcRecFile.toPath()); + + startGrid(0); + + validateLeftovers(workDir); + } + catch (Exception e) { + throw new IgniteCheckedException(e); + } + finally { + createMaintenanceRecord(); + + stopGrid(0); + } + }); + } + + /** + * Checks that second start in defragmentation mode will finish defragmentation if no completion marker was found. + * + * @throws Exception If failed. + */ + @Test + public void testFailoverOnLastStage() throws Exception { + testFailover(workDir -> {}); + } + + /** + * Checks that second start in defragmentation mode will finish defragmentation if index was not defragmented. + * + * @throws Exception If failed. + */ + @Test + public void testFailoverIncompletedIndex() throws Exception { + testFailover(workDir -> move( + DefragmentationFileUtils.defragmentedIndexFile(workDir), + DefragmentationFileUtils.defragmentedIndexTmpFile(workDir) + )); + } + + /** + * Checks that second start in defragmentation mode will finish defragmentation if partition was not defragmented. + * + * @throws Exception If failed. + */ + @Test + public void testFailoverIncompletedPartition1() throws Exception { + testFailover(workDir -> { + DefragmentationFileUtils.defragmentedIndexFile(workDir).delete(); + + move( + DefragmentationFileUtils.defragmentedPartFile(workDir, PARTS - 1), + DefragmentationFileUtils.defragmentedPartTmpFile(workDir, PARTS - 1) + ); + }); + } + + /** + * Checks that second start in defragmentation mode will finish defragmentation if no mapping was found for partition. + * + * @throws Exception If failed. 
+ */ + @Test + public void testFailoverIncompletedPartition2() throws Exception { + testFailover(workDir -> { + DefragmentationFileUtils.defragmentedIndexFile(workDir).delete(); + + DefragmentationFileUtils.defragmentedPartMappingFile(workDir, PARTS - 1).delete(); + }); + } + + /** */ + private void move(File from, File to) throws IgniteCheckedException { + try { + Files.move(from.toPath(), to.toPath(), StandardCopyOption.REPLACE_EXISTING); + } + catch (IOException e) { + throw new IgniteCheckedException(e); + } + } + + /** */ + private void testFailover(IgniteThrowableConsumer c) throws Exception { + IgniteEx ig = startGrid(0); + + ig.cluster().state(ClusterState.ACTIVE); + + fillCache(ig.cache(DEFAULT_CACHE_NAME)); + + forceCheckpoint(ig); + + createMaintenanceRecord(); + + stopGrid(0); + + File workDir = resolveCacheWorkDir(ig); + + String errMsg = "Failed to create defragmentation completion marker."; + + AtomicBoolean errOccurred = new AtomicBoolean(); + + UnaryOperator cfgOp = cfg -> { + DataStorageConfiguration dsCfg = cfg.getDataStorageConfiguration(); + + FileIOFactory delegate = dsCfg.getFileIOFactory(); + + dsCfg.setFileIOFactory((file, modes) -> { + if (file.equals(defragmentationCompletionMarkerFile(workDir))) { + errOccurred.set(true); + + throw new IOException(errMsg); + } + + return delegate.create(file, modes); + }); + + return cfg; + }; + + try { + startGrid(0, cfgOp); + } + catch (Exception ignore) { + // No-op. + } + + // Failed node can leave interrupted status of the thread that needs to be cleared, + // otherwise following "wait" wouldn't work. + // This call can't be moved inside of "catch" block because interruption can actually be silent. + Thread.interrupted(); + + assertTrue(GridTestUtils.waitForCondition(errOccurred::get, 10_000L)); + + assertTrue(GridTestUtils.waitForCondition(() -> G.allGrids().isEmpty(), 10_000L)); + + c.accept(workDir); + + startGrid(0); + + stopGrid(0); + + // Everything must be completed. 
+ startGrid(0).cluster().state(ClusterState.ACTIVE); + + validateCache(grid(0).cache(DEFAULT_CACHE_NAME)); + + validateLeftovers(workDir); + } + + /** */ + public void validateLeftovers(File workDir) { + assertFalse(defragmentedIndexFile(workDir).exists()); + + for (int p = 0; p < PARTS; p++) { + assertFalse(defragmentedPartMappingFile(workDir, p).exists()); + + assertFalse(defragmentedPartFile(workDir, p).exists()); + } + } + + /** */ + @Test + public void testDefragmentedPartitionCreated() throws Exception { + IgniteEx ig = startGrid(0); + + ig.cluster().state(ClusterState.ACTIVE); + + fillCache(ig.cache(DEFAULT_CACHE_NAME)); + + fillCache(ig.getOrCreateCache(CACHE_2_NAME)); + + createMaintenanceRecord(); + + stopGrid(0); + + startGrid(0); + + File workDir = U.resolveWorkDirectory(U.defaultWorkDirectory(), DFLT_STORE_DIR, false); + + AtomicReference cachePartFile = new AtomicReference<>(); + AtomicReference defragCachePartFile = new AtomicReference<>(); + + Files.walkFileTree(workDir.toPath(), new FileVisitor() { + @Override public FileVisitResult preVisitDirectory(Path path, BasicFileAttributes basicFileAttributes) throws IOException { + return FileVisitResult.CONTINUE; + } + + @Override public FileVisitResult visitFile(Path path, BasicFileAttributes basicFileAttributes) throws IOException { + if (path.toString().contains("cacheGroup-group")) { + File file = path.toFile(); + + if (file.getName().contains("part-dfrg-")) + cachePartFile.set(file); + else if (file.getName().contains("part-")) + defragCachePartFile.set(file); + } + + return FileVisitResult.CONTINUE; + } + + @Override public FileVisitResult visitFileFailed(Path path, IOException e) throws IOException { + return FileVisitResult.CONTINUE; + } + + @Override public FileVisitResult postVisitDirectory(Path path, IOException e) throws IOException { + return FileVisitResult.CONTINUE; + } + }); + + assertNull(cachePartFile.get()); //TODO Fails. 
+ assertNotNull(defragCachePartFile.get()); + } + + /** + * Fill cache using integer keys. + * + * @param cache + */ + protected void fillCache(IgniteCache cache) { + fillCache(Function.identity(), cache); + } + + /** */ + protected void fillCache(Function keyMapper, IgniteCache cache) { + try (IgniteDataStreamer ds = grid(0).dataStreamer(cache.getName())) { + for (int i = 0; i < ADDED_KEYS_COUNT; i++) { + byte[] val = new byte[8192]; + new Random().nextBytes(val); + + ds.addData(keyMapper.apply(i), val); + } + } + + try (IgniteDataStreamer ds = grid(0).dataStreamer(cache.getName())) { + ds.allowOverwrite(true); + + for (int i = 0; i <= ADDED_KEYS_COUNT / 2; i++) + ds.removeData(keyMapper.apply(i * 2)); + } + } + + /** */ + public void validateCache(IgniteCache cache) { + for (int k = 0; k < ADDED_KEYS_COUNT; k++) { + Object val = cache.get(k); + + if (k % 2 == 0) + assertNull(val); + else + assertNotNull(val); + } + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsRecoveryAfterFileCorruptionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsRecoveryAfterFileCorruptionTest.java index 12774b464e13a9..82dfb71fc7e643 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsRecoveryAfterFileCorruptionTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsRecoveryAfterFileCorruptionTest.java @@ -339,7 +339,7 @@ private void generateWal( long writeStart = System.nanoTime(); - storeMgr.write(cacheId, pageId, buf, tag); + storeMgr.write(cacheId, pageId, buf, tag, true); long writeEnd = System.nanoTime(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsTaskCancelingTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsTaskCancelingTest.java index 
95e24ccb9d6e26..300ce4e3d1a3e2 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsTaskCancelingTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsTaskCancelingTest.java @@ -44,7 +44,6 @@ import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStore; import org.apache.ignite.internal.processors.cache.persistence.file.RandomAccessFileIOFactory; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; -import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; import org.apache.ignite.internal.util.GridUnsafe; import org.apache.ignite.internal.util.lang.GridAbsPredicate; import org.apache.ignite.internal.util.typedef.internal.U; @@ -199,8 +198,7 @@ public void testFilePageStoreInterruptThreads() throws Exception { DataStorageConfiguration dbCfg = getDataStorageConfiguration(); - FilePageStore pageStore = new FilePageStore(PageMemory.FLAG_DATA, () -> file.toPath(), factory, dbCfg, - new LongAdderMetric("NO_OP", null)); + FilePageStore pageStore = new FilePageStore(PageMemory.FLAG_DATA, file::toPath, factory, dbCfg, val -> {}); int pageSize = dbCfg.getPageSize(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/LocalWalModeChangeDuringRebalancingSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/LocalWalModeChangeDuringRebalancingSelfTest.java index 03ad4dd062bcb1..b2b2bde699efc2 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/LocalWalModeChangeDuringRebalancingSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/LocalWalModeChangeDuringRebalancingSelfTest.java @@ -30,7 +30,6 @@ import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicBoolean; import 
java.util.concurrent.atomic.AtomicReference; - import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.IgniteCheckedException; @@ -739,10 +738,10 @@ public void testPdsWithBrokenBinaryConsistencyIsClearedAfterRestartWithDisabledW @Override public void run() { MaintenanceRegistry mntcRegistry = ((IgniteEx) ig).context().maintenanceRegistry(); - List actions = mntcRegistry + List> actions = mntcRegistry .actionsForMaintenanceTask(CORRUPTED_DATA_FILES_MNTC_TASK_NAME); - Optional optional = actions + Optional> optional = actions .stream() .filter(a -> a.name().equals(CleanCacheStoresMaintenanceAction.ACTION_NAME)).findFirst(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MaintenanceRegistrySimpleTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MaintenanceRegistrySimpleTest.java index 5866d43d74a8e3..f64d5684b98000 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MaintenanceRegistrySimpleTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MaintenanceRegistrySimpleTest.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; - import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; @@ -295,9 +294,9 @@ public void testMaintenanceActionNameSymbols() throws IgniteCheckedException { /** */ private final class SimpleMaintenanceCallback implements MaintenanceWorkflowCallback { /** */ - private final List actions = new ArrayList<>(); + private final List> actions = new ArrayList<>(); - SimpleMaintenanceCallback(List actions) { + SimpleMaintenanceCallback(List> actions) { this.actions.addAll(actions); } @@ -307,7 +306,7 @@ private final class SimpleMaintenanceCallback implements MaintenanceWorkflowCall } /** {@inheritDoc} */ - @Override 
public @NotNull List allActions() { + @Override public @NotNull List> allActions() { return actions; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MemoryPolicyInitializationTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MemoryPolicyInitializationTest.java index 1864c0b53886bd..cee869f66e93e4 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MemoryPolicyInitializationTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MemoryPolicyInitializationTest.java @@ -30,6 +30,7 @@ import org.junit.Test; import static org.apache.ignite.configuration.MemoryConfiguration.DFLT_MEM_PLC_DEFAULT_NAME; +import static org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor.VOLATILE_DATA_REGION_NAME; /** * @@ -77,7 +78,7 @@ public void testNoConfigProvided() throws Exception { Collection allMemPlcs = ignite.context().cache().context().database().dataRegions(); - assertEquals(3, allMemPlcs.size()); + assertEquals(4, allMemPlcs.size()); verifyDefaultAndSystemMemoryPolicies(allMemPlcs); } @@ -94,7 +95,7 @@ public void testCustomConfigNoDefault() throws Exception { Collection allMemPlcs = ignite.context().cache().context().database().dataRegions(); - assertEquals(4, allMemPlcs.size()); + assertEquals(5, allMemPlcs.size()); verifyDefaultAndSystemMemoryPolicies(allMemPlcs); @@ -116,7 +117,7 @@ public void testCustomConfigOverridesDefault() throws Exception { Collection allMemPlcs = dbMgr.dataRegions(); - assertEquals(3, allMemPlcs.size()); + assertEquals(4, allMemPlcs.size()); verifyDefaultAndSystemMemoryPolicies(allMemPlcs); @@ -141,7 +142,7 @@ public void testCustomConfigOverridesDefaultNameAndDeclaresDefault() throws Exce Collection allMemPlcs = dbMgr.dataRegions(); - assertEquals(4, allMemPlcs.size()); + assertEquals(5, allMemPlcs.size()); 
verifyDefaultAndSystemMemoryPolicies(allMemPlcs); @@ -290,6 +291,9 @@ private void verifyDefaultAndSystemMemoryPolicies(Collection allMemP assertTrue("System memory policy is not presented", isMemoryPolicyPresented(allMemPlcs, IgniteCacheDatabaseSharedManager.SYSTEM_DATA_REGION_NAME)); + + assertTrue("Volatile memory policy is not presented", + isMemoryPolicyPresented(allMemPlcs, VOLATILE_DATA_REGION_NAME)); } /** diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/PendingTreeCorruptionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/PendingTreeCorruptionTest.java new file mode 100644 index 00000000000000..7a748e3346b4e3 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/PendingTreeCorruptionTest.java @@ -0,0 +1,159 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence; + +import java.util.concurrent.TimeUnit; +import javax.cache.expiry.AccessedExpiryPolicy; +import javax.cache.expiry.Duration; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cluster.ClusterState; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManager; +import org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManagerImpl; +import org.apache.ignite.internal.processors.cache.tree.PendingEntriesTree; +import org.apache.ignite.internal.processors.cache.tree.PendingRow; +import org.apache.ignite.internal.util.lang.GridCursor; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import static java.util.concurrent.TimeUnit.MINUTES; + +/** */ +public class PendingTreeCorruptionTest extends GridCommonAbstractTest { + /** */ + @Before + public void before() throws Exception { + stopAllGrids(); + + cleanPersistenceDir(); + } + + /** */ + @After + public void after() throws Exception { + stopAllGrids(); + + cleanPersistenceDir(); + } + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + cfg.setConsistentId(igniteInstanceName); + + cfg.setDataStorageConfiguration(new DataStorageConfiguration() + .setDefaultDataRegionConfiguration(new DataRegionConfiguration() + 
.setPersistenceEnabled(true) + ) + .setWalSegments(3) + .setWalSegmentSize(512 * 1024) + ); + + return cfg; + } + + /** */ + @Test + public void testCorruptionWhileLoadingData() throws Exception { + IgniteEx ig = startGrid(0); + + ig.cluster().state(ClusterState.ACTIVE); + + String expireCacheName = "cacheWithExpire"; + String regularCacheName = "cacheWithoutExpire"; + String grpName = "cacheGroup"; + + IgniteCache expireCache = ig.getOrCreateCache( + new CacheConfiguration<>(expireCacheName) + .setExpiryPolicyFactory(AccessedExpiryPolicy.factoryOf(new Duration(MINUTES, 10))) + .setGroupName(grpName) + ); + + IgniteCache regularCache = ig.getOrCreateCache( + new CacheConfiguration<>(regularCacheName) + .setGroupName(grpName) + ); + + // This will initialize partition and cache structures. + expireCache.put(0, 0); + expireCache.remove(0); + + int expireCacheId = CU.cacheGroupId(expireCacheName, grpName); + + CacheGroupContext grp = ig.context().cache().cacheGroup(CU.cacheId(grpName)); + IgniteCacheOffheapManager.CacheDataStore store = ((IgniteCacheOffheapManagerImpl)grp.offheap()).dataStore(0); + + // Get pending tree of expire cache. + PendingEntriesTree pendingTree = store.pendingTree(); + + long year = TimeUnit.DAYS.toMillis(365); + long expiration = System.currentTimeMillis() + year; + + ig.context().cache().context().database().checkpointReadLock(); + + try { + // Carefully calculated number. Just enough for the first split to happen, but not more. + for (int i = 0; i < 202; i++) + pendingTree.putx(new PendingRow(expireCacheId, expiration, expiration + i)); // link != 0 + + // Open cursor, it'll cache first leaf of the tree. + GridCursor cur = pendingTree.find( + null, + new PendingRow(expireCacheId, expiration + year, 0), + PendingEntriesTree.WITHOUT_KEY + ); + + // Required for "do" loop to work. + assertTrue(cur.next()); + + int cnt = 0; + + // Emulate real expiry loop but with a more precise control. 
+ do { + PendingRow row = cur.get(); + + pendingTree.removex(row); + + // Another carefully calculated moment. Here the page cache is exhausted AND the real page is merged + // with its sibling, meaning that cached "nextPageId" points to empty page from reuse list. + if (row.link - row.expireTime == 100) { + // Put into another cache will take a page from reuse list first. This means that cached + // "nextPageId" points to a data page. + regularCache.put(0, 0); + } + + cnt++; + } + while (cur.next()); + + assertEquals(202, cnt); + } + finally { + ig.context().cache().context().database().checkpointReadUnlock(); + } + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/CheckpointBufferDeadlockTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/CheckpointBufferDeadlockTest.java index 660c7ffb365baf..ca808ad3b04ed9 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/CheckpointBufferDeadlockTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/CheckpointBufferDeadlockTest.java @@ -51,6 +51,7 @@ import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; import org.apache.ignite.internal.processors.cache.persistence.file.RandomAccessFileIOFactory; import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryImpl; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.testframework.ListeningTestLogger; @@ -236,6 +237,17 @@ private void runDeadlockScenario() throws Exception { long pageId = PageIdUtils.pageId(0, PageIdAllocator.FLAG_DATA, pageIdx); + long page = pageMem.acquirePage(CU.cacheId(cacheName), pageId); + + try { + // We do not know correct flag(FLAG_DATA or 
FLAG_AUX). Skip page if no luck. + if (pageId != PageIO.getPageId(page + PageMemoryImpl.PAGE_OVERHEAD)) + continue; + } + finally { + pageMem.releasePage(CU.cacheId(cacheName), pageId, page); + } + pickedPagesSet.add(new FullPageId(pageId, CU.cacheId(cacheName))); } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsDataRegionMetricsTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsDataRegionMetricsTest.java index 3da0c181340b0a..63fea4494f816b 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsDataRegionMetricsTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsDataRegionMetricsTest.java @@ -89,6 +89,13 @@ public class IgnitePdsDataRegionMetricsTest extends GridCommonAbstractTest { .setMaxSize(MAX_REGION_SIZE) .setPersistenceEnabled(true) .setMetricsEnabled(true)) + .setDataRegionConfigurations( + new DataRegionConfiguration() + .setName("EmptyRegion") + .setInitialSize(INIT_REGION_SIZE) + .setMaxSize(MAX_REGION_SIZE) + .setPersistenceEnabled(true) + .setMetricsEnabled(true)) .setCheckpointFrequency(1000); cfg.setDataStorageConfiguration(memCfg); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/CheckpointListenerForRegionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/CheckpointListenerForRegionTest.java new file mode 100644 index 00000000000000..fb10775a438a0b --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/CheckpointListenerForRegionTest.java @@ -0,0 +1,191 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.db.checkpoint; + +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.persistence.DataRegion; +import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; +import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointListener; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.jetbrains.annotations.NotNull; +import org.junit.Test; + +import static org.apache.ignite.configuration.DataStorageConfiguration.DFLT_DATA_REG_DEFAULT_NAME; + +/** + * + */ +public class CheckpointListenerForRegionTest extends GridCommonAbstractTest { + /** This number shows how many mandatory methods will be called on checkpoint listener during checkpoint. 
*/ + private static final int CALLS_COUNT_PER_CHECKPOINT = 3; + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + stopAllGrids(); + + cleanPersistenceDir(); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + + cleanPersistenceDir(); + + super.afterTest(); + } + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + cfg.setConsistentId(igniteInstanceName); + + DataStorageConfiguration storageCfg = new DataStorageConfiguration(); + + storageCfg.setCheckpointFrequency(100_000); + storageCfg.getDefaultDataRegionConfiguration() + .setPersistenceEnabled(true) + .setMaxSize(300L * 1024 * 1024); + + cfg.setDataStorageConfiguration(storageCfg) + .setCacheConfiguration(new CacheConfiguration<>(DEFAULT_CACHE_NAME) + .setAffinity(new RendezvousAffinityFunction(false, 16))); + + return cfg; + } + + /** + * 1. Start the one node. + * 2. Configure the default cache. + * 3. Set the checkpoint listeners(for default region and for all regions) to watch the checkpoint. + * 4. Fill the data and trigger the checkpoint. + * 5. Expected: Both listeners should be called. + * 6. Remove the default region from the checkpoint. + * 7. Fill the data and trigger the checkpoint. + * 8. Expected: The only listener for all regions should be called. + * 9. Return default region back to the checkpoint. + * 10. Fill the data and trigger the checkpoint. + * 11. Expected: Both listeners should be called. + * + * @throws Exception if fail. + */ + @Test + public void testCheckpointListenersInvokedOnlyIfRegionConfigured() throws Exception { + //given: One started node with default cache. 
+ IgniteEx ignite0 = startGrid(0); + + ignite0.cluster().active(true); + + IgniteCache cache = ignite0.cache(DEFAULT_CACHE_NAME); + + GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager)(ignite0.context().cache().context().database()); + + DataRegion defaultRegion = db.checkpointedDataRegions().stream() + .filter(region -> DFLT_DATA_REG_DEFAULT_NAME.equals(region.config().getName())) + .findFirst() + .orElse(null); + + assertNotNull("Expected default data region in checkpoint list is not found.", defaultRegion); + + //and: Configure the listeners(for default region and for all regions) for watching for checkpoint. + AtomicInteger checkpointListenerDefaultRegionCounter = checkpointListenerWatcher(db, defaultRegion); + AtomicInteger checkpointListenerAllRegionCounter = checkpointListenerWatcher(db, null); + + //when: Checkpoint happened. + fillDataAndCheckpoint(ignite0, cache); + + //then: Both listeners should be called. + assertEquals(CALLS_COUNT_PER_CHECKPOINT, checkpointListenerDefaultRegionCounter.get()); + assertEquals(CALLS_COUNT_PER_CHECKPOINT, checkpointListenerAllRegionCounter.get()); + + //Remove the default region from checkpoint. + db.checkpointedDataRegions().remove(defaultRegion); + + //when: Checkpoint happened. + fillDataAndCheckpoint(ignite0, cache); + + //then: Only listener for all regions should be called. + assertEquals(CALLS_COUNT_PER_CHECKPOINT, checkpointListenerDefaultRegionCounter.get()); + assertEquals(2 * CALLS_COUNT_PER_CHECKPOINT, checkpointListenerAllRegionCounter.get()); + + assertTrue( + "Expected default data region in all regions list is not found.", + db.dataRegions().stream().anyMatch(region -> DFLT_DATA_REG_DEFAULT_NAME.equals(region.config().getName())) + ); + + //Return default region back to the checkpoint. + db.checkpointedDataRegions().add(defaultRegion); + + //when: Checkpoint happened. + fillDataAndCheckpoint(ignite0, cache); + + //then: Both listeners should be called. 
+ assertEquals(2 * CALLS_COUNT_PER_CHECKPOINT, checkpointListenerDefaultRegionCounter.get()); + assertEquals(3 * CALLS_COUNT_PER_CHECKPOINT, checkpointListenerAllRegionCounter.get()); + } + + /** + * Fill the data and trigger the checkpoint after that. + */ + private void fillDataAndCheckpoint( + IgniteEx ignite0, + IgniteCache cache + ) throws IgniteCheckedException { + for (int j = 0; j < 1024; j++) + cache.put(j, j); + + forceCheckpoint(ignite0); + } + + /** + * Add checkpoint listener which count the number of listener calls during each checkpoint. + * + * @param db Shared manager for manage the listeners. + * @param defaultRegion Region for which listener should be added. + * @return Integer which count the listener calls. + */ + @NotNull + private AtomicInteger checkpointListenerWatcher(GridCacheDatabaseSharedManager db, DataRegion defaultRegion) { + AtomicInteger checkpointListenerCounter = new AtomicInteger(); + + db.addCheckpointListener(new CheckpointListener() { + @Override public void onMarkCheckpointBegin(Context ctx) throws IgniteCheckedException { + checkpointListenerCounter.getAndIncrement(); + } + + @Override public void onCheckpointBegin(Context ctx) throws IgniteCheckedException { + checkpointListenerCounter.getAndIncrement(); + } + + @Override public void beforeCheckpointBegin(Context ctx) throws IgniteCheckedException { + checkpointListenerCounter.getAndIncrement(); + } + }, defaultRegion); + return checkpointListenerCounter; + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/CheckpointStartLoggingTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/CheckpointStartLoggingTest.java index 51a4073a71c580..5d75b1ff8a73ac 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/CheckpointStartLoggingTest.java +++ 
b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/CheckpointStartLoggingTest.java @@ -44,7 +44,7 @@ public class CheckpointStartLoggingTest extends GridCommonAbstractTest { "walCpRecordFsyncDuration=" + VALID_MS_PATTERN + ", " + "writeCheckpointEntryDuration=" + VALID_MS_PATTERN + ", " + "splitAndSortCpPagesDuration=" + VALID_MS_PATTERN + ", " + - ".* pages=[1-9][0-9]*, " + + ".*pages=[1-9][0-9]*, " + "reason=.*"; /** */ diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/LightweightCheckpointTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/LightweightCheckpointTest.java new file mode 100644 index 00000000000000..1a677165ba9bb6 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/LightweightCheckpointTest.java @@ -0,0 +1,229 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.db.checkpoint; + +import java.io.File; +import java.nio.file.Paths; +import java.util.Arrays; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.configuration.WALMode; +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.CacheGroupDescriptor; +import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxLog; +import org.apache.ignite.internal.processors.cache.persistence.CheckpointState; +import org.apache.ignite.internal.processors.cache.persistence.DataRegion; +import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; +import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointListener; +import org.apache.ignite.internal.processors.cache.persistence.checkpoint.LightweightCheckpointManager; +import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetaStorage; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryImpl; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; + +import static org.apache.ignite.configuration.DataStorageConfiguration.DFLT_DATA_REG_DEFAULT_NAME; +import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.METASTORE_DATA_REGION_NAME; +import 
static org.apache.ignite.testframework.GridTestUtils.waitForCondition; + +/** + * + */ +public class LightweightCheckpointTest extends GridCommonAbstractTest { + /** Data region which should not be checkpointed. */ + public static final String NOT_CHECKPOINTED_REGION = "NotCheckpointedRegion"; + + /** Cache which should not be checkpointed. */ + public static final String NOT_CHECKPOINTED_CACHE = "notCheckpointedCache"; + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + stopAllGrids(); + + cleanPersistenceDir(); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + +// cleanPersistenceDir(); + + super.afterTest(); + } + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + cfg.setConsistentId(igniteInstanceName); + + DataStorageConfiguration storageCfg = new DataStorageConfiguration(); + + storageCfg.setWalMode(WALMode.NONE); + storageCfg.setCheckpointFrequency(100_000); + storageCfg.setDataRegionConfigurations(new DataRegionConfiguration() + .setName(NOT_CHECKPOINTED_REGION) + .setPersistenceEnabled(true) + .setMaxSize(300L * 1024 * 1024) + + ); + storageCfg.getDefaultDataRegionConfiguration() + .setPersistenceEnabled(true) + .setMaxSize(300L * 1024 * 1024); + + cfg.setDataStorageConfiguration(storageCfg) + + .setCacheConfiguration( + new CacheConfiguration<>(DEFAULT_CACHE_NAME) + .setAffinity(new RendezvousAffinityFunction(false, 16)) + .setDataRegionName(DFLT_DATA_REG_DEFAULT_NAME), + new CacheConfiguration<>(NOT_CHECKPOINTED_CACHE) + .setAffinity(new RendezvousAffinityFunction(false, 16)) + .setDataRegionName(NOT_CHECKPOINTED_REGION) + ); + + return cfg; + } + + /** + * 1. Start the one node with disabled WAL and with two caches. + * 2. Disable default checkpoint. + * 3. 
Create light checkpoint for one cache and configure checkpoint listener for it. + * 4. Fill the both caches. + * 5. Trigger the light checkpoint and wait for the finish. + * 6. Stop the node and start it again. + * 7. Expected: Cache which was checkpointed would have the all data meanwhile second cache would be empty. + * + * @throws Exception if fail. + */ + @Test + public void testLightCheckpointAbleToStoreOnlyGivenDataRegion() throws Exception { + //given: One started node with default cache and cache which won't be checkpointed. + IgniteEx ignite0 = startGrid(0); + ignite0.cluster().active(true); + + IgniteCache checkpointedCache = ignite0.cache(DEFAULT_CACHE_NAME); + IgniteCache notCheckpointedCache = ignite0.cache(NOT_CHECKPOINTED_CACHE); + + GridKernalContext context = ignite0.context(); + GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager)(context.cache().context().database()); + + waitForCondition(() -> !db.getCheckpointer().currentProgress().inProgress(), 10_000); + + //and: disable the default checkpoint. + db.enableCheckpoints(false); + + DataRegion regionForCheckpoint = db.dataRegion(DFLT_DATA_REG_DEFAULT_NAME); + + //and: Create light checkpoint with only one region. + LightweightCheckpointManager lightweightCheckpointManager = new LightweightCheckpointManager( + context::log, + context.igniteInstanceName(), + "light-test-checkpoint", + context.workersRegistry(), + context.config().getDataStorageConfiguration(), + () -> Arrays.asList(regionForCheckpoint), + grpId -> getPageMemoryForCacheGroup(grpId, db, context), + PageMemoryImpl.ThrottlingPolicy.CHECKPOINT_BUFFER_ONLY, + context.cache().context().snapshot(), + db.persistentStoreMetricsImpl(), + context.longJvmPauseDetector(), + context.failure(), + context.cache() + ); + + //and: Add checkpoint listener for DEFAULT_CACHE in order of storing the meta pages. 
+ lightweightCheckpointManager.addCheckpointListener( + (CheckpointListener)context.cache().cacheGroup(groupIdForCache(ignite0, DEFAULT_CACHE_NAME)).offheap(), + regionForCheckpoint + ); + + lightweightCheckpointManager.start(); + + //when: Fill the caches + for (int j = 0; j < 1024; j++) { + checkpointedCache.put(j, j); + notCheckpointedCache.put(j, j); + } + + //and: Trigger and wait for the checkpoint. + lightweightCheckpointManager.forceCheckpoint("test", null) + .futureFor(CheckpointState.FINISHED) + .get(); + + //and: Stop and start node. + stopAllGrids(); + + ignite0 = startGrid(0); + ignite0.cluster().active(true); + + checkpointedCache = ignite0.cache(DEFAULT_CACHE_NAME); + notCheckpointedCache = ignite0.cache(NOT_CHECKPOINTED_CACHE); + + //then: Checkpointed cache should have all data meanwhile uncheckpointed cache should be empty. + for (int j = 1; j < 1024; j++) { + assertEquals(j, checkpointedCache.get(j)); + assertNull(notCheckpointedCache.get(j)); + } + + GridCacheDatabaseSharedManager db2 = (GridCacheDatabaseSharedManager) + (ignite0.context().cache().context().database()); + + waitForCondition(() -> !db2.getCheckpointer().currentProgress().inProgress(), 10_000); + + String nodeFolderName = ignite0.context().pdsFolderResolver().resolveFolders().folderName(); + File cpMarkersDir = Paths.get(U.defaultWorkDirectory(), "db", nodeFolderName, "cp").toFile(); + + //then: Expected only two pairs checkpoint markers - both from the start of node. + assertEquals(4, cpMarkersDir.listFiles().length); + } + + /** + * @return Page memory which corresponds to grpId. 
+ */ + private PageMemoryEx getPageMemoryForCacheGroup( + int grpId, + GridCacheDatabaseSharedManager db, + GridKernalContext context + ) throws IgniteCheckedException { + if (grpId == MetaStorage.METASTORAGE_CACHE_ID) + return (PageMemoryEx)db.dataRegion(METASTORE_DATA_REGION_NAME).pageMemory(); + + if (grpId == TxLog.TX_LOG_CACHE_ID) + return (PageMemoryEx)db.dataRegion(TxLog.TX_LOG_CACHE_NAME).pageMemory(); + + CacheGroupDescriptor desc = context.cache().cacheGroupDescriptors().get(grpId); + + if (desc == null) + return null; + + String memPlcName = desc.config().getDataRegionName(); + + return (PageMemoryEx)context.cache().context().database().dataRegion(memPlcName).pageMemory(); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsCheckpointSimulationWithRealCpDisabledTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsCheckpointSimulationWithRealCpDisabledTest.java index 04a0cf1cc3861a..f829b90d69c8d0 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsCheckpointSimulationWithRealCpDisabledTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsCheckpointSimulationWithRealCpDisabledTest.java @@ -1016,7 +1016,7 @@ private IgniteBiTuple, WALPointer> runCheckpointing( long writeStart = System.nanoTime(); - storeMgr.write(cacheId, fullId.pageId(), tmpBuf, tag); + storeMgr.write(cacheId, fullId.pageId(), tmpBuf, tag, true); long writeEnd = System.nanoTime(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteLocalWalSizeTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteLocalWalSizeTest.java new file mode 100644 index 00000000000000..2854a2a614d669 --- /dev/null +++ 
b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteLocalWalSizeTest.java @@ -0,0 +1,229 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.db.wal; + +import java.io.File; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Consumer; +import java.util.stream.IntStream; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cluster.ClusterState; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.persistence.wal.FileDescriptor; +import org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager; +import org.apache.ignite.internal.processors.cache.persistence.wal.filehandle.FileWriteHandle; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.U; +import 
org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.jetbrains.annotations.Nullable; +import org.junit.Test; + +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.ZIP_SUFFIX; +import static org.apache.ignite.internal.processors.cache.persistence.wal.FileDescriptor.fileName; +import static org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager.WAL_SEGMENT_COMPACTED_OR_RAW_FILE_FILTER; +import static org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager.isSegmentFileName; +import static org.apache.ignite.testframework.GridTestUtils.getFieldValue; + +/** + * Class for testing local size of WAL. + */ +public class IgniteLocalWalSizeTest extends GridCommonAbstractTest { + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + stopAllGrids(); + cleanPersistenceDir(); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + super.afterTest(); + + stopAllGrids(); + cleanPersistenceDir(); + } + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { + return super.getConfiguration(gridName) + .setCacheConfiguration(new CacheConfiguration<>(DEFAULT_CACHE_NAME)) + .setDataStorageConfiguration( + new DataStorageConfiguration() + .setWalSegments(5) + .setWalSegmentSize((int)U.MB) + .setDefaultDataRegionConfiguration(new DataRegionConfiguration().setPersistenceEnabled(true)) + ); + } + + /** + * Checking correctness of working with local segment sizes for case: archiving only. + * + * @throws Exception If failed. + */ + @Test + public void testLocalSegmentSizesArchiveOnly() throws Exception { + checkLocalSegmentSizesForOneNode(null); + } + + /** + * Checking correctness of working with local segment sizes for case: archiving and compression. + * + * @throws Exception If failed. 
+ */ + @Test + public void testLocalSegmentSizesArchiveAndCompression() throws Exception { + checkLocalSegmentSizesForOneNode(cfg -> cfg.getDataStorageConfiguration().setWalCompactionEnabled(true)); + } + + /** + * Checking correctness of working with local segment sizes for case: without archiving. + * + * @throws Exception If failed. + */ + @Test + public void testLocalSegmentSizesWithoutArchive() throws Exception { + checkLocalSegmentSizesForOneNode(cfg -> { + DataStorageConfiguration dsCfg = cfg.getDataStorageConfiguration(); + dsCfg.setWalArchivePath(dsCfg.getWalPath()); + }); + } + + /** + * Checking correctness of working with local segment sizes for case: without archiving and with compression. + * + * @throws Exception If failed. + */ + @Test + public void testLocalSegmentSizesWithoutArchiveWithCompression() throws Exception { + checkLocalSegmentSizesForOneNode(cfg -> { + DataStorageConfiguration dsCfg = cfg.getDataStorageConfiguration(); + dsCfg.setWalArchivePath(dsCfg.getWalPath()).setWalCompactionEnabled(true); + }); + } + + /** + * Checking whether segment file name is checked correctly. + * + * @throws Exception If failed. + */ + @Test + public void testSegmentFileName() throws Exception { + Arrays.asList(null, "", "1", "wal", fileName(0) + "1", fileName(1).replace(".wal", ".wa")) + .forEach(s -> assertFalse(s, isSegmentFileName(s))); + + IntStream.range(0, 10) + .mapToObj(FileDescriptor::fileName) + .forEach(fn -> assertTrue(fn, isSegmentFileName(fn) && isSegmentFileName(fn + ZIP_SUFFIX))); + } + + /** + * Checks whether local segment sizes are working correctly for a single node after loading and restarting. + * + * @param cfgUpdater Configuration updater. + * @throws Exception If failed. 
+ */ + private void checkLocalSegmentSizesForOneNode( + @Nullable Consumer cfgUpdater + ) throws Exception { + IgniteConfiguration cfg = getConfiguration(getTestIgniteInstanceName(0)); + + if (cfgUpdater != null) + cfgUpdater.accept(cfg); + + IgniteEx n = startGrid(cfg); + n.cluster().state(ClusterState.ACTIVE); + + awaitPartitionMapExchange(); + + IgniteCache c = n.getOrCreateCache(DEFAULT_CACHE_NAME); + IntStream.range(0, 10_000).forEach(i -> c.put(i, i)); + + forceCheckpoint(); + checkLocalSegmentSizes(n); + + stopGrid(cfg.getIgniteInstanceName()); + awaitPartitionMapExchange(); + + cfg = getConfiguration(cfg.getIgniteInstanceName()); + + if (cfgUpdater != null) + cfgUpdater.accept(cfg); + + // To avoid a race between compressor and getting the segment sizes. + if (cfg.getDataStorageConfiguration().isWalCompactionEnabled()) + cfg.getDataStorageConfiguration().setWalCompactionEnabled(false); + + n = startGrid(cfg); + awaitPartitionMapExchange(); + + checkLocalSegmentSizes(n); + } + + /** + * Check that local segment sizes in the memory and actual match. + * + * @param n Node. 
+ */ + private void checkLocalSegmentSizes(IgniteEx n) { + FileWriteAheadLogManager wal = (FileWriteAheadLogManager)n.context().cache().context().wal(); + + File walWorkDir = getFieldValue(wal, "walWorkDir"); + File walArchiveDir = getFieldValue(wal, "walArchiveDir"); + + Map expSegmentSize = new HashMap<>(); + + F.asList(walArchiveDir.listFiles(WAL_SEGMENT_COMPACTED_OR_RAW_FILE_FILTER)) + .stream() + .map(FileDescriptor::new) + .forEach(fd -> { + if (fd.isCompressed()) + expSegmentSize.put(fd.idx(), fd.file().length()); + else + expSegmentSize.putIfAbsent(fd.idx(), fd.file().length()); + }); + + FileWriteHandle currHnd = getFieldValue(wal, "currHnd"); + + if (!walArchiveDir.equals(walWorkDir)) { + long absIdx = currHnd.getSegmentId(); + int segments = n.configuration().getDataStorageConfiguration().getWalSegments(); + + for (long i = absIdx - (absIdx % segments); i <= absIdx; i++) + expSegmentSize.putIfAbsent(i, new File(walWorkDir, fileName(i % segments)).length()); + } + + assertEquals(currHnd.getSegmentId() + 1, expSegmentSize.size()); + + Map segmentSize = getFieldValue(wal, "segmentSize"); + assertEquals(expSegmentSize.size(), segmentSize.size()); + + expSegmentSize.forEach((idx, size) -> { + assertEquals(idx.toString(), size, segmentSize.get(idx)); + assertEquals(idx.toString(), size.longValue(), wal.segmentSize(idx)); + }); + + assertEquals(0, wal.segmentSize(currHnd.getSegmentId() + 1)); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/LinkMapTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/LinkMapTest.java new file mode 100644 index 00000000000000..ee2d436362f873 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/LinkMapTest.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation; + +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.internal.mem.unsafe.UnsafeMemoryProvider; +import org.apache.ignite.internal.pagemem.FullPageId; +import org.apache.ignite.internal.pagemem.PageIdAllocator; +import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.pagemem.impl.PageMemoryNoStoreImpl; +import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; + +/** + * Class for LinkMap tests. + */ +public class LinkMapTest extends GridCommonAbstractTest { + /** */ + protected static final int PAGE_SIZE = 512; + + /** */ + protected static final long MB = 1024 * 1024; + + /** + * Test that LinkMap works. 
+ * @throws Exception + */ + @Test + public void test() throws Exception { + PageMemory pageMem = createPageMemory(); + + int cacheGroupId = 1; + + String groupName = "test"; + + FullPageId pageId = new FullPageId(pageMem.allocatePage(cacheGroupId, 0, PageIdAllocator.FLAG_DATA), cacheGroupId); + + LinkMap map = new LinkMap(cacheGroupId, groupName, pageMem, pageId.pageId(), true); + + for (int i = 0; i < 10_000; i++) + map.put(i, i + 1); + + for (int i = 0; i < 10_000; i++) + assertEquals(i + 1, map.get(i)); + } + + /** + * Create page memory for LinkMap tree. + */ + protected PageMemory createPageMemory() throws Exception { + DataRegionConfiguration plcCfg = new DataRegionConfiguration() + .setInitialSize(2 * MB) + .setMaxSize(2 * MB); + + PageMemory pageMem = new PageMemoryNoStoreImpl(log, + new UnsafeMemoryProvider(log), + null, + PAGE_SIZE, + plcCfg, + new LongAdderMetric("NO_OP", null), + true); + + pageMem.start(); + + return pageMem; + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreePageMemoryImplTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreePageMemoryImplTest.java index 8f985cd3bdb051..3e7328150caa65 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreePageMemoryImplTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreePageMemoryImplTest.java @@ -108,6 +108,7 @@ public class BPlusTreePageMemoryImplTest extends BPlusTreeSelfTest { PageMemory mem = new PageMemoryImpl( provider, sizes, sharedCtx, + sharedCtx.pageStore(), PAGE_SIZE, (fullPageId, byteBuf, tag) -> { assert false : "No page replacement should happen during the test"; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreeReuseListPageMemoryImplTest.java 
b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreeReuseListPageMemoryImplTest.java index 1e67469a1f0a82..456c9d60160742 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreeReuseListPageMemoryImplTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreeReuseListPageMemoryImplTest.java @@ -107,6 +107,7 @@ public class BPlusTreeReuseListPageMemoryImplTest extends BPlusTreeReuseSelfTest PageMemory mem = new PageMemoryImpl( provider, sizes, sharedCtx, + sharedCtx.pageStore(), PAGE_SIZE, (fullPageId, byteBuf, tag) -> { assert false : "No page replacement (rotation with disk) should happen during the test"; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IgnitePageMemReplaceDelayedWriteUnitTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IgnitePageMemReplaceDelayedWriteUnitTest.java index a8f9ee2d002f98..1f6d5ce0526be9 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IgnitePageMemReplaceDelayedWriteUnitTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IgnitePageMemReplaceDelayedWriteUnitTest.java @@ -275,7 +275,7 @@ private PageMemoryImpl createPageMemory(IgniteConfiguration cfg, PageStoreWriter } }; - PageMemoryImpl memory = new PageMemoryImpl(provider, sizes, sctx, pageSize, + PageMemoryImpl memory = new PageMemoryImpl(provider, sizes, sctx, sctx.pageStore(), pageSize, pageWriter, null, () -> true, memMetrics, PageMemoryImpl.ThrottlingPolicy.DISABLED, clo); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IndexStoragePageMemoryImplTest.java 
b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IndexStoragePageMemoryImplTest.java index 67aa12b58e9c45..19e67b47c9b8fd 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IndexStoragePageMemoryImplTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IndexStoragePageMemoryImplTest.java @@ -123,6 +123,7 @@ public class IndexStoragePageMemoryImplTest extends IndexStorageSelfTest { return new PageMemoryImpl( provider, sizes, sharedCtx, + sharedCtx.pageStore(), PAGE_SIZE, (fullPageId, byteBuf, tag) -> { assert false : "No page replacement (rotation with disk) should happen during the test"; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpPageStoreManager.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpPageStoreManager.java index 3c7a2ede4a7f0c..c5cc3728a9b94c 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpPageStoreManager.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpPageStoreManager.java @@ -23,6 +23,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.LongConsumer; import java.util.function.Predicate; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.configuration.CacheConfiguration; @@ -30,11 +31,11 @@ import org.apache.ignite.internal.pagemem.FullPageId; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager; +import org.apache.ignite.internal.pagemem.store.PageStore; import org.apache.ignite.internal.processors.cache.CacheGroupContext; import 
org.apache.ignite.internal.processors.cache.CacheGroupDescriptor; import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.StoredCacheData; -import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.lang.IgniteFuture; @@ -57,7 +58,7 @@ public class NoOpPageStoreManager implements IgnitePageStoreManager { /** {@inheritDoc} */ @Override public void initialize(int cacheId, int partitions, String workingDir, - LongAdderMetric tracker) throws IgniteCheckedException { + LongConsumer tracker) throws IgniteCheckedException { // No-op. } @@ -88,8 +89,8 @@ public class NoOpPageStoreManager implements IgnitePageStoreManager { } /** {@inheritDoc} */ - @Override public void read(int grpId, long pageId, ByteBuffer pageBuf) throws IgniteCheckedException { - + @Override public void read(int grpId, long pageId, ByteBuffer pageBuf, boolean keepCrc) throws IgniteCheckedException { + // No-op. } /** {@inheritDoc} */ @@ -103,8 +104,9 @@ public class NoOpPageStoreManager implements IgnitePageStoreManager { } /** {@inheritDoc} */ - @Override public void write(int grpId, long pageId, ByteBuffer pageBuf, int tag) throws IgniteCheckedException { + @Override public PageStore write(int grpId, long pageId, ByteBuffer pageBuf, int tag, boolean calculateCrc) throws IgniteCheckedException { // No-op. + return null; } /** {@inheritDoc} */ @@ -150,11 +152,6 @@ public class NoOpPageStoreManager implements IgnitePageStoreManager { return allocator.get(); } - /** {@inheritDoc} */ - @Override public long metaPageId(int grpId) { - return 1; - } - /** {@inheritDoc} */ @Override public void start(GridCacheSharedContext cctx) throws IgniteCheckedException { // No-op. 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpWALManager.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpWALManager.java index b40d6c56d01795..2e78ad03c4b4af 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpWALManager.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpWALManager.java @@ -164,6 +164,11 @@ public class NoOpWALManager implements IgniteWriteAheadLogManager { // No-op. } + /** {@inheritDoc} */ + @Override public long currentSegment() { + return 0; + } + /** {@inheritDoc} */ @Override public int walArchiveSegments() { return 0; @@ -183,4 +188,14 @@ public class NoOpWALManager implements IgniteWriteAheadLogManager { @Override public long maxArchivedSegmentToDelete() { return -1; } + + /** {@inheritDoc} */ + @Override public long segmentSize(long idx) { + return -1; + } + + /** {@inheritDoc} */ + @Override public WALPointer lastWritePointer() { + return null; + } } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplNoLoadTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplNoLoadTest.java index 9f74ec498b2ebd..51e29fff39ec09 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplNoLoadTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplNoLoadTest.java @@ -114,6 +114,7 @@ public class PageMemoryImplNoLoadTest extends PageMemoryNoLoadSelfTest { provider, sizes, sharedCtx, + sharedCtx.pageStore(), PAGE_SIZE, (fullPageId, byteBuf, tag) -> { assert false : "No page replacement (rotation with disk) should happen during the test"; diff --git 
a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplTest.java index 9ea27f99653762..1632e733db8ec8 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplTest.java @@ -43,6 +43,7 @@ import org.apache.ignite.internal.pagemem.FullPageId; import org.apache.ignite.internal.pagemem.PageUtils; import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager; +import org.apache.ignite.internal.pagemem.store.PageStore; import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.persistence.CheckpointLockStateChecker; import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; @@ -327,7 +328,7 @@ private void doCheckpoint( PageStoreWriter pageStoreWriter = (fullPageId, buf, tag) -> { assertNotNull(tag); - pageStoreMgr.write(fullPageId.groupId(), fullPageId.pageId(), buf, 1); + pageStoreMgr.write(fullPageId.groupId(), fullPageId.pageId(), buf, 1, false); }; for (FullPageId cpPage : cpPages) { @@ -642,6 +643,7 @@ private PageMemoryImpl createPageMemory( provider, sizes, sharedCtx, + sharedCtx.pageStore(), PAGE_SIZE, replaceWriter, new GridInClosure3X() { @@ -661,6 +663,7 @@ private PageMemoryImpl createPageMemory( provider, sizes, sharedCtx, + sharedCtx.pageStore(), PAGE_SIZE, replaceWriter, new GridInClosure3X() { @@ -701,7 +704,7 @@ private static class TestPageStoreManager extends NoOpPageStoreManager implement private Map storedPages = new HashMap<>(); /** {@inheritDoc} */ - @Override public void read(int grpId, long pageId, ByteBuffer pageBuf) throws IgniteCheckedException { + @Override public void read(int grpId, long 
pageId, ByteBuffer pageBuf, boolean keepCrc) throws IgniteCheckedException { FullPageId fullPageId = new FullPageId(pageId, grpId); byte[] bytes = storedPages.get(fullPageId); @@ -713,12 +716,14 @@ private static class TestPageStoreManager extends NoOpPageStoreManager implement } /** {@inheritDoc} */ - @Override public void write(int grpId, long pageId, ByteBuffer pageBuf, int tag) throws IgniteCheckedException { + @Override public PageStore write(int grpId, long pageId, ByteBuffer pageBuf, int tag, boolean calculateCrc) throws IgniteCheckedException { byte[] data = new byte[PAGE_SIZE]; pageBuf.get(data); storedPages.put(new FullPageId(pageId, grpId), data); + + return null; } /** {@inheritDoc} */ diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentUpdatesAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentUpdatesAbstractTest.java new file mode 100644 index 00000000000000..87a51d85cfccb8 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentUpdatesAbstractTest.java @@ -0,0 +1,209 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.query; + +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import javax.cache.Cache; +import javax.cache.expiry.Duration; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.query.ScanQuery; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; + +/** + * A base for tests that check the behaviour of scan queries run on a data set that is modified concurrently. + * Actual tests should implement a way of cache creation, modification and destruction. + */ +public abstract class ScanQueryConcurrentUpdatesAbstractTest extends GridCommonAbstractTest { + /** {@inheritDoc} */ + @Override protected void beforeTestsStarted() throws Exception { + startGrids(4); + } + + /** {@inheritDoc} */ + @Override protected void afterTestsStopped() throws Exception { + stopAllGrids(); + } + + /** + * Creates a cache with given parameters. + * + * @param cacheName Name of the cache. + * @param cacheMode Cache mode. + * @param expiration {@link Duration} for {@link javax.cache.expiry.ExpiryPolicy}. If {@code null}, then + * {@link javax.cache.expiry.ExpiryPolicy} won't be configured. + * + * @return Instance of the created cache. + */ + protected abstract IgniteCache createCache(String cacheName, CacheMode cacheMode, + Duration expiration); + + /** + * Performs modification of a provided cache. Records with keys in range {@code 0..(recordsNum - 1)} are updated. + * + * @param cache Cache to update. + * @param recordsNum Number of records to update. + */ + protected abstract void updateCache(IgniteCache cache, int recordsNum); + + /** + * Destroys the provided cache. + * + * @param cache Cache to destroy. 
+ */ + protected abstract void destroyCache(IgniteCache cache); + + /** + * Tests behaviour of scan queries with concurrent modification. + * + * @param cache Cache to test. + * @param recordsNum Number of records to load to the cache. + */ + private void testStableDataset(IgniteCache cache, int recordsNum) { + int iterations = 1000; + + AtomicBoolean finished = new AtomicBoolean(); + + try { + updateCache(cache, recordsNum); + GridTestUtils.runAsync(() -> { + while (!finished.get()) + updateCache(cache, recordsNum); + }); + + for (int i = 0; i < iterations; i++) { + List> res = cache.query(new ScanQuery()).getAll(); + + assertEquals("Unexpected query result size.", recordsNum, res.size()); + + for (Cache.Entry e : res) + assertEquals(e.getKey(), e.getValue()); + } + } + finally { + finished.set(true); + destroyCache(cache); + } + } + + /** + * Tests behaviour of scan queries with entries expired and modified concurrently. + * + * @param cache Cache to test. + */ + private void testExpiringDataset(IgniteCache cache) { + int iterations = 100; + int recordsNum = 100; + + try { + for (int i = 0; i < iterations; i++) { + updateCache(cache, recordsNum); + + long updateTime = U.currentTimeMillis(); + + List> res = cache.query(new ScanQuery()).getAll(); + + assertTrue("Query result set is too big: " + res.size(), res.size() <= recordsNum); + + for (Cache.Entry e : res) + assertEquals(e.getKey(), e.getValue()); + + while (U.currentTimeMillis() == updateTime) + doSleep(10L); + } + } + finally { + destroyCache(cache); + } + } + + /** */ + @Test + public void testReplicatedOneRecordLongExpiry() { + testStableDataset(createCache("replicated_long_expiry", + CacheMode.REPLICATED, Duration.ONE_HOUR), 1); + } + + /** */ + @Test + public void testReplicatedManyRecordsLongExpiry() { + testStableDataset(createCache("replicated_long_expiry", + CacheMode.REPLICATED, Duration.ONE_HOUR), 1000); + } + + /** */ + @Test + public void testReplicatedOneRecordNoExpiry() { + 
testStableDataset(createCache("replicated_no_expiry", + CacheMode.REPLICATED, null), 1); + } + + /** */ + @Test + public void testReplicatedManyRecordsNoExpiry() { + testStableDataset(createCache("replicated_no_expiry", + CacheMode.REPLICATED, null), 1000); + } + + /** */ + @Test + public void testPartitionedOneRecordLongExpiry() { + testStableDataset(createCache("partitioned_long_expiry", + CacheMode.PARTITIONED, Duration.ONE_HOUR), 1); + } + + /** */ + @Test + public void testPartitionedManyRecordsLongExpiry() { + testStableDataset(createCache("partitioned_long_expiry", + CacheMode.PARTITIONED, Duration.ONE_HOUR), 1000); + } + + /** */ + @Test + public void testPartitionedOneRecordNoExpiry() { + testStableDataset(createCache("partitioned_no_expiry", + CacheMode.PARTITIONED, null), 1); + } + + /** */ + @Test + public void testPartitionedManyRecordsNoExpiry() { + testStableDataset(createCache("partitioned_no_expiry", + CacheMode.PARTITIONED, null), 1000); + } + + /** */ + @Test + public void testPartitionedShortExpiry() { + testExpiringDataset(createCache("partitioned_short_expiry", + CacheMode.PARTITIONED, new Duration(TimeUnit.MILLISECONDS, 1))); + } + + /** */ + @Test + public void testReplicatedShortExpiry() { + testExpiringDataset(createCache("partitioned_short_expiry", + CacheMode.REPLICATED, new Duration(TimeUnit.MILLISECONDS, 1))); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentUpdatesTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentUpdatesTest.java new file mode 100644 index 00000000000000..598e89e49938e6 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentUpdatesTest.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.query; + +import javax.cache.expiry.CreatedExpiryPolicy; +import javax.cache.expiry.Duration; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.configuration.CacheConfiguration; + +/** + * {@link ScanQueryConcurrentUpdatesAbstractTest} with caches created, updated and destroyed using Java API. 
+ */ +public class ScanQueryConcurrentUpdatesTest extends ScanQueryConcurrentUpdatesAbstractTest { + /** {@inheritDoc} */ + @Override protected IgniteCache createCache(String cacheName, CacheMode cacheMode, + Duration expiration) { + CacheConfiguration cacheCfg = new CacheConfiguration<>(cacheName); + cacheCfg.setCacheMode(cacheMode); + if (expiration != null) { + cacheCfg.setExpiryPolicyFactory(CreatedExpiryPolicy.factoryOf(expiration)); + cacheCfg.setEagerTtl(true); + } + + return grid(0).createCache(cacheCfg); + } + + /** {@inheritDoc} */ + @Override protected void updateCache(IgniteCache cache, int recordsNum) { + for (int i = 0; i < recordsNum; i++) + cache.put(i, i); + } + + /** {@inheritDoc} */ + @Override protected void destroyCache(IgniteCache cache) { + cache.destroy(); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxOptimisticDeadlockDetectionCrossCacheTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxOptimisticDeadlockDetectionCrossCacheTest.java index 734b4609ab3986..e0ce155540c5e7 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxOptimisticDeadlockDetectionCrossCacheTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxOptimisticDeadlockDetectionCrossCacheTest.java @@ -78,6 +78,8 @@ public class TxOptimisticDeadlockDetectionCrossCacheTest extends GridCommonAbstr cfg.setCacheConfiguration(ccfg0, ccfg1); + cfg.setIncludeEventTypes(EventType.EVT_CACHE_OBJECT_LOCKED); + return cfg; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeFakeReuseSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeFakeReuseSelfTest.java index cab54a45319ba7..db00de358a6e81 100644 --- 
a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeFakeReuseSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeFakeReuseSelfTest.java @@ -20,6 +20,7 @@ import java.util.concurrent.ConcurrentLinkedDeque; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseBag; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; @@ -59,5 +60,10 @@ private static class FakeReuseList implements ReuseList { @Override public long recycledPagesCount() throws IgniteCheckedException { return deque.size(); } + + /** {@inheritDoc} */ + @Override public long initRecycledPage(long pageId, byte flag, PageIO initIO) throws IgniteCheckedException { + return pageId; + } } } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeReuseSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeReuseSelfTest.java index 8d50fadac472d0..94b3db652e91a8 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeReuseSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeReuseSelfTest.java @@ -21,6 +21,7 @@ import java.util.Set; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; @@ -83,7 +84,7 @@ public TestReuseList( boolean initNew, GridKernalContext ctx ) throws IgniteCheckedException { - super(cacheId, 
name, pageMem, wal, metaPageId, initNew, new TestPageLockListener(), ctx, null); + super(cacheId, name, pageMem, wal, metaPageId, initNew, new TestPageLockListener(), ctx, null, PageIdAllocator.FLAG_IDX); } /** diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeSelfTest.java index 82f11ade5fc6c1..9465dc7ff91c59 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeSelfTest.java @@ -2773,6 +2773,7 @@ public TestTree( reuseList, new IOVersions<>(new LongInnerIO(canGetRow)), new IOVersions<>(new LongLeafIO()), + PageIdAllocator.FLAG_IDX, new FailureProcessor(new GridTestKernalContext(log)) { @Override public boolean process(FailureContext failureCtx) { lockTrackerManager.dumpLocksToLog(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListSelfTest.java index 68f1668cd121bb..1c0f33dde647ce 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListSelfTest.java @@ -530,7 +530,8 @@ protected FreeList createFreeList(int pageSize) throws Exception { true, null, new GridTestKernalContext(log), - null + null, + PageIdAllocator.FLAG_IDX ); } @@ -705,6 +706,10 @@ private TestCacheObject(int size) { /** {@inheritDoc} */ @Nullable @Override public T value(CacheObjectValueContext ctx, boolean cpy) { + return value(ctx, cpy, null); + } + + @Override public @Nullable T value(CacheObjectValueContext ctx, boolean cpy, ClassLoader ldr) { return (T)data; } diff --git 
a/modules/core/src/test/java/org/apache/ignite/internal/processors/failure/FailureProcessorThreadDumpThrottlingTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/failure/FailureProcessorThreadDumpThrottlingTest.java index 027895a2c837aa..d694e5ff8b371f 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/failure/FailureProcessorThreadDumpThrottlingTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/failure/FailureProcessorThreadDumpThrottlingTest.java @@ -196,7 +196,7 @@ public void testDefaultThrottlingTimeout() throws Exception { IgniteEx ignite = ignite(0); assertEquals( - ignite.context().failure().dumpThreadsTrottlingTimeout, + ignite.context().failure().dumpThreadsTrottlingTimeout(), ignite.configuration().getFailureDetectionTimeout().longValue() ); } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/query/DummyQueryIndexing.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/query/DummyQueryIndexing.java index c5cfce11628c05..5f0b04ff9324f6 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/query/DummyQueryIndexing.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/query/DummyQueryIndexing.java @@ -30,16 +30,21 @@ import org.apache.ignite.internal.managers.IgniteMBeansManager; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.GridCacheContextInfo; import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot; import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.persistence.RootPage; +import 
org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointTimeoutLock; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.LinkMap; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcParameterMeta; import org.apache.ignite.internal.processors.query.schema.SchemaIndexCacheVisitor; import org.apache.ignite.internal.util.GridAtomicLong; import org.apache.ignite.internal.util.GridSpinBusyLock; +import org.apache.ignite.internal.util.collection.IntMap; import org.apache.ignite.internal.util.lang.GridCloseableIterator; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.lang.IgniteFuture; @@ -333,4 +338,15 @@ public class DummyQueryIndexing implements GridQueryIndexing { String colNamePtrn) { return null; } + + /** {@inheritDoc} */ + @Override public void defragment( + CacheGroupContext grpCtx, + CacheGroupContext newCtx, + PageMemoryEx partPageMem, + IntMap mappingByPart, + CheckpointTimeoutLock cpLock + ) throws IgniteCheckedException { + // No-op. 
+ } } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/security/client/ThinClientPermissionCheckTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/security/client/ThinClientPermissionCheckTest.java index 1230aba3ebca5b..152e2570a38c6f 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/security/client/ThinClientPermissionCheckTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/security/client/ThinClientPermissionCheckTest.java @@ -262,6 +262,15 @@ public void testSysOperation() throws Exception { assertThrowsWithCause(() -> runOperation(CLIENT, op), ClientAuthorizationException.class); } + /** */ + @Test + public void testAllowedOperationAfterSecurityViolation() throws Exception { + try (IgniteClient client = startClient(CLIENT_READ)) { + assertThrowsWithCause(() -> client.cache(CACHE).put("key", "value"), ClientAuthorizationException.class); + assertNull(client.cache(CACHE).get("key")); + } + } + /** * Gets all operations. * diff --git a/modules/core/src/test/java/org/apache/ignite/internal/util/BasicRateLimiterTest.java b/modules/core/src/test/java/org/apache/ignite/internal/util/BasicRateLimiterTest.java new file mode 100644 index 00000000000000..9f4bca931df50e --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/util/BasicRateLimiterTest.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.util; + +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.IgniteInterruptedCheckedException; +import org.apache.ignite.testframework.GridTestUtils; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +/** + * Rate limiter tests. + */ +public class BasicRateLimiterTest { + /** + * Check change speed at runtime. + */ + @Test + public void checkSpeedLimitChange() throws IgniteInterruptedCheckedException { + BasicRateLimiter limiter = new BasicRateLimiter(2); + + checkRate(limiter, 10); + + limiter.setRate(3); + + checkRate(limiter, 15); + + limiter.setRate(0.5); + + checkRate(limiter, 5); + } + + /** + * Check the average rate of the limiter. + * + * @param limiter Rate limiter. + * @param totalOps Number of operations. + */ + private void checkRate(BasicRateLimiter limiter, int totalOps) throws IgniteInterruptedCheckedException { + double permitsPerSec = limiter.getRate(); + long startTime = System.currentTimeMillis(); + + for (int i = 0; i < totalOps; i++) + limiter.acquire(1); + + long timeSpent = System.currentTimeMillis() - startTime; + + // Rate limiter aims for an average rate of permits per second. + assertEquals(1, Math.round((double)timeSpent / 1000 / totalOps * permitsPerSec)); + } + + /** + * Check that the rate can be set as unlimited. 
+ */ + @Test + public void testUnlimitedRate() throws IgniteInterruptedCheckedException { + BasicRateLimiter limiter = new BasicRateLimiter(0); + limiter.acquire(Integer.MAX_VALUE); + + limiter.setRate(1); + limiter.acquire(1); + + limiter.setRate(0); + limiter.acquire(Integer.MAX_VALUE); + } + + /** + * Check rate limit with multiple threads. + */ + @Test + public void checkLimitMultithreaded() throws Exception { + int permitsPerSec = 1_000; + int totalOps = 10_000; + + BasicRateLimiter limiter = new BasicRateLimiter(permitsPerSec); + + int threads = Runtime.getRuntime().availableProcessors(); + + CyclicBarrier ready = new CyclicBarrier(threads + 1); + + AtomicInteger cntr = new AtomicInteger(); + + IgniteInternalFuture fut = GridTestUtils.runMultiThreadedAsync(() -> { + ready.await(); + + do { + limiter.acquire(1); + } + while (!Thread.currentThread().isInterrupted() && cntr.incrementAndGet() < totalOps); + + return null; + }, threads, "worker"); + + ready.await(); + + long startTime = System.currentTimeMillis(); + + fut.get(); + + long timeSpent = System.currentTimeMillis() - startTime; + + // Rate limiter aims for an average rate of permits per second. 
+ assertEquals(1, Math.round((double)timeSpent / 1000 / totalOps * permitsPerSec)); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/util/GridStartupWithUndefinedIgniteHomeSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/util/GridStartupWithUndefinedIgniteHomeSelfTest.java index 75711a6cefb9c9..d09d2725b41f32 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/util/GridStartupWithUndefinedIgniteHomeSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/util/GridStartupWithUndefinedIgniteHomeSelfTest.java @@ -34,13 +34,13 @@ import static org.apache.ignite.IgniteSystemProperties.IGNITE_HOME; import static org.apache.ignite.internal.util.IgniteUtils.nullifyHomeDirectory; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; /** * Checks that node can be started without operations with undefined IGNITE_HOME. *

    - * Notes: - * 1. The test is intentionally made independent from {@link GridCommonAbstractTest} stuff. - * 2. Do not replace native Java asserts with JUnit ones - test won't fall on TeamCity. + * The test is intentionally made independent from {@link GridCommonAbstractTest} stuff. */ public class GridStartupWithUndefinedIgniteHomeSelfTest { /** */ @@ -67,13 +67,13 @@ public void testStartStopWithUndefinedIgniteHome() { // it will initialize cached value which is forbidden to override. String igniteHome = IgniteSystemProperties.getString(IGNITE_HOME); - assert igniteHome != null; + assertNotNull(igniteHome); U.setIgniteHome(null); String igniteHome0 = U.getIgniteHome(); - assert igniteHome0 == null; + assertNull(igniteHome0); IgniteLogger log = new JavaLogger(); @@ -93,11 +93,11 @@ public void testStartStopWithUndefinedIgniteHome() { cfg.setConnectorConfiguration(null); try (Ignite ignite = G.start(cfg)) { - assert ignite != null; + assertNotNull(ignite); igniteHome0 = U.getIgniteHome(); - assert igniteHome0 == null; + assertNull(igniteHome0); X.println("Stopping grid " + ignite.cluster().localNode().id()); } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/util/IgniteUtilsWorkDirectoryTest.java b/modules/core/src/test/java/org/apache/ignite/internal/util/IgniteUtilsWorkDirectoryTest.java new file mode 100644 index 00000000000000..03092b4200bb04 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/util/IgniteUtilsWorkDirectoryTest.java @@ -0,0 +1,257 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.util; + +import java.io.BufferedReader; +import java.io.File; +import java.io.InputStreamReader; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteSystemProperties; +import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import static java.lang.String.join; +import static java.lang.System.clearProperty; +import static java.lang.System.getProperty; +import static java.lang.System.setProperty; +import static org.apache.ignite.internal.util.IgniteUtils.workDirectory; +import static org.apache.ignite.internal.util.typedef.internal.U.getIgniteHome; +import static org.apache.ignite.internal.util.typedef.internal.U.nullifyHomeDirectory; +import static org.apache.ignite.testframework.GridTestUtils.assertThrows; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +/** */ +public class IgniteUtilsWorkDirectoryTest { + /** */ + private static final String USER_WORK_DIR = join(File.separator, + getIgniteHome(), "userWorkDirTest"); + + /** */ + private static final String USER_IGNITE_HOME = join(File.separator, + getIgniteHome(), "userIgniteHomeTest"); + + /** */ + private static final String USER_DIR_PROPERTY_VALUE = join(File.separator, + new File(getIgniteHome()).getParent(), "userDirPropertyTest"); + + /** */ + private static String dfltIgniteHome; + + /** */ + private static 
String dfltUserDir; + + /** */ + @Before + public void setup() { + dfltIgniteHome = getProperty(IgniteSystemProperties.IGNITE_HOME); + dfltUserDir = getProperty("user.dir"); + clearProperty(IgniteSystemProperties.IGNITE_HOME); + clearProperty("user.dir"); + } + + /** */ + @After + public void tearDown() { + if (dfltIgniteHome != null) + setProperty(IgniteSystemProperties.IGNITE_HOME, dfltIgniteHome); + if (dfltUserDir != null) + setProperty("user.dir", dfltUserDir); + } + + /** + * The work directory specified by the user has the highest priority + */ + @Test + public void testWorkDirectory1() { + executeGenericTest(true, false, false, + USER_WORK_DIR); + } + + /** + * The work directory specified by the user has the highest priority + */ + @Test + public void testWorkDirectory2() { + executeGenericTest(true, false, true, + USER_WORK_DIR); + } + + /** + * The work directory specified by the user has the highest priority + */ + @Test + public void testWorkDirectory3() { + executeGenericTest(true, true, false, + USER_WORK_DIR); + } + + /** + * The work directory specified by the user has the highest priority + */ + @Test + public void testWorkDirectory4() { + executeGenericTest(true, true, true, + USER_WORK_DIR); + } + + /** + * The method set/clear "user.dir" system property and invoke + * {@link IgniteUtils#workDirectory(java.lang.String, java.lang.String)} + * with ignite work directory and ignite home directory provided by user + * + * @param userWorkDirFlag need or not to pass userWorkDir to {@link IgniteUtils#workDirectory(java.lang.String, java.lang.String)} + * @param userIgniteHomeFlag need or not to pass userIgniteHome to {@link IgniteUtils#workDirectory(java.lang.String, java.lang.String)} + * @param userDirPropFlag need to set or clear "user.dir" system property + * @param expWorkDir expected Ignite work directory that will be returned by {@link IgniteUtils#workDirectory(java.lang.String, java.lang.String)} + */ + private void executeGenericTest(boolean 
userWorkDirFlag, boolean userIgniteHomeFlag, + boolean userDirPropFlag, String expWorkDir) { + if (userDirPropFlag) + setProperty("user.dir", USER_DIR_PROPERTY_VALUE); + else + clearProperty("user.dir"); + + String userWorkDir = ""; + if (userWorkDirFlag) + userWorkDir = USER_WORK_DIR; + + nullifyHomeDirectory(); + clearProperty(IgniteSystemProperties.IGNITE_HOME); + String userIgniteHome = ""; + if (userIgniteHomeFlag) + userIgniteHome = USER_IGNITE_HOME; + + String actualWorkDir = null; + try { + actualWorkDir = workDirectory(userWorkDir, userIgniteHome); + } + catch (Throwable e) { + fail(); + } + + assertEquals(expWorkDir, actualWorkDir); + + } + + /** */ + @Test + public void testNonAbsolutePathWorkDir() { + genericPathExceptionTest("nonAbsolutePathTestDirectory", + "Work directory path must be absolute: nonAbsolutePathTestDirectory"); + } + + /** + * This test only makes sense on Linux platform. + */ + @Test + public void testDisabledWriteToWorkDir() { + String strDir = join(File.separator, USER_WORK_DIR, "CannotWriteTestDirectory"); + File dir = new File(strDir); + + if (dir.exists()) { + resetPermission(strDir); + boolean deleted = U.delete(dir); + assertTrue("cannot delete file", deleted); + } + + dir.mkdirs(); + + try { + executeCommand("chmod 444 " + strDir); + executeCommand("chattr +i " + strDir); + + genericPathExceptionTest(strDir, "Cannot write to work directory: " + strDir); + } + finally { + resetPermission(strDir); + } + } + + /** + * This test only makes sense on Linux platform. 
+ */ + @Test + public void testDisabledWorkDirCreation() { + String strDirParent = join(File.separator, USER_WORK_DIR, "CannotWriteTestDirectory"); + File dirParent = new File(strDirParent); + + if (dirParent.exists()) { + resetPermission(strDirParent); + boolean deleted = U.delete(dirParent); + assertTrue("cannot delete file", deleted); + } + dirParent.mkdirs(); + + try { + executeCommand("chmod 444 " + strDirParent); + executeCommand("chattr +i " + strDirParent); + + String strDir = join(File.separator, strDirParent, "newDirectory"); + + genericPathExceptionTest(strDir, "Work directory does not exist and cannot be created: " + strDir); + } + finally { + resetPermission(strDirParent); + } + } + + /** */ + private static void resetPermission(String dir) { + executeCommand("chattr -i " + dir); + executeCommand("chmod 777 " + dir); + } + + /** */ + private static void executeCommand(String cmd) { + X.println("Command to execute: " + cmd); + + try { + Process proc = Runtime.getRuntime().exec(cmd); + + BufferedReader stdInput = new BufferedReader(new + InputStreamReader(proc.getInputStream())); + BufferedReader stdError = new BufferedReader(new + InputStreamReader(proc.getErrorStream())); + + String s; + + while ((s = stdInput.readLine()) != null) + X.println("stdInput: " + s); + while ((s = stdError.readLine()) != null) + X.println("stdError:" + s); + } + catch (Exception e) { + fail(); + } + } + + /** */ + private void genericPathExceptionTest(String userWorkDir, String expMsg) { + assertThrows(null, + () -> workDirectory(userWorkDir, null), + IgniteCheckedException.class, + expMsg + ); + } + +} diff --git a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryNetworkIssuesTest.java b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryNetworkIssuesTest.java index a751ac402325ea..0db781a4f9eff1 100644 --- a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryNetworkIssuesTest.java +++ 
b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryNetworkIssuesTest.java @@ -21,7 +21,9 @@ import java.net.InetSocketAddress; import java.net.Socket; import java.net.SocketTimeoutException; +import java.util.HashSet; import java.util.Map; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -211,16 +213,31 @@ public void testServerGetsSegmentedOnBecomeDangling() throws Exception { * Ensures sequential failure of two nodes has no additional issues. */ @Test - public void testFailTwoNodes() throws Exception { + public void testSequentialFailTwoNodes() throws Exception { + simulateFailureOfTwoNodes(true); + } + + /** + * Ensures non-sequential failure of two nodes has no additional issues. + */ + @Test + public void testNotSequentialFailTwoNodes() throws Exception { + simulateFailureOfTwoNodes(false); + } + + /** */ + private void simulateFailureOfTwoNodes(boolean sequentionally) throws Exception { failureDetectionTimeout = 1000; - startGrids(5); + int gridCnt = 7; + + startGrids(gridCnt); awaitPartitionMapExchange(); final CountDownLatch failLatch = new CountDownLatch(2); - for (int i = 0; i < 5; i++) { + for (int i = 0; i < gridCnt; i++) { ignite(i).events().localListen(evt -> { failLatch.countDown(); @@ -236,20 +253,28 @@ }, EVT_NODE_SEGMENTED); } - processNetworkThreads(ignite(2), t -> t.suspend()); - processNetworkThreads(ignite(3), t -> t.suspend()); + Set failedNodes = new HashSet<>(); + + failedNodes.add(2); + + if (sequentionally) + failedNodes.add(3); + else + failedNodes.add(4); + + failedNodes.forEach(idx -> processNetworkThreads(ignite(idx), Thread::suspend)); try { failLatch.await(10, TimeUnit.SECONDS); } finally { - processNetworkThreads(ignite(2), t -> t.resume()); - processNetworkThreads(ignite(3), t -> t.resume()); + failedNodes.forEach(idx -> 
processNetworkThreads(ignite(idx), Thread::resume)); } - assertFalse(segmentedNodes.contains(0)); - assertFalse(segmentedNodes.contains(1)); - assertFalse(segmentedNodes.contains(4)); + for (int i = 0; i < gridCnt; i++) { + if (!failedNodes.contains(i)) + assertFalse(segmentedNodes.contains(i)); + } } /** diff --git a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpiMBeanTest.java b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpiMBeanTest.java index dd935a96b7f35d..be2f147527b05b 100644 --- a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpiMBeanTest.java +++ b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpiMBeanTest.java @@ -39,7 +39,7 @@ import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.apache.ignite.events.EventType.EVT_NODE_SEGMENTED; -import static org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi.DISCO_METRICS; +import static org.apache.ignite.internal.managers.discovery.GridDiscoveryManager.DISCO_METRICS; import static org.apache.ignite.testframework.GridTestUtils.waitForCondition; /** diff --git a/modules/core/src/test/java/org/apache/ignite/spi/metric/jmx/DummyMBeanServer.java b/modules/core/src/test/java/org/apache/ignite/spi/metric/jmx/DummyMBeanServer.java new file mode 100644 index 00000000000000..4d9c467738b994 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/spi/metric/jmx/DummyMBeanServer.java @@ -0,0 +1,291 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.spi.metric.jmx; + +import java.io.ObjectInputStream; +import java.util.Set; +import javax.management.Attribute; +import javax.management.AttributeList; +import javax.management.MBeanInfo; +import javax.management.MBeanServer; +import javax.management.NotificationFilter; +import javax.management.NotificationListener; +import javax.management.ObjectInstance; +import javax.management.ObjectName; +import javax.management.QueryExp; +import javax.management.loading.ClassLoaderRepository; + +/** + * + */ +class DummyMBeanServer implements MBeanServer { + /** */ + public static final String[] DOMAINS = new String[0]; + + /** + * {@inheritDoc} + */ + @Override public ObjectInstance createMBean(String clsName, ObjectName name) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public ObjectInstance createMBean(String clsName, ObjectName name, ObjectName ldrName) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public ObjectInstance createMBean(String clsName, ObjectName name, Object[] params, String[] signature) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public ObjectInstance createMBean(String clsName, ObjectName name, ObjectName ldrName, Object[] params, String[] signature) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public ObjectInstance registerMBean(Object obj, ObjectName name) { + return new ObjectInstance(name, obj.getClass().getName()); + } + + /** + * {@inheritDoc} + */ + @Override public void unregisterMBean(ObjectName name) { + + } + + /** + * 
{@inheritDoc} + */ + @Override public ObjectInstance getObjectInstance(ObjectName name) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public Set queryMBeans(ObjectName name, QueryExp qry) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public Set queryNames(ObjectName name, QueryExp qry) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public boolean isRegistered(ObjectName name) { + return false; + } + + /** + * {@inheritDoc} + */ + @Override public Integer getMBeanCount() { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public Object getAttribute(ObjectName name, String attribute) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public AttributeList getAttributes(ObjectName name, String[] atts) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public void setAttribute(ObjectName name, Attribute attribute) { + + } + + /** + * {@inheritDoc} + */ + @Override public AttributeList setAttributes(ObjectName name, AttributeList atts) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public Object invoke(ObjectName name, String operationName, Object[] params, String[] signature) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public String getDefaultDomain() { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public String[] getDomains() { + return DOMAINS; + } + + /** + * {@inheritDoc} + */ + @Override public void addNotificationListener(ObjectName name, NotificationListener lsnr, NotificationFilter filter, Object handback) { + + } + + /** + * {@inheritDoc} + */ + @Override public void addNotificationListener(ObjectName name, ObjectName lsnr, NotificationFilter filter, Object handback) { + + } + + /** + * {@inheritDoc} + */ + @Override public void removeNotificationListener(ObjectName name, ObjectName lsnr) { + + } + + /** + * {@inheritDoc} + */ + @Override public void removeNotificationListener(ObjectName name, ObjectName lsnr, 
NotificationFilter filter, Object handback) { + + } + + /** + * {@inheritDoc} + */ + @Override public void removeNotificationListener(ObjectName name, NotificationListener lsnr) { + + } + + /** + * {@inheritDoc} + */ + @Override public void removeNotificationListener(ObjectName name, NotificationListener lsnr, NotificationFilter filter, Object handback) { + + } + + /** + * {@inheritDoc} + */ + @Override public MBeanInfo getMBeanInfo(ObjectName name) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public boolean isInstanceOf(ObjectName name, String clsName) { + return false; + } + + /** + * {@inheritDoc} + */ + @Override public Object instantiate(String clsName) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public Object instantiate(String clsName, ObjectName ldrName) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public Object instantiate(String clsName, Object[] params, String[] signature) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public Object instantiate(String clsName, ObjectName ldrName, Object[] params, String[] signature) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public ObjectInputStream deserialize(ObjectName name, byte[] data) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public ObjectInputStream deserialize(String clsName, byte[] data) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public ObjectInputStream deserialize(String clsName, ObjectName ldrName, byte[] data) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public ClassLoader getClassLoaderFor(ObjectName mbeanName) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public ClassLoader getClassLoader(ObjectName ldrName) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public ClassLoaderRepository getClassLoaderRepository() { + return null; + } +} diff --git 
a/modules/core/src/test/java/org/apache/ignite/spi/metric/jmx/JmxMetricExporterSpiTest.java b/modules/core/src/test/java/org/apache/ignite/spi/metric/jmx/JmxMetricExporterSpiTest.java new file mode 100644 index 00000000000000..ee6e0f94f8965d --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/spi/metric/jmx/JmxMetricExporterSpiTest.java @@ -0,0 +1,141 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.spi.metric.jmx; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Consumer; +import org.apache.commons.collections.iterators.EmptyIterator; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.spi.metric.Metric; +import org.apache.ignite.spi.metric.ReadOnlyMetricManager; +import org.apache.ignite.spi.metric.ReadOnlyMetricRegistry; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.IgniteTestResources; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; +import org.junit.Test; + +/** + * + */ +public class JmxMetricExporterSpiTest extends GridCommonAbstractTest { + /** + * + */ + @Test + public void testConcurrentRegistration() throws IgniteCheckedException { + JmxMetricExporterSpi spi = new JmxMetricExporterSpi(); + + new IgniteTestResources(new DummyMBeanServer()).inject(spi); + + TestMetricsManager testMgr = new TestMetricsManager(); + + spi.setMetricRegistry(testMgr); + + spi.spiStart("testInstance"); + + testMgr.runRegistersConcurrent(); + testMgr.runUnregisters(); + } + + /** + * + */ + @SuppressWarnings("unchecked") + private static class TestMetricsManager implements ReadOnlyMetricManager { + /** */ + private final List> creation = new ArrayList<>(); + + /** */ + private final List> rmv = new ArrayList<>(); + + /** {@inheritDoc} */ + @Override public void addMetricRegistryCreationListener(Consumer lsnr) { + creation.add(lsnr); + } + + /** {@inheritDoc} */ + @Override public void addMetricRegistryRemoveListener(Consumer lsnr) { + rmv.add(lsnr); + } + + /** {@inheritDoc} */ + @NotNull @Override public Iterator iterator() { + return EmptyIterator.INSTANCE; + } + + /** + * + */ + public void runRegistersConcurrent() { + final 
AtomicInteger cntr = new AtomicInteger(); + + GridTestUtils.runMultiThreadedAsync(() -> { + for (int i = 0; i < 20; i++) { + for (Consumer lsnr : creation) + lsnr.accept(new ReadOnlyMetricRegistryStub("stub-" + cntr.getAndIncrement())); + } + }, Runtime.getRuntime().availableProcessors() * 2, "runner-"); + + } + + /** + * Fires the registered remove listeners so previously registered stubs are unregistered. + */ + public void runUnregisters() { + for (int i = 0; i < Runtime.getRuntime().availableProcessors() * 2 * 20; i++) { + for (Consumer lsnr : rmv) + lsnr.accept(new ReadOnlyMetricRegistryStub("stub-" + i)); + } + } + + /** + * + */ + private static class ReadOnlyMetricRegistryStub implements ReadOnlyMetricRegistry { + /** */ + private final String name; + + /** + * @param name Stub name. + */ + private ReadOnlyMetricRegistryStub(String name) { + this.name = name; + } + + /** {@inheritDoc} */ + @Override public String name() { + return name; + } + + /** {@inheritDoc} */ + @Override public @Nullable M findMetric(String name) { + return null; + } + + /** {@inheritDoc} */ + @NotNull @Override public Iterator iterator() { + return EmptyIterator.INSTANCE; + } + } + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java b/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java index 9e68277ed017fb..0b19056594c018 100644 --- a/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java +++ b/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java @@ -123,6 +123,9 @@ import org.jetbrains.annotations.Nullable; import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.DFLT_STORE_DIR; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_KEY_ALGORITHM; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_SSL_PROTOCOL; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_STORE_TYPE; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static 
org.junit.Assert.assertTrue; @@ -1931,13 +1934,13 @@ public static boolean waitForCondition(GridAbsPredicate cond, BooleanSupplier wa * @throws IOException If keystore cannot be accessed. */ public static SSLContext sslContext() throws GeneralSecurityException, IOException { - SSLContext ctx = SSLContext.getInstance("TLS"); + SSLContext ctx = SSLContext.getInstance(DFLT_SSL_PROTOCOL); char[] storePass = keyStorePassword().toCharArray(); - KeyManagerFactory keyMgrFactory = KeyManagerFactory.getInstance("SunX509"); + KeyManagerFactory keyMgrFactory = KeyManagerFactory.getInstance(DFLT_KEY_ALGORITHM); - KeyStore keyStore = KeyStore.getInstance("JKS"); + KeyStore keyStore = KeyStore.getInstance(DFLT_STORE_TYPE); keyStore.load(new FileInputStream(U.resolveIgnitePath(GridTestProperties.getProperty("ssl.keystore.path"))), storePass); diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/junits/GridAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/testframework/junits/GridAbstractTest.java index 3fa2aaf3772f86..d2f044b04f3869 100755 --- a/modules/core/src/test/java/org/apache/ignite/testframework/junits/GridAbstractTest.java +++ b/modules/core/src/test/java/org/apache/ignite/testframework/junits/GridAbstractTest.java @@ -156,6 +156,7 @@ import static org.apache.ignite.IgniteSystemProperties.IGNITE_CLIENT_CACHE_CHANGE_MESSAGE_TIMEOUT; import static org.apache.ignite.IgniteSystemProperties.IGNITE_DISCO_FAILED_CLIENT_RECONNECT_DELAY; import static org.apache.ignite.IgniteSystemProperties.IGNITE_LOG_CLASSPATH_CONTENT_ON_STARTUP; +import static org.apache.ignite.IgniteSystemProperties.IGNITE_TEST_ENV; import static org.apache.ignite.IgniteSystemProperties.IGNITE_TO_STRING_INCLUDE_SENSITIVE; import static org.apache.ignite.IgniteSystemProperties.IGNITE_UPDATE_NOTIFIER; import static org.apache.ignite.IgniteSystemProperties.getBoolean; @@ -281,6 +282,7 @@ public String getName() { System.setProperty(IGNITE_DISCO_FAILED_CLIENT_RECONNECT_DELAY, 
"1"); System.setProperty(IGNITE_CLIENT_CACHE_CHANGE_MESSAGE_TIMEOUT, "1000"); System.setProperty(IGNITE_LOG_CLASSPATH_CONTENT_ON_STARTUP, "false"); + System.setProperty(IGNITE_TEST_ENV, "true"); S.setIncludeSensitiveSupplier(() -> getBoolean(IGNITE_TO_STRING_INCLUDE_SENSITIVE, true)); diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/junits/common/GridCommonAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/testframework/junits/common/GridCommonAbstractTest.java index 7e557a1ec7281f..8a35caa51e70e7 100755 --- a/modules/core/src/test/java/org/apache/ignite/testframework/junits/common/GridCommonAbstractTest.java +++ b/modules/core/src/test/java/org/apache/ignite/testframework/junits/common/GridCommonAbstractTest.java @@ -107,6 +107,7 @@ import org.apache.ignite.internal.processors.cache.transactions.IgniteTxManager; import org.apache.ignite.internal.processors.cache.verify.IdleVerifyResultV2; import org.apache.ignite.internal.processors.service.IgniteServiceProcessor; +import org.apache.ignite.internal.util.future.GridCompoundFuture; import org.apache.ignite.internal.util.lang.GridAbsPredicate; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.G; @@ -2559,6 +2560,8 @@ private boolean validateMetricsMethod(Method m) { * @throws IgniteCheckedException If failed. 
*/ protected void enableCheckpoints(Collection nodes, boolean enable) throws IgniteCheckedException { + GridCompoundFuture fut = new GridCompoundFuture<>(); + for (Ignite node : nodes) { assert !node.cluster().localNode().isClient(); @@ -2569,8 +2572,12 @@ protected void enableCheckpoints(Collection nodes, boolean enable) throw GridCacheDatabaseSharedManager dbMgr0 = (GridCacheDatabaseSharedManager) dbMgr; - dbMgr0.enableCheckpoints(enable).get(); + fut.add(dbMgr0.enableCheckpoints(enable)); } + + fut.markInitialized(); + + fut.get(); } /** diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/wal/record/RecordUtils.java b/modules/core/src/test/java/org/apache/ignite/testframework/wal/record/RecordUtils.java index f5da43d5a65aa3..27aa08a7a59731 100644 --- a/modules/core/src/test/java/org/apache/ignite/testframework/wal/record/RecordUtils.java +++ b/modules/core/src/test/java/org/apache/ignite/testframework/wal/record/RecordUtils.java @@ -26,12 +26,13 @@ import org.apache.ignite.internal.pagemem.wal.record.CheckpointRecord; import org.apache.ignite.internal.pagemem.wal.record.DataRecord; import org.apache.ignite.internal.pagemem.wal.record.ExchangeRecord; -import org.apache.ignite.internal.pagemem.wal.record.MasterKeyChangeRecord; +import org.apache.ignite.internal.pagemem.wal.record.MasterKeyChangeRecordV2; import org.apache.ignite.internal.pagemem.wal.record.MemoryRecoveryRecord; import org.apache.ignite.internal.pagemem.wal.record.MetastoreDataRecord; import org.apache.ignite.internal.pagemem.wal.record.MvccDataRecord; import org.apache.ignite.internal.pagemem.wal.record.MvccTxRecord; import org.apache.ignite.internal.pagemem.wal.record.PageSnapshot; +import org.apache.ignite.internal.pagemem.wal.record.ReencryptionStartRecord; import org.apache.ignite.internal.pagemem.wal.record.RollbackRecord; import org.apache.ignite.internal.pagemem.wal.record.SnapshotRecord; import org.apache.ignite.internal.pagemem.wal.record.SwitchSegmentRecord; @@ -55,12 
+56,14 @@ import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRootInlineRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRootRecord; +import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateIndexDataRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateLastAllocatedIndex; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateLastSuccessfulFullSnapshotId; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateLastSuccessfulSnapshotId; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateNextSnapshotId; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdatePartitionDataRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdatePartitionDataRecordV2; +import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdatePartitionDataRecordV3; import org.apache.ignite.internal.pagemem.wal.record.delta.NewRootInitRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.PageListMetaResetCountRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.PagesListAddPageRecord; @@ -110,11 +113,15 @@ import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.DATA_PAGE_UPDATE_RECORD; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.DATA_RECORD; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.ENCRYPTED_DATA_RECORD; +import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.ENCRYPTED_DATA_RECORD_V2; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.ENCRYPTED_RECORD; +import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.ENCRYPTED_RECORD_V2; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.EXCHANGE; 
import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.HEADER_RECORD; +import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.INDEX_META_PAGE_DELTA_RECORD; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.INIT_NEW_PAGE_RECORD; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.MASTER_KEY_CHANGE_RECORD; +import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.MASTER_KEY_CHANGE_RECORD_V2; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.MEMORY_RECOVERY; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.METASTORE_DATA_RECORD; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.META_PAGE_INIT; @@ -136,9 +143,11 @@ import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.PAGE_LIST_META_RESET_COUNT_RECORD; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.PAGE_RECORD; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.PARTITION_DESTROY; +import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.PARTITION_META_PAGE_DELTA_RECORD_V3; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.PARTITION_META_PAGE_UPDATE_COUNTERS; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.PARTITION_META_PAGE_UPDATE_COUNTERS_V2; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.PART_META_UPDATE_STATE; +import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.REENCRYPTION_START_RECORD; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.RESERVED; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.ROLLBACK_TX_RECORD; import static 
org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.ROTATED_ID_PART_RECORD; @@ -189,6 +198,7 @@ public class RecordUtils { put(PAGES_LIST_ADD_PAGE, RecordUtils::buildPagesListAddPageRecord); put(PAGES_LIST_REMOVE_PAGE, RecordUtils::buildPagesListRemovePageRecord); put(META_PAGE_INIT, RecordUtils::buildMetaPageInitRecord); + put(INDEX_META_PAGE_DELTA_RECORD, RecordUtils::buildMetaPageIndexDeltaRecord); put(PARTITION_META_PAGE_UPDATE_COUNTERS, RecordUtils::buildMetaPageUpdatePartitionDataRecord); put(MEMORY_RECOVERY, RecordUtils::buildMemoryRecoveryRecord); put(TRACKING_PAGE_DELTA, RecordUtils::buildTrackingPageDeltaRecord); @@ -209,13 +219,18 @@ public class RecordUtils { put(RESERVED, RecordUtils::buildReservedRecord); put(ROLLBACK_TX_RECORD, RecordUtils::buildRollbackRecord); put(PARTITION_META_PAGE_UPDATE_COUNTERS_V2, RecordUtils::buildMetaPageUpdatePartitionDataRecordV2); + put(PARTITION_META_PAGE_DELTA_RECORD_V3, RecordUtils::buildMetaPageUpdatePartitionDataRecordV3); put(MASTER_KEY_CHANGE_RECORD, RecordUtils::buildMasterKeyChangeRecord); + put(MASTER_KEY_CHANGE_RECORD_V2, RecordUtils::buildMasterKeyChangeRecordV2); + put(REENCRYPTION_START_RECORD, RecordUtils::buildEncryptionStatusRecord); put(ROTATED_ID_PART_RECORD, RecordUtils::buildRotatedIdPartRecord); put(MVCC_DATA_PAGE_MARK_UPDATED_RECORD, RecordUtils::buildDataPageMvccMarkUpdatedRecord); put(MVCC_DATA_PAGE_TX_STATE_HINT_UPDATED_RECORD, RecordUtils::buildDataPageMvccUpdateTxStateHintRecord); put(MVCC_DATA_PAGE_NEW_TX_STATE_HINT_UPDATED_RECORD, RecordUtils::buildDataPageMvccUpdateNewTxStateHintRecord); put(ENCRYPTED_RECORD, RecordUtils::buildEncryptedRecord); put(ENCRYPTED_DATA_RECORD, RecordUtils::buildEncryptedDataRecord); + put(ENCRYPTED_RECORD_V2, RecordUtils::buildEncryptedRecordV2); + put(ENCRYPTED_DATA_RECORD_V2, RecordUtils::buildEncryptedDataRecordV2); put(MVCC_DATA_RECORD, RecordUtils::buildMvccDataRecord); put(MVCC_TX_RECORD, RecordUtils::buildMvccTxRecord); put(CONSISTENT_CUT, 
RecordUtils::buildConsistentCutRecord); @@ -409,6 +424,11 @@ public static MetaPageInitRecord buildMetaPageInitRecord() { return new MetaPageInitRecord(1, 1, 1, 1, 1, 1); } + /** **/ + public static MetaPageUpdateIndexDataRecord buildMetaPageIndexDeltaRecord() { + return new MetaPageUpdateIndexDataRecord(1, 1, 0, 0); + } + /** **/ public static MetaPageUpdatePartitionDataRecord buildMetaPageUpdatePartitionDataRecord() { return new MetaPageUpdatePartitionDataRecord(1, 1, 1, 1, 1, 1, (byte)1, 1); @@ -514,8 +534,23 @@ public static MetaPageUpdatePartitionDataRecordV2 buildMetaPageUpdatePartitionDa } /** **/ - public static MasterKeyChangeRecord buildMasterKeyChangeRecord() { - return new MasterKeyChangeRecord("", new HashMap<>()); + public static MetaPageUpdatePartitionDataRecordV3 buildMetaPageUpdatePartitionDataRecordV3() { + return new MetaPageUpdatePartitionDataRecordV3(1, 1, 1, 1, 1, 1, (byte)1, 1, 1, 0, 0); + } + + /** **/ + public static UnsupportedWalRecord buildMasterKeyChangeRecord() { + return new UnsupportedWalRecord(MASTER_KEY_CHANGE_RECORD); + } + + /** **/ + public static MasterKeyChangeRecordV2 buildMasterKeyChangeRecordV2() { + return new MasterKeyChangeRecordV2("", Collections.emptyList()); + } + + /** **/ + public static ReencryptionStartRecord buildEncryptionStatusRecord() { + return new ReencryptionStartRecord(Collections.emptyMap()); } /** **/ @@ -548,6 +583,16 @@ public static UnsupportedWalRecord buildEncryptedDataRecord() { return new UnsupportedWalRecord(ENCRYPTED_DATA_RECORD); } + /** **/ + public static UnsupportedWalRecord buildEncryptedRecordV2() { + return new UnsupportedWalRecord(ENCRYPTED_RECORD_V2); + } + + /** **/ + public static UnsupportedWalRecord buildEncryptedDataRecordV2() { + return new UnsupportedWalRecord(ENCRYPTED_DATA_RECORD_V2); + } + /** **/ public static MvccDataRecord buildMvccDataRecord() { return new MvccDataRecord(Collections.emptyList(), 1); diff --git 
a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicTestSuite.java index d548e382687d29..93ca870a2d40b8 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicTestSuite.java @@ -75,6 +75,7 @@ import org.apache.ignite.internal.processors.cache.SetTxTimeoutOnPartitionMapExchangeTest; import org.apache.ignite.internal.processors.cache.distributed.IgniteRejectConnectOnNodeStopTest; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.EvictPartitionInLogTest; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.LinkMapTest; import org.apache.ignite.internal.processors.cache.persistence.pagemem.PagePoolTest; import org.apache.ignite.internal.processors.cache.query.continuous.DiscoveryDataDeserializationFailureHanderTest; import org.apache.ignite.internal.processors.cache.transactions.AtomicOperationsInTxTest; @@ -295,6 +296,8 @@ ClusterActivationStartedEventTest.class, IgniteThreadGroupNodeRestartTest.class, + + LinkMapTest.class, }) public class IgniteBasicTestSuite { } diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicWithPersistenceTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicWithPersistenceTestSuite.java index 210b1c7e85092f..6addefe5f98919 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicWithPersistenceTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicWithPersistenceTestSuite.java @@ -24,6 +24,8 @@ import org.apache.ignite.internal.ClusterBaselineNodesMetricsSelfTest; import org.apache.ignite.internal.GridNodeMetricsLogPdsSelfTest; import org.apache.ignite.internal.cluster.IgniteClusterIdTagTest; +import org.apache.ignite.internal.encryption.CacheGroupKeyChangeTest; 
+import org.apache.ignite.internal.encryption.CacheGroupReencryptionTest; import org.apache.ignite.internal.encryption.EncryptedCacheBigEntryTest; import org.apache.ignite.internal.encryption.EncryptedCacheCreateTest; import org.apache.ignite.internal.encryption.EncryptedCacheDestroyTest; @@ -74,6 +76,9 @@ MasterKeyChangeTest.class, MasterKeyChangeConsistencyCheckTest.class, + CacheGroupKeyChangeTest.class, + CacheGroupReencryptionTest.class, + EncryptionMXBeanTest.class, IgniteSnapshotManagerSelfTest.class, diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheDataStructuresSelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheDataStructuresSelfTestSuite.java index 0e09d4cb62e017..4a1abc13500474 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheDataStructuresSelfTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheDataStructuresSelfTestSuite.java @@ -41,6 +41,7 @@ import org.apache.ignite.internal.processors.cache.datastructures.IgniteQueueClusterReadOnlyTest; import org.apache.ignite.internal.processors.cache.datastructures.IgniteSequenceInternalCleanupTest; import org.apache.ignite.internal.processors.cache.datastructures.IgniteSetClusterReadOnlyTest; +import org.apache.ignite.internal.processors.cache.datastructures.OutOfMemoryVolatileRegionTest; import org.apache.ignite.internal.processors.cache.datastructures.SemaphoreFailoverNoWaitingAcquirerTest; import org.apache.ignite.internal.processors.cache.datastructures.SemaphoreFailoverSafeReleasePermitsTest; import org.apache.ignite.internal.processors.cache.datastructures.local.GridCacheLocalAtomicQueueApiSelfTest; @@ -134,6 +135,7 @@ IgniteReplicatedLockSelfTest.class, IgniteCacheAtomicReplicatedNodeRestartSelfTest.class, GridCacheReplicatedQueueRemoveSelfTest.class, + OutOfMemoryVolatileRegionTest.class, GridCachePartitionedSequenceApiSelfTest.class, 
GridCachePartitionedSequenceMultiNodeSelfTest.class, diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheMvccTestSuite1.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheMvccTestSuite1.java index 52ca6487577e23..e0589f73cb2bd4 100755 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheMvccTestSuite1.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheMvccTestSuite1.java @@ -63,6 +63,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheStopSelfTest; import org.apache.ignite.internal.processors.cache.GridCacheTcpClientDiscoveryMultiThreadedTest; import org.apache.ignite.internal.processors.cache.GridDataStorageConfigurationConsistencySelfTest; +import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicConcurrentUnorderedUpdateAllTest; import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicInvokeTest; import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicLocalInvokeTest; import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicLocalWithStoreInvokeTest; @@ -151,6 +152,7 @@ public static List> suite() { ignoredTests.add(IgniteCacheAtomicWithStoreInvokeTest.class); ignoredTests.add(IgniteCacheAtomicLocalInvokeTest.class); ignoredTests.add(IgniteCacheAtomicLocalWithStoreInvokeTest.class); + ignoredTests.add(IgniteCacheAtomicConcurrentUnorderedUpdateAllTest.class); ignoredTests.add(GridCachePartitionedLocalStoreSelfTest.class); ignoredTests.add(GridCacheReplicatedLocalStoreSelfTest.class); ignoredTests.add(CacheStoreReadFromBackupTest.class); diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java index 5876beef2ce690..f89759a2d0eecb 100755 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java +++ 
b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java @@ -102,6 +102,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheTxPartitionedLocalStoreSelfTest; import org.apache.ignite.internal.processors.cache.GridCacheTxUsersAffinityMapperSelfTest; import org.apache.ignite.internal.processors.cache.GridDataStorageConfigurationConsistencySelfTest; +import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicConcurrentUnorderedUpdateAllTest; import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicInvokeTest; import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicLocalInvokeTest; import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicLocalWithStoreInvokeTest; @@ -215,6 +216,7 @@ public static List> suite(Collection ignoredTests) { GridTestUtils.addTestIfNeeded(suite, IgniteCacheAtomicWithStoreInvokeTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, IgniteCacheAtomicLocalInvokeTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, IgniteCacheAtomicLocalWithStoreInvokeTest.class, ignoredTests); + GridTestUtils.addTestIfNeeded(suite, IgniteCacheAtomicConcurrentUnorderedUpdateAllTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, IgniteCacheTxInvokeTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, CacheEntryProcessorNonSerializableTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, CacheEntryProcessorExternalizableFailedTest.class, ignoredTests); diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java index 4935d49054a627..4fb38bd3e7e116 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java @@ -58,6 +58,8 @@ import 
org.apache.ignite.internal.processors.cache.MemoryPolicyConfigValidationTest; import org.apache.ignite.internal.processors.cache.NoPresentCacheInterceptorOnClientTest; import org.apache.ignite.internal.processors.cache.NonAffinityCoordinatorDynamicStartStopTest; +import org.apache.ignite.internal.processors.cache.TransactionValidationTest; +import org.apache.ignite.internal.processors.cache.distributed.CacheDetectLostPartitionsTest; import org.apache.ignite.internal.processors.cache.distributed.CacheLoadingConcurrentGridStartSelfTest; import org.apache.ignite.internal.processors.cache.distributed.CacheLoadingConcurrentGridStartSelfTestAllowOverwrite; import org.apache.ignite.internal.processors.cache.distributed.CacheLockReleaseNodeLeaveTest; @@ -381,6 +383,8 @@ public static List> suite(Collection ignoredTests) { GridTestUtils.addTestIfNeeded(suite, CachePartitionPartialCountersMapSelfTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, IgniteReflectionFactorySelfTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, NoPresentCacheInterceptorOnClientTest.class, ignoredTests); + GridTestUtils.addTestIfNeeded(suite, CacheDetectLostPartitionsTest.class, ignoredTests); + GridTestUtils.addTestIfNeeded(suite, TransactionValidationTest.class, ignoredTests); return suite; } diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite3.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite3.java index ebe0b5a2b6dd0a..1e4f59cdcbdbdd 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite3.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite3.java @@ -52,6 +52,7 @@ import org.apache.ignite.internal.processors.cache.distributed.near.GridCacheNearPartitionedP2PDisabledByteArrayValuesSelfTest; import org.apache.ignite.internal.processors.cache.distributed.near.GridCacheNearPartitionedP2PEnabledByteArrayValuesSelfTest; import 
org.apache.ignite.internal.processors.cache.distributed.near.GridCachePutArrayValueSelfTest; +import org.apache.ignite.internal.processors.cache.distributed.near.IgniteTxExceptionNodeFailTest; import org.apache.ignite.internal.processors.cache.distributed.near.IgniteTxReentryNearSelfTest; import org.apache.ignite.internal.processors.cache.distributed.replicated.GridCacheDaemonNodeReplicatedSelfTest; import org.apache.ignite.internal.processors.cache.distributed.replicated.GridCacheReplicatedAtomicGetAndTransformStoreSelfTest; @@ -152,6 +153,7 @@ public static List> suite(Collection ignoredTests) { GridTestUtils.addTestIfNeeded(suite, IgniteTxReentryNearSelfTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, IgniteTxReentryColocatedSelfTest.class, ignoredTests); + GridTestUtils.addTestIfNeeded(suite, IgniteTxExceptionNodeFailTest.class, ignoredTests); // Test for byte array value special case. GridTestUtils.addTestIfNeeded(suite, GridCacheLocalByteArrayValuesSelfTest.class, ignoredTests); diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteKernalSelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteKernalSelfTestSuite.java index 4c388ff8468006..b79d174669e9d4 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteKernalSelfTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteKernalSelfTestSuite.java @@ -37,6 +37,7 @@ import org.apache.ignite.internal.GridVersionSelfTest; import org.apache.ignite.internal.IgniteConcurrentEntryProcessorAccessStopTest; import org.apache.ignite.internal.IgniteConnectionConcurrentReserveAndRemoveTest; +import org.apache.ignite.internal.IgniteNodeValidationFailedEventTest; import org.apache.ignite.internal.IgniteUpdateNotifierPerClusterSettingSelfTest; import org.apache.ignite.internal.LongJVMPauseDetectorTest; import org.apache.ignite.internal.ThreadNameValidationTest; @@ -58,6 +59,7 @@ import 
org.apache.ignite.internal.processors.cluster.GridUpdateNotifierSelfTest; import org.apache.ignite.internal.processors.port.GridPortProcessorSelfTest; import org.apache.ignite.internal.util.GridStartupWithUndefinedIgniteHomeSelfTest; +import org.apache.ignite.internal.util.IgniteUtilsWorkDirectoryTest; import org.apache.ignite.spi.communication.GridCacheMessageSelfTest; import org.junit.runner.RunWith; import org.junit.runners.Suite; @@ -91,6 +93,7 @@ GridPortProcessorSelfTest.class, GridHomePathSelfTest.class, GridStartupWithUndefinedIgniteHomeSelfTest.class, + IgniteUtilsWorkDirectoryTest.class, GridVersionSelfTest.class, GridListenActorSelfTest.class, GridNodeLocalSelfTest.class, @@ -109,7 +112,8 @@ DeploymentRequestOfUnknownClassProcessingTest.class, ThreadNameValidationTest.class, NodeWithFilterRestartTest.class, - ClusterActiveStateChangeWithNodeOutOfBaselineTest.class + ClusterActiveStateChangeWithNodeOutOfBaselineTest.class, + IgniteNodeValidationFailedEventTest.class }) public class IgniteKernalSelfTestSuite { } diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteP2PSelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteP2PSelfTestSuite.java index eafb75a7c65502..301aee7fdff52b 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteP2PSelfTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteP2PSelfTestSuite.java @@ -20,6 +20,7 @@ import org.apache.ignite.internal.GridP2PAffinitySelfTest; import org.apache.ignite.internal.RaceOnDeployClassesWithSameAliases; import org.apache.ignite.internal.managers.deployment.GridDeploymentMessageCountSelfTest; +import org.apache.ignite.internal.managers.deployment.P2PCacheOperationIntoComputeTest; import org.apache.ignite.p2p.DeploymentClassLoaderCallableTest; import org.apache.ignite.p2p.GridP2PClassLoadingSelfTest; import org.apache.ignite.p2p.GridP2PComputeWithNestedEntryProcessorTest; @@ -72,7 +73,8 @@ 
GridP2PComputeWithNestedEntryProcessorTest.class, GridP2PCountTiesLoadClassDirectlyFromClassLoaderTest.class, RaceOnDeployClassesWithSameAliases.class, - GridP2PScanQueryWithTransformerTest.class + GridP2PScanQueryWithTransformerTest.class, + P2PCacheOperationIntoComputeTest.class }) public class IgniteP2PSelfTestSuite { } diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsMvccTestSuite4.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsMvccTestSuite4.java index 9978761d887e42..23256a0b24b6cd 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsMvccTestSuite4.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsMvccTestSuite4.java @@ -22,6 +22,9 @@ import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsCacheEntriesExpirationTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsContinuousRestartTestWithSharedGroupAndIndexes; +import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsDefragmentationEncryptionTest; +import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsDefragmentationRandomLruEvictionTest; +import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsDefragmentationTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsTaskCancelingTest; import org.apache.ignite.internal.processors.cache.persistence.db.IgnitePdsPartitionPreloadTest; import org.apache.ignite.internal.processors.cache.persistence.diagnostic.pagelocktracker.PageLockTrackerManagerTest; @@ -68,6 +71,11 @@ public static List> suite() { ignoredTests.add(OffHeapLockStackTest.class); ignoredTests.add(IgnitePdsCacheEntriesExpirationTest.class); + // Defragmentation. 
+ ignoredTests.add(IgnitePdsDefragmentationTest.class); + ignoredTests.add(IgnitePdsDefragmentationRandomLruEvictionTest.class); + ignoredTests.add(IgnitePdsDefragmentationEncryptionTest.class); + return IgnitePdsTestSuite4.suite(ignoredTests); } } diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java index 8ff45e44efa438..5510c4be527865 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java @@ -20,7 +20,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; - import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.HistoricalRebalanceHeuristicsTest; import org.apache.ignite.internal.processors.cache.persistence.IgniteDataStorageMetricsSelfTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsCacheStartStopWithFreqCheckpointTest; @@ -52,10 +51,13 @@ import org.apache.ignite.internal.processors.cache.persistence.db.SlowHistoricalRebalanceSmallHistoryTest; import org.apache.ignite.internal.processors.cache.persistence.db.checkpoint.CheckpointFailBeforeWriteMarkTest; import org.apache.ignite.internal.processors.cache.persistence.db.checkpoint.CheckpointFreeListTest; +import org.apache.ignite.internal.processors.cache.persistence.db.checkpoint.CheckpointListenerForRegionTest; import org.apache.ignite.internal.processors.cache.persistence.db.checkpoint.CheckpointStartLoggingTest; import org.apache.ignite.internal.processors.cache.persistence.db.checkpoint.IgniteCheckpointDirtyPagesForLowLoadTest; +import org.apache.ignite.internal.processors.cache.persistence.db.checkpoint.LightweightCheckpointTest; import org.apache.ignite.internal.processors.cache.persistence.db.filename.IgniteUidAsConsistentIdMigrationTest; import 
org.apache.ignite.internal.processors.cache.persistence.db.wal.FsyncWalRolloverDoesNotBlockTest; +import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteLocalWalSizeTest; import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteNodeStoppedDuringDisableWALTest; import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWALTailIsReachedDuringIterationOverArchiveTest; import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalFlushBackgroundSelfTest; @@ -211,6 +213,8 @@ public static void addRealPageStoreTests(List> suite, Collection GridTestUtils.addTestIfNeeded(suite, IgnitePdsCorruptedStoreTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, CheckpointFailBeforeWriteMarkTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, CheckpointFreeListTest.class, ignoredTests); + GridTestUtils.addTestIfNeeded(suite, CheckpointListenerForRegionTest.class, ignoredTests); + GridTestUtils.addTestIfNeeded(suite, LightweightCheckpointTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, CheckpointStartLoggingTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, FreeListCachingTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, IgniteWalIteratorSwitchSegmentTest.class, ignoredTests); @@ -231,5 +235,7 @@ public static void addRealPageStoreTests(List> suite, Collection GridTestUtils.addTestIfNeeded(suite, IgniteWalRebalanceLoggingTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, HistoricalRebalanceHeuristicsTest.class, ignoredTests); + + GridTestUtils.addTestIfNeeded(suite, IgniteLocalWalSizeTest.class, ignoredTests); } } diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite4.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite4.java index dfca7461eabd68..d63439587b536a 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite4.java +++ 
b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite4.java @@ -35,11 +35,15 @@ import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsCacheEntriesExpirationTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsConsistencyOnDelayedPartitionOwning; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsContinuousRestartTestWithSharedGroupAndIndexes; +import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsDefragmentationEncryptionTest; +import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsDefragmentationRandomLruEvictionTest; +import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsDefragmentationTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsRecoveryAfterFileCorruptionTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsRemoveDuringRebalancingTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsRestartAfterFailedToWriteMetaPageTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsSpuriousRebalancingOnNodeJoinTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsTaskCancelingTest; +import org.apache.ignite.internal.processors.cache.persistence.PendingTreeCorruptionTest; import org.apache.ignite.internal.processors.cache.persistence.db.IgnitePdsCacheWalDisabledOnRebalancingTest; import org.apache.ignite.internal.processors.cache.persistence.db.IgnitePdsPageReplacementDuringPartitionClearTest; import org.apache.ignite.internal.processors.cache.persistence.db.IgnitePdsPartitionPreloadTest; @@ -121,6 +125,13 @@ public static List> suite(Collection ignoredTests) { GridTestUtils.addTestIfNeeded(suite, WarmUpSelfTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, LoadAllWarmUpStrategySelfTest.class, ignoredTests); + // Defragmentation. 
+ GridTestUtils.addTestIfNeeded(suite, IgnitePdsDefragmentationTest.class, ignoredTests); + GridTestUtils.addTestIfNeeded(suite, IgnitePdsDefragmentationRandomLruEvictionTest.class, ignoredTests); + GridTestUtils.addTestIfNeeded(suite, IgnitePdsDefragmentationEncryptionTest.class, ignoredTests); + + GridTestUtils.addTestIfNeeded(suite, PendingTreeCorruptionTest.class, ignoredTests); + return suite; } diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiTestSuite.java index 8a7f0daa7760ec..bd53f923556692 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiTestSuite.java @@ -20,6 +20,7 @@ import org.apache.ignite.internal.managers.GridManagerLocalMessageListenerSelfTest; import org.apache.ignite.internal.managers.GridNoopManagerSelfTest; import org.apache.ignite.spi.encryption.KeystoreEncryptionSpiSelfTest; +import org.apache.ignite.spi.metric.jmx.JmxMetricExporterSpiTest; import org.junit.runner.RunWith; import org.junit.runners.Suite; @@ -58,7 +59,9 @@ // Local Message Listener tests. 
GridManagerLocalMessageListenerSelfTest.class, - KeystoreEncryptionSpiSelfTest.class + KeystoreEncryptionSpiSelfTest.class, + + JmxMetricExporterSpiTest.class }) public class IgniteSpiTestSuite { } diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteUtilSelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteUtilSelfTestSuite.java index e45ac074be172c..1e110b1fba6ff0 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteUtilSelfTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteUtilSelfTestSuite.java @@ -20,6 +20,7 @@ import org.apache.ignite.internal.IgniteVersionUtilsSelfTest; import org.apache.ignite.internal.pagemem.impl.PageIdUtilsSelfTest; import org.apache.ignite.internal.processors.cache.GridCacheUtilsSelfTest; +import org.apache.ignite.internal.util.BasicRateLimiterTest; import org.apache.ignite.internal.util.DistributedProcessCoordinatorLeftTest; import org.apache.ignite.internal.util.GridArraysSelfTest; import org.apache.ignite.internal.util.GridConcurrentMultiPairQueueTest; @@ -137,7 +138,9 @@ GridCountDownCallbackTest.class, - DistributedProcessCoordinatorLeftTest.class + DistributedProcessCoordinatorLeftTest.class, + + BasicRateLimiterTest.class }) public class IgniteUtilSelfTestSuite { } diff --git a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output index e84d8fb87815ba..f2c2ad20f6d894 100644 --- a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output +++ b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output @@ -27,7 +27,10 @@ This utility can do the following commands: ACTIVE_READ_ONLY - Activate cluster. Cache updates are denied. 
Print cluster baseline topology: - control.(sh|bat) --baseline + control.(sh|bat) --baseline [--verbose] + + Parameters: + verbose - Show the full list of node ips. Add nodes into baseline topology: control.(sh|bat) --baseline add consistentId1[,consistentId2,....,consistentIdN] [--yes] @@ -68,6 +71,27 @@ This utility can do the following commands: Change the master key: control.(sh|bat) --encryption change_master_key newMasterKeyName + Change the encryption key of the cache group: + control.(sh|bat) --encryption change_cache_key cacheGroupName + + View encryption key identifiers of the cache group: + control.(sh|bat) --encryption cache_key_ids cacheGroupName + + Display re-encryption status of the cache group: + control.(sh|bat) --encryption reencryption_status cacheGroupName + + Suspend re-encryption of the cache group: + control.(sh|bat) --encryption suspend_reencryption cacheGroupName + + Resume re-encryption of the cache group: + control.(sh|bat) --encryption resume_reencryption cacheGroupName + + View/change re-encryption rate limit: + control.(sh|bat) --encryption reencryption_rate_limit [new_limit] + + Parameters: + new_limit - Decimal value to change re-encryption rate limit (MB/s). + Kill compute task by session id: control.(sh|bat) --kill COMPUTE session_id @@ -174,10 +198,10 @@ If the file name isn't specified the output file name is: '.bin' Print list of available properties: control.(sh|bat) --property list - Get the property value + Get the property value: control.(sh|bat) --property get --name - Set the property value + Set the property value: control.(sh|bat) --property set --name --val Print system view content: @@ -194,6 +218,30 @@ If the file name isn't specified the output file name is: '.bin' name - Name of the metric which value should be printed. If name of the metric registry is specified, value of all its metrics will be printed. node_id - ID of the node to get the metric values from. If not set, random node will be chosen. 
+ Print information about potentially corrupted caches on local node: + control.(sh|bat) --persistence + + The same information is printed when info subcommand is passed: + control.(sh|bat) --persistence info + + Clean directories of caches with corrupted data files: + control.(sh|bat) --persistence clean corrupted + + Clean directories of all caches: + control.(sh|bat) --persistence clean all + + Clean directories of only given caches: + control.(sh|bat) --persistence clean caches cache1,cache2,cache3 + + Backup data files of corrupted caches only: + control.(sh|bat) --persistence backup corrupted + + Backup data files of all caches: + control.(sh|bat) --persistence backup all + + Backup data files of only given caches: + control.(sh|bat) --persistence backup caches cache1,cache2,cache3 + By default commands affecting the cluster require interactive confirmation. Use --yes option to disable it. diff --git a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output index e84d8fb87815ba..f2c2ad20f6d894 100644 --- a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output +++ b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output @@ -27,7 +27,10 @@ This utility can do the following commands: ACTIVE_READ_ONLY - Activate cluster. Cache updates are denied. Print cluster baseline topology: - control.(sh|bat) --baseline + control.(sh|bat) --baseline [--verbose] + + Parameters: + verbose - Show the full list of node ips. 
Add nodes into baseline topology: control.(sh|bat) --baseline add consistentId1[,consistentId2,....,consistentIdN] [--yes] @@ -68,6 +71,27 @@ This utility can do the following commands: Change the master key: control.(sh|bat) --encryption change_master_key newMasterKeyName + Change the encryption key of the cache group: + control.(sh|bat) --encryption change_cache_key cacheGroupName + + View encryption key identifiers of the cache group: + control.(sh|bat) --encryption cache_key_ids cacheGroupName + + Display re-encryption status of the cache group: + control.(sh|bat) --encryption reencryption_status cacheGroupName + + Suspend re-encryption of the cache group: + control.(sh|bat) --encryption suspend_reencryption cacheGroupName + + Resume re-encryption of the cache group: + control.(sh|bat) --encryption resume_reencryption cacheGroupName + + View/change re-encryption rate limit: + control.(sh|bat) --encryption reencryption_rate_limit [new_limit] + + Parameters: + new_limit - Decimal value to change re-encryption rate limit (MB/s). + Kill compute task by session id: control.(sh|bat) --kill COMPUTE session_id @@ -174,10 +198,10 @@ If the file name isn't specified the output file name is: '.bin' Print list of available properties: control.(sh|bat) --property list - Get the property value + Get the property value: control.(sh|bat) --property get --name - Set the property value + Set the property value: control.(sh|bat) --property set --name --val Print system view content: @@ -194,6 +218,30 @@ If the file name isn't specified the output file name is: '.bin' name - Name of the metric which value should be printed. If name of the metric registry is specified, value of all its metrics will be printed. node_id - ID of the node to get the metric values from. If not set, random node will be chosen. 
+ Print information about potentially corrupted caches on local node: + control.(sh|bat) --persistence + + The same information is printed when info subcommand is passed: + control.(sh|bat) --persistence info + + Clean directories of caches with corrupted data files: + control.(sh|bat) --persistence clean corrupted + + Clean directories of all caches: + control.(sh|bat) --persistence clean all + + Clean directories of only given caches: + control.(sh|bat) --persistence clean caches cache1,cache2,cache3 + + Backup data files of corrupted caches only: + control.(sh|bat) --persistence backup corrupted + + Backup data files of all caches: + control.(sh|bat) --persistence backup all + + Backup data files of only given caches: + control.(sh|bat) --persistence backup caches cache1,cache2,cache3 + By default commands affecting the cluster require interactive confirmation. Use --yes option to disable it. diff --git a/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/AveragePersonSalaryCallable.java b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/AveragePersonSalaryCallable.java new file mode 100644 index 00000000000000..b7ab2c31bd77f6 --- /dev/null +++ b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/AveragePersonSalaryCallable.java @@ -0,0 +1,200 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.tests.p2p.compute; + +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import javax.cache.processor.MutableEntry; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.lang.IgniteCallable; +import org.apache.ignite.resources.IgniteInstanceResource; +import org.apache.ignite.resources.LoggerResource; +import org.apache.ignite.tests.p2p.cache.Person; +import org.apache.ignite.transactions.Transaction; +import org.apache.ignite.transactions.TransactionConcurrency; +import org.apache.ignite.transactions.TransactionIsolation; + +/** + * This closure calculates average salary of person in the defined key range. + */ +public class AveragePersonSalaryCallable implements IgniteCallable { + /** Ignite instance. */ + @IgniteInstanceResource + private Ignite ignite; + + /** Logger. */ + @LoggerResource + private IgniteLogger log; + + /** Cache name. */ + private final String cacheName; + + /** Left range border. */ + private final int from; + + /** Right range border. */ + private final int to; + + /** + * @param cacheName Cache name. + * @param from First entry key. + * @param to Up border of keys. 
+ */ + public AveragePersonSalaryCallable(String cacheName, int from, int to) { + this.cacheName = cacheName; + this.from = from; + this.to = to; + } + + /** {@inheritDoc} */ + @Override public Double call() { + log.info("Job was started with parameters: [node=" + ignite.name() + + ", cache=" + cacheName + + ", from=" + from + + ", to=" + to + ']'); + + IgniteCache cache = ignite.cache(cacheName); + + if (cache == null) + return 0D; + + double avgSalary = calculateAverageSalary(cache); + + addPersonWithAverageSalary(cache, avgSalary); + + checkAverageSalaryThroughInvoke(cache, avgSalary); + + if (isTxCache(cache)) { + log.info("Transaction cache checks was triggered here."); + + for (TransactionIsolation isolation : TransactionIsolation.values()) { + for (TransactionConcurrency concurrency : TransactionConcurrency.values()) { + try (Transaction tx = ignite.transactions().txStart(concurrency, isolation)) { + double txAvgSalary = calculateAverageSalary(cache); + + assert Double.compare(txAvgSalary, avgSalary) == 0; + } + } + } + + for (TransactionIsolation isolation : TransactionIsolation.values()) { + for (TransactionConcurrency concurrency : TransactionConcurrency.values()) { + try (Transaction tx = ignite.transactions().txStart(concurrency, isolation)) { + addPersonWithAverageSalary(cache, avgSalary); + + checkAverageSalaryThroughInvoke(cache, avgSalary); + } + } + } + } + + return avgSalary; + } + + /** + * @param cache Ignite cache. + * @param avgSalary Average salary calculated previously. + */ + private void checkAverageSalaryThroughInvoke(IgniteCache cache, double avgSalary) { + double amount = 0; + + for (int i = from; i < to; i++) { + amount += cache.invoke(i, (MutableEntry entry, Object... 
arguments) -> + entry.getValue().getSalary()); + } + + assert Double.compare(avgSalary, amount / (to - from)) == 0; + } + + private boolean isTxCache(IgniteCache cache) { + CacheConfiguration ccfg = cache.getConfiguration(CacheConfiguration.class); + + return ccfg.getAtomicityMode() == CacheAtomicityMode.TRANSACTIONAL; + } + + /** + * Adds some person with average salary. + * + * @param cache Ignite cache. + * @param avgSalary Average salary. + */ + private void addPersonWithAverageSalary(IgniteCache cache, double avgSalary) { + Map persons = IntStream.range(from, to).boxed().map(id -> createAveragePerson(avgSalary, to + id)) + .collect(Collectors.toMap(Person::getId, Function.identity(), (u, v) -> { + throw new IllegalStateException(String.format("Duplicate key %s", u)); + }, TreeMap::new)); + + cache.putAll(persons); + + for (Integer key : persons.keySet()) { + Person p = cache.getAndPut(to + key, createAveragePerson(avgSalary, to + key)); + + assert p == null || Double.compare(avgSalary, p.getSalary()) == 0; + } + } + + /** + * Calculates average salary. + * + * @param cache Ignite cache. + * @return Average salary. + */ + private double calculateAverageSalary(IgniteCache cache) { + double amount = 0; + + Set keys = IntStream.range(from, to).boxed().collect(Collectors.toSet()); + + Map entries = cache.getAll(keys); + + for (Integer key : keys) { + Person p = cache.get(key); + + Person p1 = entries.get(key); + + assert p.equals(p1); + + amount += p.getSalary(); + } + + return amount / (to - from); + } + + /** + * Creates average person. + * + * @param avgSalary Average salary. + * @param id Id. 
+ */ + private Person createAveragePerson(double avgSalary, Integer id) { + Person p = new Person("John " + id); + + p.setId(id); + p.setLastName("Smith"); + p.setSalary(avgSalary); + + return p; + } +} diff --git a/modules/geospatial/src/test/java/org/apache/ignite/internal/processors/query/h2/H2IndexesSystemViewTest.java b/modules/geospatial/src/test/java/org/apache/ignite/internal/processors/query/h2/H2IndexesSystemViewTest.java index b1dab6e82dcd8d..35b5847f0fad83 100644 --- a/modules/geospatial/src/test/java/org/apache/ignite/internal/processors/query/h2/H2IndexesSystemViewTest.java +++ b/modules/geospatial/src/test/java/org/apache/ignite/internal/processors/query/h2/H2IndexesSystemViewTest.java @@ -53,16 +53,16 @@ public void testIndexesView() throws Exception { execSql("CREATE SPATIAL INDEX IDX_GEO_1 ON PUBLIC.AFF_CACHE(GEOM)"); String idxSql = "SELECT " + - " CACHE_ID," + - " CACHE_NAME," + - " SCHEMA_NAME," + - " TABLE_NAME," + - " INDEX_NAME," + - " INDEX_TYPE," + - " COLUMNS," + - " IS_PK," + - " IS_UNIQUE," + - " INLINE_SIZE" + + " CACHE_ID," + + " CACHE_NAME," + + " SCHEMA_NAME," + + " TABLE_NAME," + + " INDEX_NAME," + + " INDEX_TYPE," + + " COLUMNS," + + " IS_PK," + + " IS_UNIQUE," + + " INLINE_SIZE" + " FROM SYS.INDEXES ORDER BY TABLE_NAME, INDEX_NAME"; List> srvNodeIndexes = execSql(srv, idxSql); @@ -72,23 +72,22 @@ public void testIndexesView() throws Exception { for (List idx : clientNodeNodeIndexes) assertTrue(srvNodeIndexes.contains(idx)); - String[][] expectedResults = { - {"-825022849", "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "IDX_GEO_1", "SPATIAL", "\"GEOM\" ASC", "false", "false", "0"}, - {"-825022849", "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "_key_PK", "BTREE", "\"ID1\" ASC", "true", "true", "5"}, - {"-825022849", "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "_key_PK__SCAN_", "SCAN", "null", "false", "false", "0"}, - {"-825022849", "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "_key_PK_hash", "HASH", "\"ID1\" ASC", "true", 
"true", "0"}, - {"-825022849", "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "_key_PK_proxy", "BTREE", "\"ID1\" ASC", "false", "false", "0"} + Object[][] expectedResults = { + {-825022849, "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "IDX_GEO_1", "SPATIAL", "\"GEOM\" ASC", false, false, null}, + {-825022849, "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "__SCAN_", "SCAN", null, false, false, null}, + {-825022849, "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "_key_PK", "BTREE", "\"ID1\" ASC", true, true, 5}, + {-825022849, "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "_key_PK_hash", "HASH", "\"ID1\" ASC", false, true, null} }; for (int i = 0; i < srvNodeIndexes.size(); i++) { List resRow = srvNodeIndexes.get(i); - String[] expRow = expectedResults[i]; + Object[] expRow = expectedResults[i]; assertEquals(expRow.length, resRow.size()); for (int j = 0; j < expRow.length; j++) - assertEquals(expRow[j], String.valueOf(resRow.get(j))); + assertEquals(expRow[j], resRow.get(j)); } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlIndexViewWalker.java b/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlIndexViewWalker.java index 4a2cdd51984191..6384c716fd202f 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlIndexViewWalker.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlIndexViewWalker.java @@ -17,7 +17,6 @@ package org.apache.ignite.internal.managers.systemview.walker; -import org.apache.ignite.internal.processors.query.h2.database.H2IndexType; import org.apache.ignite.spi.systemview.view.SqlIndexView; import org.apache.ignite.spi.systemview.view.SystemViewRowAttributeWalker; @@ -30,34 +29,38 @@ public class SqlIndexViewWalker implements SystemViewRowAttributeWalker { /** {@inheritDoc} */ @Override public void visitAll(AttributeVisitor v) { - v.accept(0, "indexName", 
String.class); - v.accept(1, "indexType", H2IndexType.class); - v.accept(2, "columns", String.class); - v.accept(3, "schemaName", String.class); - v.accept(4, "tableName", String.class); - v.accept(5, "cacheName", String.class); - v.accept(6, "cacheId", int.class); - v.accept(7, "inlineSize", int.class); - v.accept(8, "isPk", boolean.class); - v.accept(9, "isUnique", boolean.class); + v.accept(0, "cacheGroupId", int.class); + v.accept(1, "cacheGroupName", String.class); + v.accept(2, "cacheId", int.class); + v.accept(3, "cacheName", String.class); + v.accept(4, "schemaName", String.class); + v.accept(5, "tableName", String.class); + v.accept(6, "indexName", String.class); + v.accept(7, "indexType", String.class); + v.accept(8, "columns", String.class); + v.accept(9, "isPk", boolean.class); + v.accept(10, "isUnique", boolean.class); + v.accept(11, "inlineSize", Integer.class); } /** {@inheritDoc} */ @Override public void visitAll(SqlIndexView row, AttributeWithValueVisitor v) { - v.accept(0, "indexName", String.class, row.indexName()); - v.accept(1, "indexType", H2IndexType.class, row.indexType()); - v.accept(2, "columns", String.class, row.columns()); - v.accept(3, "schemaName", String.class, row.schemaName()); - v.accept(4, "tableName", String.class, row.tableName()); - v.accept(5, "cacheName", String.class, row.cacheName()); - v.acceptInt(6, "cacheId", row.cacheId()); - v.acceptInt(7, "inlineSize", row.inlineSize()); - v.acceptBoolean(8, "isPk", row.isPk()); - v.acceptBoolean(9, "isUnique", row.isUnique()); + v.acceptInt(0, "cacheGroupId", row.cacheGroupId()); + v.accept(1, "cacheGroupName", String.class, row.cacheGroupName()); + v.acceptInt(2, "cacheId", row.cacheId()); + v.accept(3, "cacheName", String.class, row.cacheName()); + v.accept(4, "schemaName", String.class, row.schemaName()); + v.accept(5, "tableName", String.class, row.tableName()); + v.accept(6, "indexName", String.class, row.indexName()); + v.accept(7, "indexType", String.class, row.indexType()); 
+ v.accept(8, "columns", String.class, row.columns()); + v.acceptBoolean(9, "isPk", row.isPk()); + v.acceptBoolean(10, "isUnique", row.isUnique()); + v.accept(11, "inlineSize", Integer.class, row.inlineSize()); } /** {@inheritDoc} */ @Override public int count() { - return 10; + return 12; } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlSchemaViewWalker.java b/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlSchemaViewWalker.java index d43a9031e4b051..ca440e89b6ba8a 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlSchemaViewWalker.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlSchemaViewWalker.java @@ -29,13 +29,13 @@ public class SqlSchemaViewWalker implements SystemViewRowAttributeWalker { /** {@inheritDoc} */ @Override public void visitAll(AttributeVisitor v) { - v.accept(0, "name", String.class); + v.accept(0, "schemaName", String.class); v.accept(1, "predefined", boolean.class); } /** {@inheritDoc} */ @Override public void visitAll(SqlSchemaView row, AttributeWithValueVisitor v) { - v.accept(0, "name", String.class, row.name()); + v.accept(0, "schemaName", String.class, row.schemaName()); v.acceptBoolean(1, "predefined", row.predefined()); } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlTableViewWalker.java b/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlTableViewWalker.java index 8fd2cb2d6f08f7..0826618376acb3 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlTableViewWalker.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlTableViewWalker.java @@ -29,34 +29,38 @@ public class SqlTableViewWalker implements SystemViewRowAttributeWalker { /** {@inheritDoc} */ @Override 
public void visitAll(AttributeVisitor v) { - v.accept(0, "tableName", String.class); - v.accept(1, "schemaName", String.class); - v.accept(2, "cacheName", String.class); - v.accept(3, "cacheId", int.class); - v.accept(4, "affinityKeyColumn", String.class); - v.accept(5, "keyAlias", String.class); - v.accept(6, "valueAlias", String.class); - v.accept(7, "keyTypeName", String.class); - v.accept(8, "valueTypeName", String.class); - v.accept(9, "isIndexRebuildInProgress", boolean.class); + v.accept(0, "cacheGroupId", int.class); + v.accept(1, "cacheGroupName", String.class); + v.accept(2, "cacheId", int.class); + v.accept(3, "cacheName", String.class); + v.accept(4, "schemaName", String.class); + v.accept(5, "tableName", String.class); + v.accept(6, "affinityKeyColumn", String.class); + v.accept(7, "keyAlias", String.class); + v.accept(8, "valueAlias", String.class); + v.accept(9, "keyTypeName", String.class); + v.accept(10, "valueTypeName", String.class); + v.accept(11, "isIndexRebuildInProgress", boolean.class); } /** {@inheritDoc} */ @Override public void visitAll(SqlTableView row, AttributeWithValueVisitor v) { - v.accept(0, "tableName", String.class, row.tableName()); - v.accept(1, "schemaName", String.class, row.schemaName()); - v.accept(2, "cacheName", String.class, row.cacheName()); - v.acceptInt(3, "cacheId", row.cacheId()); - v.accept(4, "affinityKeyColumn", String.class, row.affinityKeyColumn()); - v.accept(5, "keyAlias", String.class, row.keyAlias()); - v.accept(6, "valueAlias", String.class, row.valueAlias()); - v.accept(7, "keyTypeName", String.class, row.keyTypeName()); - v.accept(8, "valueTypeName", String.class, row.valueTypeName()); - v.acceptBoolean(9, "isIndexRebuildInProgress", row.isIndexRebuildInProgress()); + v.acceptInt(0, "cacheGroupId", row.cacheGroupId()); + v.accept(1, "cacheGroupName", String.class, row.cacheGroupName()); + v.acceptInt(2, "cacheId", row.cacheId()); + v.accept(3, "cacheName", String.class, row.cacheName()); + v.accept(4, 
"schemaName", String.class, row.schemaName()); + v.accept(5, "tableName", String.class, row.tableName()); + v.accept(6, "affinityKeyColumn", String.class, row.affinityKeyColumn()); + v.accept(7, "keyAlias", String.class, row.keyAlias()); + v.accept(8, "valueAlias", String.class, row.valueAlias()); + v.accept(9, "keyTypeName", String.class, row.keyTypeName()); + v.accept(10, "valueTypeName", String.class, row.valueTypeName()); + v.acceptBoolean(11, "isIndexRebuildInProgress", row.isIndexRebuildInProgress()); } /** {@inheritDoc} */ @Override public int count() { - return 10; + return 12; } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DurableBackgroundCleanupIndexTreeTask.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DurableBackgroundCleanupIndexTreeTask.java index 23e9af7d2ee8cc..4d9166059041f3 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DurableBackgroundCleanupIndexTreeTask.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DurableBackgroundCleanupIndexTreeTask.java @@ -28,6 +28,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.persistence.metastorage.pendingtask.DurableBackgroundTask; import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver; import org.apache.ignite.internal.processors.query.h2.database.H2Tree; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.S; @@ -141,7 +142,8 @@ public DurableBackgroundCleanupIndexTreeTask( null, stats, null, - 0 + 0, + PageIoResolver.DEFAULT_PAGE_IO_RESOLVER ); trees0.add(tree); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Utils.java 
b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Utils.java index fcd27ce653ad27..ecd81691954855 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Utils.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Utils.java @@ -114,7 +114,7 @@ */ public class H2Utils { /** Query context H2 variable name. */ - static final String QCTX_VARIABLE_NAME = "_IGNITE_QUERY_CONTEXT"; + public static final String QCTX_VARIABLE_NAME = "_IGNITE_QUERY_CONTEXT"; /** * The default precision for a char/varchar value. @@ -992,7 +992,6 @@ else if (cctx.config().getQueryParallelism() != expectedParallelism) { * * @return Array of key and affinity columns. Key's, if it possible, splitted into simple components. */ - @SuppressWarnings("ZeroLengthArrayAllocation") @NotNull public static IndexColumn[] unwrapKeyColumns(GridH2Table tbl, IndexColumn[] idxCols) { ArrayList keyCols = new ArrayList<>(); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index de64068f438253..517bc69c8d90ce 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -51,6 +51,7 @@ import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.events.DiscoveryEvent; import org.apache.ignite.events.EventType; +import org.apache.ignite.events.SqlQueryExecutionEvent; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.GridTopic; import org.apache.ignite.internal.IgniteInternalFuture; @@ -60,9 +61,11 @@ import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener; import org.apache.ignite.internal.mxbean.SqlQueryMXBean; import 
org.apache.ignite.internal.mxbean.SqlQueryMXBeanImpl; +import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; import org.apache.ignite.internal.processors.cache.CacheGroupDescriptor; import org.apache.ignite.internal.processors.cache.CacheObjectValueContext; import org.apache.ignite.internal.processors.cache.CacheOperationContext; @@ -80,6 +83,9 @@ import org.apache.ignite.internal.processors.cache.mvcc.StaticMvccQueryTracker; import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.persistence.RootPage; +import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointTimeoutLock; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.LinkMap; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree; import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusMetaIO; @@ -126,6 +132,7 @@ import org.apache.ignite.internal.processors.query.h2.database.io.H2LeafIO; import org.apache.ignite.internal.processors.query.h2.database.io.H2MvccInnerIO; import org.apache.ignite.internal.processors.query.h2.database.io.H2MvccLeafIO; +import org.apache.ignite.internal.processors.query.h2.defragmentation.IndexingDefragmentation; import org.apache.ignite.internal.processors.query.h2.dml.DmlDistributedPlanInfo; import org.apache.ignite.internal.processors.query.h2.dml.DmlUpdateResultsIterator; import org.apache.ignite.internal.processors.query.h2.dml.DmlUpdateSingleEntryIterator; @@ -162,6 
+169,7 @@ import org.apache.ignite.internal.util.GridEmptyCloseableIterator; import org.apache.ignite.internal.util.GridSpinBusyLock; import org.apache.ignite.internal.util.IgniteUtils; +import org.apache.ignite.internal.util.collection.IntMap; import org.apache.ignite.internal.util.future.GridFutureAdapter; import org.apache.ignite.internal.util.lang.GridCloseableIterator; import org.apache.ignite.internal.util.lang.GridPlainRunnable; @@ -199,6 +207,7 @@ import static java.util.Objects.isNull; import static java.util.Objects.nonNull; import static org.apache.ignite.IgniteSystemProperties.IGNITE_MVCC_TX_SIZE_CACHING_THRESHOLD; +import static org.apache.ignite.events.EventType.EVT_SQL_QUERY_EXECUTION; import static org.apache.ignite.internal.processors.cache.mvcc.MvccCachingManager.TX_SIZE_THRESHOLD; import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.checkActive; import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.mvccEnabled; @@ -292,6 +301,8 @@ public class IgniteH2Indexing implements GridQueryIndexing { /** Parser. */ private QueryParser parser; + private IndexingDefragmentation defragmentation = new IndexingDefragmentation(this); + /** */ private final IgniteInClosure> logger = new IgniteInClosure>() { @Override public void apply(IgniteInternalFuture fut) { @@ -479,7 +490,6 @@ GridH2IndexBase createSortedIndex(String name, GridH2Table tbl, boolean pk, bool wrappedCols, inlineSize, segments, - qryCtxRegistry, log ); } @@ -1019,7 +1029,7 @@ private FieldsQueryCursor> executeCommand( IgniteQueryErrorCode.UNSUPPORTED_OPERATION); } - Long qryId = registerRunningQuery(qryDesc, null); + Long qryId = registerRunningQuery(qryDesc, qryParams, null); CommandResult res = null; @@ -1202,7 +1212,7 @@ private List>> executeDml( ) { IndexingQueryFilter filter = (qryDesc.local() ? 
backupFilter(null, qryParams.partitions()) : null); - Long qryId = registerRunningQuery(qryDesc, cancel); + Long qryId = registerRunningQuery(qryDesc, qryParams, cancel); Exception failReason = null; @@ -1287,7 +1297,7 @@ private List>> executeSelect( assert cancel != null; // Register query. - Long qryId = registerRunningQuery(qryDesc, cancel); + Long qryId = registerRunningQuery(qryDesc, qryParams, cancel); try (TraceSurroundings ignored = MTC.support(ctx.tracing().create(SQL_CURSOR_OPEN, MTC.span()))) { GridNearTxLocal tx = null; @@ -1546,17 +1556,29 @@ private Iterable> lockSelectedRows(Iterable> cur, GridCacheConte * Register running query. * * @param qryDesc Query descriptor. + * @param qryParams Query parameters. * @param cancel Query cancel state holder. * @return Id of registered query or {@code null} if query wasn't registered. */ - private Long registerRunningQuery(QueryDescriptor qryDesc, GridQueryCancel cancel) { - return runningQryMgr.register( + private Long registerRunningQuery(QueryDescriptor qryDesc, QueryParameters qryParams, GridQueryCancel cancel) { + Long res = runningQryMgr.register( qryDesc.sql(), GridCacheQueryType.SQL_FIELDS, qryDesc.schemaName(), qryDesc.local(), cancel ); + + if (ctx.event().isRecordable(EVT_SQL_QUERY_EXECUTION)) { + ctx.event().record(new SqlQueryExecutionEvent( + ctx.discovery().localNode(), + GridCacheQueryType.SQL_FIELDS.name() + " query execution.", + qryDesc.sql(), + qryParams.arguments(), + ctx.security().enabled() ? 
ctx.security().securityContext().subject().id() : null)); + } + + return res; } /** @@ -1600,6 +1622,7 @@ private void checkSecurity(Collection cacheIds) { fldsQry.setTimeout(timeout, TimeUnit.MILLISECONDS); fldsQry.setPageSize(pageSize); fldsQry.setLocal(true); + fldsQry.setLazy(U.isFlagSet(flags, GridH2QueryRequest.FLAG_LAZY)); boolean loc = true; @@ -1643,7 +1666,8 @@ private void checkSecurity(Collection cacheIds) { .setEnforceJoinOrder(fldsQry.isEnforceJoinOrder()) .setLocal(fldsQry.isLocal()) .setPageSize(fldsQry.getPageSize()) - .setTimeout(fldsQry.getTimeout(), TimeUnit.MILLISECONDS); + .setTimeout(fldsQry.getTimeout(), TimeUnit.MILLISECONDS) + .setLazy(fldsQry.isLazy()); QueryCursorImpl> cur; @@ -2448,6 +2472,7 @@ private JavaObjectSerializer h2Serializer() { reuseList, H2ExtrasInnerIO.getVersions(inlineSize, mvccEnabled), H2ExtrasLeafIO.getVersions(inlineSize, mvccEnabled), + PageIdAllocator.FLAG_IDX, ctx.failure(), lockLsnr ) { @@ -2880,7 +2905,12 @@ private UpdateResult executeUpdateNonTransactional( .setEnforceJoinOrder(qryDesc.enforceJoinOrder()) .setLocal(qryDesc.local()) .setPageSize(qryParams.pageSize()) - .setTimeout(qryParams.timeout(), TimeUnit.MILLISECONDS); + .setTimeout(qryParams.timeout(), TimeUnit.MILLISECONDS) + // On no MVCC mode we cannot use lazy mode when UPDATE query contains updated columns + // in WHERE condition because it may be cause of update one entry several times + // (when index for such columns is selected for scan): + // e.g. : UPDATE test SET val = val + 1 WHERE val >= ? 
+ .setLazy(qryParams.lazy() && plan.canSelectBeLazy()); Iterable> cur; @@ -3013,7 +3043,10 @@ else if (plan.hasRows()) { .setEnforceJoinOrder(qryDesc.enforceJoinOrder()) .setLocal(qryDesc.local()) .setPageSize(qryParams.pageSize()) - .setTimeout((int)timeout, TimeUnit.MILLISECONDS); + .setTimeout((int)timeout, TimeUnit.MILLISECONDS) + // In MVCC mode we can use lazy mode always (when is set up) without dependency on + // updated columns and WHERE condition. + .setLazy(qryParams.lazy()); FieldsQueryCursor> cur = executeSelectForDml( qryDesc.schemaName(), @@ -3054,6 +3087,9 @@ else if (plan.hasRows()) { if (distributedPlan.isReplicatedOnly()) flags |= GridH2QueryRequest.FLAG_REPLICATED; + if (qryParams.lazy()) + flags |= GridH2QueryRequest.FLAG_LAZY; + flags = GridH2QueryRequest.setDataPageScanEnabled(flags, qryParams.dataPageScanEnabled()); @@ -3158,4 +3194,15 @@ public DistributedSqlConfiguration distributedConfiguration() { return map; } + + /** {@inheritDoc} */ + @Override public void defragment( + CacheGroupContext grpCtx, + CacheGroupContext newCtx, + PageMemoryEx partPageMem, + IntMap mappingByPart, + CheckpointTimeoutLock cpLock + ) throws IgniteCheckedException { + defragmentation.defragment(grpCtx, newCtx, partPageMem, mappingByPart, cpLock, log); + } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/SchemaManager.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/SchemaManager.java index 6091b4e91996c1..ad09200f2fc42a 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/SchemaManager.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/SchemaManager.java @@ -169,7 +169,7 @@ public SchemaManager(GridKernalContext ctx, ConnectionManager connMgr) { ctx.systemView().registerInnerCollectionView(SQL_IDXS_VIEW, SQL_IDXS_VIEW_DESC, new SqlIndexViewWalker(), dataTables.values(), - GridH2Table::getIndexes, + 
GridH2Table::indexesInformation, SqlIndexView::new); ctx.systemView().registerInnerArrayView(SQL_TBL_COLS_VIEW, SQL_TBL_COLS_VIEW_DESC, diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java index 66badc0a8ffe92..4fb9a41a5051a3 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java @@ -30,6 +30,7 @@ import org.apache.ignite.failure.FailureType; import org.apache.ignite.internal.metric.IoStatisticsHolder; import org.apache.ignite.internal.pagemem.FullPageId; +import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; @@ -41,6 +42,7 @@ import org.apache.ignite.internal.processors.cache.persistence.tree.CorruptedTreeException; import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusMetaIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccDataRow; import org.apache.ignite.internal.processors.failure.FailureProcessor; @@ -208,7 +210,8 @@ public H2Tree( IgniteLogger log, IoStatisticsHolder stats, InlineIndexColumnFactory factory, - int configuredInlineSize + int configuredInlineSize, + PageIoResolver pageIoRslvr ) throws IgniteCheckedException { super( name, @@ -219,8 +222,10 @@ public H2Tree( globalRmvId, metaPageId, reuseList, + PageIdAllocator.FLAG_IDX, failureProcessor, - null + null, 
+ pageIoRslvr ); this.cctx = cctx; @@ -296,7 +301,7 @@ public H2Tree( * * @return Indexed columns. */ - IndexColumn[] cols() { + public IndexColumn[] cols() { return cols; } @@ -308,11 +313,15 @@ IndexColumn[] cols() { * @throws IgniteCheckedException if failed. */ public H2Row createRow(long link) throws IgniteCheckedException { + return createRow(link, true); + } + + public H2Row createRow(long link, boolean follow) throws IgniteCheckedException { if (rowCache != null) { H2CacheRow row = rowCache.get(link); if (row == null) { - row = createRow0(link); + row = createRow0(link, follow); rowCache.put(row); } @@ -320,7 +329,7 @@ public H2Row createRow(long link) throws IgniteCheckedException { return row; } else - return createRow0(link); + return createRow0(link, follow); } /** @@ -332,14 +341,16 @@ public H2Row createRow(long link) throws IgniteCheckedException { * @return Row. * @throws IgniteCheckedException If failed. */ - private H2CacheRow createRow0(long link) throws IgniteCheckedException { + private H2CacheRow createRow0(long link, boolean follow) throws IgniteCheckedException { CacheDataRowAdapter row = new CacheDataRowAdapter(link); - row.initFromLink( - cctx.group(), - CacheDataRowAdapter.RowData.FULL, - true - ); + if (follow) { + row.initFromLink( + cctx.group(), + CacheDataRowAdapter.RowData.FULL, + true + ); + } return table.rowDescriptor().createRow(row); } @@ -352,12 +363,35 @@ private H2CacheRow createRow0(long link) throws IgniteCheckedException { * @return Row. * @throws IgniteCheckedException if failed. */ - public H2Row createMvccRow(long link, long mvccCrdVer, long mvccCntr, int mvccOpCntr) throws IgniteCheckedException { + public H2Row createMvccRow( + long link, + long mvccCrdVer, + long mvccCntr, + int mvccOpCntr + ) throws IgniteCheckedException { + return createMvccRow(link, mvccCrdVer, mvccCntr, mvccOpCntr, null); + } + + /** + * Create row from link. + * + * @param link Link. + * @param mvccOpCntr MVCC operation counter. 
+ * @return Row. + * @throws IgniteCheckedException if failed. + */ + public H2Row createMvccRow( + long link, + long mvccCrdVer, + long mvccCntr, + int mvccOpCntr, + CacheDataRowAdapter.RowData rowData + ) throws IgniteCheckedException { if (rowCache != null) { H2CacheRow row = rowCache.get(link); if (row == null) { - row = createMvccRow0(link, mvccCrdVer, mvccCntr, mvccOpCntr); + row = createMvccRow0(link, mvccCrdVer, mvccCntr, mvccOpCntr, rowData); rowCache.put(row); } @@ -365,7 +399,15 @@ public H2Row createMvccRow(long link, long mvccCrdVer, long mvccCntr, int mvccOp return row; } else - return createMvccRow0(link, mvccCrdVer, mvccCntr, mvccOpCntr); + return createMvccRow0(link, mvccCrdVer, mvccCntr, mvccOpCntr, rowData); + } + + public boolean getPk() { + return pk; + } + + public boolean getAffinityKey() { + return affinityKey; } /** @@ -375,8 +417,8 @@ public H2Row createMvccRow(long link, long mvccCrdVer, long mvccCntr, int mvccOp * @param mvccOpCntr Mvcc operation counter. * @return Row. */ - private H2CacheRow createMvccRow0(long link, long mvccCrdVer, long mvccCntr, int mvccOpCntr) - throws IgniteCheckedException { + private H2CacheRow createMvccRow0(long link, long mvccCrdVer, long mvccCntr, int mvccOpCntr, CacheDataRowAdapter.RowData rowData) + throws IgniteCheckedException { int partId = PageIdUtils.partId(PageIdUtils.pageId(link)); MvccDataRow row = new MvccDataRow( @@ -384,7 +426,7 @@ private H2CacheRow createMvccRow0(long link, long mvccCrdVer, long mvccCntr, int 0, link, partId, - null, + rowData, mvccCrdVer, mvccCntr, mvccOpCntr, @@ -583,6 +625,40 @@ public int compareRows(H2Row r1, H2Row r2) { return mvccCompare(r1, r2); } + /** + * Checks both rows are the same.

    + * Primarly used to verify both search rows are the same and we can apply + * the single row lookup optimization. + * + * @param r1 The first row. + * @param r2 Another row. + * @return {@code true} in case both rows are efficiently the same, {@code false} otherwise. + */ + boolean checkRowsTheSame(H2Row r1, H2Row r2) { + if (r1 == r2) + return true; + + for (int i = 0, len = cols.length; i < len; i++) { + IndexColumn idxCol = cols[i]; + + int idx = idxCol.column.getColumnId(); + + Value v1 = r1.getValue(idx); + Value v2 = r2.getValue(idx); + + if (v1 == null && v2 == null) + continue; + + if (!(v1 != null && v2 != null)) + return false; + + if (compareValues(v1, v2) != 0) + return false; + } + + return true; + } + /** * @param io IO. * @param pageAddr Page address. diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java index 339ee7009a21fc..4701e8a054a507 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java @@ -26,6 +26,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; + import javax.cache.CacheException; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; @@ -41,11 +42,13 @@ import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManager; import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot; import 
org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager; import org.apache.ignite.internal.processors.cache.persistence.RootPage; import org.apache.ignite.internal.processors.cache.persistence.metastorage.pendingtask.DurableBackgroundTask; import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.failure.FailureProcessor; import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor; @@ -240,7 +243,6 @@ private H2TreeIndex( * @param wrappedCols Index columns as is. * @param inlineSize Inline size. * @param segmentsCnt Count of tree segments. - * @param qryCtxRegistry Query context registry. * @throws IgniteCheckedException If failed. */ public static H2TreeIndex createIndex( @@ -254,7 +256,31 @@ public static H2TreeIndex createIndex( List wrappedCols, int inlineSize, int segmentsCnt, - QueryContextRegistry qryCtxRegistry, + IgniteLogger log + ) throws IgniteCheckedException { + return createIndex(cctx, rowCache, tbl, idxName, pk, affinityKey, unwrappedCols, wrappedCols, inlineSize, + segmentsCnt, cctx.dataRegion().pageMemory(), + cctx.offheap(), + PageIoResolver.DEFAULT_PAGE_IO_RESOLVER, + log + ); + } + + /** */ + public static H2TreeIndex createIndex( + GridCacheContext cctx, + @Nullable H2RowCache rowCache, + GridH2Table tbl, + String idxName, + boolean pk, + boolean affinityKey, + List unwrappedCols, + List wrappedCols, + int inlineSize, + int segmentsCnt, + PageMemory pageMemory, + IgniteCacheOffheapManager offheap, + PageIoResolver pageIoRslvr, IgniteLogger log ) throws IgniteCheckedException { assert segmentsCnt > 0 : segmentsCnt; @@ -274,10 +300,10 @@ public static H2TreeIndex createIndex( AtomicInteger maxCalculatedInlineSize = new AtomicInteger(); IoStatisticsHolderIndex 
stats = new IoStatisticsHolderIndex( - SORTED_INDEX, - cctx.name(), - idxName, - cctx.kernalContext().metric() + SORTED_INDEX, + cctx.name(), + idxName, + cctx.kernalContext().metric() ); InlineIndexColumnFactory idxHelperFactory = new InlineIndexColumnFactory(tbl.getCompareMode()); @@ -286,7 +312,7 @@ public static H2TreeIndex createIndex( db.checkpointReadLock(); try { - RootPage page = getMetaPage(cctx, treeName, i); + RootPage page = getMetaPage(offheap, cctx, treeName, i); segments[i] = h2TreeFactory.create( cctx, @@ -295,12 +321,12 @@ public static H2TreeIndex createIndex( idxName, tbl.getName(), tbl.cacheName(), - cctx.offheap().reuseListForIndex(treeName), + offheap.reuseListForIndex(treeName), cctx.groupId(), cctx.group().name(), - cctx.dataRegion().pageMemory(), + pageMemory, cctx.shared().wal(), - cctx.offheap().globalRemoveId(), + offheap.globalRemoveId(), page.pageId().pageId(), page.isAllocated(), unwrappedCols, @@ -314,7 +340,8 @@ public static H2TreeIndex createIndex( log, stats, idxHelperFactory, - inlineSize + inlineSize, + pageIoRslvr ); } finally { @@ -397,7 +424,7 @@ public boolean rebuildRequired() { /** */ private boolean isSingleRowLookup(SearchRow lower, SearchRow upper, H2Tree tree) { return !cctx.mvccEnabled() && indexType.isPrimaryKey() && lower != null && upper != null && - tree.compareRows((H2Row)lower, (H2Row)upper) == 0 && hasAllIndexColumns(lower); + tree.checkRowsTheSame((H2Row)lower, (H2Row)upper) && hasAllIndexColumns(lower); } /** */ @@ -593,7 +620,7 @@ private static boolean isExpired(@NotNull H2Row row) { * @param segment Segment Id. * @return Snapshot for requested segment if there is one. */ - private H2Tree treeForRead(int segment) { + public H2Tree treeForRead(int segment) { return segments[segment]; } @@ -627,9 +654,9 @@ private BPlusTree.TreeRowClosure filter(QueryContext qctx) { * @return RootPage for meta page. * @throws IgniteCheckedException If failed. 
*/ - private static RootPage getMetaPage(GridCacheContext cctx, String treeName, int segIdx) + private static RootPage getMetaPage(IgniteCacheOffheapManager offheap, GridCacheContext cctx, String treeName, int segIdx) throws IgniteCheckedException { - return cctx.offheap().rootPageForIndex(cctx.cacheId(), treeName, segIdx); + return offheap.rootPageForIndex(cctx.cacheId(), treeName, segIdx); } /** @@ -1018,7 +1045,8 @@ public H2Tree create( IgniteLogger log, IoStatisticsHolder stats, InlineIndexColumnFactory factory, - int configuredInlineSize + int configuredInlineSize, + PageIoResolver pageIoRslvr ) throws IgniteCheckedException; } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/inlinecolumn/AbstractInlineIndexColumn.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/inlinecolumn/AbstractInlineIndexColumn.java index 313c5421c5ce5d..7380c532658045 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/inlinecolumn/AbstractInlineIndexColumn.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/inlinecolumn/AbstractInlineIndexColumn.java @@ -100,7 +100,7 @@ protected AbstractInlineIndexColumn(Column col, int type, short size) { * * @return Restored value or {@code null} if value can't be restored. 
*/ - @Nullable Value get(long pageAddr, int off, int maxSize) { + @Nullable public Value get(long pageAddr, int off, int maxSize) { if (size > 0 && size + 1 > maxSize) return null; diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/inlinecolumn/ObjectHashInlineIndexColumn.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/inlinecolumn/ObjectHashInlineIndexColumn.java index 7226a5a50eeed7..b1464365fd4aff 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/inlinecolumn/ObjectHashInlineIndexColumn.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/inlinecolumn/ObjectHashInlineIndexColumn.java @@ -17,8 +17,11 @@ package org.apache.ignite.internal.processors.query.h2.database.inlinecolumn; +import java.sql.PreparedStatement; +import java.sql.SQLException; import org.apache.ignite.internal.pagemem.PageUtils; import org.h2.table.Column; +import org.h2.value.CompareMode; import org.h2.value.Value; import org.h2.value.ValueInt; @@ -60,7 +63,9 @@ public ObjectHashInlineIndexColumn(Column col) { /** {@inheritDoc} */ @Override protected Value get0(long pageAddr, int off) { - return null; + int hashCode = PageUtils.getInt(pageAddr, off + 1); + + return new ValueObjectHashCode(hashCode); } /** @@ -80,4 +85,91 @@ ValueInt inlinedValue(long pageAddr, int off) { return size() + 1; } + + /** + * Value for object with hashcode. + */ + private static class ValueObjectHashCode extends Value { + /** + * The precision in digits. + */ + public static final int PRECISION = 10; + + /** + * The maximum display size of an int. + * Example: -2147483648 + */ + public static final int DISPLAY_SIZE = 11; + + /** + * Hashcode of object. 
+ */ + private final int value; + + public ValueObjectHashCode(int value) { + this.value = value; + } + + /** {@inheritDoc} */ + @Override public String getSQL() { + return getString(); + } + + /** {@inheritDoc} */ + @Override public int getType() { + return Value.JAVA_OBJECT; + } + + /** {@inheritDoc} */ + @Override public int getInt() { + return value; + } + + /** {@inheritDoc} */ + @Override public long getLong() { + return value; + } + + /** {@inheritDoc} */ + @Override protected int compareSecure(Value o, CompareMode mode) { + ValueObjectHashCode v = (ValueObjectHashCode) o; + return Integer.compare(value, v.value); + } + + /** {@inheritDoc} */ + @Override public String getString() { + return String.valueOf(value); + } + + /** {@inheritDoc} */ + @Override public long getPrecision() { + return PRECISION; + } + + /** {@inheritDoc} */ + @Override public int hashCode() { + return value; + } + + /** {@inheritDoc} */ + @Override public Object getObject() { + return value; + } + + /** {@inheritDoc} */ + @Override public void set(PreparedStatement prep, int parameterIndex) + throws SQLException { + prep.setInt(parameterIndex, value); + } + + /** {@inheritDoc} */ + @Override public int getDisplaySize() { + return DISPLAY_SIZE; + } + + /** {@inheritDoc} */ + @Override public boolean equals(Object other) { + return other instanceof ValueObjectHashCode && value == ((ValueObjectHashCode) other).value; + } + } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2ExtrasInnerIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2ExtrasInnerIO.java index f62952f4891316..efb942c29312b3 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2ExtrasInnerIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2ExtrasInnerIO.java @@ -161,4 +161,9 @@ 
private static IOVersions getVersions(short t @Override public final long getLink(long pageAddr, int idx) { return PageUtils.getLong(pageAddr, offset(idx) + payloadSize); } + + /** {@inheritDoc} */ + @Override public int getPayloadSize() { + return payloadSize; + } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2ExtrasLeafIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2ExtrasLeafIO.java index 4bddaeec20dbbc..6e4236c23fd26e 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2ExtrasLeafIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2ExtrasLeafIO.java @@ -64,7 +64,6 @@ private static void register(boolean mvcc) { * @param mvccEnabled Mvcc flag. * @return IOVersions for given payload. */ - @SuppressWarnings("unchecked") public static IOVersions> getVersions(int payload, boolean mvccEnabled) { assert payload >= 0 && payload <= PageIO.MAX_PAYLOAD_SIZE; @@ -140,7 +139,7 @@ private static IOVersions getVersions(short ty } /** {@inheritDoc} */ - @Override public final H2Row getLookupRow(BPlusTree tree, long pageAddr, int idx) + @Override public H2Row getLookupRow(BPlusTree tree, long pageAddr, int idx) throws IgniteCheckedException { long link = getLink(pageAddr, idx); @@ -159,4 +158,9 @@ private static IOVersions getVersions(short ty @Override public final long getLink(long pageAddr, int idx) { return PageUtils.getLong(pageAddr, offset(idx) + payloadSize); } + + /** {@inheritDoc} */ + @Override public int getPayloadSize() { + return payloadSize; + } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2InnerIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2InnerIO.java index 
a782ffb9ad83a1..76cccf31f43641 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2InnerIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2InnerIO.java @@ -71,4 +71,9 @@ public abstract class AbstractH2InnerIO extends BPlusInnerIO implements H @Override public long getLink(long pageAddr, int idx) { return PageUtils.getLong(pageAddr, offset(idx)); } + + /** {@inheritDoc} */ + @Override public int getPayloadSize() { + return 0; + } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2LeafIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2LeafIO.java index ccacb4ea0dc0cb..058b5bc92e1c97 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2LeafIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2LeafIO.java @@ -54,7 +54,7 @@ public abstract class AbstractH2LeafIO extends BPlusLeafIO implements H2R } /** {@inheritDoc} */ - @Override public final H2Row getLookupRow(BPlusTree tree, long pageAddr, int idx) + @Override public H2Row getLookupRow(BPlusTree tree, long pageAddr, int idx) throws IgniteCheckedException { long link = getLink(pageAddr, idx); @@ -73,4 +73,9 @@ public abstract class AbstractH2LeafIO extends BPlusLeafIO implements H2R @Override public long getLink(long pageAddr, int idx) { return PageUtils.getLong(pageAddr, offset(idx)); } + + /** {@inheritDoc} */ + @Override public int getPayloadSize() { + return 0; + } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasLeafIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasLeafIO.java index 085f98bd9817e5..3c79df79fc6f9e 100644 --- 
a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasLeafIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasLeafIO.java @@ -26,7 +26,7 @@ public class H2ExtrasLeafIO extends AbstractH2ExtrasLeafIO { * @param ver Page format version. * @param payloadSize Payload size. */ - H2ExtrasLeafIO(short type, int ver, int payloadSize) { + public H2ExtrasLeafIO(short type, int ver, int payloadSize) { super(type, ver, 8, payloadSize); } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2LeafIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2LeafIO.java index 8954de08e31755..466cd1c2886d97 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2LeafIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2LeafIO.java @@ -31,7 +31,7 @@ public class H2LeafIO extends AbstractH2LeafIO { /** * @param ver Page format version. */ - private H2LeafIO(int ver) { + public H2LeafIO(int ver) { super(T_H2_REF_LEAF, ver, 8); } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccExtrasLeafIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccExtrasLeafIO.java index 60a15989b93793..75854711087f60 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccExtrasLeafIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccExtrasLeafIO.java @@ -22,13 +22,13 @@ /** * Leaf page for H2 row references. */ -class H2MvccExtrasLeafIO extends AbstractH2ExtrasLeafIO { +public class H2MvccExtrasLeafIO extends AbstractH2ExtrasLeafIO { /** * @param type Page type. 
* @param ver Page format version. * @param payloadSize Payload size. */ - H2MvccExtrasLeafIO(short type, int ver, int payloadSize) { + protected H2MvccExtrasLeafIO(short type, int ver, int payloadSize) { super(type, ver, 28, payloadSize); } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccLeafIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccLeafIO.java index c7cd99823852c5..5575806d22971d 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccLeafIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccLeafIO.java @@ -32,7 +32,7 @@ public class H2MvccLeafIO extends AbstractH2LeafIO { /** * @param ver Page format version. */ - private H2MvccLeafIO(int ver) { + protected H2MvccLeafIO(int ver) { super(T_H2_MVCC_REF_LEAF, ver, 28); } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2RowLinkIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2RowLinkIO.java index 1942069ab24080..55a36d6172dd35 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2RowLinkIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2RowLinkIO.java @@ -61,4 +61,9 @@ public default int getMvccOperationCounter(long pageAddr, int idx) { public default boolean storeMvccInfo() { return false; } + + /** + * @return Size of reserved data array for data inlining. 
+ */ + public int getPayloadSize(); } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/defragmentation/IndexingDefragmentation.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/defragmentation/IndexingDefragmentation.java new file mode 100644 index 00000000000000..c41f587dd3c000 --- /dev/null +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/defragmentation/IndexingDefragmentation.java @@ -0,0 +1,430 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.query.h2.defragmentation; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; +import org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter; +import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointTimeoutLock; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.LinkMap; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.TreeIterator; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; +import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusInnerIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusLeafIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusMetaIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver; +import org.apache.ignite.internal.processors.cache.persistence.tree.util.InsertLast; +import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccDataRow; +import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing; +import org.apache.ignite.internal.processors.query.h2.database.H2Tree; +import 
org.apache.ignite.internal.processors.query.h2.database.H2TreeIndex; +import org.apache.ignite.internal.processors.query.h2.database.InlineIndexColumn; +import org.apache.ignite.internal.processors.query.h2.database.inlinecolumn.AbstractInlineIndexColumn; +import org.apache.ignite.internal.processors.query.h2.database.io.AbstractH2ExtrasInnerIO; +import org.apache.ignite.internal.processors.query.h2.database.io.AbstractH2ExtrasLeafIO; +import org.apache.ignite.internal.processors.query.h2.database.io.AbstractH2InnerIO; +import org.apache.ignite.internal.processors.query.h2.database.io.AbstractH2LeafIO; +import org.apache.ignite.internal.processors.query.h2.database.io.H2RowLinkIO; +import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor; +import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table; +import org.apache.ignite.internal.processors.query.h2.opt.H2CacheRow; +import org.apache.ignite.internal.processors.query.h2.opt.H2Row; +import org.apache.ignite.internal.util.collection.IntMap; +import org.h2.index.Index; +import org.h2.value.Value; + +/** + * + */ +public class IndexingDefragmentation { + /** Indexing. */ + private final IgniteH2Indexing indexing; + + /** Constructor. */ + public IndexingDefragmentation(IgniteH2Indexing indexing) { + this.indexing = indexing; + } + + /** + * Defragment index partition. + * + * @param grpCtx Old group context. + * @param newCtx New group context. + * @param partPageMem Partition page memory. + * @param mappingByPartition Mapping page memory. + * @param cpLock Defragmentation checkpoint read lock. + * @param log Log. + * + * @throws IgniteCheckedException If failed. 
+ */ + public void defragment( + CacheGroupContext grpCtx, + CacheGroupContext newCtx, + PageMemoryEx partPageMem, + IntMap mappingByPartition, + CheckpointTimeoutLock cpLock, + IgniteLogger log + ) throws IgniteCheckedException { + int pageSize = grpCtx.cacheObjectContext().kernalContext().grid().configuration().getDataStorageConfiguration().getPageSize(); + + TreeIterator treeIterator = new TreeIterator(pageSize); + + PageMemoryEx oldCachePageMem = (PageMemoryEx)grpCtx.dataRegion().pageMemory(); + + PageMemory newCachePageMemory = partPageMem; + + Collection tables = indexing.schemaManager().dataTables(); + + long cpLockThreshold = 150L; + + cpLock.checkpointReadLock(); + + try { + AtomicLong lastCpLockTs = new AtomicLong(System.currentTimeMillis()); + + for (GridH2Table table : tables) { + GridCacheContext cctx = table.cacheContext(); + + if (cctx.groupId() != grpCtx.groupId()) + continue; // Not our index. + + GridH2RowDescriptor rowDesc = table.rowDescriptor(); + + List indexes = table.getIndexes(); + H2TreeIndex oldH2Idx = (H2TreeIndex)indexes.get(2); + + int segments = oldH2Idx.segmentsCount(); + + H2Tree firstTree = oldH2Idx.treeForRead(0); + + PageIoResolver pageIoRslvr = pageAddr -> { + PageIO io = PageIoResolver.DEFAULT_PAGE_IO_RESOLVER.resolve(pageAddr); + + if (io instanceof BPlusMetaIO) + return io; + + //noinspection unchecked,rawtypes,rawtypes + return wrap((BPlusIO)io); + }; + + H2TreeIndex newIdx = H2TreeIndex.createIndex( + cctx, + null, + table, + oldH2Idx.getName(), + firstTree.getPk(), + firstTree.getAffinityKey(), + Arrays.asList(firstTree.cols()), + Arrays.asList(firstTree.cols()), + oldH2Idx.inlineSize(), + segments, + newCachePageMemory, + newCtx.offheap(), + pageIoRslvr, + log + ); + + for (int i = 0; i < segments; i++) { + H2Tree tree = oldH2Idx.treeForRead(i); + + treeIterator.iterate(tree, oldCachePageMem, (theTree, io, pageAddr, idx) -> { + if (System.currentTimeMillis() - lastCpLockTs.get() >= cpLockThreshold) { + 
cpLock.checkpointReadUnlock(); + + cpLock.checkpointReadLock(); + + lastCpLockTs.set(System.currentTimeMillis()); + } + + assert 1 == io.getVersion() + : "IO version " + io.getVersion() + " is not supported by current defragmentation algorithm." + + " Please implement copying of tree in a new format."; + + BPlusIO h2IO = wrap(io); + + H2Row row = theTree.getRow(h2IO, pageAddr, idx); + + if (row instanceof H2CacheRowWithIndex) { + H2CacheRowWithIndex h2CacheRow = (H2CacheRowWithIndex)row; + + CacheDataRow cacheDataRow = h2CacheRow.getRow(); + + int partition = cacheDataRow.partition(); + + long link = h2CacheRow.link(); + + LinkMap map = mappingByPartition.get(partition); + + long newLink = map.get(link); + + H2CacheRowWithIndex newRow = H2CacheRowWithIndex.create( + rowDesc, + newLink, + h2CacheRow, + ((H2RowLinkIO)io).storeMvccInfo() + ); + + newIdx.putx(newRow); + } + + return true; + }); + } + } + } + finally { + cpLock.checkpointReadUnlock(); + } + } + + /** */ + private static & H2RowLinkIO> H2Row lookupRow( + BPlusTree tree, + long pageAddr, + int idx, + T io + ) throws IgniteCheckedException { + long link = io.getLink(pageAddr, idx); + + List inlineIdxs = ((H2Tree) tree).inlineIndexes(); + + int off = io.offset(idx); + + List values = new ArrayList<>(); + + if (inlineIdxs != null) { + int fieldOff = 0; + + for (int i = 0; i < inlineIdxs.size(); i++) { + AbstractInlineIndexColumn inlineIndexColumn = (AbstractInlineIndexColumn) inlineIdxs.get(i); + + Value value = inlineIndexColumn.get(pageAddr, off + fieldOff, io.getPayloadSize() - fieldOff); + + fieldOff += inlineIndexColumn.inlineSizeOf(value); + + values.add(value); + } + } + + if (io.storeMvccInfo()) { + long mvccCrdVer = io.getMvccCoordinatorVersion(pageAddr, idx); + long mvccCntr = io.getMvccCounter(pageAddr, idx); + int mvccOpCntr = io.getMvccOperationCounter(pageAddr, idx); + + H2CacheRow row = (H2CacheRow) ((H2Tree) tree).createMvccRow(link, mvccCrdVer, mvccCntr, mvccOpCntr, 
CacheDataRowAdapter.RowData.LINK_ONLY); + + return new H2CacheRowWithIndex(row.getDesc(), row.getRow(), values); + } + + H2CacheRow row = (H2CacheRow) ((H2Tree) tree).createRow(link, false); + + return new H2CacheRowWithIndex(row.getDesc(), row.getRow(), values); + } + + /** */ + @SuppressWarnings({"unchecked", "rawtypes"}) + private static BPlusIO wrap(BPlusIO io) { + assert io instanceof H2RowLinkIO; + + if (io instanceof BPlusInnerIO) { + assert io instanceof AbstractH2ExtrasInnerIO + || io instanceof AbstractH2InnerIO; + + return new BPlusInnerIoDelegate((BPlusInnerIO)io); + } + else { + assert io instanceof AbstractH2ExtrasLeafIO + || io instanceof AbstractH2LeafIO; + + return new BPlusLeafIoDelegate((BPlusLeafIO)io); + } + } + + /** */ + private static class BPlusInnerIoDelegate & H2RowLinkIO> + extends BPlusInnerIO implements H2RowLinkIO { + /** */ + private final IO io; + + /** */ + public BPlusInnerIoDelegate(IO io) { + super(io.getType(), io.getVersion(), io.canGetRow(), io.getItemSize()); + this.io = io; + } + + /** {@inheritDoc} */ + @Override public void storeByOffset(long pageAddr, int off, H2Row row) throws IgniteCheckedException { + io.storeByOffset(pageAddr, off, row); + } + + /** {@inheritDoc} */ + @Override public void store(long dstPageAddr, int dstIdx, BPlusIO srcIo, long srcPageAddr, int srcIdx) + throws IgniteCheckedException + { + io.store(dstPageAddr, dstIdx, srcIo, srcPageAddr, srcIdx); + } + + /** {@inheritDoc} */ + @Override public H2Row getLookupRow(BPlusTree tree, long pageAddr, int idx) throws IgniteCheckedException { + return lookupRow(tree, pageAddr, idx, this); + } + + /** {@inheritDoc} */ + @Override public long getLink(long pageAddr, int idx) { + return io.getLink(pageAddr, idx); + } + + /** {@inheritDoc} */ + @Override public long getMvccCoordinatorVersion(long pageAddr, int idx) { + return io.getMvccCoordinatorVersion(pageAddr, idx); + } + + /** {@inheritDoc} */ + @Override public long getMvccCounter(long pageAddr, int idx) { + 
return io.getMvccCounter(pageAddr, idx); + } + + /** {@inheritDoc} */ + @Override public int getMvccOperationCounter(long pageAddr, int idx) { + return io.getMvccOperationCounter(pageAddr, idx); + } + + /** {@inheritDoc} */ + @Override public boolean storeMvccInfo() { + return io.storeMvccInfo(); + } + + /** {@inheritDoc} */ + @Override public int getPayloadSize() { + return io.getPayloadSize(); + } + } + + /** */ + private static class BPlusLeafIoDelegate & H2RowLinkIO> + extends BPlusLeafIO implements H2RowLinkIO { + /** */ + private final IO io; + + /** */ + public BPlusLeafIoDelegate(IO io) { + super(io.getType(), io.getVersion(), io.getItemSize()); + this.io = io; + } + + /** {@inheritDoc} */ + @Override public void storeByOffset(long pageAddr, int off, H2Row row) throws IgniteCheckedException { + io.storeByOffset(pageAddr, off, row); + } + + /** {@inheritDoc} */ + @Override public void store(long dstPageAddr, int dstIdx, BPlusIO srcIo, long srcPageAddr, int srcIdx) + throws IgniteCheckedException + { + io.store(dstPageAddr, dstIdx, srcIo, srcPageAddr, srcIdx); + } + + /** {@inheritDoc} */ + @Override public H2Row getLookupRow(BPlusTree tree, long pageAddr, int idx) throws IgniteCheckedException { + return lookupRow(tree, pageAddr, idx, this); + } + + /** {@inheritDoc} */ + @Override public long getLink(long pageAddr, int idx) { + return io.getLink(pageAddr, idx); + } + + /** {@inheritDoc} */ + @Override public long getMvccCoordinatorVersion(long pageAddr, int idx) { + return io.getMvccCoordinatorVersion(pageAddr, idx); + } + + /** {@inheritDoc} */ + @Override public long getMvccCounter(long pageAddr, int idx) { + return io.getMvccCounter(pageAddr, idx); + } + + /** {@inheritDoc} */ + @Override public int getMvccOperationCounter(long pageAddr, int idx) { + return io.getMvccOperationCounter(pageAddr, idx); + } + + /** {@inheritDoc} */ + @Override public boolean storeMvccInfo() { + return io.storeMvccInfo(); + } + + /** {@inheritDoc} */ + @Override public int 
getPayloadSize() { + return io.getPayloadSize(); + } + } + + /** + * H2CacheRow with stored index values + */ + private static class H2CacheRowWithIndex extends H2CacheRow implements InsertLast { + /** List of index values. */ + private final List values; + + /** Constructor. */ + public H2CacheRowWithIndex(GridH2RowDescriptor desc, CacheDataRow row, List values) { + super(desc, row); + this.values = values; + } + + public static H2CacheRowWithIndex create( + GridH2RowDescriptor desc, + long newLink, + H2CacheRowWithIndex oldValue, + boolean storeMvcc + ) { + CacheDataRow row = oldValue.getRow(); + + CacheDataRow newDataRow; + + if (storeMvcc) { + newDataRow = new MvccDataRow(newLink); + newDataRow.mvccVersion(row); + } else + newDataRow = new CacheDataRowAdapter(newLink); + + return new H2CacheRowWithIndex(desc, newDataRow, oldValue.values); + } + + /** {@inheritDoc} */ + @Override public Value getValue(int col) { + if (values.isEmpty()) + return null; + + return values.get(col); + } + } +} diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlAstUtils.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlAstUtils.java index e679ff5d558f00..ee9daa4ca8f51a 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlAstUtils.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlAstUtils.java @@ -18,9 +18,11 @@ package org.apache.ignite.internal.processors.query.h2.dml; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.stream.Collectors; import org.apache.ignite.IgniteException; import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; import org.apache.ignite.internal.processors.query.IgniteSQLException; @@ -54,6 +56,7 @@ import org.apache.ignite.lang.IgnitePredicate; import 
org.h2.command.Parser; import org.h2.expression.Expression; +import org.h2.index.Index; import org.h2.table.Column; import org.h2.table.Table; import org.h2.util.IntArray; @@ -349,6 +352,7 @@ public static GridSqlSelect selectForUpdate(GridSqlUpdate update) { for (GridSqlColumn c : update.cols()) { String newColName = Parser.quoteIdentifier("_upd_" + c.columnName()); + // We have to use aliases to cover cases when the user // wants to update _val field directly (if it's a literal) GridSqlAlias alias = new GridSqlAlias(newColName, elementOrDefault(update.set().get(c.columnName()), c), true); @@ -358,12 +362,74 @@ public static GridSqlSelect selectForUpdate(GridSqlUpdate update) { GridSqlElement where = update.where(); + // In non-MVCC mode we cannot use lazy mode when the UPDATE query contains an index with updated columns + // and that index may be chosen for the scan by the WHERE condition, + // because in this case some rows may be updated several times. + // e.g. in the cases below we cannot use lazy mode: + // + // 1. CREATE INDEX idx on test(val) + // UPDATE test SET val = val + 1 WHERE val >= ? + // + // 2. CREATE INDEX idx on test(val0, val1) + // UPDATE test SET val1 = val1 + 1 WHERE val0 >= ? + mapQry.canBeLazy(!isIndexWithUpdateColumnsMayBeUsed( + gridTbl, + update.cols().stream() + .map(GridSqlColumn::column) + .collect(Collectors.toSet()), + extractColumns(gridTbl, where))); + mapQry.where(where); mapQry.limit(update.limit()); return mapQry; } + /** + * @return Set of columns of the specified table that are used in the expression. 
+ */ + private static Set extractColumns(GridH2Table tbl, GridSqlAst expr) { + if (expr == null) + return Collections.emptySet(); + + if (expr instanceof GridSqlColumn && ((GridSqlColumn)expr).column().getTable().equals(tbl)) + return Collections.singleton(((GridSqlColumn)expr).column()); + + HashSet set = new HashSet<>(); + + for (int i = 0; i < expr.size(); ++i) + set.addAll(extractColumns(tbl, expr.child(i))); + + return set; + } + + /** + * @return {@code true} if an index that contains updated columns may potentially be used for a scan. + */ + private static boolean isIndexWithUpdateColumnsMayBeUsed( + GridH2Table tbl, + Set updateCols, + Set whereCols) { + if (F.isEmpty(whereCols)) + return false; + + if (updateCols.size() == 1 && whereCols.size() == 1 + && tbl.rowDescriptor().isValueColumn(F.first(updateCols).getColumnId()) + && tbl.rowDescriptor().isValueColumn(F.first(whereCols).getColumnId())) + return true; + + for (Index idx : tbl.getIndexes()) { + if (idx.equals(tbl.getPrimaryKey()) || whereCols.contains(idx.getColumns()[0])) { + for (Column idxCol : idx.getColumns()) { + if (updateCols.contains(idxCol)) + return true; + } + } + } + + return false; + } + /** * Do what we can to compute default value for this column (mimics H2 behavior). * @see Table#getDefaultValue diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java index 66c4bb2c319aa8..aeaea29ce723a5 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java @@ -93,6 +93,9 @@ public final class UpdatePlan { /** Additional info for distributed update. */ private final DmlDistributedPlanInfo distributed; + /** Whether the SELECT query can be executed in lazy mode. 
*/ + private final boolean canSelectBeLazy; + /** * Constructor. * @@ -125,7 +128,8 @@ public UpdatePlan( List> rows, int rowsNum, @Nullable FastUpdate fastUpdate, - @Nullable DmlDistributedPlanInfo distributed + @Nullable DmlDistributedPlanInfo distributed, + boolean canSelectBeLazy ) { this.colNames = colNames; this.colTypes = colTypes; @@ -145,6 +149,7 @@ public UpdatePlan( this.isLocSubqry = isLocSubqry; this.fastUpdate = fastUpdate; this.distributed = distributed; + this.canSelectBeLazy = canSelectBeLazy; } /** @@ -177,7 +182,8 @@ public UpdatePlan( null, 0, fastUpdate, - distributed + distributed, + true ); } @@ -599,6 +605,13 @@ public Object processRowForTx(List row) throws IgniteCheckedException { } } + /** + * @return {@code true} if the SELECT query may be executed in lazy mode. + */ + public boolean canSelectBeLazy() { + return canSelectBeLazy; + } + /** * Abstract iterator. */ diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java index 8cc41b216ba940..fd9496ca8a4e7a 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java @@ -311,7 +311,8 @@ else if (stmt instanceof GridSqlMerge) { rows, rowsNum, null, - distributed + distributed, + false ); } @@ -472,7 +473,8 @@ else if (stmt instanceof GridSqlDelete) { null, 0, null, - distributed + distributed, + sel.canBeLazy() ); } else { @@ -590,7 +592,8 @@ public static UpdatePlan planForBulkLoad(SqlBulkLoadCommand cmd, GridH2Table tbl null, 0, null, - null + null, + true ); } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/H2CacheRow.java 
b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/H2CacheRow.java index 527a33ec814099..86df146cdc29c9 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/H2CacheRow.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/H2CacheRow.java @@ -182,6 +182,13 @@ private Value wrap(Object val, int type) { } } + /** + * @return Cache data row. + */ + public CacheDataRow getRow() { + return row; + } + /** * @return {@code True} if this is removed row (doesn't have value). */ @@ -345,4 +352,11 @@ private boolean removedRow() { return sb.toString(); } + + /** + * @return H2 row descriptor. + */ + public GridH2RowDescriptor getDesc() { + return desc; + } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlSelect.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlSelect.java index 6ecf002998edd7..93c1b8eae6d963 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlSelect.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlSelect.java @@ -61,6 +61,13 @@ public class GridSqlSelect extends GridSqlQuery { /** */ private boolean isForUpdate; + /** Used only for SELECT based on UPDATE. + * It cannot be lazy when updated columns are used in the conditions. + * In this case an index based on these columns may be chosen for the scan and some rows may be updated + * more than once. + */ + private boolean canBeLazy; + /** * @param colIdx Column index as for {@link #column(int)}. + * @return Child index for {@link #child(int)}. + */ @@ -437,4 +444,23 @@ public GridSqlSelect copySelectForUpdate() { return copy; } + + /** + * @param canBeLazy see {@link #canBeLazy()}. 
+ */ + public void canBeLazy(boolean canBeLazy) { + this.canBeLazy = canBeLazy; + } + + /** + * Used only for SELECT based on UPDATE. + * It cannot be lazy when updated columns are used in the conditions. + * In this case an index based on these columns may be chosen for the scan and some rows may be updated + * more than once. + * + * @return {@code true} if the lazy flag is applicable. + */ + public boolean canBeLazy() { + return canBeLazy; + } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/visor/verify/ValidateIndexesClosure.java b/modules/indexing/src/main/java/org/apache/ignite/internal/visor/verify/ValidateIndexesClosure.java index 139468715c8352..d0107027f729fd 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/visor/verify/ValidateIndexesClosure.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/visor/verify/ValidateIndexesClosure.java @@ -48,6 +48,9 @@ import org.apache.ignite.internal.processors.cache.KeyCacheObject; import org.apache.ignite.internal.processors.cache.PartitionUpdateCounter; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition; +import org.apache.ignite.internal.processors.cache.mvcc.MvccQueryTracker; +import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot; +import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils; import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager; import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; @@ -58,11 +61,14 @@ import org.apache.ignite.internal.processors.query.GridQueryProcessor; import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor; import org.apache.ignite.internal.processors.query.QueryTypeDescriptorImpl; +import org.apache.ignite.internal.processors.query.h2.ConnectionManager; +import 
org.apache.ignite.internal.processors.query.h2.H2Utils; import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing; import org.apache.ignite.internal.processors.query.h2.database.H2TreeIndexBase; import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table; import org.apache.ignite.internal.processors.query.h2.opt.H2CacheRow; +import org.apache.ignite.internal.processors.query.h2.opt.QueryContext; import org.apache.ignite.internal.util.lang.GridIterator; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.T2; @@ -76,6 +82,7 @@ import org.h2.engine.Session; import org.h2.index.Cursor; import org.h2.index.Index; +import org.h2.jdbc.JdbcConnection; import org.h2.message.DbException; import org.jetbrains.annotations.Nullable; @@ -519,141 +526,215 @@ private Map processPartition( PartitionUpdateCounter updateCntrBefore = updCntr == null ? null : updCntr.copy(); - GridIterator it = grpCtx.offheap().partitionIterator(part.id()); - partRes = new ValidateIndexesPartitionResult(); - boolean enoughIssues = false; + boolean hasMvcc = grpCtx.caches().stream().anyMatch(GridCacheContext::mvccEnabled); - GridQueryProcessor qryProcessor = ignite.context().query(); + if (hasMvcc) { + for (GridCacheContext context : grpCtx.caches()) { + try (Session session = mvccSession(context)) { + MvccSnapshot mvccSnapshot = null; - final boolean skipConditions = checkFirst > 0 || checkThrough > 0; - final boolean bothSkipConditions = checkFirst > 0 && checkThrough > 0; + boolean mvccEnabled = context.mvccEnabled(); - long current = 0; - long processedNumber = 0; + if (mvccEnabled) + mvccSnapshot = ((QueryContext) session.getVariable(H2Utils.QCTX_VARIABLE_NAME).getObject()).mvccSnapshot(); - while (it.hasNextX()) { - if (enoughIssues) - break; + GridIterator iterator = grpCtx.offheap().cachePartitionIterator( + context.cacheId(), + part.id(), + 
mvccSnapshot, + null + ); - CacheDataRow row = it.nextX(); + processPartIterator(grpCtx, partRes, session, iterator); + } + } + } + else + processPartIterator(grpCtx, partRes, null, grpCtx.offheap().partitionIterator(part.id())); - if (skipConditions) { - if (bothSkipConditions) { - if (processedNumber > checkFirst) + PartitionUpdateCounter updateCntrAfter = part.dataStore().partUpdateCounter(); + + if (updateCntrAfter != null && !updateCntrAfter.equals(updateCntrBefore)) { + throw new GridNotIdleException(GRID_NOT_IDLE_MSG + "[grpName=" + grpCtx.cacheOrGroupName() + + ", grpId=" + grpCtx.groupId() + ", partId=" + part.id() + "] changed during index validation " + + "[before=" + updateCntrBefore + ", after=" + updateCntrAfter + "]"); + } + } + catch (IgniteCheckedException e) { + error(log, "Failed to process partition [grpId=" + grpCtx.groupId() + + ", partId=" + part.id() + "]", e); + + return emptyMap(); + } + finally { + part.release(); + + printProgressOfIndexValidationIfNeeded(); + } + + PartitionKey partKey = new PartitionKey(grpCtx.groupId(), part.id(), grpCtx.cacheOrGroupName()); + + processedPartitions.incrementAndGet(); + + return Collections.singletonMap(partKey, partRes); + } + + /** + * Process partition iterator. + * + * @param grpCtx Cache group context. + * @param partRes Result object. + * @param session H2 session. + * @param it Partition iterator. 
+ * @throws IgniteCheckedException + */ + private void processPartIterator( + CacheGroupContext grpCtx, + ValidateIndexesPartitionResult partRes, + Session session, + GridIterator it + ) throws IgniteCheckedException { + boolean enoughIssues = false; + + GridQueryProcessor qryProcessor = ignite.context().query(); + + final boolean skipConditions = checkFirst > 0 || checkThrough > 0; + final boolean bothSkipConditions = checkFirst > 0 && checkThrough > 0; + + long current = 0; + long processedNumber = 0; + + while (it.hasNextX()) { + if (enoughIssues) + break; + + CacheDataRow row = it.nextX(); + + if (skipConditions) { + if (bothSkipConditions) { + if (processedNumber > checkFirst) + break; + else if (current++ % checkThrough > 0) + continue; + else + processedNumber++; + } else { + if (checkFirst > 0) { + if (current++ > checkFirst) break; - else if (current++ % checkThrough > 0) + } else { + if (current++ % checkThrough > 0) continue; - else - processedNumber++; - } - else { - if (checkFirst > 0) { - if (current++ > checkFirst) - break; - } - else { - if (current++ % checkThrough > 0) - continue; - } } } + } - int cacheId = row.cacheId() == 0 ? grpCtx.groupId() : row.cacheId(); + int cacheId = row.cacheId() == 0 ? grpCtx.groupId() : row.cacheId(); - GridCacheContext cacheCtx = row.cacheId() == 0 ? - grpCtx.singleCacheContext() : grpCtx.shared().cacheContext(row.cacheId()); + GridCacheContext cacheCtx = row.cacheId() == 0 ? 
+ grpCtx.singleCacheContext() : grpCtx.shared().cacheContext(row.cacheId()); - if (cacheCtx == null) - throw new IgniteException("Unknown cacheId of CacheDataRow: " + cacheId); + if (cacheCtx == null) + throw new IgniteException("Unknown cacheId of CacheDataRow: " + cacheId); - if (row.link() == 0L) { - String errMsg = "Invalid partition row, possibly deleted"; + if (row.link() == 0L) { + String errMsg = "Invalid partition row, possibly deleted"; - log.error(errMsg); + log.error(errMsg); - IndexValidationIssue is = new IndexValidationIssue(null, cacheCtx.name(), null, - new IgniteCheckedException(errMsg)); + IndexValidationIssue is = new IndexValidationIssue(null, cacheCtx.name(), null, + new IgniteCheckedException(errMsg)); - enoughIssues |= partRes.reportIssue(is); + enoughIssues |= partRes.reportIssue(is); - continue; - } + continue; + } - QueryTypeDescriptorImpl res = qryProcessor.typeByValue( - cacheCtx.name(), - cacheCtx.cacheObjectContext(), - row.key(), - row.value(), - true - ); + QueryTypeDescriptorImpl res = qryProcessor.typeByValue( + cacheCtx.name(), + cacheCtx.cacheObjectContext(), + row.key(), + row.value(), + true + ); - if (res == null) - continue; // Tolerate - (k, v) is just not indexed. + if (res == null) + continue; // Tolerate - (k, v) is just not indexed. - IgniteH2Indexing indexing = (IgniteH2Indexing)qryProcessor.getIndexing(); + IgniteH2Indexing indexing = (IgniteH2Indexing) qryProcessor.getIndexing(); - GridH2Table gridH2Tbl = indexing.schemaManager().dataTable(cacheCtx.name(), res.tableName()); + GridH2Table gridH2Tbl = indexing.schemaManager().dataTable(cacheCtx.name(), res.tableName()); - if (gridH2Tbl == null) - continue; // Tolerate - (k, v) is just not indexed. + if (gridH2Tbl == null) + continue; // Tolerate - (k, v) is just not indexed. 
- GridH2RowDescriptor gridH2RowDesc = gridH2Tbl.rowDescriptor(); + GridH2RowDescriptor gridH2RowDesc = gridH2Tbl.rowDescriptor(); - H2CacheRow h2Row = gridH2RowDesc.createRow(row); + H2CacheRow h2Row = gridH2RowDesc.createRow(row); - ArrayList indexes = gridH2Tbl.getIndexes(); + ArrayList indexes = gridH2Tbl.getIndexes(); - for (Index idx : indexes) { - if (!(idx instanceof H2TreeIndexBase)) - continue; + for (Index idx : indexes) { + if (!(idx instanceof H2TreeIndexBase)) + continue; - try { - Cursor cursor = idx.find((Session)null, h2Row, h2Row); + try { + Cursor cursor = idx.find(session, h2Row, h2Row); - if (cursor == null || !cursor.next()) - throw new IgniteCheckedException("Key is present in CacheDataTree, but can't be found in SQL index."); - } - catch (Throwable t) { - Object o = CacheObjectUtils.unwrapBinaryIfNeeded( + if (cursor == null || !cursor.next()) + throw new IgniteCheckedException("Key is present in CacheDataTree, but can't be found in SQL index."); + } catch (Throwable t) { + Object o = CacheObjectUtils.unwrapBinaryIfNeeded( grpCtx.cacheObjectContext(), row.key(), true, true); - IndexValidationIssue is = new IndexValidationIssue( + IndexValidationIssue is = new IndexValidationIssue( o.toString(), cacheCtx.name(), idx.getName(), t); - log.error("Failed to lookup key: " + is.toString(), t); + log.error("Failed to lookup key: " + is.toString(), t); - enoughIssues |= partRes.reportIssue(is); - } + enoughIssues |= partRes.reportIssue(is); } } + } + } - PartitionUpdateCounter updateCntrAfter = part.dataStore().partUpdateCounter(); + /** + * Get session with MVCC snapshot and QueryContext. + * + * @param cctx Cache context. + * @return Session with QueryContext and MVCC snapshot. + * @throws IgniteCheckedException If failed. 
+ */ + private Session mvccSession(GridCacheContext cctx) throws IgniteCheckedException { + Session session = null; - if (updateCntrAfter != null && !updateCntrAfter.equals(updateCntrBefore)) { - throw new GridNotIdleException(GRID_NOT_IDLE_MSG + "[grpName=" + grpCtx.cacheOrGroupName() + - ", grpId=" + grpCtx.groupId() + ", partId=" + part.id() + "] changed during index validation " + - "[before=" + updateCntrBefore + ", after=" + updateCntrAfter + "]"); - } - } - catch (IgniteCheckedException e) { - error(log, "Failed to process partition [grpId=" + grpCtx.groupId() + - ", partId=" + part.id() + "]", e); + boolean mvccEnabled = cctx.mvccEnabled(); - return emptyMap(); - } - finally { - part.release(); + if (mvccEnabled) { + ConnectionManager connMgr = ((IgniteH2Indexing) ignite.context().query().getIndexing()).connections(); - printProgressOfIndexValidationIfNeeded(); - } + JdbcConnection connection = (JdbcConnection) connMgr.connection().connection(); - PartitionKey partKey = new PartitionKey(grpCtx.groupId(), part.id(), grpCtx.cacheOrGroupName()); + session = (Session) connection.getSession(); - processedPartitions.incrementAndGet(); + MvccQueryTracker tracker = MvccUtils.mvccTracker(cctx, true); - return Collections.singletonMap(partKey, partRes); + MvccSnapshot mvccSnapshot = tracker.snapshot(); + + final QueryContext qctx = new QueryContext( + 0, + cacheName -> null, + null, + mvccSnapshot, + null, + true + ); + + session.setVariable(H2Utils.QCTX_VARIABLE_NAME, new H2Utils.ValueRuntimeSimpleObject<>(qctx)); + } + return session; } /** @@ -713,8 +794,8 @@ private Map processIndex( Cursor cursor = null; - try { - cursor = idx.find((Session)null, null, null); + try (Session session = mvccSession(cacheCtxWithIdx.get1())) { + cursor = idx.find(session, null, null); if (cursor == null) throw new IgniteCheckedException("Can't iterate through index: " + idx); diff --git a/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlIndexView.java 
b/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlIndexView.java index 05eda7db3d119e..664be994f2cb19 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlIndexView.java +++ b/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlIndexView.java @@ -18,14 +18,8 @@ package org.apache.ignite.spi.systemview.view; import org.apache.ignite.internal.managers.systemview.walker.Order; -import org.apache.ignite.internal.processors.query.h2.H2Utils; -import org.apache.ignite.internal.processors.query.h2.database.H2IndexType; -import org.apache.ignite.internal.processors.query.h2.database.H2PkHashIndex; -import org.apache.ignite.internal.processors.query.h2.database.H2TreeIndexBase; -import org.apache.ignite.internal.processors.query.h2.opt.GridH2ProxyIndex; +import org.apache.ignite.internal.processors.query.h2.database.IndexInformation; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table; -import org.apache.ignite.internal.processors.query.h2.opt.H2TableScanIndex; -import org.h2.index.Index; /** * Sql index representation for a {@link SystemView}. @@ -35,103 +29,130 @@ public class SqlIndexView { private final GridH2Table tbl; /** Index. */ - private final Index idx; - - /** Index type. */ - private final H2IndexType type; + private final IndexInformation idx; /** */ - public SqlIndexView(GridH2Table tbl, Index idx) { + public SqlIndexView(GridH2Table tbl, IndexInformation idx) { this.tbl = tbl; this.idx = idx; - this.type = type(idx); } - /** @return Cache id. */ + /** + * Returns cache group ID. + * + * @return Cache group ID. + */ + @Order() + public int cacheGroupId() { + return tbl.cacheInfo().groupId(); + } + + /** + * Returns Cache group name. + * + * @return Cache group name. + */ + @Order(1) + public String cacheGroupName() { + return tbl.cacheInfo().groupName(); + } + + /** + * Returns cache ID. + * @return Cache ID. 
 + */ + @Order(2) public int cacheId() { return tbl.cacheId(); } - /** @return Cache name. */ - @Order(5) + /** + * Returns cache name. + * + * @return Cache name. + */ + @Order(3) public String cacheName() { return tbl.cacheName(); } - /** @return Schema name. */ - @Order(3) + /** + * Returns schema name. + * + * @return Schema name. + */ + @Order(4) public String schemaName() { return tbl.getSchema().getName(); } - /** @return Table name. */ - @Order(4) + /** + * Returns table name. + * + * @return Table name. + */ + @Order(5) public String tableName() { return tbl.identifier().table(); } - /** @return Index name. */ - @Order() + /** + * Returns index name. + * + * @return Index name. + */ + @Order(6) public String indexName() { - return idx.getName(); + return idx.name(); } - /** @return Index type. */ - @Order(1) - public H2IndexType indexType() { - return type; + /** + * Returns index type. + * + * @return Index type. + */ + @Order(7) + public String indexType() { + return idx.type(); } - /** @return Indexed columns. */ - @Order(2) + /** + * Returns all columns on which index is built. + * + * @return Comma separated indexed columns. + */ + @Order(8) public String columns() { - switch (type) { - case HASH: - case BTREE: - return H2Utils.indexColumnsSql(H2Utils.unwrapKeyColumns(tbl, idx.getIndexColumns())); - - case SPATIAL: - return H2Utils.indexColumnsSql(idx.getIndexColumns()); - - case SCAN: - return null; - - default: - return "???"; - } + return idx.keySql(); } - /** @return {@code True} if primary key index, {@code false} otherwise. */ + /** + * Returns boolean value which indicates whether this index is for primary key or not. + * + * @return {@code True} if primary key index, {@code false} otherwise. + */ + @Order(9) public boolean isPk() { - return idx.getIndexType().isPrimaryKey(); + return idx.pk(); } - /** @return {@code True} if unique index, {@code false} otherwise. 
*/ + /** + * Returns boolean value which indicates whether this index is unique or not. + * + * @return {@code True} if unique index, {@code false} otherwise. + */ + @Order(10) public boolean isUnique() { - return idx.getIndexType().isUnique(); - } - - /** @return Inline size. */ - public int inlineSize() { - return idx instanceof H2TreeIndexBase ? ((H2TreeIndexBase)idx).inlineSize() : 0; + return idx.unique(); } /** - * @param idx Inde. - * @return Index type. + * Returns inline size in bytes. + * + * @return Inline size. */ - private static H2IndexType type(Index idx) { - if (idx instanceof H2TreeIndexBase) { - return H2IndexType.BTREE; - } else if (idx instanceof H2PkHashIndex) - return H2IndexType.HASH; - else if (idx instanceof H2TableScanIndex) - return H2IndexType.SCAN; - else if (idx instanceof GridH2ProxyIndex) - return type(((GridH2ProxyIndex)idx).underlyingIndex()); - else if (idx.getIndexType().isSpatial()) - return H2IndexType.SPATIAL; - - return null; + @Order(11) + public Integer inlineSize() { + return idx.inlineSize(); } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlSchemaView.java b/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlSchemaView.java index f09163266a3189..7a1ecbd415f7e6 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlSchemaView.java +++ b/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlSchemaView.java @@ -36,7 +36,7 @@ public SqlSchemaView(H2Schema schema) { /** @return Schema name. 
*/ @Order - public String name() { + public String schemaName() { return schema.schemaName(); } diff --git a/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlTableView.java b/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlTableView.java index cc2a5111fdefbb..c2b8a896a69223 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlTableView.java +++ b/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlTableView.java @@ -47,56 +47,112 @@ public SqlTableView(GridH2Table tbl) { } } - /** @return Cache id. */ - @Order(3) + /** + * Returns cache group ID. + * + * @return Cache group ID. + */ + @Order() + public int cacheGroupId() { + return tbl.cacheInfo().groupId(); + } + + /** + * Returns Cache group name. + * + * @return Cache group name. + */ + @Order(1) + public String cacheGroupName() { + return tbl.cacheInfo().groupName(); + } + + /** + * Returns cache ID. + * + * @return Cache ID. + */ + @Order(2) public int cacheId() { return tbl.cacheId(); } - /** @return Cache name. */ - @Order(2) + /** + * Returns cache name. + * + * @return Cache name. + */ + @Order(3) public String cacheName() { return tbl.cacheName(); } - /** @return Schema name. */ - @Order(1) + /** + * Returns schema name. + * + * @return Schema name. + */ + @Order(4) public String schemaName() { return tbl.getSchema().getName(); } - /** @return Table name. */ - @Order + /** + * Returns table name. + * + * @return Table name. + */ + @Order(5) public String tableName() { return tbl.identifier().table(); } - /** @return Affinity key column. */ - @Order(4) + /** + * Returns name of affinity key column. + * + * @return Affinity key column name. + */ + @Order(6) public String affinityKeyColumn() { return affColName; } - /** @return Key alias. */ - @Order(5) + /** + * Returns alias for key column. + * + * @return Key alias. 
+ */ + @Order(7) public String keyAlias() { return tbl.rowDescriptor().type().keyFieldAlias(); } - /** @return Value alias. */ - @Order(6) + /** + * Returns alias for value column. + * + * @return Value alias. + */ + @Order(8) public String valueAlias() { return tbl.rowDescriptor().type().valueFieldAlias(); } - /** @return Key type name. */ - @Order(7) + /** + * Returns name of key type. + * + * @return Key type name. + */ + @Order(9) public String keyTypeName() { return tbl.rowDescriptor().type().keyTypeName(); } - /** @return Value type name. */ - @Order(8) + /** + * Returns name of value type. + * + * @return Value type name. + */ + @Order(10) public String valueTypeName() { return tbl.rowDescriptor().type().valueTypeName(); } diff --git a/modules/indexing/src/test/java/org/apache/ignite/client/ClientTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/client/ClientTestSuite.java index 55a24617984bb8..48d346f38e3f4b 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/client/ClientTestSuite.java +++ b/modules/indexing/src/test/java/org/apache/ignite/client/ClientTestSuite.java @@ -27,6 +27,7 @@ import org.apache.ignite.internal.client.thin.ThinClientPartitionAwarenessResourceReleaseTest; import org.apache.ignite.internal.client.thin.ThinClientPartitionAwarenessStableTopologyTest; import org.apache.ignite.internal.client.thin.ThinClientPartitionAwarenessUnstableTopologyTest; +import org.apache.ignite.internal.client.thin.TimeoutTest; import org.junit.runner.RunWith; import org.junit.runners.Suite; @@ -60,7 +61,8 @@ ThinClientPartitionAwarenessResourceReleaseTest.class, ThinClientPartitionAwarenessDiscoveryTest.class, ReliableChannelTest.class, - CacheAsyncTest.class + CacheAsyncTest.class, + TimeoutTest.class }) public class ClientTestSuite { // No-op. 
diff --git a/modules/indexing/src/test/java/org/apache/ignite/client/FunctionalQueryTest.java b/modules/indexing/src/test/java/org/apache/ignite/client/FunctionalQueryTest.java index f1b4e2ead2176e..8614086f695f87 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/client/FunctionalQueryTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/client/FunctionalQueryTest.java @@ -18,6 +18,7 @@ package org.apache.ignite.client; import java.lang.invoke.SerializedLambda; +import java.lang.reflect.Field; import java.util.Arrays; import java.util.Collection; import java.util.List; @@ -270,6 +271,32 @@ public void testMixedQueryAndCacheApiOperations() throws Exception { } } + /** Tests {@link SqlFieldsQuery} parameter validation. */ + @Test + public void testSqlParameterValidation() throws Exception { + try (Ignite ignored = Ignition.start(Config.getServerConfiguration()); + IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(Config.SERVER)) + ) { + // Set fields with reflection to bypass client-side validation and verify server-side check. 
+ SqlFieldsQuery qry = new SqlFieldsQuery("SELECT * FROM Person"); + + Field updateBatchSize = SqlFieldsQuery.class.getDeclaredField("updateBatchSize"); + updateBatchSize.setAccessible(true); + updateBatchSize.setInt(qry, -1); + + GridTestUtils.assertThrowsAnyCause(null, () -> client.query(qry).getAll(), + ClientException.class, "updateBatchSize cannot be lower than 1"); + + Field parts = SqlFieldsQuery.class.getDeclaredField("parts"); + parts.setAccessible(true); + parts.set(qry, new int[] {-1}); + qry.setUpdateBatchSize(2); + + GridTestUtils.assertThrowsAnyCause(null, () -> client.query(qry).getAll(), + ClientException.class, "Illegal partition"); + } + } + /** */ private static ClientConfiguration getClientConfiguration() { return new ClientConfiguration().setAddresses(Config.SERVER) diff --git a/modules/indexing/src/test/java/org/apache/ignite/client/SecurityTest.java b/modules/indexing/src/test/java/org/apache/ignite/client/SecurityTest.java index dae7a9e3625325..e64713a5c9baf7 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/client/SecurityTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/client/SecurityTest.java @@ -40,6 +40,8 @@ import org.junit.Test; import org.junit.rules.Timeout; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_KEY_ALGORITHM; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_STORE_TYPE; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -113,12 +115,12 @@ public void testEncryption() throws Exception { try (IgniteClient client = Ignition.startClient(clientCfg .setSslMode(SslMode.REQUIRED) .setSslClientCertificateKeyStorePath(rsrcPath.apply("/client.jks")) - .setSslClientCertificateKeyStoreType("JKS") + .setSslClientCertificateKeyStoreType(DFLT_STORE_TYPE) .setSslClientCertificateKeyStorePassword("123456") .setSslTrustCertificateKeyStorePath(rsrcPath.apply("/trust.jks")) - .setSslTrustCertificateKeyStoreType("JKS") + 
.setSslTrustCertificateKeyStoreType(DFLT_STORE_TYPE) .setSslTrustCertificateKeyStorePassword("123456") - .setSslKeyAlgorithm("SunX509") + .setSslKeyAlgorithm(DFLT_KEY_ALGORITHM) .setSslTrustAll(false) .setSslProtocol(SslProtocol.TLS) )) { diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/metric/SqlStatisticsUserQueriesFastTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/metric/SqlStatisticsUserQueriesFastTest.java index 5368382d23b571..83e4c2e7ee4962 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/metric/SqlStatisticsUserQueriesFastTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/metric/SqlStatisticsUserQueriesFastTest.java @@ -301,6 +301,7 @@ public void testLocalSelectFailed() { public void testLocalSelectCanceled() { assertMetricsIncrementedOnlyOnReducer(() -> startAndKillQuery(new SqlFieldsQuery("SELECT * FROM TAB WHERE ID <> suspendHook(ID)").setLocal(true)), + 2, "success", "failed", "canceled"); diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/metric/SqlStatisticsUserQueriesLongTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/metric/SqlStatisticsUserQueriesLongTest.java index 74b21cb3c42b3c..abe9afefcc7bc0 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/metric/SqlStatisticsUserQueriesLongTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/metric/SqlStatisticsUserQueriesLongTest.java @@ -92,7 +92,7 @@ public void testMetricsOnRemoteMapFail() throws Exception { SuspendQuerySqlFunctions.setProcessRowsToSuspend(1); assertMetricsIncrementedOnlyOnReducer(() -> - startAndKillQuery(new SqlFieldsQuery("SELECT * FROM TAB WHERE ID < 200 AND suspendHook(ID) <> 5 ")), + startAndKillQuery(new SqlFieldsQuery("SELECT * FROM TAB WHERE ID < 200 AND suspendHook(ID) <> 5 ")), 2, "success", "failed", "canceled"); } @@ -128,7 +128,7 @@ public void testMetricsOnLocalMapFail() throws Exception { 
SuspendQuerySqlFunctions.setProcessRowsToSuspend(1); assertMetricsIncrementedOnlyOnReducer(() -> - startAndKillQuery(new SqlFieldsQuery("SELECT * FROM TAB WHERE ID < 200 AND suspendHook(ID) <> 5 ")), + startAndKillQuery(new SqlFieldsQuery("SELECT * FROM TAB WHERE ID < 200 AND suspendHook(ID) <> 5 ")), 2, "success", "failed", "canceled"); } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/metric/UserQueriesTestBase.java b/modules/indexing/src/test/java/org/apache/ignite/internal/metric/UserQueriesTestBase.java index 15a346d3933220..611e2849ca96f1 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/metric/UserQueriesTestBase.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/metric/UserQueriesTestBase.java @@ -18,21 +18,27 @@ package org.apache.ignite.internal.metric; import java.util.Collection; +import java.util.Collections; import java.util.Map; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.ignite.cache.query.QueryCancelledException; import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.events.SqlQueryExecutionEvent; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.processors.metric.MetricRegistry; import org.apache.ignite.internal.processors.query.GridRunningQueryInfo; +import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.spi.metric.LongMetric; import org.apache.ignite.spi.metric.Metric; import org.apache.ignite.testframework.GridTestUtils; import org.junit.Assert; +import static org.apache.ignite.events.EventType.EVT_SQL_QUERY_EXECUTION; import static org.apache.ignite.internal.processors.query.RunningQueryManager.SQL_USER_QUERIES_REG_NAME; /** @@ -52,13 +58,36 @@ public class 
UserQueriesTestBase extends SqlStatisticsAbstractTest { /** The second node index. This node should execute only map parts of the queries. */ protected static final int MAPPER_IDX = 1; + /** */ + private static final AtomicInteger SQL_QRY_EXEC_EVT_CNTR = new AtomicInteger(); + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + IgnitePredicate lsnr = evt -> { + assertNotNull(evt.text()); + + SQL_QRY_EXEC_EVT_CNTR.incrementAndGet(); + + return true; + }; + + int[] evts = new int[] {EVT_SQL_QUERY_EXECUTION}; + + cfg.setIncludeEventTypes(evts); + cfg.setLocalEventListeners(Collections.singletonMap(lsnr, evts)); + + return cfg; + } + /** * Verify that after specified action is performed, all metrics are left unchanged. * * @param act Action. */ protected void assertMetricsRemainTheSame(Runnable act) { - assertMetricsAre(fetchAllMetrics(REDUCER_IDX), fetchAllMetrics(MAPPER_IDX), act); + assertMetricsAre(fetchAllMetrics(REDUCER_IDX), fetchAllMetrics(MAPPER_IDX), act, 0); } /** @@ -68,6 +97,21 @@ protected void assertMetricsRemainTheSame(Runnable act) { * @param incrementedMetrics array of metrics to check. */ protected void assertMetricsIncrementedOnlyOnReducer(Runnable act, String... incrementedMetrics) { + assertMetricsIncrementedOnlyOnReducer(act, 1, incrementedMetrics); + } + + /** + * Verify that after action is performed, specified metrics gets incremented only on reducer node. + * + * @param act action (callback) to perform. + * @param qryCnt Amount of queries. + * @param incrementedMetrics array of metrics to check. + */ + protected void assertMetricsIncrementedOnlyOnReducer( + Runnable act, + int qryCnt, + String... 
incrementedMetrics + ) { Map expValuesMapper = fetchAllMetrics(MAPPER_IDX); Map expValuesReducer = fetchAllMetrics(REDUCER_IDX); @@ -75,7 +119,7 @@ protected void assertMetricsIncrementedOnlyOnReducer(Runnable act, String... inc for (String incMet : incrementedMetrics) expValuesReducer.compute(incMet, (name, val) -> val + 1); - assertMetricsAre(expValuesReducer, expValuesMapper, act); + assertMetricsAre(expValuesReducer, expValuesMapper, act, qryCnt); } /** @@ -97,11 +141,16 @@ private Map fetchAllMetrics(int nodeIdx) { * @param expMetricsReducer Expected metrics on reducer. * @param expMetricsMapper Expected metrics on mapper. * @param act callback to perform. Usually sql query execution. + * @param qryEvtCnt Expected sql query events. */ private void assertMetricsAre( Map expMetricsReducer, Map expMetricsMapper, - Runnable act) { + Runnable act, + int qryEvtCnt + ) { + SQL_QRY_EXEC_EVT_CNTR.set(0); + act.run(); expMetricsReducer.forEach((mName, expVal) -> { @@ -115,6 +164,9 @@ private void assertMetricsAre( Assert.assertEquals("Unexpected value for metric " + mName, (long)expVal, actVal); }); + + Assert.assertEquals("Unexpected records for SqlQueryExecutionEvent.", + qryEvtCnt, SQL_QRY_EXEC_EVT_CNTR.get()); } /** diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractQuerySelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractQuerySelfTest.java index 114c4aa6b09c25..660f56e6c392e1 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractQuerySelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractQuerySelfTest.java @@ -46,6 +46,8 @@ import org.apache.ignite.IgniteBinary; import org.apache.ignite.IgniteCache; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteSystemProperties; +import org.apache.ignite.Ignition; import 
org.apache.ignite.binary.BinaryObject; import org.apache.ignite.cache.CacheAtomicityMode; import org.apache.ignite.cache.CacheMode; @@ -63,7 +65,10 @@ import org.apache.ignite.cache.query.annotations.QueryTextField; import org.apache.ignite.cache.store.CacheStore; import org.apache.ignite.cache.store.CacheStoreAdapter; +import org.apache.ignite.client.Config; +import org.apache.ignite.client.IgniteClient; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.ClientConfiguration; import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.configuration.NearCacheConfiguration; @@ -71,6 +76,7 @@ import org.apache.ignite.events.CacheQueryReadEvent; import org.apache.ignite.events.Event; import org.apache.ignite.events.EventType; +import org.apache.ignite.events.SqlQueryExecutionEvent; import org.apache.ignite.internal.binary.BinaryMarshaller; import org.apache.ignite.internal.processors.cache.query.QueryCursorEx; import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata; @@ -84,6 +90,7 @@ import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.WithSystemProperty; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.junit.Test; @@ -95,6 +102,7 @@ import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; import static org.apache.ignite.events.EventType.EVT_CACHE_QUERY_EXECUTED; import static org.apache.ignite.events.EventType.EVT_CACHE_QUERY_OBJECT_READ; +import static org.apache.ignite.events.EventType.EVT_SQL_QUERY_EXECUTION; import static org.apache.ignite.internal.processors.cache.query.CacheQueryType.FULL_TEXT; import static org.apache.ignite.internal.processors.cache.query.CacheQueryType.SCAN; import static 
org.apache.ignite.testframework.GridTestUtils.assertThrowsWithCause; @@ -1457,14 +1465,6 @@ public void testArray() throws Exception { assertArrayEquals(new Long[]{4L, 5L, 6L}, e.getValue().arr); } - /** - * @throws Exception If failed. - */ - @Test - public void testSqlQueryEvents() throws Exception { - checkSqlQueryEvents(); - } - /** * @throws Exception If failed. */ @@ -1491,7 +1491,8 @@ public void testFieldsQueryMetadata() throws Exception { /** * @throws Exception If failed. */ - private void checkSqlQueryEvents() throws Exception { + @Test + public void testSqlQueryEvents() throws Exception { final IgniteCache cache = jcache(Integer.class, Integer.class); final boolean evtsDisabled = cache.getConfiguration(CacheConfiguration.class).isEventsDisabled(); final CountDownLatch execLatch = new CountDownLatch(evtsDisabled ? 0 : @@ -1545,6 +1546,75 @@ private void checkSqlQueryEvents() throws Exception { } } + /** @throws Exception If failed. */ + @Test + @WithSystemProperty(key = IgniteSystemProperties.IGNITE_TO_STRING_INCLUDE_SENSITIVE, value = "false") + public void testClientQueryExecutedEvents() throws Exception { + doTestClientQueryExecutedEvents(false); + } + + /** @throws Exception If failed. 
*/ + @Test + @WithSystemProperty(key = IgniteSystemProperties.IGNITE_TO_STRING_INCLUDE_SENSITIVE, value = "true") + public void testClientQueryExecutedEventsIncludeSensitive() throws Exception { + doTestClientQueryExecutedEvents(true); + } + + /** */ + public void doTestClientQueryExecutedEvents(boolean inclSens) throws Exception { + CountDownLatch execLatch = new CountDownLatch(9); + + IgnitePredicate lsnr = evt -> { + assertNotNull(evt.text()); + if (inclSens) + assertTrue(evt.toString().contains("args=")); + else + assertFalse(evt.toString().contains("args=")); + + execLatch.countDown(); + + return true; + }; + + ignite().events().localListen(lsnr, EVT_SQL_QUERY_EXECUTION); + + ClientConfiguration cc = new ClientConfiguration().setAddresses(Config.SERVER); + + try (IgniteClient client = Ignition.startClient(cc)) { + client.query(new SqlFieldsQuery("create table TEST_TABLE(key int primary key, val int)")) + .getAll(); + + client.query(new SqlFieldsQuery("insert into TEST_TABLE values (?, ?)").setArgs(1, 1)) + .getAll(); + + client.query(new SqlFieldsQuery("update TEST_TABLE set val = ?2 where key = ?1").setArgs(1, 2)) + .getAll(); + + client.query(new SqlFieldsQuery("select * from TEST_TABLE")) + .getAll(); + + client.query(new SqlFieldsQuery("create index idx_1 on TEST_TABLE(key)")) + .getAll(); + + client.query(new SqlFieldsQuery("drop index idx_1")) + .getAll(); + + client.query(new SqlFieldsQuery("alter table TEST_TABLE add column val2 int")) + .getAll(); + + client.query(new SqlFieldsQuery("alter table TEST_TABLE drop val2")) + .getAll(); + + client.query(new SqlFieldsQuery("drop table TEST_TABLE")) + .getAll(); + + assert execLatch.await(3_000, MILLISECONDS); + } + finally { + ignite().events().stopLocalListen(lsnr, EVT_SQL_QUERY_EXECUTION); + } + } + /** * @throws Exception If failed. 
*/ diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheUpdateSqlQuerySelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheUpdateSqlQuerySelfTest.java index 95925ff705ee10..3607be078eb78e 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheUpdateSqlQuerySelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheUpdateSqlQuerySelfTest.java @@ -400,7 +400,7 @@ private List> execute(SqlFieldsQuery qry) { /** * */ - static final class AllTypes implements Serializable { + public static final class AllTypes implements Serializable { /** * Data Long. */ @@ -602,7 +602,7 @@ private void init(Long key, String str) { } /** */ - AllTypes(Long key) { + public AllTypes(Long key) { this.init(key, Long.toString(key)); } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/BasicIndexTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/BasicIndexTest.java index 24a37597434067..9673af847a6e01 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/BasicIndexTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/BasicIndexTest.java @@ -1372,6 +1372,21 @@ public void testStopNodeOnSqlQueryWithIncompatibleType() throws Exception { assertFalse(grid().context().isStopping()); } + /** */ + @Test + public void testOpenRangePredicateOnCompoundPk() throws Exception { + inlineSize = 10; + + startGrid(); + + sql("create table test (id1 int, id2 int, val int, constraint pk primary key (id1, id2))"); + + for (int i = 1; i <= 5; i++) + sql("insert into test (id1, id2, val) values (?, ?, ?)", 0, i, i); + + assertEquals(5, sql("select * from test where id1 = 0 and id2 > 0").getAll().size()); + } + /** */ private void checkAll() { IgniteCache cache = 
grid(0).cache(DEFAULT_CACHE_NAME); diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicEnableIndexingConcurrentSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicEnableIndexingConcurrentSelfTest.java index e28aed9e6bed4f..bdaa1ba4d10f79 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicEnableIndexingConcurrentSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicEnableIndexingConcurrentSelfTest.java @@ -226,8 +226,7 @@ public void testNodeJoinOnPendingOperation() throws Exception { ignitionStart(serverConfiguration(2), finishLatch); ignitionStart(serverConfiguration(3), finishLatch); - // TODO: https://issues.apache.org/jira/browse/IGNITE-13572 - awaitPartitionMapExchange(true, true, null); + awaitPartitionMapExchange(); assertFalse(tblFut.isDone()); @@ -332,8 +331,7 @@ public void testConcurrentRebalance() throws Exception { ignitionStart(serverConfiguration(4)); - // TODO: https://issues.apache.org/jira/browse/IGNITE-13572 - awaitPartitionMapExchange(true, true, null); + awaitPartitionMapExchange(); tblFut.get(); @@ -495,8 +493,7 @@ public void testConcurrentEnableIndexing() throws Exception { // Check that only one successful attempt. 
assertEquals(1, success.get()); - // TODO: https://issues.apache.org/jira/browse/IGNITE-13572 - awaitPartitionMapExchange(true, true, null); + awaitPartitionMapExchange(); for (Ignite g: G.allGrids()) { assertEquals(LARGE_NUM_ENTRIES, query(g, SELECT_ALL_QUERY).size()); diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/SqlPartitionEvictionTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/SqlPartitionEvictionTest.java new file mode 100644 index 00000000000000..baeade3d386507 --- /dev/null +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/SqlPartitionEvictionTest.java @@ -0,0 +1,215 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.index; + +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Random; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ThreadLocalRandom; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteDataStreamer; +import org.apache.ignite.binary.BinaryObject; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.CacheWriteSynchronizationMode; +import org.apache.ignite.cache.QueryEntity; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; +import org.apache.ignite.internal.processors.query.GridQueryProcessor; +import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor; +import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing; +import org.apache.ignite.internal.util.typedef.G; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import static java.util.concurrent.TimeUnit.SECONDS; + +/** */ +@RunWith(Parameterized.class) +public class SqlPartitionEvictionTest extends GridCommonAbstractTest { + /** */ + private static final String POI_CACHE_NAME = "POI_CACHE"; + + /** */ + private static final String POI_SCHEMA_NAME = "DOMAIN"; + + /** */ + private static final String POI_TABLE_NAME = "POI"; + + /** */ + private static 
final String POI_CLASS_NAME = "PointOfInterest"; + + /** */ + private static final String ID_FIELD_NAME = "id"; + + /** */ + private static final String NAME_FIELD_NAME = "name"; + + /** */ + private static final String LATITUDE_FIELD_NAME = "latitude"; + + /** */ + private static final String LONGITUDE_FIELD_NAME = "longitude"; + + /** */ + private static final int NUM_ENTITIES = 1_000; + + /** Test parameters. */ + @Parameterized.Parameters(name = "backups_count={0}") + public static Iterable params() { + return Arrays.asList( + new Object[] { 0 }, + new Object[] { 1 }, + new Object[] { 2 } + ); + } + + /** + * Number of partition backups. + */ + @Parameterized.Parameter + public int backupsCount; + + /** + * For awaiting of eviction start. + */ + private static final CountDownLatch LATCH = new CountDownLatch(1); + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + cfg.setCacheConfiguration(new CacheConfiguration<>(POI_CACHE_NAME) + .setAtomicityMode(CacheAtomicityMode.ATOMIC) + .setSqlSchema("DOMAIN") + .setQueryEntities(Collections.singletonList(queryEntity())) + .setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC) + .setCacheMode(CacheMode.PARTITIONED) + .setBackups(backupsCount) + ); + + cfg.setActiveOnStart(true); + + return cfg; + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(true); + + super.afterTest(); + } + + /** + * Tests SQL query results after partition eviction. 
+ */ + @Test + public void testSqlConsistencyOnEviction() throws Exception { + IgniteEx ig = null; + + int idx = 0; + while (idx <= backupsCount) + ig = ignitionStart(idx++); + + loadData(ig, 0, NUM_ENTITIES); + + ignitionStart(idx); + + awaitPartitionMapExchange(); + + U.await(LATCH, 10, SECONDS); + + for (Ignite g: G.allGrids()) + assertEquals(NUM_ENTITIES, query(g, "SELECT * FROM " + POI_TABLE_NAME).size()); + } + + /** */ + private void loadData(IgniteEx node, int start, int end) { + try (IgniteDataStreamer streamer = node.dataStreamer(POI_CACHE_NAME)) { + Random rnd = ThreadLocalRandom.current(); + + for (int i = start; i < end; i++) { + BinaryObject bo = node.binary().builder(POI_CLASS_NAME) + .setField(NAME_FIELD_NAME, "POI_" + i, String.class) + .setField(LATITUDE_FIELD_NAME, rnd.nextDouble(), Double.class) + .setField(LONGITUDE_FIELD_NAME, rnd.nextDouble(), Double.class) + .build(); + + streamer.addData(i, bo); + } + } + } + + /** */ + protected List> query(Ignite ig, String sql) { + IgniteCache cache = ig.cache(POI_CACHE_NAME).withKeepBinary(); + + return cache.query(new SqlFieldsQuery(sql).setSchema(POI_SCHEMA_NAME)).getAll(); + } + + /** */ + private QueryEntity queryEntity() { + LinkedHashMap fields = new LinkedHashMap<>(); + fields.put(ID_FIELD_NAME, Integer.class.getName()); + fields.put(NAME_FIELD_NAME, String.class.getName()); + fields.put(LATITUDE_FIELD_NAME, Double.class.getName()); + fields.put(LONGITUDE_FIELD_NAME, Double.class.getName()); + + return new QueryEntity() + .setKeyType(Integer.class.getName()) + .setKeyFieldName(ID_FIELD_NAME) + .setValueType(POI_CLASS_NAME) + .setTableName(POI_TABLE_NAME) + .setFields(fields); + } + + /** */ + private IgniteEx ignitionStart(int idx) throws Exception { + GridQueryProcessor.idxCls = BlockingIndexing.class; + + IgniteConfiguration cfg = getConfiguration(getTestIgniteInstanceName(idx)); + + return startGrid(cfg); + } + + /** + * Blocking indexing processor. 
+ */ + private static class BlockingIndexing extends IgniteH2Indexing { + @Override public void remove(GridCacheContext cctx, GridQueryTypeDescriptor type, + CacheDataRow row) throws IgniteCheckedException { + U.sleep(50); + + LATCH.countDown(); + + super.remove(cctx, type, row); + } + } +} diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/metric/SqlViewExporterSpiTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/metric/SqlViewExporterSpiTest.java index b0ad0580515357..eb87a1d563c932 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/metric/SqlViewExporterSpiTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/metric/SqlViewExporterSpiTest.java @@ -380,7 +380,7 @@ public void testSchemas() throws Exception { Set schemaFromSysView = new HashSet<>(); - schemasSysView.forEach(v -> schemaFromSysView.add(v.name())); + schemasSysView.forEach(v -> schemaFromSysView.add(v.schemaName())); HashSet expSchemas = new HashSet<>(asList("MY_SCHEMA", "ANOTHER_SCHEMA", "SYS", "PUBLIC")); @@ -453,20 +453,22 @@ public void testTable() throws Exception { assertEquals(1, res.size()); - List tbl = res.get(0); + List tbl = res.get(0); int cacheId = cacheId("SQL_PUBLIC_T1"); String cacheName = "SQL_PUBLIC_T1"; - assertEquals("T1", tbl.get(0)); // TABLE_NAME - assertEquals(DFLT_SCHEMA, tbl.get(1)); // SCHEMA_NAME - assertEquals(cacheName, tbl.get(2)); // CACHE_NAME - assertEquals(cacheId, tbl.get(3)); // CACHE_ID - assertNull(tbl.get(4)); // AFFINITY_KEY_COLUMN - assertEquals("ID", tbl.get(5)); // KEY_ALIAS - assertNull(tbl.get(6)); // VALUE_ALIAS - assertEquals("java.lang.Long", tbl.get(7)); // KEY_TYPE_NAME - assertNotNull(tbl.get(8)); // VALUE_TYPE_NAME + assertEquals(cacheId, tbl.get(0)); // CACHE_GROUP_ID + assertEquals(cacheName, tbl.get(1)); // CACHE_GROUP_NAME + assertEquals(cacheId, tbl.get(2)); // CACHE_ID + assertEquals(cacheName, 
tbl.get(3)); // CACHE_NAME + assertEquals(DFLT_SCHEMA, tbl.get(4)); // SCHEMA_NAME + assertEquals("T1", tbl.get(5)); // TABLE_NAME + assertNull(tbl.get(6)); // AFFINITY_KEY_COLUMN + assertEquals("ID", tbl.get(7)); // KEY_ALIAS + assertNull(tbl.get(8)); // VALUE_ALIAS + assertEquals("java.lang.Long", tbl.get(9)); // KEY_TYPE_NAME + assertNotNull(tbl.get(10)); // VALUE_TYPE_NAME execute(ignite0, "CREATE TABLE T2(ID LONG PRIMARY KEY, NAME VARCHAR)"); diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccAbstractSqlCoordinatorFailoverTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccAbstractSqlCoordinatorFailoverTest.java index 87f2c6ae7a9bf2..fb6241760f51e6 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccAbstractSqlCoordinatorFailoverTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccAbstractSqlCoordinatorFailoverTest.java @@ -31,7 +31,6 @@ import org.apache.ignite.internal.IgniteNodeAttributes; import org.apache.ignite.internal.TestRecordingCommunicationSpi; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtAffinityAssignmentResponse; -import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.transactions.Transaction; @@ -283,8 +282,6 @@ public void testStartLastServerFails() throws Exception { } }, "start-cache"); - U.sleep(1000); - assertFalse(fut.isDone()); stopGrid(1); diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsIndexingDefragmentationTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsIndexingDefragmentationTest.java new file mode 100644 index 00000000000000..bbb69ae3c7670e --- /dev/null +++ 
b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsIndexingDefragmentationTest.java @@ -0,0 +1,316 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence; + +import java.io.File; +import java.util.Collections; +import java.util.function.Function; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.cluster.ClusterState; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.processors.cache.IgniteCacheUpdateSqlQuerySelfTest; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils; +import 
org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; +import org.apache.ignite.internal.processors.query.GridQueryProcessor; +import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.visor.verify.ValidateIndexesClosure; +import org.apache.ignite.internal.visor.verify.VisorValidateIndexesJobResult; +import org.apache.ignite.testframework.junits.WithSystemProperty; +import org.junit.Test; + +import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; +import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT; +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.DFLT_STORE_DIR; + +/** + * Defragmentation tests with enabled ignite-indexing. + */ +public class IgnitePdsIndexingDefragmentationTest extends IgnitePdsDefragmentationTest { + /** Use MVCC in tests. */ + private static final String USE_MVCC = "USE_MVCC"; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + cfg.setConsistentId(igniteInstanceName); + + DataStorageConfiguration dsCfg = new DataStorageConfiguration(); + dsCfg.setWalSegmentSize(4 * 1024 * 1024); + + dsCfg.setDefaultDataRegionConfiguration( + new DataRegionConfiguration() + .setInitialSize(100L * 1024 * 1024) + .setMaxSize(1024L * 1024 * 1024) + .setPersistenceEnabled(true) + ); + + cfg.setDataStorageConfiguration(dsCfg); + + CacheConfiguration cache1Cfg = new CacheConfiguration<>(DEFAULT_CACHE_NAME) + .setAtomicityMode(TRANSACTIONAL) + .setGroupName(GRP_NAME) + .setIndexedTypes( + IgniteCacheUpdateSqlQuerySelfTest.AllTypes.class, byte[].class, + Integer.class, byte[].class + ) + .setAffinity(new RendezvousAffinityFunction(false, PARTS)); + + CacheConfiguration cache2Cfg = new 
CacheConfiguration<>(CACHE_2_NAME) + .setAtomicityMode(TRANSACTIONAL) + .setGroupName(GRP_NAME) + .setIndexedTypes( + IgniteCacheUpdateSqlQuerySelfTest.AllTypes.class, byte[].class, + Integer.class, byte[].class + ) + .setAffinity(new RendezvousAffinityFunction(false, PARTS)); + + if (Boolean.TRUE.toString().equals(System.getProperty(USE_MVCC))) { + cache1Cfg.setAtomicityMode(TRANSACTIONAL_SNAPSHOT); + cache2Cfg.setAtomicityMode(TRANSACTIONAL_SNAPSHOT); + } else + cache2Cfg.setExpiryPolicyFactory(new PolicyFactory()); + + cfg.setCacheConfiguration(cache1Cfg, cache2Cfg); + + return cfg; + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + super.afterTest(); + + GridQueryProcessor.idxCls = null; + } + + /** + * Fill cache, remove half of the entries, defragment PDS and check index. + * + * @param keyMapper Function that provides key based on the index of entry. + * @param Type of cache key. + * + * @throws Exception If failed. + */ + private void test(Function keyMapper) throws Exception { + IgniteEx ig = startGrid(0); + + ig.cluster().state(ClusterState.ACTIVE); + + fillCache(keyMapper, ig.cache(DEFAULT_CACHE_NAME)); + + forceCheckpoint(ig); + + createMaintenanceRecord(); + + stopGrid(0); + + File dbWorkDir = U.resolveWorkDirectory(U.defaultWorkDirectory(), DFLT_STORE_DIR, false); + File nodeWorkDir = new File(dbWorkDir, U.maskForFileName(ig.name())); + File workDir = new File(nodeWorkDir, FilePageStoreManager.CACHE_GRP_DIR_PREFIX + GRP_NAME); + + long oldIdxFileLen = new File(workDir, FilePageStoreManager.INDEX_FILE_NAME).length(); + + startGrid(0); + + long newIdxFileLen = new File(workDir, FilePageStoreManager.INDEX_FILE_NAME).length(); + + assertTrue(newIdxFileLen <= oldIdxFileLen); + + File completionMarkerFile = DefragmentationFileUtils.defragmentationCompletionMarkerFile(workDir); + assertTrue(completionMarkerFile.exists()); + + stopGrid(0); + + GridQueryProcessor.idxCls = CaptureRebuildGridQueryIndexing.class; + + 
IgniteEx node = startGrid(0); + + awaitPartitionMapExchange(); + + CaptureRebuildGridQueryIndexing indexing = (CaptureRebuildGridQueryIndexing) node.context().query().getIndexing(); + + assertFalse(indexing.didRebuildIndexes()); + + IgniteCache cache = node.cache(DEFAULT_CACHE_NAME); + + assertFalse(completionMarkerFile.exists()); + + validateIndexes(node); + + for (int k = 0; k < ADDED_KEYS_COUNT; k++) + cache.get(keyMapper.apply(k)); + } + + /** + * Test that indexes are correct. + * + * @param node Node. + * @throws Exception If failed. + */ + private static void validateIndexes(IgniteEx node) throws Exception { + ValidateIndexesClosure clo = new ValidateIndexesClosure( + Collections.singleton(DEFAULT_CACHE_NAME), + 0, + 0, + false, + true + ); + + node.context().resource().injectGeneric(clo); + + VisorValidateIndexesJobResult call = clo.call(); + + assertFalse(call.hasIssues()); + } + + /** + * Test using integer keys. + * + * @throws Exception If failed. + */ + @Test + public void testIndexingWithIntegerKey() throws Exception { + test(Function.identity()); + } + + /** + * Test using complex keys (integer and string). + * + * @throws Exception If failed. + */ + @Test + public void testIndexingWithComplexKey() throws Exception { + test(integer -> new IgniteCacheUpdateSqlQuerySelfTest.AllTypes((long)integer)); + } + + /** + * Test using integer keys. + * + * @throws Exception If failed. + */ + @Test + @WithSystemProperty(key = USE_MVCC, value = "true") + public void testIndexingWithIntegerKeyAndMVCC() throws Exception { + test(Function.identity()); + } + + /** + * Test using complex keys (integer and string). + * + * @throws Exception If failed. + */ + @Test + @WithSystemProperty(key = USE_MVCC, value = "true") + public void testIndexingWithComplexKeyAndMVCC() throws Exception { + test(integer -> new IgniteCacheUpdateSqlQuerySelfTest.AllTypes((long)integer)); + } + + /** + * @throws Exception If failed. 
+ */ + @Test + public void testMultipleIndexes() throws Exception { + startGrid(0).cluster().state(ClusterState.ACTIVE); + + IgniteCache cache = grid(0).cache(DEFAULT_CACHE_NAME); + + cache.query(new SqlFieldsQuery("CREATE TABLE TEST (ID INT PRIMARY KEY, VAL_INT INT, VAL_OBJ LONG)")); + + cache.query(new SqlFieldsQuery("CREATE INDEX TEST_VAL_INT ON TEST(VAL_INT)")); + + cache.query(new SqlFieldsQuery("CREATE INDEX TEST_VAL_OBJ ON TEST(VAL_OBJ)")); + + for (int i = 0; i < ADDED_KEYS_COUNT; i++) + cache.query(new SqlFieldsQuery("INSERT INTO TEST VALUES (?, ?, ?)").setArgs(i, i, (long)i)); + + cache.query(new SqlFieldsQuery("DELETE FROM TEST WHERE MOD(ID, 2) = 0")); + + createMaintenanceRecord(); + + // Restart first time. + stopGrid(0); + + startGrid(0); + + // Restart second time. + stopGrid(0); + + startGrid(0); + + // Reinit cache object. + cache = grid(0).cache(DEFAULT_CACHE_NAME); + + assertTrue(explainQuery(cache, "EXPLAIN SELECT * FROM TEST WHERE ID > 0").contains("_key_pk_proxy")); + + cache.query(new SqlFieldsQuery("SELECT * FROM TEST WHERE ID > 0")).getAll(); + + assertTrue(explainQuery(cache, "EXPLAIN SELECT * FROM TEST WHERE VAL_INT > 0").contains("test_val_int")); + + cache.query(new SqlFieldsQuery("SELECT * FROM TEST WHERE VAL_INT > 0")).getAll(); + + assertTrue(explainQuery(cache, "EXPLAIN SELECT * FROM TEST WHERE VAL_OBJ > 0").contains("test_val_obj")); + + cache.query(new SqlFieldsQuery("SELECT * FROM TEST WHERE VAL_OBJ > 0")).getAll(); + } + + /** */ + private static String explainQuery(IgniteCache cache, String qry) { + return cache + .query(new SqlFieldsQuery(qry)) + .getAll() + .get(0) + .get(0) + .toString() + .toLowerCase(); + } + + /** + * IgniteH2Indexing that captures index rebuild operations. + */ + public static class CaptureRebuildGridQueryIndexing extends IgniteH2Indexing { + /** + * Whether index rebuild happened. 
+ */ + private boolean rebuiltIndexes; + + /** {@inheritDoc} */ + @Override public IgniteInternalFuture rebuildIndexesFromHash(GridCacheContext cctx) { + IgniteInternalFuture future = super.rebuildIndexesFromHash(cctx); + rebuiltIndexes = future != null; + return future; + } + + /** + * Get index rebuild flag. + * + * @return Whether index rebuild happened. + */ + public boolean didRebuildIndexes() { + return rebuiltIndexes; + } + } +} diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/LongDestroyDurableBackgroundTaskTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/LongDestroyDurableBackgroundTaskTest.java index ca966840565273..1f03ef4d4cc4e1 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/LongDestroyDurableBackgroundTaskTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/LongDestroyDurableBackgroundTaskTest.java @@ -60,6 +60,7 @@ import org.apache.ignite.internal.processors.cache.persistence.metastorage.ReadOnlyMetastorage; import org.apache.ignite.internal.processors.cache.persistence.metastorage.ReadWriteMetastorage; import org.apache.ignite.internal.processors.cache.persistence.metastorage.pendingtask.DurableBackgroundTask; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.LongListReuseBag; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.failure.FailureProcessor; @@ -758,7 +759,8 @@ public H2TreeTest( IgniteLogger log, IoStatisticsHolder stats, InlineIndexColumnFactory factory, - int configuredInlineSize + int configuredInlineSize, + PageIoResolver pageIoRslvr ) throws IgniteCheckedException { super( cctx, @@ -786,7 +788,8 @@ public H2TreeTest( log, stats, 
factory, - configuredInlineSize + configuredInlineSize, + pageIoRslvr ); } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/MultipleParallelCacheDeleteDeadlockTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/MultipleParallelCacheDeleteDeadlockTest.java index 9e0f93a1a69951..ed90b66507c415 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/MultipleParallelCacheDeleteDeadlockTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/MultipleParallelCacheDeleteDeadlockTest.java @@ -36,6 +36,7 @@ import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.LongListReuseBag; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.failure.FailureProcessor; @@ -266,7 +267,8 @@ public H2TreeTest( IgniteLogger log, IoStatisticsHolder stats, InlineIndexColumnFactory factory, - int configuredInlineSize + int configuredInlineSize, + PageIoResolver pageIoRslvr ) throws IgniteCheckedException { super( cctx, @@ -294,7 +296,8 @@ public H2TreeTest( log, stats, factory, - configuredInlineSize + configuredInlineSize, + pageIoRslvr ); } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/snapshot/IgniteClusterSnapshotWithIndexesTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/snapshot/IgniteClusterSnapshotWithIndexesTest.java index 4f6663d4a67afa..a263df877f85f7 100644 --- 
a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/snapshot/IgniteClusterSnapshotWithIndexesTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/snapshot/IgniteClusterSnapshotWithIndexesTest.java @@ -154,7 +154,7 @@ public void testClusterSnapshotConsistentConfig() throws Exception { IgniteEx snp = startGridsFromSnapshot(grids, SNAPSHOT_NAME); List currIdxNames = executeSql(snp, "SELECT * FROM SYS.INDEXES").stream(). - map(l -> (String)l.get(0)) + map(l -> (String)l.get(6)) .collect(Collectors.toList()); assertTrue("Concurrently created indexes must not exist in the snapshot: " + currIdxNames, diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentSqlUpdatesTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentSqlUpdatesTest.java new file mode 100644 index 00000000000000..9459134a3808fd --- /dev/null +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentSqlUpdatesTest.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.query; + +import javax.cache.expiry.CreatedExpiryPolicy; +import javax.cache.expiry.Duration; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.QueryEntity; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.internal.IgniteEx; + +/** + * {@link ScanQueryConcurrentUpdatesAbstractTest} with caches created, updates and destroyed using SQL DDL queries. + */ +public class ScanQueryConcurrentSqlUpdatesTest extends ScanQueryConcurrentUpdatesAbstractTest { + /** + * A name for a cache that will be used to execute DDL queries. + */ + private static final String DUMMY_CACHE_NAME = "dummy"; + + /** {@inheritDoc} */ + @Override protected IgniteCache createCache(String cacheName, CacheMode cacheMode, + Duration expiration) { + CacheConfiguration cacheCfg = new CacheConfiguration<>(cacheName); + cacheCfg.setCacheMode(cacheMode); + if (expiration != null) { + cacheCfg.setExpiryPolicyFactory(CreatedExpiryPolicy.factoryOf(expiration)); + cacheCfg.setEagerTtl(true); + } + + IgniteEx ignite = grid(0); + ignite.addCacheConfiguration(cacheCfg); + + ignite.getOrCreateCache(DUMMY_CACHE_NAME).query(new SqlFieldsQuery("CREATE TABLE " + cacheName + " " + + "(key int primary key, val int) " + + "WITH \"template=" + cacheName + ",wrap_value=false\"")); + + return ignite.cache("SQL_PUBLIC_" + cacheName.toUpperCase()); + } + + /** {@inheritDoc} */ + @Override protected void updateCache(IgniteCache cache, int recordsNum) { + String tblName = tableName(cache); + + for (int i = 0; i < recordsNum; i++) { + cache.query(new SqlFieldsQuery( + "INSERT INTO " + tblName + " (key, val) " + + "VALUES (" + i + ", " + i + ")")); + } + } + + /** {@inheritDoc} */ + @Override protected void destroyCache(IgniteCache cache) { + grid(0).cache(DUMMY_CACHE_NAME).query(new SqlFieldsQuery("DROP TABLE " + 
tableName(cache))); + } + + /** + * @param cache Cache to determine a table name for. + * @return Name of the table corresponding to the provided cache. + */ + @SuppressWarnings("unchecked") + private String tableName(IgniteCache cache) { + CacheConfiguration cacheCfg = + (CacheConfiguration) cache.getConfiguration(CacheConfiguration.class); + QueryEntity qe = cacheCfg.getQueryEntities().iterator().next(); + + return qe.getTableName(); + } +} diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/RebuildIndexWithMVCCTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/RebuildIndexWithMVCCTest.java new file mode 100644 index 00000000000000..f5c2440acd98cf --- /dev/null +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/RebuildIndexWithMVCCTest.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.database; + +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; + +/** + * Test index rebuild with MVCC enabled. 
+ */ +public class RebuildIndexWithMVCCTest extends RebuildIndexTest { + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { + final IgniteConfiguration configuration = super.getConfiguration(gridName); + + for (CacheConfiguration cacheConfiguration : configuration.getCacheConfiguration()) + cacheConfiguration.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT); + + return configuration; + } + +} diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/LazyOnDmlTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/LazyOnDmlTest.java new file mode 100644 index 00000000000000..bed2178aa1c9fe --- /dev/null +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/LazyOnDmlTest.java @@ -0,0 +1,328 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.ignite.internal.processors.query; + +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteDataStreamer; +import org.apache.ignite.binary.BinaryObjectBuilder; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.QueryEntity; +import org.apache.ignite.cache.QueryIndex; +import org.apache.ignite.cache.QueryIndexType; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.cache.query.FieldsQueryCursor; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.index.AbstractIndexingCommonTest; +import org.apache.ignite.internal.processors.query.h2.H2PooledConnection; +import org.apache.ignite.internal.processors.query.h2.H2QueryInfo; +import org.apache.ignite.internal.processors.query.h2.H2Utils; +import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing; +import org.jetbrains.annotations.Nullable; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +/** + * Tests for lazy mode for DML queries. + */ +@RunWith(Parameterized.class) +public class LazyOnDmlTest extends AbstractIndexingCommonTest { + /** Keys count. */ + private static final int KEY_CNT = 3_000; + + /** */ + @Parameterized.Parameter + public CacheAtomicityMode atomicityMode; + + /** */ + @Parameterized.Parameter(1) + public CacheMode cacheMode; + + /** + * @return Test parameters. 
+ */ + @Parameterized.Parameters(name = "atomicityMode={0}, cacheMode={1}") + public static Collection parameters() { + Set paramsSet = new LinkedHashSet<>(); + + Object[] paramTemplate = new Object[2]; + + for (CacheAtomicityMode atomicityMode : CacheAtomicityMode.values()) { + paramTemplate = Arrays.copyOf(paramTemplate, paramTemplate.length); + + paramTemplate[0] = atomicityMode; + + for (CacheMode cacheMode : new CacheMode[] {CacheMode.PARTITIONED, CacheMode.REPLICATED}) { + Object[] params = Arrays.copyOf(paramTemplate, paramTemplate.length); + + params[1] = cacheMode; + + paramsSet.add(params); + } + } + + return paramsSet; + } + + /** {@inheritDoc} */ + @Override protected void beforeTestsStarted() throws Exception { + super.beforeTestsStarted(); + + GridQueryProcessor.idxCls = CheckLazyIndexing.class; + + startGrids(3); + } + + /** {@inheritDoc} */ + @Override protected void afterTestsStopped() throws Exception { + stopAllGrids(); + + super.afterTestsStopped(); + } + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + IgniteCache c = grid(0).createCache(new CacheConfiguration() + .setName("test") + .setSqlSchema("TEST") + .setAtomicityMode(atomicityMode) + .setCacheMode(cacheMode) + .setQueryEntities(Collections.singleton(new QueryEntity(Long.class.getName(), "testVal") + .setTableName("test") + .addQueryField("id", Long.class.getName(), null) + .addQueryField("val0", Long.class.getName(), null) + .addQueryField("val1", Long.class.getName(), null) + .addQueryField("val2", Long.class.getName(), null) + .setKeyFieldName("id") + .setIndexes(Collections.singletonList( + new QueryIndex(Arrays.asList("val0", "val1"), QueryIndexType.SORTED) + )) + )) + .setBackups(1) + .setAffinity(new RendezvousAffinityFunction(false, 10))); + + try (IgniteDataStreamer streamer = grid(0).dataStreamer("test")) { + for (long i = 0; i < KEY_CNT; ++i) { + BinaryObjectBuilder bob = grid(0).binary().builder("testVal"); + + 
bob.setField("val0", i); + bob.setField("val1", i); + bob.setField("val2", i); + + streamer.addData(i, bob.build()); + } + } + + sql("CREATE TABLE table1 (id INT PRIMARY KEY, col0 INT, col1 VARCHAR (100))"); + + sql("INSERT INTO table1 (id, col0, col1) " + + "SELECT 1, 11, 'FIRST' " + + "UNION ALL " + + "SELECT 11,12, 'SECOND' " + + "UNION ALL " + + "SELECT 21, 13, 'THIRD' " + + "UNION ALL " + + "SELECT 31, 14, 'FOURTH'"); + + sql("CREATE TABLE table2 (id INT PRIMARY KEY, col0 INT, col1 VARCHAR (100))"); + + sql("INSERT INTO table2 (id, col0, col1) " + + "SELECT 1, 21, 'TWO-ONE' " + + "UNION ALL " + + "SELECT 11, 22, 'TWO-TWO' " + + "UNION ALL " + + "SELECT 21, 23, 'TWO-THREE' " + + "UNION ALL " + + "SELECT 31, 24, 'TWO-FOUR'"); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + for (String cache : grid(0).cacheNames()) + grid(0).cache(cache).destroy(); + + super.afterTest(); + } + + /** + */ + @Test + public void testUpdateNotLazy() throws Exception { + checkUpdateNotLazy("UPDATE test SET val0 = val0 + 1 WHERE val0 >= 0"); + checkUpdateNotLazy("UPDATE test SET val1 = val1 + 1 WHERE val0 >= 0"); + } + + /** + */ + public void checkUpdateNotLazy(String sql) throws Exception { + try (AutoCloseable checker = CheckLazyIndexing.checkLazy(atomicityMode == CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT)) { + List> res = sql(sql).getAll(); + + // Check that all rows updates only ones. + assertEquals((long)KEY_CNT, res.get(0).get(0)); + } + } + + /** + */ + @Test + public void testUpdateLazy() throws Exception { + checkUpdateLazy("UPDATE test SET val0 = val0 + 1"); + checkUpdateLazy("UPDATE test SET val2 = val2 + 1 WHERE val2 >= 0"); + checkUpdateLazy("UPDATE test SET val0 = val0 + 1 WHERE val1 >= 0"); + } + + /** + */ + public void checkUpdateLazy(String sql) throws Exception { + try (AutoCloseable checker = CheckLazyIndexing.checkLazy(true)) { + List> res = sql(sql).getAll(); + + // Check that all rows updates only ones. 
+ assertEquals((long)KEY_CNT, res.get(0).get(0)); + } + } + + /** + */ + @Test + public void testDeleteWithoutReduce() throws Exception { + try (AutoCloseable checker = CheckLazyIndexing.checkLazy(true)) { + List> res = sql("DELETE FROM test WHERE val0 >= 0").getAll(); + + assertEquals((long)KEY_CNT, res.get(0).get(0)); + } + } + + /** + */ + @Test + public void testUpdateFromSubqueryLazy() throws Exception { + try (AutoCloseable checker = CheckLazyIndexing.checkLazy(true)) { + List> res; + + res = sql("UPDATE table1 " + + "SET (col0, col1) = " + + " (SELECT table2.col0, table2.col1 FROM table2 WHERE table2.id = table1.id)" + + "WHERE table1.id in (21, 31)").getAll(); + + assertEquals(2L, res.get(0).get(0)); + + res = sql("UPDATE table1 " + + "SET (col0, col1) = " + + " (SELECT table2.col0, table2.col1 FROM table2 WHERE table2.id = table1.id) " + + "WHERE exists (select * from table2 where table2.id = table1.id) " + + "AND table1.id in (21, 31)").getAll(); + + assertEquals(2L, res.get(0).get(0)); + } + } + + /** + */ + @Test + public void testUpdateValueField() throws Exception { + sql("CREATE TABLE TEST2 (id INT PRIMARY KEY, val INT) " + + "WITH\"WRAP_VALUE=false\""); + + sql("INSERT INTO TEST2 VALUES (0, 0), (1, 1), (2, 2)"); + + try (AutoCloseable checker = CheckLazyIndexing.checkLazy(false)) { + // 'val' field is the alias for _val. There is index for _val. + List> res = sql("UPDATE TEST2 SET _val = _val + 1 WHERE val >=0").getAll(); + + assertEquals(3L, res.get(0).get(0)); + } + } + + /** + * @param sql SQL query. + * @param args Query parameters. + * @return Results cursor. + */ + private FieldsQueryCursor> sql(String sql, Object... args) { + return sql(grid(0), sql, args); + } + + /** + * @param ign Node. + * @param sql SQL query. + * @param args Query parameters. + * @return Results cursor. + */ + private FieldsQueryCursor> sql(IgniteEx ign, String sql, Object... 
args) { + return ign.context().query().querySqlFields(new SqlFieldsQuery(sql) + .setLazy(true) + .setSchema("TEST") + .setPageSize(1) + .setArgs(args), false); + } + + /** */ + private static class CheckLazyIndexing extends IgniteH2Indexing { + /** */ + private static Boolean expectedLazy; + + /** */ + private static int qryCnt; + + /** {@inheritDoc} */ + @Override public ResultSet executeSqlQueryWithTimer(PreparedStatement stmt, H2PooledConnection conn, String sql, + int timeoutMillis, @Nullable GridQueryCancel cancel, Boolean dataPageScanEnabled, + H2QueryInfo qryInfo) throws IgniteCheckedException { + if (expectedLazy != null) { + assertEquals( + "Unexpected lazy flag [sql=" + sql + ']', + (boolean)expectedLazy, + H2Utils.session(conn.connection()).isLazyQueryExecution() + ); + } + + qryCnt++; + + return super.executeSqlQueryWithTimer(stmt, conn, sql, timeoutMillis, cancel, dataPageScanEnabled, qryInfo); + } + + /** */ + public static AutoCloseable checkLazy(boolean expLazy) { + expectedLazy = expLazy; + + return () -> { + assertTrue("Lazy checker doesn't work properly", CheckLazyIndexing.qryCnt > 0); + + expectedLazy = null; + qryCnt = 0; + }; + } + } +} diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/SqlSystemViewsSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/SqlSystemViewsSelfTest.java index 5167651b66bd9f..a037e19a040583 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/SqlSystemViewsSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/SqlSystemViewsSelfTest.java @@ -280,48 +280,46 @@ public void testIndexesView() throws Exception { //ToDo: As of now we can see duplicates columns within index due to https://issues.apache.org/jira/browse/IGNITE-11125 - String[][] expectedResults = { - {"-825022849", "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "AFFINITY_KEY", "BTREE", "\"ID2\" ASC, \"ID1\" ASC", 
"false", "false", "10"}, - {"-825022849", "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "_key_PK", "BTREE", "\"ID1\" ASC, \"ID2\" ASC", "true", "true", "10"}, - {"-825022849", "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "_key_PK__SCAN_", "SCAN", "null", "false", "false", "0"}, - {"-825022849", "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "_key_PK_hash", "HASH", "\"ID1\" ASC, \"ID2\" ASC, \"ID2\" ASC", "true", "true", "0"}, - - {"707660652", "SQL_PUBLIC_CACHE_SQL", "PUBLIC", "CACHE_SQL", "IDX_2", "BTREE", "\"ID\" DESC, \"ID\" ASC", "false", "false", "13"}, - {"707660652", "SQL_PUBLIC_CACHE_SQL", "PUBLIC", "CACHE_SQL", "IDX_2_proxy", "BTREE", "\"ID\" DESC, \"ID\" ASC", "false", "false", "0"}, - {"707660652", "SQL_PUBLIC_CACHE_SQL", "PUBLIC", "CACHE_SQL", "_key_PK", "BTREE", "\"ID\" ASC", "true", "true", "5"}, - {"707660652", "SQL_PUBLIC_CACHE_SQL", "PUBLIC", "CACHE_SQL", "_key_PK__SCAN_", "SCAN", "null", "false", "false", "0"}, - {"707660652", "SQL_PUBLIC_CACHE_SQL", "PUBLIC", "CACHE_SQL", "_key_PK_hash", "HASH", "\"ID\" ASC", "true", "true", "0"}, - {"707660652", "SQL_PUBLIC_CACHE_SQL", "PUBLIC", "CACHE_SQL", "_key_PK_proxy", "BTREE", "\"ID\" ASC", "false", "false", "0"}, - - {"1374144180", "SQL_PUBLIC_DFLT_AFF_CACHE", "PUBLIC", "DFLT_AFF_CACHE", "AFFINITY_KEY", "BTREE", "\"ID1\" ASC, \"ID2\" ASC", "false", "false", "10"}, - {"1374144180", "SQL_PUBLIC_DFLT_AFF_CACHE", "PUBLIC", "DFLT_AFF_CACHE", "IDX_AFF_1", "BTREE", "\"ID2\" DESC, \"ID1\" ASC, \"MY_VAL\" DESC", "false", "false", "10"}, - {"1374144180", "SQL_PUBLIC_DFLT_AFF_CACHE", "PUBLIC", "DFLT_AFF_CACHE", "_key_PK", "BTREE", "\"ID1\" ASC, \"ID2\" ASC", "true", "true", "10"}, - {"1374144180", "SQL_PUBLIC_DFLT_AFF_CACHE", "PUBLIC", "DFLT_AFF_CACHE", "_key_PK__SCAN_", "SCAN", "null", "false", "false", "0"}, - {"1374144180", "SQL_PUBLIC_DFLT_AFF_CACHE", "PUBLIC", "DFLT_AFF_CACHE", "_key_PK_hash", "HASH", "\"ID1\" ASC, \"ID2\" ASC, \"ID1\" ASC", "true", "true", "0"}, - - {"1102275506", "SQL_PUBLIC_DFLT_CACHE", 
"PUBLIC", "DFLT_CACHE", "IDX_1", "BTREE", "\"ID2\" DESC, \"ID1\" ASC, \"MY_VAL\" DESC, \"ID1\" ASC, \"ID2\" ASC", "false", "false", "10"}, - {"1102275506", "SQL_PUBLIC_DFLT_CACHE", "PUBLIC", "DFLT_CACHE", "IDX_3", "BTREE", "\"MY_VAL\" ASC, \"ID1\" ASC, \"ID2\" ASC, \"ID1\" ASC, \"ID2\" ASC", "false", "false", "10"}, - {"1102275506", "SQL_PUBLIC_DFLT_CACHE", "PUBLIC", "DFLT_CACHE", "_key_PK", "BTREE", "\"ID1\" ASC, \"ID2\" ASC", "true", "true", "10"}, - {"1102275506", "SQL_PUBLIC_DFLT_CACHE", "PUBLIC", "DFLT_CACHE", "_key_PK__SCAN_", "SCAN", "null", "false", "false", "0"}, - {"1102275506", "SQL_PUBLIC_DFLT_CACHE", "PUBLIC", "DFLT_CACHE", "_key_PK_hash", "HASH", "\"ID1\" ASC, \"ID2\" ASC", "true", "true", "0"}, - - {"2584860", "TST1", "TST1", "VALUECLASS", "TST1_INDEX", "BTREE", "\"KEY\" ASC, \"_KEY\" ASC", "false", "false", "10"}, - {"2584860", "TST1", "TST1", "VALUECLASS", "TST1_INDEX_proxy", "BTREE", "\"_KEY\" ASC, \"KEY\" ASC", "false", "false", "0"}, - {"2584860", "TST1", "TST1", "VALUECLASS", "_key_PK", "BTREE", "\"_KEY\" ASC", "true", "true", "5"}, - {"2584860", "TST1", "TST1", "VALUECLASS", "_key_PK__SCAN_", "SCAN", "null", "false", "false", "0"}, - {"2584860", "TST1", "TST1", "VALUECLASS", "_key_PK_hash", "HASH", "\"_KEY\" ASC", "true", "true", "0"}, - {"2584860", "TST1", "TST1", "VALUECLASS", "_key_PK_proxy", "BTREE", "\"KEY\" ASC", "false", "false", "0"} + Object[][] expectedResults = { + {-825022849, "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "AFFINITY_KEY", "BTREE", "\"ID2\" ASC, \"ID1\" ASC", false, false, 10}, + {-825022849, "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "__SCAN_", "SCAN", null, false, false, null}, + {-825022849, "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "_key_PK", "BTREE", "\"ID1\" ASC, \"ID2\" ASC", true, true, 10}, + {-825022849, "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "_key_PK_hash", "HASH", "\"ID1\" ASC, \"ID2\" ASC, \"ID2\" ASC", false, true, null}, + + {707660652, "SQL_PUBLIC_CACHE_SQL", "PUBLIC", "CACHE_SQL", 
"IDX_2", "BTREE", "\"ID\" DESC, \"ID\" ASC", false, false, 13}, + {707660652, "SQL_PUBLIC_CACHE_SQL", "PUBLIC", "CACHE_SQL", "__SCAN_", "SCAN", null, false, false, null}, + {707660652, "SQL_PUBLIC_CACHE_SQL", "PUBLIC", "CACHE_SQL", "_key_PK", "BTREE", "\"ID\" ASC", true, true, 5}, + {707660652, "SQL_PUBLIC_CACHE_SQL", "PUBLIC", "CACHE_SQL", "_key_PK_hash", "HASH", "\"ID\" ASC", false, true, null}, + + {1374144180, "SQL_PUBLIC_DFLT_AFF_CACHE", "PUBLIC", "DFLT_AFF_CACHE", "AFFINITY_KEY", "BTREE", "\"ID1\" ASC, \"ID2\" ASC", false, false, 10}, + {1374144180, "SQL_PUBLIC_DFLT_AFF_CACHE", "PUBLIC", "DFLT_AFF_CACHE", "IDX_AFF_1", "BTREE", "\"ID2\" DESC, \"ID1\" ASC, \"MY_VAL\" DESC", false, false, 10}, + {1374144180, "SQL_PUBLIC_DFLT_AFF_CACHE", "PUBLIC", "DFLT_AFF_CACHE", "__SCAN_", "SCAN", null, false, false, null}, + {1374144180, "SQL_PUBLIC_DFLT_AFF_CACHE", "PUBLIC", "DFLT_AFF_CACHE", "_key_PK", "BTREE", "\"ID1\" ASC, \"ID2\" ASC", true, true, 10}, + {1374144180, "SQL_PUBLIC_DFLT_AFF_CACHE", "PUBLIC", "DFLT_AFF_CACHE", "_key_PK_hash", "HASH", "\"ID1\" ASC, \"ID2\" ASC, \"ID1\" ASC", false, true, null}, + + {1102275506, "SQL_PUBLIC_DFLT_CACHE", "PUBLIC", "DFLT_CACHE", "IDX_1", "BTREE", "\"ID2\" DESC, \"ID1\" ASC, \"MY_VAL\" DESC, \"ID1\" ASC, \"ID2\" ASC", false, false, 10}, + {1102275506, "SQL_PUBLIC_DFLT_CACHE", "PUBLIC", "DFLT_CACHE", "IDX_3", "BTREE", "\"MY_VAL\" ASC, \"ID1\" ASC, \"ID2\" ASC, \"ID1\" ASC, \"ID2\" ASC", false, false, 10}, + {1102275506, "SQL_PUBLIC_DFLT_CACHE", "PUBLIC", "DFLT_CACHE", "__SCAN_", "SCAN", null, false, false, null}, + {1102275506, "SQL_PUBLIC_DFLT_CACHE", "PUBLIC", "DFLT_CACHE", "_key_PK", "BTREE", "\"ID1\" ASC, \"ID2\" ASC", true, true, 10}, + {1102275506, "SQL_PUBLIC_DFLT_CACHE", "PUBLIC", "DFLT_CACHE", "_key_PK_hash", "HASH", "\"ID1\" ASC, \"ID2\" ASC", false, true, null}, + + {2584860, "TST1", "TST1", "VALUECLASS", "TST1_INDEX", "BTREE", "\"KEY\" ASC, \"_KEY\" ASC", false, false, 10}, + {2584860, "TST1", "TST1", "VALUECLASS", 
"__SCAN_", "SCAN", null, false, false, null}, + {2584860, "TST1", "TST1", "VALUECLASS", "_key_PK", "BTREE", "\"_KEY\" ASC", true, true, 5}, + {2584860, "TST1", "TST1", "VALUECLASS", "_key_PK_hash", "HASH", "\"_KEY\" ASC", false, true, null}, }; + assertEquals(expectedResults.length, srvNodeIndexes.size()); + for (int i = 0; i < srvNodeIndexes.size(); i++) { List resRow = srvNodeIndexes.get(i); - String[] expRow = expectedResults[i]; + Object[] expRow = expectedResults[i]; assertEquals(expRow.length, resRow.size()); for (int j = 0; j < expRow.length; j++) - assertEquals(Integer.toString(i), expRow[j], String.valueOf(resRow.get(j))); + assertEquals(expRow[j], resRow.get(j)); } } @@ -1067,10 +1065,12 @@ public void testTablesView() throws Exception { "TABLE_NAME = 'CACHE_SQL'"); List expRow = asList( - "CACHE_SQL", // TABLE_NAME - "PUBLIC", // SCHEMA_NAME - "cache_sql", // CACHE_NAME + cacheSqlId, // CACHE_GROUP_ID + "cache_sql", // CACHE_GROUP_NAME cacheSqlId, // CACHE_ID + "cache_sql", // CACHE_NAME + "PUBLIC", // SCHEMA_NAME + "CACHE_SQL", // TABLE_NAME null, // AFFINITY_KEY_COLUMN "ID", // KEY_ALIAS null, // VALUE_ALIAS @@ -1089,10 +1089,12 @@ public void testTablesView() throws Exception { List allExpRows = asList( expRow, asList( - "DFLT_CACHE", // TABLE_NAME - "PUBLIC", // SCHEMA_NAME - "SQL_PUBLIC_DFLT_CACHE", // CACHE_NAME + ddlTabId, // CACHE_GROUP_ID + "SQL_PUBLIC_DFLT_CACHE", // CACHE_GROUP_NAME ddlTabId, // CACHE_ID + "SQL_PUBLIC_DFLT_CACHE", // CACHE_NAME + "PUBLIC", // SCHEMA_NAME + "DFLT_CACHE", // TABLE_NAME "ID2", // AFFINITY_KEY_COLUMN null, // KEY_ALIAS "MY_VAL", // VALUE_ALIAS diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite.java index 09a2abf0c3f19b..ab6d67ca5c11fc 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite.java +++ 
b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite.java @@ -168,6 +168,7 @@ import org.apache.ignite.internal.processors.cache.index.OptimizedMarshallerIndexNameTest; import org.apache.ignite.internal.processors.cache.index.QueryEntityValidationSelfTest; import org.apache.ignite.internal.processors.cache.index.SchemaExchangeSelfTest; +import org.apache.ignite.internal.processors.cache.index.SqlPartitionEvictionTest; import org.apache.ignite.internal.processors.cache.index.SqlTransactionCommandsWithMvccDisabledSelfTest; import org.apache.ignite.internal.processors.cache.index.StopNodeOnRebuildIndexFailureTest; import org.apache.ignite.internal.processors.cache.local.IgniteCacheLocalAtomicQuerySelfTest; @@ -183,6 +184,8 @@ import org.apache.ignite.internal.processors.cache.query.IndexingSpiQuerySelfTest; import org.apache.ignite.internal.processors.cache.query.IndexingSpiQueryTxSelfTest; import org.apache.ignite.internal.processors.cache.query.IndexingSpiQueryWithH2IndexingSelfTest; +import org.apache.ignite.internal.processors.cache.query.ScanQueryConcurrentSqlUpdatesTest; +import org.apache.ignite.internal.processors.cache.query.ScanQueryConcurrentUpdatesTest; import org.apache.ignite.internal.processors.cache.transaction.DmlInsideTransactionTest; import org.apache.ignite.internal.processors.client.ClientConnectorConfigurationValidationSelfTest; import org.apache.ignite.internal.processors.database.baseline.IgniteStableBaselineBinObjFieldsQuerySelfTest; @@ -536,6 +539,8 @@ IgniteCheckClusterStateBeforeExecuteQueryTest.class, OptimizedMarshallerIndexNameTest.class, SqlSystemViewsSelfTest.class, + ScanQueryConcurrentUpdatesTest.class, + ScanQueryConcurrentSqlUpdatesTest.class, GridIndexRebuildSelfTest.class, GridIndexRebuildTest.class, @@ -575,6 +580,9 @@ // Partition loss. 
IndexingCachePartitionLossPolicySelfTest.class, + // Partitions eviction + SqlPartitionEvictionTest.class, + // GROUP_CONCAT IgniteSqlGroupConcatCollocatedTest.class, IgniteSqlGroupConcatNotCollocatedTest.class, diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite2.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite2.java index 2022fce59e26d4..6676e470bcd54e 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite2.java +++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite2.java @@ -63,6 +63,7 @@ import org.apache.ignite.internal.processors.query.IgniteCacheGroupsSqlSegmentedIndexMultiNodeSelfTest; import org.apache.ignite.internal.processors.query.IgniteCacheGroupsSqlSegmentedIndexSelfTest; import org.apache.ignite.internal.processors.query.IgniteSqlCreateTableTemplateTest; +import org.apache.ignite.internal.processors.query.LazyOnDmlTest; import org.apache.ignite.internal.processors.query.LocalQueryLazyTest; import org.apache.ignite.internal.processors.query.LongRunningQueryTest; import org.apache.ignite.internal.processors.query.SqlIndexConsistencyAfterInterruptAtomicCacheOperationTest; @@ -92,6 +93,8 @@ */ @RunWith(Suite.class) @Suite.SuiteClasses({ + LazyOnDmlTest.class, + DefaultQueryTimeoutTestSuite.class, CreateIndexOnInvalidDataTypeTest.class, diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingTestSuite.java index c4b8d0f42015f5..d018457b3abded 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingTestSuite.java +++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingTestSuite.java @@ -17,9 +17,11 @@ package org.apache.ignite.testsuites; +import 
org.apache.ignite.internal.encryption.CacheGroupReencryptionTest; import org.apache.ignite.internal.processors.cache.IgnitePdsSingleNodeWithIndexingAndGroupPutGetPersistenceSelfTest; import org.apache.ignite.internal.processors.cache.IgnitePdsSingleNodeWithIndexingPutGetPersistenceTest; import org.apache.ignite.internal.processors.cache.index.ClientReconnectWithSqlTableConfiguredTest; +import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsIndexingDefragmentationTest; import org.apache.ignite.internal.processors.cache.persistence.db.IgniteTcBotInitNewPageTest; import org.apache.ignite.internal.processors.cache.persistence.db.IndexingMultithreadedLoadContinuousRestartTest; import org.apache.ignite.internal.processors.cache.persistence.db.LongDestroyDurableBackgroundTaskTest; @@ -33,6 +35,7 @@ import org.apache.ignite.internal.processors.database.IgniteTwoRegionsRebuildIndexTest; import org.apache.ignite.internal.processors.database.RebuildIndexTest; import org.apache.ignite.internal.processors.database.RebuildIndexWithHistoricalRebalanceTest; +import org.apache.ignite.internal.processors.database.RebuildIndexWithMVCCTest; import org.junit.runner.RunWith; import org.junit.runners.Suite; @@ -52,9 +55,12 @@ IndexingMultithreadedLoadContinuousRestartTest.class, LongDestroyDurableBackgroundTaskTest.class, RebuildIndexTest.class, + RebuildIndexWithMVCCTest.class, IgniteClusterSnapshotWithIndexesTest.class, ClientReconnectWithSqlTableConfiguredTest.class, - MultipleParallelCacheDeleteDeadlockTest.class + MultipleParallelCacheDeleteDeadlockTest.class, + CacheGroupReencryptionTest.class, + IgnitePdsIndexingDefragmentationTest.class }) public class IgnitePdsWithIndexingTestSuite { } diff --git a/modules/ml/pom.xml b/modules/ml/pom.xml index 338d2542e82698..ad9f8dcf0945e9 100644 --- a/modules/ml/pom.xml +++ b/modules/ml/pom.xml @@ -160,6 +160,11 @@ slf4j-api 1.7.7 + + com.fasterxml.jackson.core + jackson-databind + ${jackson.version} + diff --git 
a/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SparkModelParser.java b/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SparkModelParser.java index 8d349a197fa3f3..373da3af67faa0 100644 --- a/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SparkModelParser.java +++ b/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SparkModelParser.java @@ -25,7 +25,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.NavigableMap; import java.util.Scanner; import java.util.TreeMap; import org.apache.hadoop.conf.Configuration; @@ -34,7 +33,7 @@ import org.apache.ignite.ml.IgniteModel; import org.apache.ignite.ml.clustering.kmeans.KMeansModel; import org.apache.ignite.ml.composition.ModelsComposition; -import org.apache.ignite.ml.composition.boosting.GDBTrainer; +import org.apache.ignite.ml.composition.boosting.GDBModel; import org.apache.ignite.ml.composition.predictionsaggregator.MeanValuePredictionsAggregator; import org.apache.ignite.ml.composition.predictionsaggregator.OnMajorityPredictionsAggregator; import org.apache.ignite.ml.composition.predictionsaggregator.WeightedPredictionsAggregator; @@ -49,9 +48,7 @@ import org.apache.ignite.ml.regressions.linear.LinearRegressionModel; import org.apache.ignite.ml.regressions.logistic.LogisticRegressionModel; import org.apache.ignite.ml.svm.SVMLinearClassificationModel; -import org.apache.ignite.ml.tree.DecisionTreeConditionalNode; -import org.apache.ignite.ml.tree.DecisionTreeLeafNode; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.NodeData; import org.apache.parquet.column.page.PageReadStore; import org.apache.parquet.example.data.Group; import org.apache.parquet.example.data.simple.SimpleGroup; @@ -66,6 +63,8 @@ import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; +import static 
org.apache.ignite.ml.tree.NodeData.buildDecisionTreeModel; + /** Parser of Spark models. */ public class SparkModelParser { /** @@ -497,7 +496,7 @@ private static Model loadGBTClassifierModel(String pathToMdl, String pathToMdlMe final List> models = new ArrayList<>(); nodesByTreeId.forEach((key, nodes) -> models.add(buildDecisionTreeModel(nodes))); - return new GDBTrainer.GDBModel(models, new WeightedPredictionsAggregator(treeWeights), lbMapper); + return new GDBModel(models, new WeightedPredictionsAggregator(treeWeights), lbMapper); } catch (IOException e) { String msg = "Error reading parquet file: " + e.getMessage(); @@ -604,42 +603,13 @@ private static Model loadDecisionTreeModel(String pathToMdl, LearningEnvironment return null; } - /** - * Builds the DT model by the given sorted map of nodes. - * - * @param nodes The sorted map of nodes. - */ - private static DecisionTreeNode buildDecisionTreeModel(Map nodes) { - DecisionTreeNode mdl = null; - if (!nodes.isEmpty()) { - NodeData rootNodeData = (NodeData)((NavigableMap)nodes).firstEntry().getValue(); - mdl = buildTree(nodes, rootNodeData); - return mdl; - } - return mdl; - } - - /** - * Build tree or sub-tree based on indices and nodes sorted map as a dictionary. - * - * @param nodes The sorted map of nodes. - * @param rootNodeData Root node data. - */ - @NotNull private static DecisionTreeNode buildTree(Map nodes, - NodeData rootNodeData) { - return rootNodeData.isLeafNode ? new DecisionTreeLeafNode(rootNodeData.prediction) : new DecisionTreeConditionalNode(rootNodeData.featureIdx, - rootNodeData.threshold, - buildTree(nodes, nodes.get(rootNodeData.rightChildId)), - buildTree(nodes, nodes.get(rootNodeData.leftChildId)), - null); - } /** * Form the node data according data in parquet row. * * @param g The given group presenting the node data from Spark DT model. 
*/ - @NotNull private static SparkModelParser.NodeData extractNodeDataFromParquetRow(SimpleGroup g) { + @NotNull private static NodeData extractNodeDataFromParquetRow(SimpleGroup g) { NodeData nodeData = new NodeData(); nodeData.id = g.getInteger(0, 0); @@ -888,43 +858,4 @@ private static Vector readCoefficients(SimpleGroup g) { } return coefficients; } - - /** - * Presenting data from one parquet row filled with NodeData in Spark DT model. - */ - private static class NodeData { - /** Id. */ - int id; - - /** Prediction. */ - double prediction; - - /** Left child id. */ - int leftChildId; - - /** Right child id. */ - int rightChildId; - - /** Threshold. */ - double threshold; - - /** Feature index. */ - int featureIdx; - - /** Is leaf node. */ - boolean isLeafNode; - - /** {@inheritDoc} */ - @Override public String toString() { - return "NodeData{" + - "id=" + id + - ", prediction=" + prediction + - ", leftChildId=" + leftChildId + - ", rightChildId=" + rightChildId + - ", threshold=" + threshold + - ", featureIdx=" + featureIdx + - ", isLeafNode=" + isLeafNode + - '}'; - } - } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/gmm/GmmModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/gmm/GmmModel.java index fda08b399b588e..2546d0c70cbfe7 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/gmm/GmmModel.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/gmm/GmmModel.java @@ -19,6 +19,7 @@ import java.util.Collections; import java.util.List; +import com.fasterxml.jackson.annotation.JsonIgnore; import org.apache.ignite.ml.IgniteModel; import org.apache.ignite.ml.environment.deploy.DeployableObject; import org.apache.ignite.ml.math.primitives.vector.Vector; @@ -47,12 +48,17 @@ public GmmModel(Vector componentProbs, List di super(componentProbs, distributions); } + /** */ + public GmmModel() { + } + /** {@inheritDoc} */ @Override public Double predict(Vector input) { return 
(double)likelihood(input).maxElement().index(); } /** {@inheritDoc} */ + @JsonIgnore @Override public List getDependencies() { return Collections.emptyList(); } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/ClusterizationModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/ClusterizationModel.java index 42b0823dc39ccd..4fba73936f8afd 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/ClusterizationModel.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/ClusterizationModel.java @@ -22,8 +22,8 @@ /** Base interface for all clusterization models. */ public interface ClusterizationModel extends IgniteModel { /** Gets the clusters count. */ - public int getAmountOfClusters(); + public int amountOfClusters(); /** Get cluster centers. */ - public P[] getCenters(); + public P[] centers(); } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansModel.java index f1f677f63a861b..de473c914fb705 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansModel.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansModel.java @@ -17,28 +17,41 @@ package org.apache.ignite.ml.clustering.kmeans; +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.UUID; import java.util.stream.Collectors; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializationFeature; import org.apache.ignite.ml.Exportable; import org.apache.ignite.ml.Exporter; import org.apache.ignite.ml.environment.deploy.DeployableObject; +import org.apache.ignite.ml.inference.json.JSONModel; +import 
org.apache.ignite.ml.inference.json.JSONWritable; import org.apache.ignite.ml.math.Tracer; import org.apache.ignite.ml.math.distances.DistanceMeasure; +import org.apache.ignite.ml.math.distances.EuclideanDistance; import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.math.primitives.vector.VectorUtils; +import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector; import org.apache.ignite.ml.util.ModelTrace; /** * This class encapsulates result of clusterization by KMeans algorithm. */ public final class KMeansModel implements ClusterizationModel, Exportable, - DeployableObject { + JSONWritable, DeployableObject { /** Centers of clusters. */ - private final Vector[] centers; + private Vector[] centers; /** Distance measure. */ - private final DistanceMeasure distanceMeasure; + private DistanceMeasure distanceMeasure = new EuclideanDistance(); /** * Construct KMeans model with given centers and distanceMeasure measure. @@ -51,18 +64,45 @@ public KMeansModel(Vector[] centers, DistanceMeasure distanceMeasure) { this.distanceMeasure = distanceMeasure; } + /** {@inheritDoc} */ + private KMeansModel() { + + } + /** Distance measure. */ public DistanceMeasure distanceMeasure() { return distanceMeasure; } /** {@inheritDoc} */ - @Override public int getAmountOfClusters() { + @Override public int amountOfClusters() { return centers.length; } + /** + * Set up the centroids. + * + * @param centers The parameter value. + * @return Model with new centers parameter value. + */ + public KMeansModel withCentroids(Vector[] centers) { + this.centers = centers; + return this; + } + + /** + * Set up the distance measure. + * + * @param distanceMeasure The parameter value. + * @return Model with new distance measure parameter value. 
+ */ + public KMeansModel withDistanceMeasure(DistanceMeasure distanceMeasure) { + this.distanceMeasure = distanceMeasure; + return this; + } + /** {@inheritDoc} */ - @Override public Vector[] getCenters() { + @Override public Vector[] centers() { return Arrays.copyOf(centers, centers.length); } @@ -119,12 +159,11 @@ public DistanceMeasure distanceMeasure() { /** {@inheritDoc} */ @Override public String toString(boolean pretty) { - String measureName = distanceMeasure.getClass().getSimpleName(); List centersList = Arrays.stream(centers).map(x -> Tracer.asAscii(x, "%.4f", false)) .collect(Collectors.toList()); return ModelTrace.builder("KMeansModel", pretty) - .addField("distance measure", measureName) + .addField("distance measure", distanceMeasure.toString()) .addField("centroids", centersList) .toString(); } @@ -133,4 +172,76 @@ public DistanceMeasure distanceMeasure() { @Override public List getDependencies() { return Collections.singletonList(distanceMeasure); } + + /** Loads KMeansModel from JSON file. 
*/ + public static KMeansModel fromJSON(Path path) { + ObjectMapper mapper = new ObjectMapper().configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); + + KMeansJSONExportModel exportModel; + try { + exportModel = mapper + .readValue(new File(path.toAbsolutePath().toString()), KMeansJSONExportModel.class); + + return exportModel.convert(); + } catch (IOException e) { + e.printStackTrace(); + } + return null; + } + + // TODO: https://github.com/apache/spark/blob/master/mllib/src/main/scala/org/apache/spark/mllib/pmml/export/KMeansPMMLModelExport.scala + /** {@inheritDoc} */ + @Override public void toJSON(Path path) { + ObjectMapper mapper = new ObjectMapper(); + + try { + KMeansJSONExportModel exportModel = new KMeansJSONExportModel(System.currentTimeMillis(), "ann_" + UUID.randomUUID().toString(), KMeansModel.class.getSimpleName()); + List listOfCenters = new ArrayList<>(); + for (int i = 0; i < centers.length; i++) { + listOfCenters.add(centers[i].asArray()); + } + + exportModel.mdlCenters = listOfCenters; + exportModel.distanceMeasure = distanceMeasure; + + File file = new File(path.toAbsolutePath().toString()); + mapper.writeValue(file, exportModel); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** */ + public static class KMeansJSONExportModel extends JSONModel { + /** Centers of clusters. */ + public List mdlCenters; + + /** Distance measure. 
*/ + public DistanceMeasure distanceMeasure; + + /** */ + public KMeansJSONExportModel(Long timestamp, String uid, String modelClass) { + super(timestamp, uid, modelClass); + } + + /** */ + @JsonCreator + public KMeansJSONExportModel() { + } + + /** {@inheritDoc} */ + @Override public KMeansModel convert() { + KMeansModel mdl = new KMeansModel(); + Vector[] centers = new DenseVector[mdlCenters.size()]; + for (int i = 0; i < mdlCenters.size(); i++) { + centers[i] = VectorUtils.of(mdlCenters.get(i)); + } + + DistanceMeasure distanceMeasure = this.distanceMeasure; + + mdl.withCentroids(centers); + mdl.withDistanceMeasure(distanceMeasure); + return mdl; + } + } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansTrainer.java index 05f41b505955e5..c36dd341c81f4b 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansTrainer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansTrainer.java @@ -21,6 +21,7 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.Random; import java.util.Set; @@ -101,7 +102,7 @@ public class KMeansTrainer extends SingleLabelDatasetTrainer { return getLastTrainedModelOrThrowEmptyDatasetException(mdl); centers = Optional.ofNullable(mdl) - .map(KMeansModel::getCenters) + .map(KMeansModel::centers) .orElseGet(() -> initClusterCentersRandomly(dataset, k)); boolean converged = false; @@ -114,13 +115,13 @@ public class KMeansTrainer extends SingleLabelDatasetTrainer { converged = true; - for (Integer ind : totalRes.sums.keySet()) { - Vector massCenter = totalRes.sums.get(ind).times(1.0 / totalRes.counts.get(ind)); + for (Map.Entry entry : totalRes.sums.entrySet()) { + Vector massCenter = entry.getValue().times(1.0 / totalRes.counts.get(entry.getKey())); - if (converged && 
distance.compute(massCenter, centers[ind]) > epsilon * epsilon) + if (converged && distance.compute(massCenter, centers[entry.getKey()]) > epsilon * epsilon) converged = false; - newCentroids[ind] = massCenter; + newCentroids[entry.getKey()] = massCenter; } iteration++; @@ -138,7 +139,7 @@ public class KMeansTrainer extends SingleLabelDatasetTrainer { /** {@inheritDoc} */ @Override public boolean isUpdateable(KMeansModel mdl) { - return mdl.getCenters().length == k && mdl.distanceMeasure().equals(distance); + return mdl.centers().length == k && mdl.distanceMeasure().equals(distance); } /** diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/ModelsComposition.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/ModelsComposition.java index 3942b9ee907d4e..190203c1fbe44d 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/composition/ModelsComposition.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/ModelsComposition.java @@ -19,6 +19,8 @@ import java.util.Collections; import java.util.List; + +import com.fasterxml.jackson.annotation.JsonIgnore; import org.apache.ignite.ml.Exportable; import org.apache.ignite.ml.Exporter; import org.apache.ignite.ml.IgniteModel; @@ -30,17 +32,17 @@ /** * Model consisting of several models and prediction aggregation strategy. */ -public class ModelsComposition implements IgniteModel, Exportable, +public class ModelsComposition> implements IgniteModel, Exportable, DeployableObject { /** * Predictions aggregator. */ - private final PredictionsAggregator predictionsAggregator; + protected PredictionsAggregator predictionsAggregator; /** * Models. */ - private final List> models; + protected List models; /** * Constructs a new instance of composition of models. @@ -48,11 +50,14 @@ public class ModelsComposition implements IgniteModel, Exportabl * @param models Basic models. * @param predictionsAggregator Predictions aggregator. 
*/ - public ModelsComposition(List> models, PredictionsAggregator predictionsAggregator) { + public ModelsComposition(List models, PredictionsAggregator predictionsAggregator) { this.predictionsAggregator = predictionsAggregator; this.models = Collections.unmodifiableList(models); } + public ModelsComposition() { + } + /** * Applies containing models to features and aggregate them to one prediction. * @@ -78,7 +83,7 @@ public PredictionsAggregator getPredictionsAggregator() { /** * Returns containing models. */ - public List> getModels() { + public List getModels() { return models; } @@ -102,6 +107,7 @@ public List> getModels() { } /** {@inheritDoc} */ + @JsonIgnore @Override public List getDependencies() { return Collections.singletonList(predictionsAggregator); } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/ModelsCompositionFormat.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/ModelsCompositionFormat.java index ba71afa14cd25f..c49638cc51e2d7 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/composition/ModelsCompositionFormat.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/ModelsCompositionFormat.java @@ -33,7 +33,7 @@ public class ModelsCompositionFormat implements Serializable { private static final long serialVersionUID = 9115341364082681837L; /** Models. */ - private List> models; + private List> models; /** Predictions aggregator. */ private PredictionsAggregator predictionsAggregator; @@ -44,13 +44,13 @@ public class ModelsCompositionFormat implements Serializable { * @param models Models. * @param predictionsAggregator Predictions aggregator. 
*/ - public ModelsCompositionFormat(List> models,PredictionsAggregator predictionsAggregator) { + public ModelsCompositionFormat(List> models, PredictionsAggregator predictionsAggregator) { this.models = models; this.predictionsAggregator = predictionsAggregator; } /** */ - public List> models() { + public List> models() { return models; } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBLearningStrategy.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBLearningStrategy.java index 44137f7da934f2..45b43181f258e4 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBLearningStrategy.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBLearningStrategy.java @@ -103,7 +103,7 @@ public List> learnModels(DatasetBuilder * @param Type of a value in {@code upstream} data. * @return Updated models list. */ - public List> update(GDBTrainer.GDBModel mdlToUpdate, + public List> update(GDBModel mdlToUpdate, DatasetBuilder datasetBuilder, Preprocessor preprocessor) { if (trainerEnvironment == null) throw new IllegalStateException("Learning environment builder is not set."); @@ -148,7 +148,7 @@ public List> update(GDBTrainer.GDBModel mdlTo * @param mdlToUpdate Model to update. * @return List of already learned models. 
*/ - @NotNull protected List> initLearningState(GDBTrainer.GDBModel mdlToUpdate) { + @NotNull protected List> initLearningState(GDBModel mdlToUpdate) { List> models = new ArrayList<>(); if (mdlToUpdate != null) { models.addAll(mdlToUpdate.getModels()); diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBModel.java new file mode 100644 index 00000000000000..35cb70e77c54be --- /dev/null +++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBModel.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.ml.composition.boosting; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import java.util.List; +import java.util.UUID; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectWriter; +import com.fasterxml.jackson.databind.SerializationFeature; +import org.apache.ignite.ml.IgniteModel; +import org.apache.ignite.ml.composition.ModelsComposition; +import org.apache.ignite.ml.composition.predictionsaggregator.WeightedPredictionsAggregator; +import org.apache.ignite.ml.inference.json.JSONModel; +import org.apache.ignite.ml.inference.json.JSONModelMixIn; +import org.apache.ignite.ml.inference.json.JSONWritable; +import org.apache.ignite.ml.inference.json.JacksonHelper; +import org.apache.ignite.ml.math.functions.IgniteFunction; +import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.tree.DecisionTreeModel; + +/** + * GDB model. + */ +public final class GDBModel extends ModelsComposition implements JSONWritable { + /** Serial version uid. */ + private static final long serialVersionUID = 3476661240155508004L; + + /** Internal to external lbl mapping. */ + @JsonIgnore private IgniteFunction internalToExternalLblMapping; + + /** + * Creates an instance of GDBModel. + * + * @param models Models. + * @param predictionsAggregator Predictions aggregator. + * @param internalToExternalLblMapping Internal to external lbl mapping. 
+ */ + public GDBModel(List> models, + WeightedPredictionsAggregator predictionsAggregator, + IgniteFunction internalToExternalLblMapping) { + + super((List) models, predictionsAggregator); + this.internalToExternalLblMapping = internalToExternalLblMapping; + } + + private GDBModel() { + } + + public GDBModel withLblMapping(IgniteFunction internalToExternalLblMapping) { + this.internalToExternalLblMapping = internalToExternalLblMapping; + return this; + } + + /** {@inheritDoc} */ + @Override public Double predict(Vector features) { + if (internalToExternalLblMapping == null) { + throw new IllegalArgumentException("The mapping should not be empty. Initialize it with apropriate function. "); + } else { + return internalToExternalLblMapping.apply(super.predict(features)); + } + } + + /** {@inheritDoc} */ + @Override public void toJSON(Path path) { + ObjectMapper mapper = new ObjectMapper().configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); + mapper.addMixIn(GDBModel.class, JSONModelMixIn.class); + + ObjectWriter writer = mapper + .writerFor(GDBModel.class) + .withAttribute("formatVersion", JSONModel.JSON_MODEL_FORMAT_VERSION) + .withAttribute("timestamp", System.currentTimeMillis()) + .withAttribute("uid", "dt_" + UUID.randomUUID().toString()) + .withAttribute("modelClass", GDBModel.class.getSimpleName()); + + try { + File file = new File(path.toAbsolutePath().toString()); + writer.writeValue(file, this); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** Loads RandomForestModel from JSON file. 
*/ + public static GDBModel fromJSON(Path path) { + ObjectMapper mapper = new ObjectMapper(); + mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + + GDBModel mdl; + try { + JacksonHelper.readAndValidateBasicJsonModelProperties(path, mapper, GDBModel.class.getSimpleName()); + mdl = mapper.readValue(new File(path.toAbsolutePath().toString()), GDBModel.class); + return mdl; + } catch (IOException e) { + e.printStackTrace(); + } + return null; + } +} diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBTrainer.java index ad35d809d48392..a36feec1a8d1b2 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBTrainer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBTrainer.java @@ -22,7 +22,6 @@ import java.util.List; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.ml.IgniteModel; -import org.apache.ignite.ml.composition.ModelsComposition; import org.apache.ignite.ml.composition.boosting.convergence.ConvergenceCheckerFactory; import org.apache.ignite.ml.composition.boosting.convergence.mean.MeanAbsValueConvergenceCheckerFactory; import org.apache.ignite.ml.composition.boosting.loss.Loss; @@ -34,7 +33,6 @@ import org.apache.ignite.ml.environment.LearningEnvironmentBuilder; import org.apache.ignite.ml.environment.logging.MLLogger; import org.apache.ignite.ml.knn.regression.KNNRegressionTrainer; -import org.apache.ignite.ml.math.functions.IgniteFunction; import org.apache.ignite.ml.math.primitives.vector.Vector; import org.apache.ignite.ml.preprocessing.Preprocessor; import org.apache.ignite.ml.regressions.linear.LinearRegressionLSQRTrainer; @@ -57,7 +55,7 @@ * * But in practice Decision Trees is most used regressors (see: {@link DecisionTreeRegressionTrainer}). 
*/ -public abstract class GDBTrainer extends DatasetTrainer { +public abstract class GDBTrainer extends DatasetTrainer { /** Gradient step. */ private final double gradientStep; @@ -87,13 +85,13 @@ public GDBTrainer(double gradStepSize, Integer cntOfIterations, Loss loss) { } /** {@inheritDoc} */ - @Override public ModelsComposition fitWithInitializedDeployingContext(DatasetBuilder datasetBuilder, + @Override public GDBModel fitWithInitializedDeployingContext(DatasetBuilder datasetBuilder, Preprocessor preprocessor) { return updateModel(null, datasetBuilder, preprocessor); } /** {@inheritDoc} */ - @Override protected ModelsComposition updateModel(ModelsComposition mdl, + @Override protected GDBModel updateModel(GDBModel mdl, DatasetBuilder datasetBuilder, Preprocessor preprocessor) { if (!learnLabels(datasetBuilder, preprocessor)) @@ -121,7 +119,7 @@ public GDBTrainer(double gradStepSize, Integer cntOfIterations, Loss loss) { List> models; if (mdl != null) - models = stgy.update((GDBModel) mdl, datasetBuilder, preprocessor); + models = stgy.update(mdl, datasetBuilder, preprocessor); else models = stgy.learnModels(datasetBuilder, preprocessor); @@ -136,7 +134,7 @@ public GDBTrainer(double gradStepSize, Integer cntOfIterations, Loss loss) { } /** {@inheritDoc} */ - @Override public boolean isUpdateable(ModelsComposition mdl) { + @Override public boolean isUpdateable(GDBModel mdl) { return mdl instanceof GDBModel; } @@ -239,35 +237,4 @@ public GDBTrainer withCheckConvergenceStgyFactory(ConvergenceCheckerFactory fact protected GDBLearningStrategy getLearningStrategy() { return new GDBLearningStrategy(); } - - /** - * GDB model. - */ - public static final class GDBModel extends ModelsComposition { - /** Serial version uid. */ - private static final long serialVersionUID = 3476661240155508004L; - - /** Internal to external lbl mapping. */ - private final IgniteFunction internalToExternalLblMapping; - - /** - * Creates an instance of GDBModel. 
- * - * @param models Models. - * @param predictionsAggregator Predictions aggregator. - * @param internalToExternalLblMapping Internal to external lbl mapping. - */ - public GDBModel(List> models, - WeightedPredictionsAggregator predictionsAggregator, - IgniteFunction internalToExternalLblMapping) { - - super(models, predictionsAggregator); - this.internalToExternalLblMapping = internalToExternalLblMapping; - } - - /** {@inheritDoc} */ - @Override public Double predict(Vector features) { - return internalToExternalLblMapping.apply(super.predict(features)); - } - } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/predictionsaggregator/PredictionsAggregator.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/predictionsaggregator/PredictionsAggregator.java index d996a2aeb6d842..1490b7c82dccc5 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/composition/predictionsaggregator/PredictionsAggregator.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/predictionsaggregator/PredictionsAggregator.java @@ -17,11 +17,20 @@ package org.apache.ignite.ml.composition.predictionsaggregator; +import com.fasterxml.jackson.annotation.JsonSubTypes; +import com.fasterxml.jackson.annotation.JsonTypeInfo; import org.apache.ignite.ml.math.functions.IgniteFunction; /** * Predictions aggregator interface. */ +@JsonTypeInfo( use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "type") +@JsonSubTypes( + { + @JsonSubTypes.Type(value = MeanValuePredictionsAggregator.class, name = "MeanValuePredictionsAggregator"), + @JsonSubTypes.Type(value = OnMajorityPredictionsAggregator.class, name = "OnMajorityPredictionsAggregator"), + @JsonSubTypes.Type(value = WeightedPredictionsAggregator.class, name = "WeightedPredictionsAggregator"), + }) public interface PredictionsAggregator extends IgniteFunction { /** * Represents aggregator as String. 
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/predictionsaggregator/WeightedPredictionsAggregator.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/predictionsaggregator/WeightedPredictionsAggregator.java index 555ff3c1eaed6c..257c635f79c3ed 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/composition/predictionsaggregator/WeightedPredictionsAggregator.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/predictionsaggregator/WeightedPredictionsAggregator.java @@ -25,10 +25,13 @@ */ public final class WeightedPredictionsAggregator implements PredictionsAggregator { /** Weights for predictions. */ - private final double[] weights; + private double[] weights; /** Bias. */ - private final double bias; + private double bias; + + public WeightedPredictionsAggregator() { + } /** * Constructs WeightedPredictionsAggregator instance. diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/ObjectHistogram.java b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/ObjectHistogram.java index a66cb48b4523c1..17c22ca1b26436 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/ObjectHistogram.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/ObjectHistogram.java @@ -60,9 +60,9 @@ public abstract class ObjectHistogram implements Histogram res = new TreeMap<>(); double accum = 0.0; - for (Integer bucket : hist.keySet()) { - accum += hist.get(bucket); - res.put(bucket, accum); + for (Map.Entry entry : hist.entrySet()) { + accum += entry.getValue(); + res.put(entry.getKey(), accum); } return res; @@ -71,7 +71,7 @@ public abstract class ObjectHistogram implements Histogram plus(ObjectHistogram other) { ObjectHistogram res = newInstance(); - addTo(this.hist, res.hist); + addTo(hist, res.hist); addTo(other.hist, res.hist); return res; } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/dataset/impl/cache/util/ComputeUtils.java 
b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/impl/cache/util/ComputeUtils.java index b53c27b6077f6c..5b50c9428f709b 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/dataset/impl/cache/util/ComputeUtils.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/impl/cache/util/ComputeUtils.java @@ -56,7 +56,14 @@ /** * Util class that provides common methods to perform computations on top of the Ignite Compute Grid. */ -public class ComputeUtils { +public final class ComputeUtils { + /** + * + */ + private ComputeUtils() { + // No-op. + } + /** Template of the key used to store partition {@code data} in local storage. */ private static final String DATA_STORAGE_KEY_TEMPLATE = "part_data_storage_%s"; @@ -110,11 +117,11 @@ public static Collection affinityCallWithRetries(Ignite ignite, Collectio } // Collects results. - for (int part : futures.keySet()) + for (Map.Entry> entry : futures.entrySet()) try { - R res = futures.get(part).get(); + R res = entry.getValue().get(); results.add(res); - completionFlags.set(part); + completionFlags.set(entry.getKey()); } catch (IgniteException ignore) { } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/inference/IgniteModelStorageUtil.java b/modules/ml/src/main/java/org/apache/ignite/ml/inference/IgniteModelStorageUtil.java index e751ecc71aca81..238d90074ccf7a 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/inference/IgniteModelStorageUtil.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/inference/IgniteModelStorageUtil.java @@ -42,7 +42,14 @@ /** * Utils class that helps to operate with model storage and Ignite models. */ -public class IgniteModelStorageUtil { +public final class IgniteModelStorageUtil { + /** + * + */ + private IgniteModelStorageUtil(){ + // No-op. + } + /** Folder to be used to store Ignite models. 
*/ private static final String IGNITE_MDL_FOLDER = "/ignite_models"; diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JSONModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JSONModel.java new file mode 100644 index 00000000000000..ac733988c32c40 --- /dev/null +++ b/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JSONModel.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.ml.inference.json; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonIgnore; +import org.apache.ignite.ml.IgniteModel; + +/** Basic class for all non-trivial model data serialization. */ +public abstract class JSONModel { + /** Basic Ignite version. */ + @JsonIgnore + public static final String JSON_MODEL_FORMAT_VERSION = "1"; + + /** Ignite version. */ + public String formatVersion = JSON_MODEL_FORMAT_VERSION; + + /** Timestamp in ms from System.currentTimeMillis() method. */ + public Long timestamp; + + /** Unique string indetifier. */ + public String uid; + + /** String description of model class. */ + public String modelClass; + + /** Convert JSON string to IgniteModel object. 
*/ + public abstract IgniteModel convert(); + + /** */ + public JSONModel(Long timestamp, String uid, String modelClass) { + this.timestamp = timestamp; + this.uid = uid; + this.modelClass = modelClass; + } + + @JsonCreator + public JSONModel() { + } +} diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JSONModelMixIn.java b/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JSONModelMixIn.java new file mode 100644 index 00000000000000..843b5942f7be96 --- /dev/null +++ b/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JSONModelMixIn.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.ml.inference.json; + +import com.fasterxml.jackson.databind.annotation.JsonAppend; + +/** Just a mixin class to add a few configuration properties. 
*/ +@JsonAppend( + attrs = { + @JsonAppend.Attr(value = "formatVersion"), + @JsonAppend.Attr(value = "timestamp"), + @JsonAppend.Attr(value = "uid"), + @JsonAppend.Attr(value = "modelClass") + } +) +public class JSONModelMixIn { } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JSONWritable.java b/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JSONWritable.java new file mode 100644 index 00000000000000..fcc30379dded99 --- /dev/null +++ b/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JSONWritable.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.ml.inference.json; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializationFeature; + +public interface JSONWritable { + default void toJSON(Path path) { + ObjectMapper mapper = new ObjectMapper().configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); + + try { + File file = new File(path.toAbsolutePath().toString()); + mapper.writeValue(file, this); + } catch (IOException e) { + e.printStackTrace(); + } + } +} diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JacksonHelper.java b/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JacksonHelper.java new file mode 100644 index 00000000000000..654ade44d774dd --- /dev/null +++ b/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JacksonHelper.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.ml.inference.json; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import java.util.LinkedHashMap; +import java.util.Map; +import com.fasterxml.jackson.databind.ObjectMapper; + +public class JacksonHelper { + public static void readAndValidateBasicJsonModelProperties(Path path, ObjectMapper mapper, String className) throws IOException { + Map jsonAsMap = mapper.readValue(new File(path.toAbsolutePath().toString()), LinkedHashMap.class); + String formatVersion = jsonAsMap.get("formatVersion").toString(); + Long timestamp = (Long) jsonAsMap.get("timestamp"); + String uid = jsonAsMap.get("uid").toString(); + String modelClass = jsonAsMap.get("modelClass").toString(); + + if (!modelClass.equals(className)) { + throw new IllegalArgumentException("You are trying to load " + modelClass + " model to " + className); + } + } +} diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/knn/NNClassificationModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/knn/NNClassificationModel.java index 2ad0c46aedd572..922630ea1615ab 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/knn/NNClassificationModel.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/knn/NNClassificationModel.java @@ -180,6 +180,17 @@ public DistanceMeasure getDistanceMeasure() { return distanceMeasure; } + /** */ + public int getK() { + return k; + } + + /** */ + public boolean isWeighted() { + return weighted; + } + + /** {@inheritDoc} */ @Override public int hashCode() { int res = 1; diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNClassificationModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNClassificationModel.java index 2c820b7423af4c..60159002598090 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNClassificationModel.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNClassificationModel.java @@ -17,33 +17,47 @@ package 
org.apache.ignite.ml.knn.ann; +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeMap; +import java.util.UUID; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializationFeature; import org.apache.ignite.ml.Exporter; +import org.apache.ignite.ml.environment.deploy.DeployableObject; +import org.apache.ignite.ml.inference.json.JSONModel; +import org.apache.ignite.ml.inference.json.JSONWritable; import org.apache.ignite.ml.knn.NNClassificationModel; +import org.apache.ignite.ml.math.distances.DistanceMeasure; import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.math.primitives.vector.VectorUtils; import org.apache.ignite.ml.structures.LabeledVector; import org.apache.ignite.ml.structures.LabeledVectorSet; import org.apache.ignite.ml.util.ModelTrace; -import org.jetbrains.annotations.NotNull; /** * ANN model to predict labels in multi-class classification task. */ -public final class ANNClassificationModel extends NNClassificationModel { +public final class ANNClassificationModel extends NNClassificationModel implements JSONWritable, DeployableObject { /** */ private static final long serialVersionUID = -127312378991350345L; /** The labeled set of candidates. */ - private final LabeledVectorSet candidates; + private LabeledVectorSet candidates; /** Centroid statistics. */ - private final ANNClassificationTrainer.CentroidStat centroindsStat; + private ANNClassificationTrainer.CentroidStat centroindsStat; /** * Build the model based on a candidates set. 
@@ -56,6 +70,10 @@ public ANNClassificationModel(LabeledVectorSet centers, this.centroindsStat = centroindsStat; } + /** */ + private ANNClassificationModel() { + } + /** */ public LabeledVectorSet getCandidates() { return candidates; @@ -94,7 +112,7 @@ private List findKNearestNeighbors(Vector v) { * @param distanceIdxPairs The distance map. * @return K-nearest neighbors. */ - @NotNull private LabeledVector[] getKClosestVectors( + private LabeledVector[] getKClosestVectors( TreeMap> distanceIdxPairs) { LabeledVector[] res; @@ -129,7 +147,7 @@ private List findKNearestNeighbors(Vector v) { * @return Key - distanceMeasure from given features before features with idx stored in value. Value is presented * with Set because there can be a few vectors with the same distance. */ - @NotNull private TreeMap> getDistances(Vector v) { + private TreeMap> getDistances(Vector v) { TreeMap> distanceIdxPairs = new TreeMap<>(); for (int i = 0; i < candidates.rowSize(); i++) { @@ -203,4 +221,104 @@ private double classify(List neighbors, Vector v, boolean weighte .addField("amount of candidates", String.valueOf(candidates.rowSize())) .toString(); } + + /** {@inheritDoc} */ + @JsonIgnore + @Override public List getDependencies() { + return Collections.emptyList(); + } + + /** Loads ANNClassificationModel from JSON file. 
*/ + public static ANNClassificationModel fromJSON(Path path) { + ObjectMapper mapper = new ObjectMapper().configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); + + ANNJSONExportModel exportModel; + try { + exportModel = mapper + .readValue(new File(path.toAbsolutePath().toString()), ANNJSONExportModel.class); + + return exportModel.convert(); + } catch (IOException e) { + e.printStackTrace(); + } + return null; + } + + /** {@inheritDoc} */ + @Override public void toJSON(Path path) { + ObjectMapper mapper = new ObjectMapper().configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); + + try { + ANNJSONExportModel exportModel = new ANNJSONExportModel(System.currentTimeMillis(), "ann_" + UUID.randomUUID().toString(), ANNClassificationModel.class.getSimpleName()); + List listOfCandidates = new ArrayList<>(); + ProbableLabel[] labels = new ProbableLabel[candidates.rowSize()]; + for (int i = 0; i < candidates.rowSize(); i++) { + labels[i] = (ProbableLabel) candidates.getRow(i).getLb(); + listOfCandidates.add(candidates.features(i).asArray()); + } + + exportModel.candidateFeatures = listOfCandidates; + exportModel.distanceMeasure = distanceMeasure; + exportModel.k = k; + exportModel.weighted = weighted; + exportModel.candidateLabels = labels; + exportModel.centroindsStat = centroindsStat; + + File file = new File(path.toAbsolutePath().toString()); + mapper.writeValue(file, exportModel); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** */ + public static class ANNJSONExportModel extends JSONModel { + /** Centers of clusters. */ + public List candidateFeatures; + + public ProbableLabel[] candidateLabels; + + /** Distance measure. */ + public DistanceMeasure distanceMeasure; + + /** Amount of nearest neighbors. */ + public int k; + + /** kNN strategy. */ + public boolean weighted; + + /** Centroid statistics. 
*/ + public ANNClassificationTrainer.CentroidStat centroindsStat; + + /** */ + public ANNJSONExportModel(Long timestamp, String uid, String modelClass) { + super(timestamp, uid, modelClass); + } + + /** */ + @JsonCreator + public ANNJSONExportModel() { + } + + /** {@inheritDoc} */ + @Override public ANNClassificationModel convert() { + if (candidateFeatures == null || candidateFeatures.isEmpty()) + throw new IllegalArgumentException("Loaded list of candidates is empty. It should be not empty."); + + double[] firstRow = candidateFeatures.get(0); + LabeledVectorSet candidatesForANN = new LabeledVectorSet<>(candidateFeatures.size(), firstRow.length); + LabeledVector[] data = new LabeledVector[candidateFeatures.size()]; + for (int i = 0; i < candidateFeatures.size(); i++) { + data[i] = new LabeledVector(VectorUtils.of(candidateFeatures.get(i)), candidateLabels[i]); + } + candidatesForANN.setData(data); + + ANNClassificationModel mdl = new ANNClassificationModel(candidatesForANN, centroindsStat); + + mdl.withDistanceMeasure(distanceMeasure); + mdl.withK(k); + mdl.withWeighted(weighted); + return mdl; + } + } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNClassificationTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNClassificationTrainer.java index 22192224c760a1..eec871386d1f83 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNClassificationTrainer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNClassificationTrainer.java @@ -24,6 +24,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentSkipListSet; import java.util.stream.Collectors; +import com.fasterxml.jackson.annotation.JsonIgnore; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.ml.clustering.kmeans.KMeansModel; import org.apache.ignite.ml.clustering.kmeans.KMeansTrainer; @@ -139,7 +140,7 @@ private List getCentroids(Preprocessor> centroidStat = new 
ConcurrentHashMap<>(); + public ConcurrentHashMap> centroidStat = new ConcurrentHashMap<>(); /** Count of points closest to the center with a given index. */ - ConcurrentHashMap counts = new ConcurrentHashMap<>(); + public ConcurrentHashMap counts = new ConcurrentHashMap<>(); /** Set of unique labels. */ - ConcurrentSkipListSet clsLblsSet = new ConcurrentSkipListSet<>(); + public ConcurrentSkipListSet clsLblsSet = new ConcurrentSkipListSet<>(); /** Merge current */ CentroidStat merge(CentroidStat other) { diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ProbableLabel.java b/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ProbableLabel.java index 1fee123d193855..49f56b8815f647 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ProbableLabel.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ProbableLabel.java @@ -25,7 +25,10 @@ */ public class ProbableLabel { /** Key is label, value is probability to be this class */ - TreeMap clsLbls; + public TreeMap clsLbls; + + public ProbableLabel() { + } /** * The key is class label, diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/Blas.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/Blas.java index 137c64c1afcac4..69a349b5a81ef4 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/math/Blas.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/math/Blas.java @@ -17,6 +17,7 @@ package org.apache.ignite.ml.math; +import java.io.Serializable; import java.util.Set; import com.github.fommil.netlib.BLAS; import com.github.fommil.netlib.F2jBLAS; @@ -35,7 +36,10 @@ * Useful subset of BLAS operations. * This class is based on 'BLAS' class from Apache Spark MLlib. */ -public class Blas { +public class Blas implements Serializable { + /** */ + private static final long serialVersionUID = 124309657712638021L; + /** F2J implementation of BLAS. 
*/ private static transient BLAS f2jBlas = new F2jBLAS(); diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/BrayCurtisDistance.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/BrayCurtisDistance.java index 0b431597a81d77..2c32ee6ebcfedb 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/BrayCurtisDistance.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/BrayCurtisDistance.java @@ -51,4 +51,8 @@ public class BrayCurtisDistance implements DistanceMeasure { @Override public int hashCode() { return getClass().hashCode(); } + + @Override public String toString() { + return "BrayCurtisDistance{}"; + } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/DistanceMeasure.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/DistanceMeasure.java index 392e7b0a2247e3..4176d971adfc98 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/DistanceMeasure.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/DistanceMeasure.java @@ -20,6 +20,8 @@ import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; +import com.fasterxml.jackson.annotation.JsonSubTypes; +import com.fasterxml.jackson.annotation.JsonTypeInfo; import org.apache.ignite.ml.math.exceptions.math.CardinalityException; import org.apache.ignite.ml.math.primitives.vector.Vector; import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector; @@ -28,6 +30,21 @@ * This class is based on the corresponding class from Apache Common Math lib. Interface for distance measures of * n-dimensional vectors. 
*/ +@JsonTypeInfo( use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "type") +@JsonSubTypes( + { + @JsonSubTypes.Type(value = BrayCurtisDistance.class, name = "BrayCurtisDistance"), + @JsonSubTypes.Type(value = CanberraDistance.class, name = "CanberraDistance"), + @JsonSubTypes.Type(value = ChebyshevDistance.class, name = "ChebyshevDistance"), + @JsonSubTypes.Type(value = CosineSimilarity.class, name = "CosineSimilarity"), + @JsonSubTypes.Type(value = EuclideanDistance.class, name = "EuclideanDistance"), + @JsonSubTypes.Type(value = HammingDistance.class, name = "HammingDistance"), + @JsonSubTypes.Type(value = JaccardIndex.class, name = "JaccardIndex"), + @JsonSubTypes.Type(value = JensenShannonDistance.class, name = "JensenShannonDistance"), + @JsonSubTypes.Type(value = ManhattanDistance.class, name = "ManhattanDistance"), + @JsonSubTypes.Type(value = MinkowskiDistance.class, name = "MinkowskiDistance"), + @JsonSubTypes.Type(value = WeightedMinkowskiDistance.class, name = "WeightedMinkowskiDistance"), + }) public interface DistanceMeasure extends Externalizable { /** * Compute the distance between two n-dimensional vectors. 
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/MinkowskiDistance.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/MinkowskiDistance.java index b382112964ec46..20c1c02fe06ce7 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/MinkowskiDistance.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/MinkowskiDistance.java @@ -17,6 +17,8 @@ package org.apache.ignite.ml.math.distances; import java.util.Objects; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; import org.apache.ignite.ml.math.exceptions.math.CardinalityException; import org.apache.ignite.ml.math.functions.IgniteDoubleFunction; import org.apache.ignite.ml.math.primitives.vector.Vector; @@ -35,10 +37,16 @@ public class MinkowskiDistance implements DistanceMeasure { private final double p; /** @param p norm */ - public MinkowskiDistance(double p) { + @JsonCreator + public MinkowskiDistance(@JsonProperty("p")double p) { this.p = p; } + /** Returns p-norm. 
*/ + public double getP() { + return p; + } + /** {@inheritDoc} */ @Override public double compute(Vector a, Vector b) throws CardinalityException { assert a.size() == b.size(); @@ -60,4 +68,10 @@ public MinkowskiDistance(double p) { @Override public int hashCode() { return Objects.hash(p); } + + @Override public String toString() { + return "MinkowskiDistance{" + + "p=" + p + + '}'; + } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/WeightedMinkowskiDistance.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/WeightedMinkowskiDistance.java index 662bf907021e46..61e2125f70744d 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/WeightedMinkowskiDistance.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/WeightedMinkowskiDistance.java @@ -16,8 +16,13 @@ */ package org.apache.ignite.ml.math.distances; +import java.util.Arrays; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; import org.apache.ignite.ml.math.exceptions.math.CardinalityException; import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.math.primitives.vector.VectorUtils; import org.apache.ignite.ml.math.util.MatrixUtil; /** @@ -29,13 +34,18 @@ public class WeightedMinkowskiDistance implements DistanceMeasure { */ private static final long serialVersionUID = 1771556549784040096L; - private final int p; + private int p = 1; - private final Vector weight; + private final double[] weights; - public WeightedMinkowskiDistance(int p, Vector weight) { + @JsonIgnore + private final Vector internalWeights; + + @JsonCreator + public WeightedMinkowskiDistance(@JsonProperty("p")int p, @JsonProperty("weights")double[] weights) { this.p = p; - this.weight = weight.copy().map(x -> Math.pow(Math.abs(x), p)); + this.weights = weights.clone(); + internalWeights = 
VectorUtils.of(weights).copy().map(x -> Math.pow(Math.abs(x), p)); } /** @@ -47,12 +57,20 @@ public WeightedMinkowskiDistance(int p, Vector weight) { return Math.pow( MatrixUtil.localCopyOf(a).minus(b) .map(x -> Math.pow(Math.abs(x), p)) - .times(weight) + .times(internalWeights) .sum(), 1 / (double) p ); } + /** Returns p-norm. */ + public int getP() { + return p; + } + + /** Returns weights. */ + public double[] getWeights() { return weights.clone(); } + /** * {@inheritDoc} */ @@ -70,4 +88,11 @@ public WeightedMinkowskiDistance(int p, Vector weight) { @Override public int hashCode() { return getClass().hashCode(); } + + @Override public String toString() { + return "WeightedMinkowskiDistance{" + + "p=" + p + + ", weights=" + Arrays.toString(weights) + + '}'; + } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/stat/DistributionMixture.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/stat/DistributionMixture.java index abd39df19b0f38..4a915fa7fc460e 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/math/stat/DistributionMixture.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/math/stat/DistributionMixture.java @@ -32,13 +32,13 @@ */ public abstract class DistributionMixture implements Distribution { /** Component probabilities. */ - private final Vector componentProbs; + private Vector componentProbs; /** Distributions. */ - private final List distributions; + private List distributions; /** Dimension. */ - private final int dimension; + private int dimension; /** * Creates an instance of DistributionMixture. 
@@ -61,6 +61,9 @@ public DistributionMixture(Vector componentProbs, List distributions) { this.dimension = dimension; } + public DistributionMixture() { + } + /** {@inheritDoc} */ @Override public double prob(Vector x) { return likelihood(x).sum(); diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/util/MapUtil.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/util/MapUtil.java index 9190901bf215e6..c632d1bd9293a1 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/math/util/MapUtil.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/math/util/MapUtil.java @@ -27,7 +27,14 @@ /** * Some {@link Map} related utils. */ -public class MapUtil { +public final class MapUtil { + /** + * + */ + private MapUtil(){ + // No-op. + } + /** */ public static > M mergeMaps(M m1, M m2, BinaryOperator op, Supplier mapSupplier) { return Stream.of(m1, m2) diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/util/MatrixUtil.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/util/MatrixUtil.java index 7cc7f276f8e146..21a5f0b1fe65d6 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/math/util/MatrixUtil.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/math/util/MatrixUtil.java @@ -32,7 +32,14 @@ /** * Utility class for various matrix operations. */ -public class MatrixUtil { +public final class MatrixUtil { + /** + * + */ + private MatrixUtil() { + // No-op. + } + /** * Create the like matrix with read-only matrices support. 
* diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/compound/CompoundNaiveBayesModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/compound/CompoundNaiveBayesModel.java index 6cdc637b59973f..a9fc2d0dea0a9c 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/compound/CompoundNaiveBayesModel.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/compound/CompoundNaiveBayesModel.java @@ -17,14 +17,27 @@ package org.apache.ignite.ml.naivebayes.compound; +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.UUID; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectWriter; +import com.fasterxml.jackson.databind.SerializationFeature; import org.apache.ignite.ml.Exportable; import org.apache.ignite.ml.Exporter; import org.apache.ignite.ml.IgniteModel; import org.apache.ignite.ml.environment.deploy.DeployableObject; +import org.apache.ignite.ml.inference.json.JSONModel; +import org.apache.ignite.ml.inference.json.JSONModelMixIn; +import org.apache.ignite.ml.inference.json.JSONWritable; +import org.apache.ignite.ml.inference.json.JacksonHelper; import org.apache.ignite.ml.math.primitives.vector.Vector; import org.apache.ignite.ml.math.primitives.vector.VectorUtils; import org.apache.ignite.ml.naivebayes.discrete.DiscreteNaiveBayesModel; @@ -34,7 +47,8 @@ * A compound Naive Bayes model which uses a composition of{@code GaussianNaiveBayesModel} and {@code * DiscreteNaiveBayesModel}. 
*/ -public class CompoundNaiveBayesModel implements IgniteModel, Exportable, DeployableObject { +public class CompoundNaiveBayesModel implements IgniteModel, Exportable, + JSONWritable, DeployableObject { /** Serial version uid. */ private static final long serialVersionUID = -5045925321135798960L; @@ -56,6 +70,10 @@ public class CompoundNaiveBayesModel implements IgniteModel, Exp /** Feature ids which should be skipped in Discrete model. */ private Collection discreteFeatureIdsToSkip = Collections.emptyList(); + /** */ + public CompoundNaiveBayesModel() { + } + /** {@inheritDoc} */ @Override public

    void saveModel(Exporter exporter, P path) { exporter.save(this, path); @@ -91,6 +109,22 @@ public DiscreteNaiveBayesModel getDiscreteModel() { return discreteModel; } + public double[] getPriorProbabilities() { + return priorProbabilities; + } + + public double[] getLabels() { + return labels; + } + + public Collection getGaussianFeatureIdsToSkip() { + return gaussianFeatureIdsToSkip; + } + + public Collection getDiscreteFeatureIdsToSkip() { + return discreteFeatureIdsToSkip; + } + /** Sets prior probabilities. */ public CompoundNaiveBayesModel withPriorProbabilities(double[] priorProbabilities) { this.priorProbabilities = priorProbabilities.clone(); @@ -155,7 +189,44 @@ private static Vector skipFeatures(Vector vector, Collection featureIds } /** {@inheritDoc} */ + @JsonIgnore @Override public List getDependencies() { return Arrays.asList(discreteModel, gaussianModel); } + + /** {@inheritDoc} */ + @Override public void toJSON(Path path) { + ObjectMapper mapper = new ObjectMapper().configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); + mapper.addMixIn(CompoundNaiveBayesModel.class, JSONModelMixIn.class); + + ObjectWriter writer = mapper + .writerFor(CompoundNaiveBayesModel.class) + .withAttribute("formatVersion", JSONModel.JSON_MODEL_FORMAT_VERSION) + .withAttribute("timestamp", System.currentTimeMillis()) + .withAttribute("uid", "dt_" + UUID.randomUUID().toString()) + .withAttribute("modelClass", CompoundNaiveBayesModel.class.getSimpleName()); + + try { + File file = new File(path.toAbsolutePath().toString()); + writer.writeValue(file, this); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** Loads CompoundNaiveBayesModel from JSON file. 
*/ + public static CompoundNaiveBayesModel fromJSON(Path path) { + ObjectMapper mapper = new ObjectMapper(); + mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + + CompoundNaiveBayesModel mdl; + try { + JacksonHelper.readAndValidateBasicJsonModelProperties(path, mapper, CompoundNaiveBayesModel.class.getSimpleName()); + mdl = mapper.readValue(new File(path.toAbsolutePath().toString()), CompoundNaiveBayesModel.class); + return mdl; + } catch (IOException e) { + e.printStackTrace(); + } + return null; + } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/discrete/DiscreteNaiveBayesModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/discrete/DiscreteNaiveBayesModel.java index b7eb5d383ab304..3d5edce45b7555 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/discrete/DiscreteNaiveBayesModel.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/discrete/DiscreteNaiveBayesModel.java @@ -17,10 +17,23 @@ package org.apache.ignite.ml.naivebayes.discrete; +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; import java.util.Collections; import java.util.List; +import java.util.UUID; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectWriter; +import com.fasterxml.jackson.databind.SerializationFeature; import org.apache.ignite.ml.Exporter; import org.apache.ignite.ml.environment.deploy.DeployableObject; +import org.apache.ignite.ml.inference.json.JSONModel; +import org.apache.ignite.ml.inference.json.JSONModelMixIn; +import org.apache.ignite.ml.inference.json.JSONWritable; +import org.apache.ignite.ml.inference.json.JacksonHelper; import org.apache.ignite.ml.math.primitives.vector.Vector; import org.apache.ignite.ml.naivebayes.BayesModel; @@ -29,7 +42,8 @@ * {@code p(C_k,y) =x_1*p_k1^x 
*...*x_i*p_ki^x_i}. Where {@code x_i} is a discrete feature, {@code p_ki} is a prior * probability probability of class {@code p(x|C_k)}. Returns the number of the most possible class. */ -public class DiscreteNaiveBayesModel implements BayesModel, DeployableObject { +public class DiscreteNaiveBayesModel implements BayesModel, + JSONWritable, DeployableObject { /** Serial version uid. */ private static final long serialVersionUID = -127386523291350345L; @@ -37,23 +51,23 @@ public class DiscreteNaiveBayesModel implements BayesModel void saveModel(Exporter exporter, P path) { exporter.save(this, path); @@ -111,22 +129,22 @@ public DiscreteNaiveBayesModel(double[][][] probabilities, double[] clsProbabili /** A getter for probabilities.*/ public double[][][] getProbabilities() { - return probabilities; + return probabilities.clone(); } /** A getter for clsProbabilities.*/ public double[] getClsProbabilities() { - return clsProbabilities; + return clsProbabilities.clone(); } /** A getter for bucketThresholds.*/ public double[][] getBucketThresholds() { - return bucketThresholds; + return bucketThresholds.clone(); } /** A getter for labels.*/ public double[] getLabels() { - return labels; + return labels.clone(); } /** A getter for sumsHolder.*/ @@ -145,7 +163,44 @@ private int toBucketNumber(double val, double[] thresholds) { } /** {@inheritDoc} */ + @JsonIgnore @Override public List getDependencies() { return Collections.emptyList(); } + + /** {@inheritDoc} */ + @Override public void toJSON(Path path) { + ObjectMapper mapper = new ObjectMapper().configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); + mapper.addMixIn(DiscreteNaiveBayesModel.class, JSONModelMixIn.class); + + ObjectWriter writer = mapper + .writerFor(DiscreteNaiveBayesModel.class) + .withAttribute("formatVersion", JSONModel.JSON_MODEL_FORMAT_VERSION) + .withAttribute("timestamp", System.currentTimeMillis()) + .withAttribute("uid", "dt_" + UUID.randomUUID().toString()) + .withAttribute("modelClass", 
DiscreteNaiveBayesModel.class.getSimpleName()); + + try { + File file = new File(path.toAbsolutePath().toString()); + writer.writeValue(file, this); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** Loads DiscreteNaiveBayesModel from JSON file. */ + public static DiscreteNaiveBayesModel fromJSON(Path path) { + ObjectMapper mapper = new ObjectMapper(); + mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + + DiscreteNaiveBayesModel mdl; + try { + JacksonHelper.readAndValidateBasicJsonModelProperties(path, mapper, DiscreteNaiveBayesModel.class.getSimpleName()); + mdl = mapper.readValue(new File(path.toAbsolutePath().toString()), DiscreteNaiveBayesModel.class); + return mdl; + } catch (IOException e) { + e.printStackTrace(); + } + return null; + } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/discrete/DiscreteNaiveBayesSumsHolder.java b/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/discrete/DiscreteNaiveBayesSumsHolder.java index 50b335eaa65397..060d1889d9e37f 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/discrete/DiscreteNaiveBayesSumsHolder.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/discrete/DiscreteNaiveBayesSumsHolder.java @@ -32,6 +32,17 @@ public class DiscreteNaiveBayesSumsHolder implements AutoCloseable, Serializable /** Rows count for each label */ Map featureCountersPerLbl = new HashMap<>(); + public DiscreteNaiveBayesSumsHolder() { + } + + public Map getValuesInBucketPerLbl() { + return valuesInBucketPerLbl; + } + + public Map getFeatureCountersPerLbl() { + return featureCountersPerLbl; + } + /** Merge to current */ DiscreteNaiveBayesSumsHolder merge(DiscreteNaiveBayesSumsHolder other) { valuesInBucketPerLbl = MapUtil.mergeMaps(valuesInBucketPerLbl, other.valuesInBucketPerLbl, this::sum, HashMap::new); diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/gaussian/GaussianNaiveBayesModel.java 
b/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/gaussian/GaussianNaiveBayesModel.java index d0a647093f0e06..0627ce52677dfe 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/gaussian/GaussianNaiveBayesModel.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/gaussian/GaussianNaiveBayesModel.java @@ -17,10 +17,23 @@ package org.apache.ignite.ml.naivebayes.gaussian; +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; import java.util.Collections; import java.util.List; +import java.util.UUID; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectWriter; +import com.fasterxml.jackson.databind.SerializationFeature; import org.apache.ignite.ml.Exporter; import org.apache.ignite.ml.environment.deploy.DeployableObject; +import org.apache.ignite.ml.inference.json.JSONModel; +import org.apache.ignite.ml.inference.json.JSONModelMixIn; +import org.apache.ignite.ml.inference.json.JSONWritable; +import org.apache.ignite.ml.inference.json.JacksonHelper; import org.apache.ignite.ml.math.primitives.vector.Vector; import org.apache.ignite.ml.naivebayes.BayesModel; @@ -28,24 +41,25 @@ * Simple naive Bayes model which predicts result value {@code y} belongs to a class {@code C_k, k in [0..K]} as {@code * p(C_k,y) = p(C_k)*p(y_1,C_k) *...*p(y_n,C_k) / p(y)}. Return the number of the most possible class. */ -public class GaussianNaiveBayesModel implements BayesModel, DeployableObject { +public class GaussianNaiveBayesModel implements BayesModel, + JSONWritable, DeployableObject { /** Serial version uid. */ private static final long serialVersionUID = -127386523291350345L; /** Means of features for all classes. kth row contains means for labels[k] class. 
*/ - private final double[][] means; + private double[][] means; /** Variances of features for all classes. kth row contains variances for labels[k] class */ - private final double[][] variances; + private double[][] variances; /** Prior probabilities of each class */ - private final double[] classProbabilities; + private double[] classProbabilities; /** Labels. */ - private final double[] labels; + private double[] labels; /** Feature sum, squared sum and count per label. */ - private final GaussianNaiveBayesSumsHolder sumsHolder; + private GaussianNaiveBayesSumsHolder sumsHolder; /** * @param means Means of features for all classes. @@ -56,13 +70,17 @@ public class GaussianNaiveBayesModel implements BayesModel void saveModel(Exporter exporter, P path) { exporter.save(this, path); @@ -127,7 +145,44 @@ private static double gauss(double x, double mean, double variance) { } /** {@inheritDoc} */ + @JsonIgnore @Override public List getDependencies() { return Collections.emptyList(); } + + /** {@inheritDoc} */ + @Override public void toJSON(Path path) { + ObjectMapper mapper = new ObjectMapper().configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); + mapper.addMixIn(GaussianNaiveBayesModel.class, JSONModelMixIn.class); + + ObjectWriter writer = mapper + .writerFor(GaussianNaiveBayesModel.class) + .withAttribute("formatVersion", JSONModel.JSON_MODEL_FORMAT_VERSION) + .withAttribute("timestamp", System.currentTimeMillis()) + .withAttribute("uid", "dt_" + UUID.randomUUID().toString()) + .withAttribute("modelClass", GaussianNaiveBayesModel.class.getSimpleName()); + + try { + File file = new File(path.toAbsolutePath().toString()); + writer.writeValue(file, this); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** Loads GaussianNaiveBayesModel from JSON file. 
*/ + public static GaussianNaiveBayesModel fromJSON(Path path) { + ObjectMapper mapper = new ObjectMapper(); + mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + + GaussianNaiveBayesModel mdl; + try { + JacksonHelper.readAndValidateBasicJsonModelProperties(path, mapper, GaussianNaiveBayesModel.class.getSimpleName()); + mdl = mapper.readValue(new File(path.toAbsolutePath().toString()), GaussianNaiveBayesModel.class); + return mdl; + } catch (IOException e) { + e.printStackTrace(); + } + return null; + } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/gaussian/GaussianNaiveBayesSumsHolder.java b/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/gaussian/GaussianNaiveBayesSumsHolder.java index 7b95ff8f9354fd..1d85832ceec906 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/gaussian/GaussianNaiveBayesSumsHolder.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/gaussian/GaussianNaiveBayesSumsHolder.java @@ -35,6 +35,21 @@ class GaussianNaiveBayesSumsHolder implements Serializable, AutoCloseable { /** Rows count for each label */ Map featureCountersPerLbl = new HashMap<>(); + public GaussianNaiveBayesSumsHolder() { + } + + public Map getFeatureSumsPerLbl() { + return featureSumsPerLbl; + } + + public Map getFeatureSquaredSumsPerLbl() { + return featureSquaredSumsPerLbl; + } + + public Map getFeatureCountersPerLbl() { + return featureCountersPerLbl; + } + /** Merge to current */ GaussianNaiveBayesSumsHolder merge(GaussianNaiveBayesSumsHolder other) { featureSumsPerLbl = MapUtil.mergeMaps(featureSumsPerLbl, other.featureSumsPerLbl, this::sum, HashMap::new); diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/nn/Activators.java b/modules/ml/src/main/java/org/apache/ignite/ml/nn/Activators.java index 4c34cd26772471..7665164121dae6 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/nn/Activators.java +++ 
b/modules/ml/src/main/java/org/apache/ignite/ml/nn/Activators.java @@ -26,7 +26,7 @@ public class Activators { /** * Sigmoid activation function. */ - public static IgniteDifferentiableDoubleToDoubleFunction SIGMOID = new IgniteDifferentiableDoubleToDoubleFunction() { + public static final IgniteDifferentiableDoubleToDoubleFunction SIGMOID = new IgniteDifferentiableDoubleToDoubleFunction() { /** {@inheritDoc} */ @Override public double differential(double pnt) { double v = apply(pnt); @@ -42,7 +42,7 @@ public class Activators { /** * Rectified linear unit (ReLU) activation function. */ - public static IgniteDifferentiableDoubleToDoubleFunction RELU = new IgniteDifferentiableDoubleToDoubleFunction() { + public static final IgniteDifferentiableDoubleToDoubleFunction RELU = new IgniteDifferentiableDoubleToDoubleFunction() { /** * Differential of ReLU at pnt. Formally, function is not differentiable at 0, but we let differential at 0 be 0. * @@ -62,7 +62,7 @@ public class Activators { /** * Linear unit activation function. 
*/ - public static IgniteDifferentiableDoubleToDoubleFunction LINEAR = new IgniteDifferentiableDoubleToDoubleFunction() { + public static final IgniteDifferentiableDoubleToDoubleFunction LINEAR = new IgniteDifferentiableDoubleToDoubleFunction() { /** {@inheritDoc} */ @Override public double differential(double pnt) { return 1.0; diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/imputing/ImputerTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/imputing/ImputerTrainer.java index e33504eae05095..449bb2228c5d8e 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/imputing/ImputerTrainer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/imputing/ImputerTrainer.java @@ -17,6 +17,7 @@ package org.apache.ignite.ml.preprocessing.imputing; +import java.util.Arrays; import java.util.Comparator; import java.util.HashMap; import java.util.Map; @@ -442,8 +443,7 @@ private int[] updateTheCounts(LabeledVector row, int[] counts) { private double[] updateTheMins(LabeledVector row, double[] mins) { if (mins == null) { mins = new double[row.size()]; - for (int i = 0; i < mins.length; i++) - mins[i] = Double.POSITIVE_INFINITY; + Arrays.fill(mins, Double.POSITIVE_INFINITY); } else @@ -468,8 +468,7 @@ private double[] updateTheMins(LabeledVector row, double[] mins) { private double[] updateTheMaxs(LabeledVector row, double[] maxs) { if (maxs == null) { maxs = new double[row.size()]; - for (int i = 0; i < maxs.length; i++) - maxs[i] = Double.NEGATIVE_INFINITY; + Arrays.fill(maxs, Double.NEGATIVE_INFINITY); } else diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/maxabsscaling/MaxAbsScalerTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/maxabsscaling/MaxAbsScalerTrainer.java index b7678f9c7bc352..978fcb44bdfa0f 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/maxabsscaling/MaxAbsScalerTrainer.java +++ 
b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/maxabsscaling/MaxAbsScalerTrainer.java @@ -17,6 +17,7 @@ package org.apache.ignite.ml.preprocessing.maxabsscaling; +import java.util.Arrays; import org.apache.ignite.ml.dataset.Dataset; import org.apache.ignite.ml.dataset.DatasetBuilder; import org.apache.ignite.ml.dataset.UpstreamEntry; @@ -50,8 +51,7 @@ public class MaxAbsScalerTrainer implements PreprocessingTrainer { if (maxAbs == null) { maxAbs = new double[row.size()]; - for (int i = 0; i < maxAbs.length; i++) - maxAbs[i] = .0; + Arrays.fill(maxAbs, .0); } else assert maxAbs.length == row.size() : "Base preprocessor must return exactly " + maxAbs.length diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/minmaxscaling/MinMaxScalerTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/minmaxscaling/MinMaxScalerTrainer.java index 54a6d59d98ee0b..272dab11587c64 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/minmaxscaling/MinMaxScalerTrainer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/minmaxscaling/MinMaxScalerTrainer.java @@ -17,6 +17,7 @@ package org.apache.ignite.ml.preprocessing.minmaxscaling; +import java.util.Arrays; import org.apache.ignite.ml.dataset.Dataset; import org.apache.ignite.ml.dataset.DatasetBuilder; import org.apache.ignite.ml.dataset.PartitionContextBuilder; @@ -53,8 +54,7 @@ public class MinMaxScalerTrainer implements PreprocessingTrainer { if (min == null) { min = new double[row.size()]; - for (int i = 0; i < min.length; i++) - min[i] = Double.MAX_VALUE; + Arrays.fill(min, Double.MAX_VALUE); } else assert min.length == row.size() : "Base preprocessor must return exactly " + min.length @@ -62,8 +62,7 @@ public class MinMaxScalerTrainer implements PreprocessingTrainer { if (max == null) { max = new double[row.size()]; - for (int i = 0; i < max.length; i++) - max[i] = -Double.MAX_VALUE; + Arrays.fill(max, -Double.MAX_VALUE); } else assert 
max.length == row.size() : "Base preprocessor must return exactly " + min.length diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainer.java index 9ecc257492b19f..d28a2a958a1ce6 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainer.java @@ -72,10 +72,10 @@ private static LabeledVector extendLabeledVector(LabeledVector double[] x0 = null; if (mdl != null) { - int x0Size = mdl.getWeights().size() + 1; - Vector weights = mdl.getWeights().like(x0Size); - mdl.getWeights().nonZeroes().forEach(ith -> weights.set(ith.index(), ith.get())); - weights.set(weights.size() - 1, mdl.getIntercept()); + int x0Size = mdl.weights().size() + 1; + Vector weights = mdl.weights().like(x0Size); + mdl.weights().nonZeroes().forEach(ith -> weights.set(ith.index(), ith.get())); + weights.set(weights.size() - 1, mdl.intercept()); x0 = weights.asArray(); } res = lsqr.solve(0, 1e-12, 1e-12, 1e8, -1, false, x0); diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionModel.java index 150b6d763a20b6..4cb53403d96b80 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionModel.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionModel.java @@ -17,25 +17,35 @@ package org.apache.ignite.ml.regressions.linear; +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; import java.util.Objects; +import java.util.UUID; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.ignite.ml.Exportable; import 
org.apache.ignite.ml.Exporter; import org.apache.ignite.ml.IgniteModel; +import org.apache.ignite.ml.inference.json.JSONModel; +import org.apache.ignite.ml.inference.json.JSONWritable; import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.math.primitives.vector.VectorUtils; /** * Simple linear regression model which predicts result value Y as a linear combination of input variables: * Y = weights * X + intercept. */ -public final class LinearRegressionModel implements IgniteModel, Exportable { +public final class LinearRegressionModel implements IgniteModel, Exportable, + JSONWritable { /** */ private static final long serialVersionUID = -105984600091550226L; /** Multiplier of the objects's vector required to make prediction. */ - private final Vector weights; + private Vector weights; /** Intercept of the linear regression model */ - private final double intercept; + private double intercept; /** */ public LinearRegressionModel(Vector weights, double intercept) { @@ -44,15 +54,41 @@ public LinearRegressionModel(Vector weights, double intercept) { } /** */ - public Vector getWeights() { + private LinearRegressionModel() { + } + + /** */ + public Vector weights() { return weights; } /** */ - public double getIntercept() { + public double intercept() { return intercept; } + /** + * Set up the weights. + * + * @param weights The parameter value. + * @return Model with new weights parameter value. + */ + public LinearRegressionModel withWeights(Vector weights) { + this.weights = weights; + return this; + } + + /** + * Set up the intercept. + * + * @param intercept The parameter value. + * @return Model with new intercept parameter value. 
+ */ + public LinearRegressionModel withIntercept(double intercept) { + this.intercept = intercept; + return this; + } + /** {@inheritDoc} */ @Override public Double predict(Vector input) { return input.dot(weights) + intercept; @@ -108,4 +144,72 @@ public double getIntercept() { @Override public String toString(boolean pretty) { return toString(); } + + /** Loads LinearRegressionModel from JSON file. */ + public static LinearRegressionModel fromJSON(Path path) { + ObjectMapper mapper = new ObjectMapper(); + + LinearRegressionModelJSONExportModel linearRegressionJSONExportModel; + try { + linearRegressionJSONExportModel = mapper + .readValue(new File(path.toAbsolutePath().toString()), LinearRegressionModelJSONExportModel.class); + + return linearRegressionJSONExportModel.convert(); + } catch (IOException e) { + e.printStackTrace(); + } + + return null; + } + + /** {@inheritDoc} */ + @Override public void toJSON(Path path) { + ObjectMapper mapper = new ObjectMapper(); + try { + LinearRegressionModelJSONExportModel exportModel = new LinearRegressionModelJSONExportModel( + System.currentTimeMillis(), + "linreg_" + UUID.randomUUID().toString(), + LinearRegressionModel.class.getSimpleName() + ); + exportModel.intercept = intercept; + exportModel.weights = weights.asArray(); + + File file = new File(path.toAbsolutePath().toString()); + mapper.writeValue(file, exportModel); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** */ + public static class LinearRegressionModelJSONExportModel extends JSONModel { + /** + * Multiplier of the objects's vector required to make prediction. + */ + public double[] weights; + + /** + * Intercept of the linear regression model. 
+ */ + public double intercept; + + /** */ + public LinearRegressionModelJSONExportModel(Long timestamp, String uid, String modelClass) { + super(timestamp, uid, modelClass); + } + + /** */ + @JsonCreator + public LinearRegressionModelJSONExportModel() { + } + + /** {@inheritDoc} */ + @Override public LinearRegressionModel convert() { + LinearRegressionModel linRegMdl = new LinearRegressionModel(); + linRegMdl.withWeights(VectorUtils.of(weights)); + linRegMdl.withIntercept(intercept); + + return linRegMdl; + } + } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainer.java index da813fc1953e84..d98267152e3c48 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainer.java @@ -148,8 +148,8 @@ public LinearRegressionSGDTrainer(UpdatesStrategy, Exportable { +public final class LogisticRegressionModel implements IgniteModel, Exportable, + JSONWritable { /** */ private static final long serialVersionUID = -133984600091550776L; @@ -42,6 +53,10 @@ public final class LogisticRegressionModel implements IgniteModel, JSONWritable { + /** Root node. */ + private DecisionTreeNode rootNode; + + /** + * Creates the model. + * + * @param rootNode Root node of the tree. + */ + public DecisionTreeModel(DecisionTreeNode rootNode) { + this.rootNode = rootNode; + } + + /** */ + private DecisionTreeModel() { + + } + + /** Returns the root node. 
*/ + public DecisionTreeNode getRootNode() { + return rootNode; + } + + /** {@inheritDoc} */ + @Override public Double predict(Vector features) { + return rootNode.predict(features); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return toString(false); + } + + /** {@inheritDoc} */ + @Override public String toString(boolean pretty) { + return DecisionTreeTrainer.printTree(rootNode, pretty); + } + + /** {@inheritDoc} */ + @Override public void toJSON(Path path) { + ObjectMapper mapper = new ObjectMapper().configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); + mapper.addMixIn(DecisionTreeModel.class, JSONModelMixIn.class); + + ObjectWriter writer = mapper + .writerFor(DecisionTreeModel.class) + .withAttribute("formatVersion", JSONModel.JSON_MODEL_FORMAT_VERSION) + .withAttribute("timestamp", System.currentTimeMillis()) + .withAttribute("uid", "dt_" + UUID.randomUUID().toString()) + .withAttribute("modelClass", DecisionTreeModel.class.getSimpleName()); + + try { + File file = new File(path.toAbsolutePath().toString()); + writer.writeValue(file, this); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** Loads DecisionTreeModel from JSON file. 
*/ + public static DecisionTreeModel fromJSON(Path path) { + ObjectMapper mapper = new ObjectMapper(); + mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + + DecisionTreeModel mdl; + try { + JacksonHelper.readAndValidateBasicJsonModelProperties(path, mapper, DecisionTreeModel.class.getSimpleName()); + mdl = mapper.readValue(new File(path.toAbsolutePath().toString()), DecisionTreeModel.class); + return mdl; + } catch (IOException e) { + e.printStackTrace(); + } + return null; + } +} diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeNode.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeNode.java index 80036ba4da2d87..8d705e4c9bbe5f 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeNode.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeNode.java @@ -17,11 +17,24 @@ package org.apache.ignite.ml.tree; +import com.fasterxml.jackson.annotation.JsonSubTypes; +import com.fasterxml.jackson.annotation.JsonTypeInfo; import org.apache.ignite.ml.IgniteModel; import org.apache.ignite.ml.math.primitives.vector.Vector; /** * Base interface for decision tree nodes. */ -public interface DecisionTreeNode extends IgniteModel { +@JsonTypeInfo( use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "type") +@JsonSubTypes( + { + @JsonSubTypes.Type(value = DecisionTreeLeafNode.class, name = "leaf"), + @JsonSubTypes.Type(value = DecisionTreeConditionalNode.class, name = "conditional"), + }) +public abstract class DecisionTreeNode implements IgniteModel { + /** + * Empty constructor for serialization needs. 
+ */ + protected DecisionTreeNode() { + } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainer.java index 2b259f24cb3d0c..7ae86fcceea19c 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainer.java @@ -31,7 +31,7 @@ * Decision tree regressor based on distributed decision tree trainer that allows to fit trees using row-partitioned * dataset. */ -public class DecisionTreeRegressionTrainer extends DecisionTree { +public class DecisionTreeRegressionTrainer extends DecisionTreeTrainer { /** * Constructs a new decision tree regressor with default impurity function compressor. * diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTree.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeTrainer.java similarity index 92% rename from modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTree.java rename to modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeTrainer.java index eb2f1e5e2eba2e..0692ec62ac853d 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTree.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeTrainer.java @@ -41,7 +41,7 @@ * * @param Type of impurity measure. */ -public abstract class DecisionTree> extends SingleLabelDatasetTrainer { +public abstract class DecisionTreeTrainer> extends SingleLabelDatasetTrainer { /** Max tree deep. */ int maxDeep; @@ -65,8 +65,8 @@ public abstract class DecisionTree> extends SingleL * @param compressor Impurity function compressor. * @param decisionTreeLeafBuilder Decision tree leaf builder. 
*/ - DecisionTree(int maxDeep, double minImpurityDecrease, StepFunctionCompressor compressor, - DecisionTreeLeafBuilder decisionTreeLeafBuilder) { + DecisionTreeTrainer(int maxDeep, double minImpurityDecrease, StepFunctionCompressor compressor, + DecisionTreeLeafBuilder decisionTreeLeafBuilder) { this.maxDeep = maxDeep; this.minImpurityDecrease = minImpurityDecrease; this.compressor = compressor; @@ -108,7 +108,7 @@ else if (node instanceof DecisionTreeConditionalNode) { } /** {@inheritDoc} */ - @Override public DecisionTreeNode fitWithInitializedDeployingContext(DatasetBuilder datasetBuilder, + @Override public DecisionTreeModel fitWithInitializedDeployingContext(DatasetBuilder datasetBuilder, Preprocessor preprocessor) { try (Dataset dataset = datasetBuilder.build( envBuilder, @@ -124,13 +124,13 @@ else if (node instanceof DecisionTreeConditionalNode) { } /** {@inheritDoc} */ - @Override public boolean isUpdateable(DecisionTreeNode mdl) { + @Override public boolean isUpdateable(DecisionTreeModel mdl) { return true; } /** {@inheritDoc} */ - @Override public DecisionTree withEnvironmentBuilder(LearningEnvironmentBuilder envBuilder) { - return (DecisionTree)super.withEnvironmentBuilder(envBuilder); + @Override public DecisionTreeTrainer withEnvironmentBuilder(LearningEnvironmentBuilder envBuilder) { + return (DecisionTreeTrainer)super.withEnvironmentBuilder(envBuilder); } /** @@ -143,7 +143,7 @@ else if (node instanceof DecisionTreeConditionalNode) { * @param Type of a value in {@code upstream} data. * @return New model based on new dataset. 
*/ - @Override protected DecisionTreeNode updateModel(DecisionTreeNode mdl, + @Override protected DecisionTreeModel updateModel(DecisionTreeModel mdl, DatasetBuilder datasetBuilder, Preprocessor preprocessor) { @@ -151,8 +151,8 @@ else if (node instanceof DecisionTreeConditionalNode) { } /** */ - public DecisionTreeNode fit(Dataset dataset) { - return split(dataset, e -> true, 0, getImpurityMeasureCalculator(dataset)); + public DecisionTreeModel fit(Dataset dataset) { + return new DecisionTreeModel(split(dataset, e -> true, 0, getImpurityMeasureCalculator(dataset))); } /** diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/NodeData.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/NodeData.java new file mode 100644 index 00000000000000..885a14d788a573 --- /dev/null +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/NodeData.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.ml.tree; + +import java.util.Map; +import java.util.NavigableMap; + +/** + * Presents decision tree data in a plain (flattened) manner — for example, one NodeData instance per Parquet row of a Spark DT model. + */ +public class NodeData { + /** Id. 
*/ + public int id; + + /** Prediction. */ + public double prediction; + + /** Left child id. */ + public int leftChildId; + + /** Right child id. */ + public int rightChildId; + + /** Threshold. */ + public double threshold; + + /** Feature index. */ + public int featureIdx; + + /** Is leaf node. */ + public boolean isLeafNode; + + /**{@inheritDoc}*/ + @Override public String toString() { + return "NodeData{" + + "id=" + id + + ", prediction=" + prediction + + ", leftChildId=" + leftChildId + + ", rightChildId=" + rightChildId + + ", threshold=" + threshold + + ", featureIdx=" + featureIdx + + ", isLeafNode=" + isLeafNode + + '}'; + } + + /** + * Build tree or sub-tree based on indices and nodes sorted map as a dictionary. + * + * @param nodes The sorted map of nodes. + * @param rootNodeData Root node data. + */ + public static DecisionTreeNode buildTree(Map nodes, + NodeData rootNodeData) { + return rootNodeData.isLeafNode ? new DecisionTreeLeafNode(rootNodeData.prediction) : new DecisionTreeConditionalNode(rootNodeData.featureIdx, + rootNodeData.threshold, + buildTree(nodes, nodes.get(rootNodeData.rightChildId)), + buildTree(nodes, nodes.get(rootNodeData.leftChildId)), + null); + } + + /** + * Builds the DT model by the given sorted map of nodes. + * + * @param nodes The sorted map of nodes. 
+ */ + public static DecisionTreeModel buildDecisionTreeModel(Map nodes) { + DecisionTreeModel mdl = null; + if (!nodes.isEmpty()) { + NodeData rootNodeData = (NodeData)((NavigableMap)nodes).firstEntry().getValue(); + mdl = new DecisionTreeModel(buildTree(nodes, rootNodeData)); + return mdl; + } + return mdl; + } +} diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/boosting/GDBOnTreesLearningStrategy.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/boosting/GDBOnTreesLearningStrategy.java index 1c25f73b352c4d..a2438e517ae013 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/boosting/GDBOnTreesLearningStrategy.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/boosting/GDBOnTreesLearningStrategy.java @@ -22,7 +22,7 @@ import org.apache.ignite.ml.IgniteModel; import org.apache.ignite.ml.composition.ModelsComposition; import org.apache.ignite.ml.composition.boosting.GDBLearningStrategy; -import org.apache.ignite.ml.composition.boosting.GDBTrainer; +import org.apache.ignite.ml.composition.boosting.GDBModel; import org.apache.ignite.ml.composition.boosting.convergence.ConvergenceChecker; import org.apache.ignite.ml.composition.predictionsaggregator.WeightedPredictionsAggregator; import org.apache.ignite.ml.dataset.Dataset; @@ -35,7 +35,7 @@ import org.apache.ignite.ml.math.primitives.vector.VectorUtils; import org.apache.ignite.ml.preprocessing.Preprocessor; import org.apache.ignite.ml.trainers.DatasetTrainer; -import org.apache.ignite.ml.tree.DecisionTree; +import org.apache.ignite.ml.tree.DecisionTreeTrainer; import org.apache.ignite.ml.tree.data.DecisionTreeData; import org.apache.ignite.ml.tree.data.DecisionTreeDataBuilder; @@ -57,15 +57,15 @@ public GDBOnTreesLearningStrategy(boolean useIdx) { } /** {@inheritDoc} */ - @Override public List> update(GDBTrainer.GDBModel mdlToUpdate, + @Override public List> update(GDBModel mdlToUpdate, DatasetBuilder datasetBuilder, Preprocessor vectorizer) { LearningEnvironment 
environment = envBuilder.buildForTrainer(); environment.initDeployingContext(vectorizer); DatasetTrainer, Double> trainer = baseMdlTrainerBuilder.get(); - assert trainer instanceof DecisionTree; - DecisionTree decisionTreeTrainer = (DecisionTree)trainer; + assert trainer instanceof DecisionTreeTrainer; + DecisionTreeTrainer decisionTreeTrainer = (DecisionTreeTrainer)trainer; List> models = initLearningState(mdlToUpdate); diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainer.java index fb118ec327e298..ab8db2e5631147 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainer.java @@ -22,7 +22,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import org.apache.ignite.ml.composition.ModelsComposition; import org.apache.ignite.ml.composition.predictionsaggregator.OnMajorityPredictionsAggregator; import org.apache.ignite.ml.dataset.Dataset; import org.apache.ignite.ml.dataset.feature.FeatureMeta; @@ -31,7 +30,7 @@ import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedVector; import org.apache.ignite.ml.dataset.primitive.context.EmptyContext; import org.apache.ignite.ml.environment.LearningEnvironmentBuilder; -import org.apache.ignite.ml.tree.randomforest.data.TreeRoot; +import org.apache.ignite.ml.tree.randomforest.data.RandomForestTreeModel; import org.apache.ignite.ml.tree.randomforest.data.impurity.GiniHistogram; import org.apache.ignite.ml.tree.randomforest.data.impurity.GiniHistogramsComputer; import org.apache.ignite.ml.tree.randomforest.data.impurity.ImpurityHistogramsComputer; @@ -98,8 +97,8 @@ public RandomForestClassifierTrainer(List meta) { } /** {@inheritDoc} */ - @Override protected ModelsComposition buildComposition(List 
models) { - return new ModelsComposition(models, new OnMajorityPredictionsAggregator()); + @Override protected RandomForestModel buildComposition(List models) { + return new RandomForestModel(models, new OnMajorityPredictionsAggregator()); } /** {@inheritDoc} */ diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestModel.java new file mode 100644 index 00000000000000..1ae95762b925a0 --- /dev/null +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestModel.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.ml.tree.randomforest; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectWriter; +import com.fasterxml.jackson.databind.SerializationFeature; +import org.apache.ignite.ml.composition.ModelsComposition; +import org.apache.ignite.ml.composition.predictionsaggregator.MeanValuePredictionsAggregator; +import org.apache.ignite.ml.composition.predictionsaggregator.PredictionsAggregator; +import org.apache.ignite.ml.inference.json.JSONModel; +import org.apache.ignite.ml.inference.json.JSONModelMixIn; +import org.apache.ignite.ml.inference.json.JSONWritable; +import org.apache.ignite.ml.inference.json.JacksonHelper; +import org.apache.ignite.ml.tree.randomforest.data.RandomForestTreeModel; + +/** + * Random Forest Model class. + */ +public class RandomForestModel extends ModelsComposition implements JSONWritable { + /** Serial version uid. */ + private static final long serialVersionUID = 3476345240155508004L; + + /** */ + public RandomForestModel() { + super(new ArrayList<>(), new MeanValuePredictionsAggregator()); + + } + + /** */ + public RandomForestModel(List oldModels, PredictionsAggregator predictionsAggregator) { + super(oldModels, predictionsAggregator); + } + + /** + * Returns predictions aggregator. + */ + @Override public PredictionsAggregator getPredictionsAggregator() { + return predictionsAggregator; + } + + /** + * Returns containing models. 
+ */ + @Override public List getModels() { + return models; + } + + /** {@inheritDoc} */ + @Override public void toJSON(Path path) { + ObjectMapper mapper = new ObjectMapper().configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); + mapper.addMixIn(RandomForestModel.class, JSONModelMixIn.class); + + ObjectWriter writer = mapper + .writerFor(RandomForestModel.class) + .withAttribute("formatVersion", JSONModel.JSON_MODEL_FORMAT_VERSION) + .withAttribute("timestamp", System.currentTimeMillis()) + .withAttribute("uid", "dt_" + UUID.randomUUID().toString()) + .withAttribute("modelClass", RandomForestModel.class.getSimpleName()); + + try { + File file = new File(path.toAbsolutePath().toString()); + writer.writeValue(file, this); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** Loads RandomForestModel from JSON file. */ + public static RandomForestModel fromJSON(Path path) { + ObjectMapper mapper = new ObjectMapper(); + mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + + RandomForestModel mdl; + try { + JacksonHelper.readAndValidateBasicJsonModelProperties(path, mapper, RandomForestModel.class.getSimpleName()); + mdl = mapper.readValue(new File(path.toAbsolutePath().toString()), RandomForestModel.class); + return mdl; + } catch (IOException e) { + e.printStackTrace(); + } + return null; + } +} diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainer.java index ab1d0361ee4c78..4b0499f5c8f731 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainer.java @@ -18,10 +18,9 @@ package org.apache.ignite.ml.tree.randomforest; import java.util.List; -import org.apache.ignite.ml.composition.ModelsComposition; import 
org.apache.ignite.ml.composition.predictionsaggregator.MeanValuePredictionsAggregator; import org.apache.ignite.ml.dataset.feature.FeatureMeta; -import org.apache.ignite.ml.tree.randomforest.data.TreeRoot; +import org.apache.ignite.ml.tree.randomforest.data.RandomForestTreeModel; import org.apache.ignite.ml.tree.randomforest.data.impurity.ImpurityHistogramsComputer; import org.apache.ignite.ml.tree.randomforest.data.impurity.MSEHistogram; import org.apache.ignite.ml.tree.randomforest.data.impurity.MSEHistogramComputer; @@ -49,8 +48,8 @@ public RandomForestRegressionTrainer(List meta) { } /** {@inheritDoc} */ - @Override protected ModelsComposition buildComposition(List models) { - return new ModelsComposition(models, new MeanValuePredictionsAggregator()); + @Override protected RandomForestModel buildComposition(List models) { + return new RandomForestModel(models, new MeanValuePredictionsAggregator()); } /** {@inheritDoc} */ diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestTrainer.java index fe860ca62866da..481c22b15673b9 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestTrainer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestTrainer.java @@ -30,8 +30,6 @@ import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; -import org.apache.ignite.ml.IgniteModel; -import org.apache.ignite.ml.composition.ModelsComposition; import org.apache.ignite.ml.dataset.Dataset; import org.apache.ignite.ml.dataset.DatasetBuilder; import org.apache.ignite.ml.dataset.feature.BucketMeta; @@ -41,14 +39,13 @@ import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedVector; import org.apache.ignite.ml.dataset.primitive.builder.context.EmptyContextBuilder; import org.apache.ignite.ml.dataset.primitive.context.EmptyContext; -import 
org.apache.ignite.ml.math.primitives.vector.Vector; import org.apache.ignite.ml.preprocessing.Preprocessor; import org.apache.ignite.ml.trainers.SingleLabelDatasetTrainer; import org.apache.ignite.ml.tree.randomforest.data.FeaturesCountSelectionStrategies; import org.apache.ignite.ml.tree.randomforest.data.NodeId; import org.apache.ignite.ml.tree.randomforest.data.NodeSplit; +import org.apache.ignite.ml.tree.randomforest.data.RandomForestTreeModel; import org.apache.ignite.ml.tree.randomforest.data.TreeNode; -import org.apache.ignite.ml.tree.randomforest.data.TreeRoot; import org.apache.ignite.ml.tree.randomforest.data.impurity.ImpurityComputer; import org.apache.ignite.ml.tree.randomforest.data.impurity.ImpurityHistogramsComputer; import org.apache.ignite.ml.tree.randomforest.data.statistics.LeafValuesComputer; @@ -68,7 +65,7 @@ * @param Type of child of RandomForestTrainer using in with-methods. */ public abstract class RandomForestTrainer, - T extends RandomForestTrainer> extends SingleLabelDatasetTrainer { + T extends RandomForestTrainer> extends SingleLabelDatasetTrainer { /** Bucket size factor. */ private static final double BUCKET_SIZE_FACTOR = (1 / 10.0); @@ -110,9 +107,9 @@ public RandomForestTrainer(List meta) { } /** {@inheritDoc} */ - @Override public ModelsComposition fitWithInitializedDeployingContext(DatasetBuilder datasetBuilder, - Preprocessor preprocessor) { - List models = null; + @Override public RandomForestModel fitWithInitializedDeployingContext(DatasetBuilder datasetBuilder, + Preprocessor preprocessor) { + List models = null; try (Dataset dataset = datasetBuilder.build( envBuilder, new EmptyContextBuilder<>(), @@ -215,9 +212,9 @@ protected boolean init(Dataset datas * @param dataset Dataset. * @return list of decision trees. 
*/ - private List fit(Dataset dataset) { + private List fit(Dataset dataset) { Queue treesQueue = createRootsQueue(); - ArrayList roots = initTrees(treesQueue); + ArrayList roots = initTrees(treesQueue); Map histMeta = computeHistogramMeta(meta, dataset); if (histMeta.isEmpty()) return Collections.emptyList(); @@ -239,20 +236,20 @@ private List fit(Dataset d } /** {@inheritDoc} */ - @Override public boolean isUpdateable(ModelsComposition mdl) { - ModelsComposition fakeComposition = buildComposition(Collections.emptyList()); + @Override public boolean isUpdateable(RandomForestModel mdl) { + RandomForestModel fakeComposition = buildComposition(Collections.emptyList()); return mdl.getPredictionsAggregator().getClass() == fakeComposition.getPredictionsAggregator().getClass(); } /** {@inheritDoc} */ - @Override protected ModelsComposition updateModel(ModelsComposition mdl, DatasetBuilder datasetBuilder, + @Override protected RandomForestModel updateModel(RandomForestModel mdl, DatasetBuilder datasetBuilder, Preprocessor preprocessor) { - ArrayList> oldModels = new ArrayList<>(mdl.getModels()); - ModelsComposition newModels = fit(datasetBuilder, preprocessor); + List oldModels = new ArrayList<>(mdl.getModels()); + RandomForestModel newModels = fit(datasetBuilder, preprocessor); oldModels.addAll(newModels.getModels()); - return new ModelsComposition(oldModels, mdl.getPredictionsAggregator()); + return new RandomForestModel(oldModels, mdl.getPredictionsAggregator()); } /** @@ -297,16 +294,16 @@ private void split(Queue learningQueue, Map nodesToL * @param treesQueue Trees queue. * @return List of trees. 
*/ - protected ArrayList initTrees(Queue treesQueue) { + protected ArrayList initTrees(Queue treesQueue) { assert featuresPerTree > 0; - ArrayList roots = new ArrayList<>(); + ArrayList roots = new ArrayList<>(); List allFeatureIds = IntStream.range(0, meta.size()).boxed().collect(Collectors.toList()); for (TreeNode node : treesQueue) { Collections.shuffle(allFeatureIds, random); Set featuresSubspace = allFeatureIds.stream() .limit(featuresPerTree).collect(Collectors.toSet()); - roots.add(new TreeRoot(node, featuresSubspace)); + roots.add(new RandomForestTreeModel(node, featuresSubspace)); } return roots; @@ -394,6 +391,6 @@ boolean needSplit(TreeNode parentNode, Optional split) { * @param models Models. * @return composition of built trees. */ - protected abstract ModelsComposition buildComposition(List models); + protected abstract RandomForestModel buildComposition(List models); } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/NodeId.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/NodeId.java index f0ecd628009fe5..a8bc849bb7ab70 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/NodeId.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/NodeId.java @@ -29,10 +29,10 @@ public class NodeId implements Serializable { private static final long serialVersionUID = 4400852013136423333L; /** Tree id. */ - private final int treeId; + private int treeId; /** Node id. */ - private final long nodeId; + private long nodeId; /** * Create an instance of NodeId. @@ -45,11 +45,14 @@ public NodeId(int treeId, long nodeId) { this.nodeId = nodeId; } + public NodeId() { + } + /** * * @return Tree id. */ - public int treeId() { + public int getTreeId() { return treeId; } @@ -57,7 +60,7 @@ public int treeId() { * * @return Node id. 
*/ - public long nodeId() { + public long getNodeId() { return nodeId; } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/NodeSplit.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/NodeSplit.java index 6bdf9a9dce3adb..8146df01638e0a 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/NodeSplit.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/NodeSplit.java @@ -28,13 +28,16 @@ public class NodeSplit implements Serializable { private static final long serialVersionUID = 1331311529596106124L; /** Feature id in feature vector. */ - private final int featureId; + private int featureId; /** Feature split value. */ - private final double val; + private double val; /** Impurity at this split point. */ - private final double impurity; + private double impurity; + + public NodeSplit() { + } /** * Creates an instance of NodeSplit. diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/TreeRoot.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/RandomForestTreeModel.java similarity index 88% rename from modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/TreeRoot.java rename to modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/RandomForestTreeModel.java index 53a2d66c2278ed..563080ad69774c 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/TreeRoot.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/RandomForestTreeModel.java @@ -27,12 +27,12 @@ /** * Tree root class. */ -public class TreeRoot implements IgniteModel { +public class RandomForestTreeModel implements IgniteModel { /** Serial version uid. */ private static final long serialVersionUID = 531797299171329057L; /** Root node. */ - private TreeNode node; + private TreeNode rootNode; /** Used features. 
*/ private Set usedFeatures; @@ -43,14 +43,17 @@ public class TreeRoot implements IgniteModel { * @param root Root. * @param usedFeatures Used features. */ - public TreeRoot(TreeNode root, Set usedFeatures) { - this.node = root; + public RandomForestTreeModel(TreeNode root, Set usedFeatures) { + this.rootNode = root; this.usedFeatures = usedFeatures; } + public RandomForestTreeModel() { + } + /** {@inheritDoc} */ @Override public Double predict(Vector vector) { - return node.predict(vector); + return rootNode.predict(vector); } /** */ @@ -60,15 +63,15 @@ public Set getUsedFeatures() { /** */ public TreeNode getRootNode() { - return node; + return rootNode; } /** * @return All leafs in tree. */ - public List getLeafs() { + public List leafs() { List res = new ArrayList<>(); - getLeafs(node, res); + leafs(rootNode, res); return res; } @@ -76,12 +79,12 @@ public List getLeafs() { * @param root Root. * @param res Result list. */ - private void getLeafs(TreeNode root, List res) { + private void leafs(TreeNode root, List res) { if (root.getType() == TreeNode.Type.LEAF) res.add(root); else { - getLeafs(root.getLeft(), res); - getLeafs(root.getRight(), res); + leafs(root.getLeft(), res); + leafs(root.getRight(), res); } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/TreeNode.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/TreeNode.java index b373596e8f900b..7a480e60f85870 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/TreeNode.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/TreeNode.java @@ -44,7 +44,7 @@ public enum Type { } /** Id. */ - private final NodeId id; + private NodeId id; /** Feature id. 
*/ private int featureId; @@ -81,6 +81,9 @@ public TreeNode(long id, int treeId) { this.depth = 1; } + public TreeNode() { + } + /** {@inheritDoc} */ @Override public Double predict(Vector features) { assert type != Type.UNKNOWN; @@ -125,8 +128,8 @@ public List toConditional(int featureId, double val) { assert type == Type.UNKNOWN; toLeaf(val); - left = new TreeNode(2 * id.nodeId(), id.treeId()); - right = new TreeNode(2 * id.nodeId() + 1, id.treeId()); + left = new TreeNode(2 * id.getNodeId(), id.getTreeId()); + right = new TreeNode(2 * id.getNodeId() + 1, id.getTreeId()); this.type = Type.CONDITIONAL; this.featureId = featureId; diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/ImpurityHistogramsComputer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/ImpurityHistogramsComputer.java index bc22ee1669223c..521b42622a4e02 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/ImpurityHistogramsComputer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/ImpurityHistogramsComputer.java @@ -32,8 +32,8 @@ import org.apache.ignite.ml.dataset.primitive.context.EmptyContext; import org.apache.ignite.ml.tree.randomforest.data.NodeId; import org.apache.ignite.ml.tree.randomforest.data.NodeSplit; +import org.apache.ignite.ml.tree.randomforest.data.RandomForestTreeModel; import org.apache.ignite.ml.tree.randomforest.data.TreeNode; -import org.apache.ignite.ml.tree.randomforest.data.TreeRoot; /** * Class containing logic of aggregation impurity statistics within learning dataset. @@ -52,7 +52,7 @@ public abstract class ImpurityHistogramsComputer> aggregateImpurityStatistics(ArrayList roots, + public Map> aggregateImpurityStatistics(ArrayList roots, Map histMeta, Map nodesToLearn, Dataset dataset) { @@ -73,7 +73,7 @@ public Map> aggregateImpurityStatistics(ArrayL * @return Leaf statistics for impurity computing. 
*/ private Map> aggregateImpurityStatisticsOnPartition( - BootstrappedDatasetPartition dataset, ArrayList roots, + BootstrappedDatasetPartition dataset, ArrayList roots, Map histMeta, Map part) { @@ -85,7 +85,7 @@ private Map> aggregateImpurityStatisticsOnPart if (vector.counters()[sampleId] == 0) continue; - TreeRoot root = roots.get(sampleId); + RandomForestTreeModel root = roots.get(sampleId); NodeId key = root.getRootNode().predictNextNodeKey(vector.features()); if (!part.containsKey(key)) //if we didn't take all nodes from learning queue continue; diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/LeafValuesComputer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/LeafValuesComputer.java index 98c2abacb221df..7c8f7e7dd72eca 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/LeafValuesComputer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/LeafValuesComputer.java @@ -30,8 +30,8 @@ import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedVector; import org.apache.ignite.ml.dataset.primitive.context.EmptyContext; import org.apache.ignite.ml.tree.randomforest.data.NodeId; +import org.apache.ignite.ml.tree.randomforest.data.RandomForestTreeModel; import org.apache.ignite.ml.tree.randomforest.data.TreeNode; -import org.apache.ignite.ml.tree.randomforest.data.TreeRoot; /** * Class containing logic of leaf values computing after building of all trees in random forest. @@ -49,11 +49,11 @@ public abstract class LeafValuesComputer implements Serializable { * @param roots Learned trees. * @param dataset Dataset. 
*/ - public void setValuesForLeaves(ArrayList roots, + public void setValuesForLeaves(ArrayList roots, Dataset dataset) { Map leafs = roots.stream() - .flatMap(r -> r.getLeafs().stream()) + .flatMap(r -> r.leafs().stream()) .collect(Collectors.toMap(TreeNode::getId, Function.identity())); Map stats = dataset.compute( @@ -78,7 +78,7 @@ public void setValuesForLeaves(ArrayList roots, * @param data Data. * @return Statistics on labels for each leaf nodes. */ - private Map computeLeafsStatisticsInPartition(ArrayList roots, + private Map computeLeafsStatisticsInPartition(ArrayList roots, Map leafs, BootstrappedDatasetPartition data) { Map res = new HashMap<>(); diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/util/Utils.java b/modules/ml/src/main/java/org/apache/ignite/ml/util/Utils.java index 8100f93799fbf7..333ade41754970 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/util/Utils.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/util/Utils.java @@ -34,7 +34,14 @@ /** * Class with various utility methods. */ -public class Utils { +public final class Utils { + /** + * + */ + private Utils(){ + // No-op. + } + /** * Perform deep copy of an object. 
* diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/clustering/KMeansModelTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/clustering/KMeansModelTest.java index cc652e8e50b691..5c7f8dad799059 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/clustering/KMeansModelTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/clustering/KMeansModelTest.java @@ -54,7 +54,7 @@ public void predictClusters() { Assert.assertEquals(mdl.predict(new DenseVector(new double[]{-1.1, -1.1})), 3.0, PRECISION); Assert.assertEquals(mdl.distanceMeasure(), distanceMeasure); - Assert.assertEquals(mdl.getAmountOfClusters(), 4); - Assert.assertArrayEquals(mdl.getCenters(), centers); + Assert.assertEquals(mdl.amountOfClusters(), 4); + Assert.assertArrayEquals(mdl.centers(), centers); } } diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/common/KeepBinaryTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/common/KeepBinaryTest.java index 0d35df58b84cbe..ef33acae1a6ed6 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/common/KeepBinaryTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/common/KeepBinaryTest.java @@ -83,7 +83,7 @@ public void test() { Integer zeroCentre = mdl.predict(VectorUtils.num2Vec(0.0)); - assertTrue(mdl.getCenters()[zeroCentre].get(0) == 0); + assertTrue(mdl.centers()[zeroCentre].get(0) == 0); } /** diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/composition/boosting/GDBTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/composition/boosting/GDBTrainerTest.java index e5170505f08c91..9bd9509255e7dc 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/composition/boosting/GDBTrainerTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/composition/boosting/GDBTrainerTest.java @@ -32,7 +32,7 @@ import org.apache.ignite.ml.math.functions.IgniteBiFunction; import org.apache.ignite.ml.math.primitives.vector.Vector; import 
org.apache.ignite.ml.math.primitives.vector.VectorUtils; -import org.apache.ignite.ml.tree.DecisionTreeConditionalNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; import org.apache.ignite.ml.tree.boosting.GDBBinaryClassifierOnTreesTrainer; import org.apache.ignite.ml.tree.boosting.GDBRegressionOnTreesTrainer; import org.junit.Test; @@ -83,7 +83,7 @@ public void testFitRegression() { assertTrue(!composition.toString(true).isEmpty()); assertTrue(!composition.toString(false).isEmpty()); - composition.getModels().forEach(m -> assertTrue(m instanceof DecisionTreeConditionalNode)); + composition.getModels().forEach(m -> assertTrue(m instanceof DecisionTreeModel)); assertEquals(2000, composition.getModels().size()); assertTrue(composition.getPredictionsAggregator() instanceof WeightedPredictionsAggregator); @@ -145,7 +145,7 @@ private void testClassifier(BiFunction, assertTrue(mdl instanceof ModelsComposition); ModelsComposition composition = (ModelsComposition)mdl; - composition.getModels().forEach(m -> assertTrue(m instanceof DecisionTreeConditionalNode)); + composition.getModels().forEach(m -> assertTrue(m instanceof DecisionTreeModel)); assertTrue(composition.getModels().size() < 500); assertTrue(composition.getPredictionsAggregator() instanceof WeightedPredictionsAggregator); diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/math/distances/DistanceTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/math/distances/DistanceTest.java index 0be0b5472b255b..40949c493b125b 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/math/distances/DistanceTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/math/distances/DistanceTest.java @@ -43,7 +43,7 @@ public class DistanceTest { new BrayCurtisDistance(), new CanberraDistance(), new JensenShannonDistance(), - new WeightedMinkowskiDistance(4, new DenseVector(new double[]{1, 1, 1})), + new WeightedMinkowskiDistance(4, new double[]{1, 1, 1}), new MinkowskiDistance(Math.random())); /** */ 
@@ -197,9 +197,9 @@ public void weightedMinkowskiDistance() { double precistion = 0.01; int p = 2; double expRes = 5.0; - Vector v = new DenseVector(new double[]{2, 3, 4}); + double[] weights = new double[]{2, 3, 4}; - DistanceMeasure distanceMeasure = new WeightedMinkowskiDistance(p, v); + DistanceMeasure distanceMeasure = new WeightedMinkowskiDistance(p, weights); assertEquals(expRes, distanceMeasure.compute(v1, data2), precistion); assertEquals(expRes, distanceMeasure.compute(v1, v2), precistion); diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/math/distances/WeightedMinkowskiDistanceTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/math/distances/WeightedMinkowskiDistanceTest.java index 1ab93a195b69ad..c6a1d1826d8a3e 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/math/distances/WeightedMinkowskiDistanceTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/math/distances/WeightedMinkowskiDistanceTest.java @@ -72,7 +72,7 @@ public WeightedMinkowskiDistanceTest(TestData testData) { /** */ @Test public void testWeightedMinkowski() { - DistanceMeasure distanceMeasure = new WeightedMinkowskiDistance(testData.p, testData.weight); + DistanceMeasure distanceMeasure = new WeightedMinkowskiDistance(testData.p, testData.weights); assertEquals(testData.expRes, distanceMeasure.compute(testData.vectorA, testData.vectorB), PRECISION); @@ -87,15 +87,15 @@ private static class TestData { public final Integer p; - public final Vector weight; + public final double[] weights; public final Double expRes; - private TestData(double[] vectorA, double[] vectorB, Integer p, double[] weight, double expRes) { + private TestData(double[] vectorA, double[] vectorB, Integer p, double[] weights, double expRes) { this.vectorA = new DenseVector(vectorA); this.vectorB = new DenseVector(vectorB); this.p = p; - this.weight = new DenseVector(weight); + this.weights = weights; this.expRes = expRes; } @@ -104,7 +104,7 @@ private TestData(double[] vectorA, 
double[] vectorB, Integer p, double[] weight, Arrays.toString(vectorA.asArray()), Arrays.toString(vectorB.asArray()), p, - Arrays.toString(weight.asArray()), + Arrays.toString(weights), expRes ); } diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainerTest.java index 96c7158da4a9f4..a64651af1e29c5 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainerTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainerTest.java @@ -59,11 +59,11 @@ public void testSmallDataFit() { assertArrayEquals( new double[]{72.26948107, 15.95144674, 24.07403921, 66.73038781}, - mdl.getWeights().getStorage().data(), + mdl.weights().getStorage().data(), 1e-6 ); - assertEquals(2.8421709430404007e-14, mdl.getIntercept(), 1e-6); + assertEquals(2.8421709430404007e-14, mdl.intercept(), 1e-6); } /** @@ -95,9 +95,9 @@ public void testBigDataFit() { new DoubleArrayVectorizer().labeled(Vectorizer.LabelCoordinate.LAST) ); - assertArrayEquals(coef, mdl.getWeights().getStorage().data(), 1e-6); + assertArrayEquals(coef, mdl.weights().getStorage().data(), 1e-6); - assertEquals(intercept, mdl.getIntercept(), 1e-6); + assertEquals(intercept, mdl.intercept(), 1e-6); } /** */ @@ -142,10 +142,10 @@ public void testUpdate() { vectorizer ); - assertArrayEquals(originalMdl.getWeights().getStorage().data(), updatedOnSameDS.getWeights().getStorage().data(), 1e-6); - assertEquals(originalMdl.getIntercept(), updatedOnSameDS.getIntercept(), 1e-6); + assertArrayEquals(originalMdl.weights().getStorage().data(), updatedOnSameDS.weights().getStorage().data(), 1e-6); + assertEquals(originalMdl.intercept(), updatedOnSameDS.intercept(), 1e-6); - assertArrayEquals(originalMdl.getWeights().getStorage().data(), updatedOnEmptyDS.getWeights().getStorage().data(), 1e-6); - 
assertEquals(originalMdl.getIntercept(), updatedOnEmptyDS.getIntercept(), 1e-6); + assertArrayEquals(originalMdl.weights().getStorage().data(), updatedOnEmptyDS.weights().getStorage().data(), 1e-6); + assertEquals(originalMdl.intercept(), updatedOnEmptyDS.intercept(), 1e-6); } } diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainerTest.java index 22b16d15829fc5..9f503697697af6 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainerTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainerTest.java @@ -64,11 +64,11 @@ public void testSmallDataFit() { assertArrayEquals( new double[]{72.26948107, 15.95144674, 24.07403921, 66.73038781}, - mdl.getWeights().getStorage().data(), + mdl.weights().getStorage().data(), 1e-1 ); - assertEquals(2.8421709430404007e-14, mdl.getIntercept(), 1e-1); + assertEquals(2.8421709430404007e-14, mdl.intercept(), 1e-1); } /** */ @@ -112,19 +112,19 @@ public void testUpdate() { ); assertArrayEquals( - originalMdl.getWeights().getStorage().data(), - updatedOnSameDS.getWeights().getStorage().data(), + originalMdl.weights().getStorage().data(), + updatedOnSameDS.weights().getStorage().data(), 1.0 ); - assertEquals(originalMdl.getIntercept(), updatedOnSameDS.getIntercept(), 1.0); + assertEquals(originalMdl.intercept(), updatedOnSameDS.intercept(), 1.0); assertArrayEquals( - originalMdl.getWeights().getStorage().data(), - updatedOnEmptyDS.getWeights().getStorage().data(), + originalMdl.weights().getStorage().data(), + updatedOnEmptyDS.weights().getStorage().data(), 1e-1 ); - assertEquals(originalMdl.getIntercept(), updatedOnEmptyDS.getIntercept(), 1e-1); + assertEquals(originalMdl.intercept(), updatedOnEmptyDS.intercept(), 1e-1); } } diff --git 
a/modules/ml/src/test/java/org/apache/ignite/ml/selection/cv/CrossValidationTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/selection/cv/CrossValidationTest.java index 7122c6907d104a..bfccc715910672 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/selection/cv/CrossValidationTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/selection/cv/CrossValidationTest.java @@ -31,7 +31,7 @@ import org.apache.ignite.ml.selection.paramgrid.RandomStrategy; import org.apache.ignite.ml.selection.scoring.metric.MetricName; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; import org.junit.Test; import static org.apache.ignite.ml.common.TrainerTest.twoLinearlySeparableClasses; @@ -53,7 +53,7 @@ public void testScoreWithGoodDataset() { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(1, 0); - DebugCrossValidation scoreCalculator = + DebugCrossValidation scoreCalculator = new DebugCrossValidation<>(); Vectorizer vectorizer = new DoubleArrayVectorizer().labeled(Vectorizer.LabelCoordinate.FIRST); @@ -84,7 +84,7 @@ public void testScoreWithGoodDatasetAndBinaryMetrics() { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(1, 0); - DebugCrossValidation scoreCalculator = + DebugCrossValidation scoreCalculator = new DebugCrossValidation<>(); int folds = 4; @@ -298,7 +298,7 @@ public void testScoreWithBadDataset() { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(1, 0); - DebugCrossValidation scoreCalculator = + DebugCrossValidation scoreCalculator = new DebugCrossValidation<>(); int folds = 4; diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainerIntegrationTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainerIntegrationTest.java index 
d64c35ede37af7..1c3f1407f7dfca 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainerIntegrationTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainerIntegrationTest.java @@ -72,11 +72,12 @@ public void testFit() { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(1, 0); - DecisionTreeNode tree = trainer.fit(ignite, data, new DoubleArrayVectorizer().labeled(1)); + DecisionTreeModel tree = trainer.fit(ignite, data, new DoubleArrayVectorizer().labeled(1)); - assertTrue(tree instanceof DecisionTreeConditionalNode); + DecisionTreeNode decisionTreeNode = tree.getRootNode(); + assertTrue(decisionTreeNode instanceof DecisionTreeConditionalNode); - DecisionTreeConditionalNode node = (DecisionTreeConditionalNode) tree; + DecisionTreeConditionalNode node = (DecisionTreeConditionalNode) decisionTreeNode; assertEquals(0, node.getThreshold(), 1e-3); diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainerTest.java index ed7c4fe856d280..e618f634c02340 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainerTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainerTest.java @@ -75,11 +75,11 @@ public void testFit() { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(1, 0) .withUseIndex(useIdx == 1); - DecisionTreeNode tree = trainer.fit(data, parts, new DoubleArrayVectorizer().labeled(1)); + DecisionTreeNode treeNode = trainer.fit(data, parts, new DoubleArrayVectorizer().labeled(1)).getRootNode(); - assertTrue(tree instanceof DecisionTreeConditionalNode); + assertTrue(treeNode instanceof DecisionTreeConditionalNode); - DecisionTreeConditionalNode node = (DecisionTreeConditionalNode)tree; + 
DecisionTreeConditionalNode node = (DecisionTreeConditionalNode)treeNode; assertEquals(0, node.getThreshold(), 1e-3); assertEquals(0, node.getCol()); diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainerIntegrationTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainerIntegrationTest.java index 587dacdc4af2ca..686949f2652a17 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainerIntegrationTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainerIntegrationTest.java @@ -78,15 +78,15 @@ public void testFit() { DecisionTreeRegressionTrainer trainer = new DecisionTreeRegressionTrainer(1, 0); - DecisionTreeNode tree = trainer.fit( + DecisionTreeNode treeNode = trainer.fit( ignite, data, new DoubleArrayVectorizer().labeled(Vectorizer.LabelCoordinate.LAST) - ); + ).getRootNode(); - assertTrue(tree instanceof DecisionTreeConditionalNode); + assertTrue(treeNode instanceof DecisionTreeConditionalNode); - DecisionTreeConditionalNode node = (DecisionTreeConditionalNode) tree; + DecisionTreeConditionalNode node = (DecisionTreeConditionalNode) treeNode; assertEquals(0, node.getThreshold(), 1e-3); diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainerTest.java index 64663500784450..98e3e7a6f42001 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainerTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainerTest.java @@ -74,11 +74,11 @@ public void testFit() { DecisionTreeRegressionTrainer trainer = new DecisionTreeRegressionTrainer(1, 0) .withUsingIdx(useIdx == 1); - DecisionTreeNode tree = trainer.fit(data, parts, new DoubleArrayVectorizer().labeled(1)); + DecisionTreeNode treeNode = trainer.fit(data, parts, new 
DoubleArrayVectorizer().labeled(1)).getRootNode(); - assertTrue(tree instanceof DecisionTreeConditionalNode); + assertTrue(treeNode instanceof DecisionTreeConditionalNode); - DecisionTreeConditionalNode node = (DecisionTreeConditionalNode) tree; + DecisionTreeConditionalNode node = (DecisionTreeConditionalNode) treeNode; assertEquals(0, node.getThreshold(), 1e-3); diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainerTest.java index c94799a76f1537..cb5961dcaa5a00 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainerTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainerTest.java @@ -22,7 +22,6 @@ import java.util.Map; import org.apache.ignite.ml.TestUtils; import org.apache.ignite.ml.common.TrainerTest; -import org.apache.ignite.ml.composition.ModelsComposition; import org.apache.ignite.ml.composition.predictionsaggregator.OnMajorityPredictionsAggregator; import org.apache.ignite.ml.dataset.feature.FeatureMeta; import org.apache.ignite.ml.dataset.feature.extractor.impl.LabeledDummyVectorizer; @@ -56,12 +55,12 @@ public void testFit() { ArrayList meta = new ArrayList<>(); for (int i = 0; i < 4; i++) meta.add(new FeatureMeta("", i, false)); - DatasetTrainer trainer = new RandomForestClassifierTrainer(meta) + DatasetTrainer trainer = new RandomForestClassifierTrainer(meta) .withAmountOfTrees(5) .withFeaturesCountSelectionStrgy(x -> 2) .withEnvironmentBuilder(TestUtils.testEnvBuilder()); - ModelsComposition mdl = trainer.fit(sample, parts, new LabeledDummyVectorizer<>()); + RandomForestModel mdl = trainer.fit(sample, parts, new LabeledDummyVectorizer<>()); assertTrue(mdl.getPredictionsAggregator() instanceof OnMajorityPredictionsAggregator); assertEquals(5, mdl.getModels().size()); @@ -84,14 +83,14 @@ public 
void testUpdate() { ArrayList meta = new ArrayList<>(); for (int i = 0; i < 4; i++) meta.add(new FeatureMeta("", i, false)); - DatasetTrainer trainer = new RandomForestClassifierTrainer(meta) + DatasetTrainer trainer = new RandomForestClassifierTrainer(meta) .withAmountOfTrees(100) .withFeaturesCountSelectionStrgy(x -> 2) .withEnvironmentBuilder(TestUtils.testEnvBuilder()); - ModelsComposition originalMdl = trainer.fit(sample, parts, new LabeledDummyVectorizer<>()); - ModelsComposition updatedOnSameDS = trainer.update(originalMdl, sample, parts, new LabeledDummyVectorizer<>()); - ModelsComposition updatedOnEmptyDS = trainer.update(originalMdl, new HashMap>(), parts, new LabeledDummyVectorizer<>()); + RandomForestModel originalMdl = trainer.fit(sample, parts, new LabeledDummyVectorizer<>()); + RandomForestModel updatedOnSameDS = trainer.update(originalMdl, sample, parts, new LabeledDummyVectorizer<>()); + RandomForestModel updatedOnEmptyDS = trainer.update(originalMdl, new HashMap>(), parts, new LabeledDummyVectorizer<>()); Vector v = VectorUtils.of(5, 0.5, 0.05, 0.005); assertEquals(originalMdl.predict(v), updatedOnSameDS.predict(v), 0.01); diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestIntegrationTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestIntegrationTest.java index 8bb0894b1aa9f1..dc2be8536dd0fa 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestIntegrationTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestIntegrationTest.java @@ -24,7 +24,6 @@ import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.internal.util.IgniteUtils; -import org.apache.ignite.ml.composition.ModelsComposition; import org.apache.ignite.ml.composition.predictionsaggregator.MeanValuePredictionsAggregator; import 
org.apache.ignite.ml.dataset.feature.FeatureMeta; import org.apache.ignite.ml.dataset.feature.extractor.impl.DoubleArrayVectorizer; @@ -85,7 +84,7 @@ public void testFit() { .withAmountOfTrees(5) .withFeaturesCountSelectionStrgy(x -> 2); - ModelsComposition mdl = trainer.fit(ignite, data, new DoubleArrayVectorizer().labeled(1)); + RandomForestModel mdl = trainer.fit(ignite, data, new DoubleArrayVectorizer().labeled(1)); assertTrue(mdl.getPredictionsAggregator() instanceof MeanValuePredictionsAggregator); assertEquals(5, mdl.getModels().size()); diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainerTest.java index 8ea027fa0631dc..d501dbabcda9ca 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainerTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainerTest.java @@ -21,7 +21,6 @@ import java.util.HashMap; import java.util.Map; import org.apache.ignite.ml.common.TrainerTest; -import org.apache.ignite.ml.composition.ModelsComposition; import org.apache.ignite.ml.composition.predictionsaggregator.MeanValuePredictionsAggregator; import org.apache.ignite.ml.dataset.feature.FeatureMeta; import org.apache.ignite.ml.dataset.feature.extractor.impl.LabeledDummyVectorizer; @@ -58,7 +57,7 @@ public void testFit() { .withAmountOfTrees(5) .withFeaturesCountSelectionStrgy(x -> 2); - ModelsComposition mdl = trainer.fit(sample, parts, new LabeledDummyVectorizer<>()); + RandomForestModel mdl = trainer.fit(sample, parts, new LabeledDummyVectorizer<>()); assertTrue(mdl.getPredictionsAggregator() instanceof MeanValuePredictionsAggregator); assertEquals(5, mdl.getModels().size()); } @@ -84,9 +83,9 @@ public void testUpdate() { .withAmountOfTrees(100) .withFeaturesCountSelectionStrgy(x -> 2); - ModelsComposition originalMdl = 
trainer.fit(sample, parts, new LabeledDummyVectorizer<>()); - ModelsComposition updatedOnSameDS = trainer.update(originalMdl, sample, parts, new LabeledDummyVectorizer<>()); - ModelsComposition updatedOnEmptyDS = trainer.update(originalMdl, new HashMap>(), parts, new LabeledDummyVectorizer<>()); + RandomForestModel originalMdl = trainer.fit(sample, parts, new LabeledDummyVectorizer<>()); + RandomForestModel updatedOnSameDS = trainer.update(originalMdl, sample, parts, new LabeledDummyVectorizer<>()); + RandomForestModel updatedOnEmptyDS = trainer.update(originalMdl, new HashMap>(), parts, new LabeledDummyVectorizer<>()); Vector v = VectorUtils.of(5, 0.5, 0.05, 0.005); assertEquals(originalMdl.predict(v), updatedOnSameDS.predict(v), 0.1); diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/data/TreeNodeTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/data/TreeNodeTest.java index 0b199ff05463bc..0550eca187d31e 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/data/TreeNodeTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/data/TreeNodeTest.java @@ -38,8 +38,8 @@ public void testPredictNextIdCondNodeAtTreeCorner() { TreeNode node = new TreeNode(5, 1); assertEquals(TreeNode.Type.UNKNOWN, node.getType()); - assertEquals(5, node.predictNextNodeKey(features1).nodeId()); - assertEquals(5, node.predictNextNodeKey(features2).nodeId()); + assertEquals(5, node.predictNextNodeKey(features1).getNodeId()); + assertEquals(5, node.predictNextNodeKey(features2).getNodeId()); } /** */ @@ -49,8 +49,8 @@ public void testPredictNextIdForLeaf() { node.toLeaf(0.5); assertEquals(TreeNode.Type.LEAF, node.getType()); - assertEquals(5, node.predictNextNodeKey(features1).nodeId()); - assertEquals(5, node.predictNextNodeKey(features2).nodeId()); + assertEquals(5, node.predictNextNodeKey(features1).getNodeId()); + assertEquals(5, node.predictNextNodeKey(features2).getNodeId()); } /** */ @@ 
-60,8 +60,8 @@ public void testPredictNextIdForTree() { root.toConditional(0, 0.1); assertEquals(TreeNode.Type.CONDITIONAL, root.getType()); - assertEquals(2, root.predictNextNodeKey(features1).nodeId()); - assertEquals(3, root.predictNextNodeKey(features2).nodeId()); + assertEquals(2, root.predictNextNodeKey(features1).getNodeId()); + assertEquals(3, root.predictNextNodeKey(features2).getNodeId()); } /** */ @@ -69,7 +69,7 @@ public void testPredictNextIdForTree() { public void testPredictProba() { TreeNode root = new TreeNode(1, 1); List leaves = root.toConditional(0, 0.1); - leaves.forEach(leaf -> leaf.toLeaf(leaf.getId().nodeId() % 2)); + leaves.forEach(leaf -> leaf.toLeaf(leaf.getId().getNodeId() % 2)); assertEquals(TreeNode.Type.CONDITIONAL, root.getType()); assertEquals(0.0, root.predict(features1), 0.001); diff --git a/modules/opencensus/pom.xml b/modules/opencensus/pom.xml index 648c2bcd4d91ae..9a3503a0225ced 100644 --- a/modules/opencensus/pom.xml +++ b/modules/opencensus/pom.xml @@ -116,4 +116,27 @@ test + + + + + maven-dependency-plugin + + + copy-libs + package + + copy-dependencies + + + org.apache.ignite + target/libs + runtime + false + + + + + + diff --git a/modules/osgi-karaf/src/main/resources/features.xml b/modules/osgi-karaf/src/main/resources/features.xml index 0ff71e4bd057d1..207941a4f291d4 100644 --- a/modules/osgi-karaf/src/main/resources/features.xml +++ b/modules/osgi-karaf/src/main/resources/features.xml @@ -32,14 +32,14 @@ ignite-core ignite-aop ignite-aws - ignite-camel - ignite-flume + ignite-camel-ext + ignite-flume-ext ignite-indexing ignite-jcl - ignite-jms11 + ignite-jms11-ext ignite-jta ignite-kafka-ext - ignite-mqtt + ignite-mqtt-ext ignite-rest-http @@ -48,7 +48,7 @@ ignite-slf4j ignite-spring ignite-ssh - ignite-twitter + ignite-twitter-ext ignite-urideploy ignite-web ignite-zookeeper @@ -82,7 +82,7 @@ mvn:org.apache.ignite/ignite-aws/${project.version} - +
    camel-core - mvn:org.apache.ignite/ignite-camel/${project.version} + mvn:org.apache.ignite/ignite-camel-ext/${ignite-camel-ext.version}
    - +
    wrap wrap:mvn:org.apache.flume/flume-ng-core/${flume.ng.version}$Bundle-SymbolicName=flume-ng-core&Bundle-Version=${flume.ng.version} - mvn:org.apache.ignite/ignite-flume/${project.version} + mvn:org.apache.ignite/ignite-flume-ext/${ignite-flume-ext.version}
    @@ -139,12 +139,12 @@ mvn:org.apache.ignite/ignite-jcl/${project.version} - +
    mvn:org.apache.geronimo.specs/geronimo-jms_1.1_spec/${jms.spec.version} - mvn:org.apache.ignite/ignite-jms11/${project.version} + mvn:org.apache.ignite/ignite-jms11-ext/${ignite-jms11-ext.version}
    @@ -185,7 +185,7 @@ mvn:org.apache.ignite/ignite-log4j/${project.version} - +
    @@ -193,7 +193,7 @@ mvn:com.google.guava/guava/${guava.version} mvn:org.eclipse.paho/org.eclipse.paho.client.mqttv3/${paho.version} wrap:mvn:com.github.rholder/guava-retrying/${guava.retrying.version}$Bundle-SymbolicName=guava-retrying&Bundle-SymbolicName=guava-retrying&Bundle-Version=${guava.retrying.version} - mvn:org.apache.ignite/ignite-mqtt/${project.version} + mvn:org.apache.ignite/ignite-mqtt-ext/${ignite-mqtt-ext.version}
    @@ -277,7 +277,7 @@ mvn:org.apache.ignite/ignite-ssh/${project.version} - +
    @@ -285,7 +285,7 @@ mvn:com.google.guava/guava/${guava14.version} wrap:mvn:com.twitter/hbc-core/${twitter.hbc.version}$Bundle-SymbolicName=Hosebird Client Core&Bundle-Version=${twitter.hbc.version} wrap:mvn:com.twitter/hbc-twitter4j/${twitter.hbc.version}$Bundle-SymbolicName=Hosebird Client Twitter4J&Bundle-Version=${twitter.hbc.version} - mvn:org.apache.ignite/ignite-twitter/${project.version} + mvn:org.apache.ignite/ignite-twitter-ext/${ignite-twitter-ext.version}
    diff --git a/modules/platforms/cpp/common/os/linux/include/ignite/common/concurrent_os.h b/modules/platforms/cpp/common/os/linux/include/ignite/common/concurrent_os.h index 18ba54deb64f28..66f6656a7ddf90 100644 --- a/modules/platforms/cpp/common/os/linux/include/ignite/common/concurrent_os.h +++ b/modules/platforms/cpp/common/os/linux/include/ignite/common/concurrent_os.h @@ -402,6 +402,14 @@ namespace ignite // No-op. } + /** + * Destructor. + */ + ~ThreadLocalInstance() + { + Remove(); + } + /** * Get value. * diff --git a/modules/platforms/cpp/common/os/win/include/ignite/common/concurrent_os.h b/modules/platforms/cpp/common/os/win/include/ignite/common/concurrent_os.h index a4f6f583291042..b1e89164c49dac 100644 --- a/modules/platforms/cpp/common/os/win/include/ignite/common/concurrent_os.h +++ b/modules/platforms/cpp/common/os/win/include/ignite/common/concurrent_os.h @@ -412,6 +412,14 @@ namespace ignite // No-op. } + /** + * Destructor. + */ + ~ThreadLocalInstance() + { + Remove(); + } + /** * Get value. 
* diff --git a/modules/platforms/cpp/core/include/ignite/cache/query/query_sql_fields.h b/modules/platforms/cpp/core/include/ignite/cache/query/query_sql_fields.h index 6ddd27c0b01acf..9c051a3dc89084 100644 --- a/modules/platforms/cpp/core/include/ignite/cache/query/query_sql_fields.h +++ b/modules/platforms/cpp/core/include/ignite/cache/query/query_sql_fields.h @@ -383,6 +383,9 @@ namespace ignite writer.WriteNull(); else writer.WriteString(schema); + + writer.WriteInt32Array(NULL, 0); // Partitions + writer.WriteInt32(1); // UpdateBatchSize } private: diff --git a/modules/platforms/cpp/core/src/impl/cache/cache_impl.cpp b/modules/platforms/cpp/core/src/impl/cache/cache_impl.cpp index 1994a0799304f6..e0bddd05326fd5 100644 --- a/modules/platforms/cpp/core/src/impl/cache/cache_impl.cpp +++ b/modules/platforms/cpp/core/src/impl/cache/cache_impl.cpp @@ -450,6 +450,7 @@ namespace ignite rawWriter.WriteInt64(handle); rawWriter.WriteBool(qry0.GetLocal()); + rawWriter.WriteBool(false); // IncludeExpired event::CacheEntryEventFilterHolderBase& filterOp = qry0.GetFilterHolder(); diff --git a/modules/platforms/cpp/odbc-test/CMakeLists.txt b/modules/platforms/cpp/odbc-test/CMakeLists.txt index 9b87a60f23c73e..3a08f42035dc43 100644 --- a/modules/platforms/cpp/odbc-test/CMakeLists.txt +++ b/modules/platforms/cpp/odbc-test/CMakeLists.txt @@ -65,6 +65,8 @@ set(SOURCES src/teamcity/teamcity_boost.cpp src/authentication_test.cpp src/sql_parsing_test.cpp src/streaming_test.cpp + src/cursor_binding_test.cpp + src/test_server.cpp ../odbc/src/log.cpp ../odbc/src/cursor.cpp ../odbc/src/diagnostic/diagnostic_record.cpp diff --git a/modules/platforms/cpp/odbc-test/include/odbc_test_suite.h b/modules/platforms/cpp/odbc-test/include/odbc_test_suite.h index 2381130385af01..1bdd3b077436d8 100644 --- a/modules/platforms/cpp/odbc-test/include/odbc_test_suite.h +++ b/modules/platforms/cpp/odbc-test/include/odbc_test_suite.h @@ -25,6 +25,16 @@ #include #include +#include + +#ifndef 
BOOST_TEST_CONTEXT +# define BOOST_TEST_CONTEXT(...) +#endif + +#ifndef BOOST_TEST_INFO +# define BOOST_TEST_INFO(...) +#endif + #include #include "ignite/ignite.h" @@ -97,7 +107,7 @@ namespace ignite /** * Insert requested number of TestType values with all defaults except - * for the strFields, which are generated using getTestString(). + * for the strFields, which are generated using GetTestString(). * * @param recordsNum Number of records to insert. * @param merge Set to true to use merge instead. @@ -130,13 +140,176 @@ namespace ignite */ void InsertNonFullBatchSelect(int recordsNum, int splitAt); + /** + * Get test i8Field. + * + * @param idx Index. + * @return Corresponding i8Field value. + */ + static int8_t GetTestI8Field(int64_t idx); + + /** + * Check i8Field test value. + * @param idx Index. + * @param value Value to test. + */ + static void CheckTestI8Value(int idx, int8_t value); + + /** + * Get test i16Field. + * + * @param idx Index. + * @return Corresponding i16Field value. + */ + static int16_t GetTestI16Field(int64_t idx); + + /** + * Check i16Field test value. + * @param idx Index. + * @param value Value to test. + */ + static void CheckTestI16Value(int idx, int16_t value); + + /** + * Get test i32Field. + * + * @param idx Index. + * @return Corresponding i32Field value. + */ + static int32_t GetTestI32Field(int64_t idx); + + /** + * Check i32Field test value. + * @param idx Index. + * @param value Value to test. + */ + static void CheckTestI32Value(int idx, int32_t value); + /** * Get test string. * - * @param ind Index. + * @param idx Index. * @return Corresponding test string. */ - static std::string getTestString(int64_t ind); + static std::string GetTestString(int64_t idx); + + /** + * Check strField test value. + * @param idx Index. + * @param value Value to test. + */ + static void CheckTestStringValue(int idx, const std::string& value); + + /** + * Get test floatField. + * + * @param idx Index. + * @return Corresponding floatField value. 
+ */ + static float GetTestFloatField(int64_t idx); + + /** + * Check floatField test value. + * @param idx Index. + * @param value Value to test. + */ + static void CheckTestFloatValue(int idx, float value); + + /** + * Get test doubleField. + * + * @param idx Index. + * @return Corresponding doubleField value. + */ + static double GetTestDoubleField(int64_t idx); + + /** + * Check doubleField test value. + * @param idx Index. + * @param value Value to test. + */ + static void CheckTestDoubleValue(int idx, double value); + + /** + * Get test boolField. + * + * @param idx Index. + * @return Corresponding boolField value. + */ + static bool GetTestBoolField(int64_t idx); + + /** + * Check boolField test value. + * @param idx Index. + * @param value Value to test. + */ + static void CheckTestBoolValue(int idx, bool value); + + /** + * Get test dateField. + * + * @param idx Index. + * @param val Output value. + */ + static void GetTestDateField(int64_t idx, SQL_DATE_STRUCT& val); + + /** + * Check dateField test value. + * + * @param idx Index. + * @param val Value to test. + */ + static void CheckTestDateValue(int idx, const SQL_DATE_STRUCT& val); + + /** + * Get test timeField. + * + * @param idx Index. + * @param val Output value. + */ + static void GetTestTimeField(int64_t idx, SQL_TIME_STRUCT& val); + + /** + * Check timeField test value. + * + * @param idx Index. + * @param val Value to test. + */ + static void CheckTestTimeValue(int idx, const SQL_TIME_STRUCT& val); + + /** + * Get test timestampField. + * + * @param idx Index. + * @param val Output value. + */ + static void GetTestTimestampField(int64_t idx, SQL_TIMESTAMP_STRUCT& val); + + /** + * Check timestampField test value. + * + * @param idx Index. + * @param val Value to test. + */ + static void CheckTestTimestampValue(int idx, const SQL_TIMESTAMP_STRUCT& val); + + /** + * Get test i8ArrayField. + * + * @param idx Index. + * @param val Output value. + * @param valLen Value length. 
+ */ + static void GetTestI8ArrayField(int64_t idx, int8_t* val, size_t valLen); + + /** + * Check i8ArrayField test value. + * + * @param idx Index. + * @param val Value to test. + * @param valLen Value length. + */ + static void CheckTestI8ArrayValue(int idx, const int8_t* val, size_t valLen); /** * Check that SQL error has expected SQL state. @@ -177,6 +350,14 @@ namespace ignite */ SQLRETURN ExecQuery(const std::string& qry); + /** + * Prepares SQL query. + * + * @param qry Query. + * @return Result. + */ + SQLRETURN PrepareQuery(const std::string& qry); + /** ODBC Environment. */ SQLHENV env; diff --git a/modules/platforms/cpp/odbc-test/include/test_server.h b/modules/platforms/cpp/odbc-test/include/test_server.h new file mode 100644 index 00000000000000..c50cc709c73cee --- /dev/null +++ b/modules/platforms/cpp/odbc-test/include/test_server.h @@ -0,0 +1,201 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _IGNITE_ODBC_TEST_TEST_SERVER +#define _IGNITE_ODBC_TEST_TEST_SERVER + +#include + +#include + +#ifndef _WIN32_WINNT +#define _WIN32_WINNT 0x0601 +#endif // _WIN32_WINNT + +#include +#include + +namespace ignite +{ + +/** + * Test Server Session. 
+ */
+class TestServerSession
+{
+public:
+    /**
+     * Construct new instance of class.
+     * @param service Asio service.
+     * @param responses Responses to provide to requests.
+     */
+    TestServerSession(boost::asio::io_service& service, const std::vector< std::vector >& responses);
+
+    /**
+     * Get socket.
+     */
+    boost::asio::ip::tcp::socket& GetSocket()
+    {
+        return socket;
+    }
+
+    /**
+     * Start session.
+     */
+    void Start();
+
+    /**
+     * Get response at index.
+     * @param idx Index.
+     * @return Response.
+     */
+    const std::vector& GetResponse(size_t idx) const
+    {
+        return responses.at(idx);
+    }
+
+private:
+    /**
+     * Receive next request.
+     */
+    void ReadNextRequest();
+
+    /**
+     * Handle received request size.
+     * @param error Error.
+     * @param bytesTransferred Bytes transferred.
+     */
+    void HandleRequestSizeReceived(const boost::system::error_code& error, size_t bytesTransferred);
+
+    /**
+     * Handle received request.
+     * @param error Error.
+     * @param bytesTransferred Bytes transferred.
+     */
+    void HandleRequestReceived(const boost::system::error_code& error, size_t bytesTransferred);
+
+    /**
+     * Handle sent response.
+     * @param error Error.
+     * @param bytesTransferred Bytes transferred.
+     */
+    void HandleResponseSent(const boost::system::error_code& error, size_t bytesTransferred);
+
+    // The socket used to communicate with the client.
+    boost::asio::ip::tcp::socket socket;
+
+    // Received requests.
+    std::vector< std::vector > requests;
+
+    // Responses to provide.
+    const std::vector< std::vector > responses;
+
+    // Number of requests answered.
+    size_t requestsResponded;
+};
+
+/**
+ * Test Server.
+ */
+class TestServer
+{
+public:
+    /**
+     * Constructor.
+     * @param port TCP port to listen.
+     */
+    TestServer(uint16_t port = 11110);
+
+    /**
+     * Destructor.
+     */
+    ~TestServer();
+
+    /**
+     * Push new handshake response to send.
+     * @param accept Accept or reject response.
+ */
+    void PushHandshakeResponse(bool accept)
+    {
+        std::vector rsp(4 + 1);
+        rsp[0] = 1;
+        rsp[4] = accept ? 1 : 0;
+
+        PushResponse(rsp);
+    }
+
+    /**
+     * Push new response to send.
+     * @param resp Response to push.
+     */
+    void PushResponse(const std::vector& resp)
+    {
+        responses.push_back(resp);
+    }
+
+    /**
+     * Get specified session.
+     * @param idx Index.
+     * @return Specified session.
+     */
+    TestServerSession& GetSession(size_t idx = 0)
+    {
+        return *sessions.at(idx);
+    }
+
+    /**
+     * Start server.
+     */
+    void Start();
+
+    /**
+     * Stop server.
+     */
+    void Stop();
+
+private:
+    /**
+     * Start accepting connections.
+     */
+    void StartAccept();
+
+    /**
+     * Handle accepted connection.
+     * @param session Accepted session.
+     * @param error Error.
+     */
+    void HandleAccept(boost::shared_ptr session, const boost::system::error_code& error);
+
+    // Service.
+    boost::asio::io_service service;
+
+    // Acceptor.
+    boost::asio::ip::tcp::acceptor acceptor;
+
+    // Responses.
+    std::vector< std::vector > responses;
+
+    // Sessions.
+    std::vector< boost::shared_ptr > sessions;
+
+    // Server Thread.
+ boost::shared_ptr serverThread; +}; + +} // namespace ignite + +#endif //_IGNITE_ODBC_TEST_TEST_SERVER \ No newline at end of file diff --git a/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj b/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj index 53a68b89253123..cfc5ac1687d133 100644 --- a/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj +++ b/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj @@ -185,6 +185,7 @@ + @@ -209,6 +210,7 @@ + @@ -219,6 +221,7 @@ + @@ -250,4 +253,4 @@ - \ No newline at end of file + diff --git a/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj.filters b/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj.filters index 3af88e20ea2a0b..5d6e787836902f 100644 --- a/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj.filters +++ b/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj.filters @@ -37,6 +37,9 @@ Code + + Code + Code @@ -190,6 +193,9 @@ Externals + + Code + @@ -210,6 +216,9 @@ Code + + Code + @@ -240,4 +249,4 @@ Configs - \ No newline at end of file + diff --git a/modules/platforms/cpp/odbc-test/src/api_robustness_test.cpp b/modules/platforms/cpp/odbc-test/src/api_robustness_test.cpp index 9148ba94fde146..8fcb9659abd6c2 100644 --- a/modules/platforms/cpp/odbc-test/src/api_robustness_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/api_robustness_test.cpp @@ -113,7 +113,7 @@ struct ApiRobustnessTestSuiteFixture : public odbc::OdbcTestSuite // Operation is not supported. However, there should be no crash. 
BOOST_CHECK(ret == SQL_ERROR); - CheckSQLStatementDiagnosticError("HY106"); + CheckSQLStatementDiagnosticError("HYC00"); } /** diff --git a/modules/platforms/cpp/odbc-test/src/connection_test.cpp b/modules/platforms/cpp/odbc-test/src/connection_test.cpp index ee54ed265c2a4f..6d4ed9a8edce17 100644 --- a/modules/platforms/cpp/odbc-test/src/connection_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/connection_test.cpp @@ -15,6 +15,7 @@ * limitations under the License. */ +#include "test_server.h" #ifdef _WIN32 # include #endif @@ -29,7 +30,6 @@ #include "ignite/ignite.h" #include "ignite/ignition.h" -#include "test_type.h" #include "test_utils.h" #include "odbc_test_suite.h" @@ -50,7 +50,7 @@ struct ConnectionTestSuiteFixture: odbc::OdbcTestSuite ConnectionTestSuiteFixture() : OdbcTestSuite() { - StartNode(); + // No-op. } /** @@ -109,6 +109,8 @@ BOOST_FIXTURE_TEST_SUITE(ConnectionTestSuite, ConnectionTestSuiteFixture) BOOST_AUTO_TEST_CASE(TestConnectionRestore) { + StartNode(); + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); // Check that query was successfully executed. @@ -129,4 +131,19 @@ BOOST_AUTO_TEST_CASE(TestConnectionRestore) BOOST_CHECK_EQUAL(ExecQueryAndReturnError(), ""); } +BOOST_AUTO_TEST_CASE(TestConnectionMemoryLeak) +{ + TestServer testServer(11100); + + testServer.PushHandshakeResponse(true); + testServer.Start(); + + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11100;SCHEMA=cache"); + + ExecQuery("Select * from Test"); + + Disconnect(); + Disconnect(); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/modules/platforms/cpp/odbc-test/src/cursor_binding_test.cpp b/modules/platforms/cpp/odbc-test/src/cursor_binding_test.cpp new file mode 100644 index 00000000000000..d145e5231d0420 --- /dev/null +++ b/modules/platforms/cpp/odbc-test/src/cursor_binding_test.cpp @@ -0,0 +1,288 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifdef _WIN32 +# include +#endif + +#include +#include + +#include + +#include +#include + +#include + +#include "ignite/ignite.h" +#include "ignite/ignition.h" +#include "ignite/impl/binary/binary_utils.h" + +#include "test_type.h" +#include "test_utils.h" +#include "odbc_test_suite.h" + +using namespace ignite; +using namespace ignite::cache; +using namespace ignite::cache::query; +using namespace ignite::common; +using namespace ignite_test; + +using namespace boost::unit_test; + +using ignite::impl::binary::BinaryUtils; + +/** + * Test setup fixture. + */ +struct CursorBindingTestSuiteFixture : public odbc::OdbcTestSuite +{ + static Ignite StartAdditionalNode(const char* name) + { + return StartPlatformNode("queries-test.xml", name); + } + + /** + * Constructor. + */ + CursorBindingTestSuiteFixture() : + testCache(0) + { + grid = StartAdditionalNode("NodeMain"); + + testCache = grid.GetCache("cache"); + } + + /** + * Destructor. + */ + virtual ~CursorBindingTestSuiteFixture() + { + // No-op. + } + + /** Node started during the test. */ + Ignite grid; + + /** Test cache instance. 
*/ + Cache testCache; +}; + +BOOST_FIXTURE_TEST_SUITE(CursorBindingTestSuite, CursorBindingTestSuiteFixture) + + +#define CHECK_TEST_VALUES(idx, testIdx) \ + do { \ + BOOST_TEST_CONTEXT("Test idx: " << testIdx) \ + { \ + BOOST_CHECK(RowStatus[idx] == SQL_ROW_SUCCESS || RowStatus[idx] == SQL_ROW_SUCCESS_WITH_INFO); \ + \ + BOOST_CHECK(i8FieldsInd[idx] != SQL_NULL_DATA); \ + BOOST_CHECK(i16FieldsInd[idx] != SQL_NULL_DATA); \ + BOOST_CHECK(i32FieldsInd[idx] != SQL_NULL_DATA); \ + BOOST_CHECK(strFieldsLen[idx] != SQL_NULL_DATA); \ + BOOST_CHECK(floatFields[idx] != SQL_NULL_DATA); \ + BOOST_CHECK(doubleFieldsInd[idx] != SQL_NULL_DATA); \ + BOOST_CHECK(boolFieldsInd[idx] != SQL_NULL_DATA); \ + BOOST_CHECK(dateFieldsInd[idx] != SQL_NULL_DATA); \ + BOOST_CHECK(timeFieldsInd[idx] != SQL_NULL_DATA); \ + BOOST_CHECK(timestampFieldsInd[idx] != SQL_NULL_DATA); \ + BOOST_CHECK(i8ArrayFieldsLen[idx] != SQL_NULL_DATA); \ + \ + int8_t i8Field = static_cast(i8Fields[idx]); \ + int16_t i16Field = static_cast(i16Fields[idx]); \ + int32_t i32Field = static_cast(i32Fields[idx]); \ + std::string strField(reinterpret_cast(&strFields[idx][0]), \ + static_cast(strFieldsLen[idx])); \ + float floatField = static_cast(floatFields[idx]); \ + double doubleField = static_cast(doubleFields[idx]); \ + bool boolField = boolFields[idx] != 0; \ + \ + CheckTestI8Value(testIdx, i8Field); \ + CheckTestI16Value(testIdx, i16Field); \ + CheckTestI32Value(testIdx, i32Field); \ + CheckTestStringValue(testIdx, strField); \ + CheckTestFloatValue(testIdx, floatField); \ + CheckTestDoubleValue(testIdx, doubleField); \ + CheckTestBoolValue(testIdx, boolField); \ + CheckTestDateValue(testIdx, dateFields[idx]); \ + CheckTestTimeValue(testIdx, timeFields[idx]); \ + CheckTestTimestampValue(testIdx, timestampFields[idx]); \ + CheckTestI8ArrayValue(testIdx, reinterpret_cast(i8ArrayFields[idx]), \ + static_cast(i8ArrayFieldsLen[idx])); \ + } \ + } while (false) + +BOOST_AUTO_TEST_CASE(TestCursorBindingColumnWise) +{ + 
enum { ROWS_COUNT = 15 };
+    enum { ROW_ARRAY_SIZE = 10 };
+    enum { BUFFER_SIZE = 1024 };
+
+    StartAdditionalNode("Node2");
+
+    Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache;PAGE_SIZE=8");
+
+    // Preloading data.
+
+    InsertTestBatch(0, ROWS_COUNT, ROWS_COUNT);
+
+    // Setting attributes.
+
+    SQLUSMALLINT RowStatus[ROW_ARRAY_SIZE];
+    SQLUINTEGER NumRowsFetched;
+
+    SQLRETURN ret;
+
+    ret = SQLSetStmtAttr(stmt, SQL_ATTR_ROW_BIND_TYPE, SQL_BIND_BY_COLUMN, 0);
+    ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt);
+
+    ret = SQLSetStmtAttr(stmt, SQL_ATTR_ROW_ARRAY_SIZE, reinterpret_cast(ROW_ARRAY_SIZE), 0);
+    ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt);
+
+    ret = SQLSetStmtAttr(stmt, SQL_ATTR_ROW_STATUS_PTR, RowStatus, 0);
+    ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt);
+
+    ret = SQLSetStmtAttr(stmt, SQL_ATTR_ROWS_FETCHED_PTR, &NumRowsFetched, 0);
+    ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt);
+
+    // Binding columns.
+
+    SQLSCHAR i8Fields[ROW_ARRAY_SIZE] = {0};
+    SQLLEN i8FieldsInd[ROW_ARRAY_SIZE];
+
+    SQLSMALLINT i16Fields[ROW_ARRAY_SIZE] = {0};
+    SQLLEN i16FieldsInd[ROW_ARRAY_SIZE];
+
+    SQLINTEGER i32Fields[ROW_ARRAY_SIZE] = {0};
+    SQLLEN i32FieldsInd[ROW_ARRAY_SIZE];
+
+    SQLCHAR strFields[ROW_ARRAY_SIZE][BUFFER_SIZE];
+    SQLLEN strFieldsLen[ROW_ARRAY_SIZE];
+
+    SQLREAL floatFields[ROW_ARRAY_SIZE];
+    SQLLEN floatFieldsInd[ROW_ARRAY_SIZE];
+
+    SQLDOUBLE doubleFields[ROW_ARRAY_SIZE];
+    SQLLEN doubleFieldsInd[ROW_ARRAY_SIZE];
+
+    SQLCHAR boolFields[ROW_ARRAY_SIZE];
+    SQLLEN boolFieldsInd[ROW_ARRAY_SIZE];
+
+    SQL_DATE_STRUCT dateFields[ROW_ARRAY_SIZE];
+    SQLLEN dateFieldsInd[ROW_ARRAY_SIZE];
+
+    SQL_TIME_STRUCT timeFields[ROW_ARRAY_SIZE];
+    SQLLEN timeFieldsInd[ROW_ARRAY_SIZE];
+
+    SQL_TIMESTAMP_STRUCT timestampFields[ROW_ARRAY_SIZE];
+    SQLLEN timestampFieldsInd[ROW_ARRAY_SIZE];
+
+    SQLCHAR i8ArrayFields[ROW_ARRAY_SIZE][BUFFER_SIZE];
+    SQLLEN i8ArrayFieldsLen[ROW_ARRAY_SIZE];
+
+    ret = SQLBindCol(stmt, 1, SQL_C_STINYINT, i8Fields, 0,
i8FieldsInd);
+    ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt);
+
+    ret = SQLBindCol(stmt, 2, SQL_C_SSHORT, i16Fields, 0, i16FieldsInd);
+    ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt);
+
+    ret = SQLBindCol(stmt, 3, SQL_C_LONG, i32Fields, 0, i32FieldsInd);
+    ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt);
+
+    ret = SQLBindCol(stmt, 4, SQL_C_CHAR, strFields, BUFFER_SIZE, strFieldsLen);
+    ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt);
+
+    ret = SQLBindCol(stmt, 5, SQL_C_FLOAT, floatFields, 0, floatFieldsInd);
+    ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt);
+
+    ret = SQLBindCol(stmt, 6, SQL_C_DOUBLE, doubleFields, 0, doubleFieldsInd);
+    ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt);
+
+    ret = SQLBindCol(stmt, 7, SQL_C_BIT, boolFields, 0, boolFieldsInd);
+    ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt);
+
+    ret = SQLBindCol(stmt, 8, SQL_C_TYPE_DATE, dateFields, 0, dateFieldsInd);
+    ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt);
+
+    ret = SQLBindCol(stmt, 9, SQL_C_TYPE_TIME, timeFields, 0, timeFieldsInd);
+    ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt);
+
+    ret = SQLBindCol(stmt, 10, SQL_C_TYPE_TIMESTAMP, timestampFields, 0, timestampFieldsInd);
+    ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt);
+
+    ret = SQLBindCol(stmt, 11, SQL_C_BINARY, i8ArrayFields, BUFFER_SIZE, i8ArrayFieldsLen);
+    ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt);
+
+    SQLCHAR sql[] = "SELECT "
+        "i8Field, i16Field, i32Field, strField, floatField, doubleField, "
+        "boolField, dateField, timeField, timestampField, i8ArrayField "
+        "FROM TestType "
+        "ORDER BY _key";
+
+    // Execute a statement to retrieve rows from the TestType table.
+ ret = SQLExecDirect(stmt, sql, SQL_NTS); + ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLFetchScroll(stmt, SQL_FETCH_NEXT, 0); + ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + BOOST_CHECK_EQUAL(NumRowsFetched, (SQLUINTEGER)ROW_ARRAY_SIZE); + + for (int64_t i = 0; i < NumRowsFetched; i++) + { + CHECK_TEST_VALUES(i, static_cast(i)); + } + + ret = SQLFetch(stmt); + ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + BOOST_CHECK_EQUAL(NumRowsFetched, ROWS_COUNT - ROW_ARRAY_SIZE); + + for (int64_t i = 0; i < NumRowsFetched; i++) + { + int64_t testIdx = i + ROW_ARRAY_SIZE; + CHECK_TEST_VALUES(i, static_cast(testIdx)); + } + + for (int64_t i = NumRowsFetched; i < ROW_ARRAY_SIZE; i++) + { + BOOST_TEST_INFO("Checking row status for row: " << i); + BOOST_CHECK(RowStatus[i] == SQL_ROW_NOROW); + } + + ret = SQLFetchScroll(stmt, SQL_FETCH_NEXT, 0); + BOOST_CHECK_EQUAL(ret, SQL_NO_DATA); + + // Close the cursor. + ret = SQLCloseCursor(stmt); + ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); +} + +BOOST_AUTO_TEST_CASE(TestCursorBindingRowWise) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache;PAGE_SIZE=8"); + + SQLRETURN ret = SQLSetStmtAttr(stmt, SQL_ATTR_ROW_BIND_TYPE, reinterpret_cast(42), 0); + + BOOST_CHECK_EQUAL(ret, SQL_ERROR); + + CheckSQLStatementDiagnosticError("HYC00"); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/modules/platforms/cpp/odbc-test/src/meta_queries_test.cpp b/modules/platforms/cpp/odbc-test/src/meta_queries_test.cpp index bd6b31a47b4a16..04f76928b50821 100644 --- a/modules/platforms/cpp/odbc-test/src/meta_queries_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/meta_queries_test.cpp @@ -161,6 +161,91 @@ BOOST_AUTO_TEST_CASE(TestGetTypeInfoAllTypes) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); } +BOOST_AUTO_TEST_CASE(TestDateTypeColumnAttributeCurdate) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + SQLCHAR req[] = "select CURDATE()"; + SQLExecDirect(stmt, 
req, SQL_NTS); + + SQLLEN intVal = 0; + + SQLRETURN ret = SQLColAttribute(stmt, 1, SQL_DESC_TYPE, 0, 0, 0, &intVal); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(intVal, SQL_TYPE_DATE); +} + +BOOST_AUTO_TEST_CASE(TestDateTypeColumnAttributeLiteral) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + SQLCHAR req[] = "select DATE '2020-10-25'"; + SQLExecDirect(stmt, req, SQL_NTS); + + SQLLEN intVal = 0; + + SQLRETURN ret = SQLColAttribute(stmt, 1, SQL_DESC_TYPE, 0, 0, 0, &intVal); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(intVal, SQL_TYPE_DATE); +} + +BOOST_AUTO_TEST_CASE(TestDateTypeColumnAttributeField) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + SQLCHAR req[] = "select CAST (dateField as DATE) from TestType"; + SQLExecDirect(stmt, req, SQL_NTS); + + SQLLEN intVal = 0; + + SQLRETURN ret = SQLColAttribute(stmt, 1, SQL_DESC_TYPE, 0, 0, 0, &intVal); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(intVal, SQL_TYPE_DATE); +} + +BOOST_AUTO_TEST_CASE(TestTimeTypeColumnAttributeLiteral) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + SQLCHAR req[] = "select TIME '12:42:13'"; + SQLExecDirect(stmt, req, SQL_NTS); + + SQLLEN intVal = 0; + + SQLRETURN ret = SQLColAttribute(stmt, 1, SQL_DESC_TYPE, 0, 0, 0, &intVal); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(intVal, SQL_TYPE_TIME); +} + +BOOST_AUTO_TEST_CASE(TestTimeTypeColumnAttributeField) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + SQLCHAR req[] = "select timeField from TestType"; + SQLExecDirect(stmt, req, SQL_NTS); + + SQLLEN intVal = 0; + + SQLRETURN ret = SQLColAttribute(stmt, 1, SQL_DESC_TYPE, 0, 0, 0, &intVal); + + if 
(!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(intVal, SQL_TYPE_TIME); +} + BOOST_AUTO_TEST_CASE(TestColAttributesColumnLength) { Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); @@ -216,6 +301,101 @@ BOOST_AUTO_TEST_CASE(TestColAttributesColumnScale) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); } +BOOST_AUTO_TEST_CASE(TestColAttributesColumnLengthPrepare) +{ + StartAdditionalNode("Node2"); + + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + InsertTestStrings(1); + + SQLCHAR req[] = "select strField from TestType"; + SQLPrepare(stmt, req, SQL_NTS); + + SQLLEN intVal; + SQLCHAR strBuf[1024]; + SQLSMALLINT strLen; + + SQLRETURN ret = SQLColAttribute(stmt, 1, SQL_COLUMN_LENGTH, strBuf, sizeof(strBuf), &strLen, &intVal); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(intVal, 60); + + ret = SQLExecute(stmt); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLColAttribute(stmt, 1, SQL_COLUMN_LENGTH, strBuf, sizeof(strBuf), &strLen, &intVal); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(intVal, 60); +} + +BOOST_AUTO_TEST_CASE(TestColAttributesColumnPresicionPrepare) +{ + StartAdditionalNode("Node2"); + + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + InsertTestStrings(1); + + SQLCHAR req[] = "select strField from TestType"; + SQLPrepare(stmt, req, SQL_NTS); + + SQLLEN intVal; + SQLCHAR strBuf[1024]; + SQLSMALLINT strLen; + + SQLRETURN ret = SQLColAttribute(stmt, 1, SQL_COLUMN_PRECISION, strBuf, sizeof(strBuf), &strLen, &intVal); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(intVal, 60); + + ret = SQLExecute(stmt); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLColAttribute(stmt, 1, SQL_COLUMN_PRECISION, strBuf, 
sizeof(strBuf), &strLen, &intVal); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(intVal, 60); +} + +BOOST_AUTO_TEST_CASE(TestColAttributesColumnScalePrepare) +{ + StartAdditionalNode("Node2"); + + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + InsertTestStrings(1); + + SQLCHAR req[] = "select strField from TestType"; + SQLPrepare(stmt, req, SQL_NTS); + + SQLLEN intVal; + SQLCHAR strBuf[1024]; + SQLSMALLINT strLen; + + SQLRETURN ret = SQLColAttribute(stmt, 1, SQL_COLUMN_SCALE, strBuf, sizeof(strBuf), &strLen, &intVal); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + ret = SQLExecute(stmt); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLColAttribute(stmt, 1, SQL_COLUMN_SCALE, strBuf, sizeof(strBuf), &strLen, &intVal); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); +} + BOOST_AUTO_TEST_CASE(TestGetDataWithGetTypeInfo) { Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); @@ -457,4 +637,37 @@ BOOST_AUTO_TEST_CASE(TestDdlColumnsMetaEscaped) BOOST_REQUIRE_EQUAL(ret, SQL_NO_DATA); } +BOOST_AUTO_TEST_CASE(TestSQLNumResultColsAfterSQLPrepare) +{ + StartAdditionalNode("Node2"); + + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=PUBLIC"); + + SQLRETURN ret = ExecQuery("create table TestSqlPrepare(id int primary key, test1 varchar, test2 long, test3 varchar)"); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLFreeStmt(stmt, SQL_CLOSE); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = PrepareQuery("select * from PUBLIC.TestSqlPrepare"); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + SQLSMALLINT columnCount = 0; + + ret = SQLNumResultCols(stmt, &columnCount); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + BOOST_CHECK_EQUAL(columnCount, 4); + + ret = SQLExecute(stmt); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + 
+ columnCount = 0; + + ret = SQLNumResultCols(stmt, &columnCount); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + BOOST_CHECK_EQUAL(columnCount, 4); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/modules/platforms/cpp/odbc-test/src/odbc_test_suite.cpp b/modules/platforms/cpp/odbc-test/src/odbc_test_suite.cpp index 68ea3164106e49..4a08a184678a66 100644 --- a/modules/platforms/cpp/odbc-test/src/odbc_test_suite.cpp +++ b/modules/platforms/cpp/odbc-test/src/odbc_test_suite.cpp @@ -95,9 +95,7 @@ namespace ignite outstr, sizeof(outstr), &outstrlen, SQL_DRIVER_COMPLETE); if (!SQL_SUCCEEDED(ret)) - { BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_DBC, dbc)); - } // Allocate a statement handle SQLAllocHandle(SQL_HANDLE_STMT, dbc, &stmt); @@ -184,15 +182,182 @@ namespace ignite Ignition::StopAll(true); } - std::string OdbcTestSuite::getTestString(int64_t ind) + int8_t OdbcTestSuite::GetTestI8Field(int64_t idx) + { + return static_cast(idx * 8); + } + + void OdbcTestSuite::CheckTestI8Value(int idx, int8_t value) + { + BOOST_TEST_INFO("Test index: " << idx); + BOOST_CHECK_EQUAL(value, GetTestI8Field(idx)); + } + + int16_t OdbcTestSuite::GetTestI16Field(int64_t idx) + { + return static_cast(idx * 16); + } + + void OdbcTestSuite::CheckTestI16Value(int idx, int16_t value) + { + BOOST_TEST_INFO("Test index: " << idx); + BOOST_CHECK_EQUAL(value, GetTestI16Field(idx)); + } + + int32_t OdbcTestSuite::GetTestI32Field(int64_t idx) + { + return static_cast(idx * 32); + } + + void OdbcTestSuite::CheckTestI32Value(int idx, int32_t value) + { + BOOST_TEST_INFO("Test index: " << idx); + BOOST_CHECK_EQUAL(value, GetTestI32Field(idx)); + } + + std::string OdbcTestSuite::GetTestString(int64_t idx) { std::stringstream builder; - builder << "String#" << ind; + builder << "String#" << idx; return builder.str(); } + void OdbcTestSuite::CheckTestStringValue(int idx, const std::string &value) + { + BOOST_TEST_INFO("Test index: " << idx); + BOOST_CHECK_EQUAL(value, GetTestString(idx)); + } + + float 
OdbcTestSuite::GetTestFloatField(int64_t idx) + { + return static_cast(idx * 0.5f); + } + + void OdbcTestSuite::CheckTestFloatValue(int idx, float value) + { + BOOST_TEST_INFO("Test index: " << idx); + BOOST_CHECK_EQUAL(value, GetTestFloatField(idx)); + } + + double OdbcTestSuite::GetTestDoubleField(int64_t idx) + { + return static_cast(idx * 0.25f); + } + + void OdbcTestSuite::CheckTestDoubleValue(int idx, double value) + { + BOOST_TEST_INFO("Test index: " << idx); + BOOST_CHECK_EQUAL(value, GetTestDoubleField(idx)); + } + + bool OdbcTestSuite::GetTestBoolField(int64_t idx) + { + return static_cast(idx % 2 == 0); + } + + void OdbcTestSuite::CheckTestBoolValue(int idx, bool value) + { + BOOST_TEST_INFO("Test index: " << idx); + BOOST_CHECK_EQUAL(value, GetTestBoolField(idx)); + } + + void OdbcTestSuite::GetTestDateField(int64_t idx, SQL_DATE_STRUCT& val) + { + val.year = static_cast(2017 + idx / 365); + val.month = static_cast(((idx / 28) % 12) + 1); + val.day = static_cast((idx % 28) + 1); + } + + void OdbcTestSuite::CheckTestDateValue(int idx, const SQL_DATE_STRUCT& val) + { + BOOST_TEST_CONTEXT("Test index: " << idx) + { + SQL_DATE_STRUCT expected; + GetTestDateField(idx, expected); + + BOOST_CHECK_EQUAL(val.year, expected.year); + BOOST_CHECK_EQUAL(val.month, expected.month); + BOOST_CHECK_EQUAL(val.day, expected.day); + } + } + + void OdbcTestSuite::GetTestTimeField(int64_t idx, SQL_TIME_STRUCT& val) + { + val.hour = (idx / 3600) % 24; + val.minute = (idx / 60) % 60; + val.second = idx % 60; + } + + void OdbcTestSuite::CheckTestTimeValue(int idx, const SQL_TIME_STRUCT& val) + { + BOOST_TEST_CONTEXT("Test index: " << idx) + { + SQL_TIME_STRUCT expected; + GetTestTimeField(idx, expected); + + BOOST_CHECK_EQUAL(val.hour, expected.hour); + BOOST_CHECK_EQUAL(val.minute, expected.minute); + BOOST_CHECK_EQUAL(val.second, expected.second); + } + } + + void OdbcTestSuite::GetTestTimestampField(int64_t idx, SQL_TIMESTAMP_STRUCT& val) + { + SQL_DATE_STRUCT date; + 
GetTestDateField(idx, date); + + SQL_TIME_STRUCT time; + GetTestTimeField(idx, time); + + val.year = date.year; + val.month = date.month; + val.day = date.day; + val.hour = time.hour; + val.minute = time.minute; + val.second = time.second; + val.fraction = static_cast(std::abs(idx * 914873)) % 1000000000; + } + + void OdbcTestSuite::CheckTestTimestampValue(int idx, const SQL_TIMESTAMP_STRUCT& val) + { + BOOST_TEST_CONTEXT("Test index: " << idx) + { + SQL_TIMESTAMP_STRUCT expected; + GetTestTimestampField(idx, expected); + + BOOST_CHECK_EQUAL(val.year, expected.year); + BOOST_CHECK_EQUAL(val.month, expected.month); + BOOST_CHECK_EQUAL(val.day, expected.day); + BOOST_CHECK_EQUAL(val.hour, expected.hour); + BOOST_CHECK_EQUAL(val.minute, expected.minute); + BOOST_CHECK_EQUAL(val.second, expected.second); + BOOST_CHECK_EQUAL(val.fraction, expected.fraction); + } + } + + void OdbcTestSuite::GetTestI8ArrayField(int64_t idx, int8_t* val, size_t valLen) + { + for (size_t j = 0; j < valLen; ++j) + val[j] = static_cast(idx * valLen + j); + } + + void OdbcTestSuite::CheckTestI8ArrayValue(int idx, const int8_t* val, size_t valLen) + { + BOOST_TEST_CONTEXT("Test index: " << idx) + { + common::FixedSizeArray expected(static_cast(valLen)); + GetTestI8ArrayField(idx, expected.GetData(), expected.GetSize()); + + for (size_t j = 0; j < valLen; ++j) + { + BOOST_TEST_INFO("Byte index: " << j); + BOOST_CHECK_EQUAL(val[j], expected[(int32_t)j]); + } + } + } + void OdbcTestSuite::CheckSQLDiagnosticError(int16_t handleType, SQLHANDLE handle, const std::string& expectSqlState) { SQLCHAR state[ODBC_BUFFER_SIZE]; @@ -227,7 +392,14 @@ namespace ignite { std::vector sql = MakeQuery(qry); - return SQLExecDirect(stmt, &sql[0], static_cast(sql.size())); + return SQLExecDirect(stmt, sql.data(), static_cast(sql.size())); + } + + SQLRETURN OdbcTestSuite::PrepareQuery(const std::string& qry) + { + std::vector sql = MakeQuery(qry); + + return SQLPrepare(stmt, sql.data(), static_cast(sql.size())); } 
void OdbcTestSuite::InsertTestStrings(int recordsNum, bool merge) @@ -260,7 +432,7 @@ namespace ignite for (SQLSMALLINT i = 0; i < recordsNum; ++i) { key = i + 1; - std::string val = getTestString(i); + std::string val = GetTestString(i); strncpy(strField, val.c_str(), sizeof(strField)); strFieldLen = SQL_NTS; @@ -335,36 +507,23 @@ namespace ignite int seed = from + i; keys[i] = seed; - i8Fields[i] = seed * 8; - i16Fields[i] = seed * 16; - i32Fields[i] = seed * 32; + i8Fields[i] = GetTestI8Field(seed); + i16Fields[i] = GetTestI16Field(seed); + i32Fields[i] = GetTestI32Field(seed); - std::string val = getTestString(seed); + std::string val = GetTestString(seed); strncpy(strFields.GetData() + 1024 * i, val.c_str(), 1023); strFieldsLen[i] = val.size(); - floatFields[i] = seed * 0.5f; - doubleFields[i] = seed * 0.25f; - boolFields[i] = seed % 2 == 0; - - dateFields[i].year = 2017 + seed / 365; - dateFields[i].month = ((seed / 28) % 12) + 1; - dateFields[i].day = (seed % 28) + 1; - - timeFields[i].hour = (seed / 3600) % 24; - timeFields[i].minute = (seed / 60) % 60; - timeFields[i].second = seed % 60; + floatFields[i] = GetTestFloatField(seed); + doubleFields[i] = GetTestDoubleField(seed); + boolFields[i] = GetTestBoolField(seed); - timestampFields[i].year = dateFields[i].year; - timestampFields[i].month = dateFields[i].month; - timestampFields[i].day = dateFields[i].day; - timestampFields[i].hour = timeFields[i].hour; - timestampFields[i].minute = timeFields[i].minute; - timestampFields[i].second = timeFields[i].second; - timestampFields[i].fraction = static_cast(std::abs(seed * 914873)) % 1000000000; + GetTestDateField(seed, dateFields[i]); + GetTestTimeField(seed, timeFields[i]); + GetTestTimestampField(seed, timestampFields[i]); - for (int j = 0; j < 42; ++j) - i8ArrayFields[i * 42 + j] = seed * 42 + j; + GetTestI8ArrayField(seed, &i8ArrayFields[i*42], 42); i8ArrayFieldsLen[i] = 42; } @@ -425,19 +584,19 @@ namespace ignite 
BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); BOOST_TEST_CHECKPOINT("Binding dateFields"); - ret = SQLBindParameter(stmt, 9, SQL_PARAM_INPUT, SQL_C_DATE, SQL_DATE, 0, 0, dateFields.GetData(), 0, 0); + ret = SQLBindParameter(stmt, 9, SQL_PARAM_INPUT, SQL_C_TYPE_DATE, SQL_TYPE_DATE, 0, 0, dateFields.GetData(), 0, 0); if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); BOOST_TEST_CHECKPOINT("Binding timeFields"); - ret = SQLBindParameter(stmt, 10, SQL_PARAM_INPUT, SQL_C_TIME, SQL_TIME, 0, 0, timeFields.GetData(), 0, 0); + ret = SQLBindParameter(stmt, 10, SQL_PARAM_INPUT, SQL_C_TYPE_TIME, SQL_TYPE_TIME, 0, 0, timeFields.GetData(), 0, 0); if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); BOOST_TEST_CHECKPOINT("Binding timestampFields"); - ret = SQLBindParameter(stmt, 11, SQL_PARAM_INPUT, SQL_C_TIMESTAMP, SQL_TIMESTAMP, 0, 0, timestampFields.GetData(), 0, 0); + ret = SQLBindParameter(stmt, 11, SQL_PARAM_INPUT, SQL_C_TYPE_TIMESTAMP, SQL_TYPE_TIMESTAMP, 0, 0, timestampFields.GetData(), 0, 0); if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); @@ -546,7 +705,7 @@ namespace ignite if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); - std::string expectedStr = getTestString(selectedRecordsNum); + std::string expectedStr = GetTestString(selectedRecordsNum); int64_t expectedKey = selectedRecordsNum; BOOST_CHECK_EQUAL(key, expectedKey); @@ -627,7 +786,7 @@ namespace ignite if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); - std::string expectedStr = getTestString(selectedRecordsNum); + std::string expectedStr = GetTestString(selectedRecordsNum); int64_t expectedKey = selectedRecordsNum; BOOST_CHECK_EQUAL(key, expectedKey); diff --git a/modules/platforms/cpp/odbc-test/src/queries_test.cpp b/modules/platforms/cpp/odbc-test/src/queries_test.cpp index 46fed8a75ae2ec..60333ff32c5156 100644 --- 
a/modules/platforms/cpp/odbc-test/src/queries_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/queries_test.cpp @@ -748,15 +748,15 @@ BOOST_AUTO_TEST_CASE(TestNullFields) if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); - ret = SQLBindCol(stmt, 9, SQL_C_DATE, &dateColumn, 0, &columnLens[8]); + ret = SQLBindCol(stmt, 9, SQL_C_TYPE_DATE, &dateColumn, 0, &columnLens[8]); if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); - ret = SQLBindCol(stmt, 10, SQL_C_TIME, &timeColumn, 0, &columnLens[9]); + ret = SQLBindCol(stmt, 10, SQL_C_TYPE_TIME, &timeColumn, 0, &columnLens[9]); if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); - ret = SQLBindCol(stmt, 11, SQL_C_TIMESTAMP, ×tampColumn, 0, &columnLens[10]); + ret = SQLBindCol(stmt, 11, SQL_C_TYPE_TIMESTAMP, ×tampColumn, 0, &columnLens[10]); if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); @@ -922,7 +922,7 @@ BOOST_AUTO_TEST_CASE(TestInsertSelect) if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); - std::string expectedStr = getTestString(selectedRecordsNum); + std::string expectedStr = GetTestString(selectedRecordsNum); int64_t expectedKey = selectedRecordsNum + 1; BOOST_CHECK_EQUAL(key, expectedKey); @@ -999,7 +999,7 @@ BOOST_AUTO_TEST_CASE(TestInsertUpdateSelect) if (expectedKey == 42) expectedStr = "Updated value"; else - expectedStr = getTestString(selectedRecordsNum); + expectedStr = GetTestString(selectedRecordsNum); BOOST_CHECK_EQUAL(std::string(strField, strFieldLen), expectedStr); @@ -1066,7 +1066,7 @@ BOOST_AUTO_TEST_CASE(TestInsertDeleteSelect) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); int64_t expectedKey = (selectedRecordsNum + 1) * 2; - std::string expectedStr = getTestString(expectedKey - 1); + std::string expectedStr = GetTestString(expectedKey - 1); BOOST_CHECK_EQUAL(key, expectedKey); BOOST_CHECK_EQUAL(std::string(strField, strFieldLen), 
expectedStr); @@ -1127,7 +1127,7 @@ BOOST_AUTO_TEST_CASE(TestInsertMergeSelect) if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); - std::string expectedStr = getTestString(selectedRecordsNum); + std::string expectedStr = GetTestString(selectedRecordsNum); int64_t expectedKey = selectedRecordsNum + 1; BOOST_CHECK_EQUAL(key, expectedKey); @@ -1629,7 +1629,7 @@ BOOST_AUTO_TEST_CASE(TestErrorMessage) BOOST_AUTO_TEST_CASE(TestAffectedRows) { - Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache;PAGE_SIZE=1024"); const int recordsNum = 100; @@ -1670,7 +1670,41 @@ BOOST_AUTO_TEST_CASE(TestAffectedRows) if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); - BOOST_CHECK_EQUAL(affected, 0); + BOOST_CHECK_EQUAL(affected, 1024); +} + +BOOST_AUTO_TEST_CASE(TestAffectedRowsOnSelect) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache;PAGE_SIZE=123"); + + const int recordsNum = 1000; + + // Inserting values. 
+ InsertTestStrings(recordsNum); + + // Just selecting everything to make sure everything is OK + SQLCHAR selectReq[] = "SELECT _key, strField FROM TestType ORDER BY _key"; + + SQLRETURN ret = SQLExecDirect(stmt, selectReq, sizeof(selectReq)); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + for (int i = 0; i < 200; ++i) + { + SQLLEN affected = -1; + ret = SQLRowCount(stmt, &affected); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(affected, 123); + + ret = SQLFetch(stmt); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + } } BOOST_AUTO_TEST_CASE(TestMultipleSelects) diff --git a/modules/platforms/cpp/odbc-test/src/sql_test_suite_fixture.cpp b/modules/platforms/cpp/odbc-test/src/sql_test_suite_fixture.cpp index 9d2c2ab1b67c05..af46057159fc42 100644 --- a/modules/platforms/cpp/odbc-test/src/sql_test_suite_fixture.cpp +++ b/modules/platforms/cpp/odbc-test/src/sql_test_suite_fixture.cpp @@ -253,7 +253,7 @@ namespace ignite { SQL_DATE_STRUCT res; - CheckSingleResult0(request, SQL_C_DATE, &res, 0, 0); + CheckSingleResult0(request, SQL_C_TYPE_DATE, &res, 0, 0); } template<> @@ -261,7 +261,7 @@ namespace ignite { SQL_TIMESTAMP_STRUCT res; - CheckSingleResult0(request, SQL_C_TIMESTAMP, &res, 0, 0); + CheckSingleResult0(request, SQL_C_TYPE_TIMESTAMP, &res, 0, 0); } template<> @@ -269,7 +269,7 @@ namespace ignite { SQL_TIME_STRUCT res; - CheckSingleResult0(request, SQL_C_TIME, &res, 0, 0); + CheckSingleResult0(request, SQL_C_TYPE_TIME, &res, 0, 0); } template<> @@ -305,7 +305,7 @@ namespace ignite { SQL_DATE_STRUCT res; - CheckSingleResult0(request, SQL_C_DATE, &res, 0, 0); + CheckSingleResult0(request, SQL_C_TYPE_DATE, &res, 0, 0); using ignite::impl::binary::BinaryUtils; Date actual = common::MakeDateGmt(res.year, res.month, res.day); @@ -317,7 +317,7 @@ namespace ignite { SQL_TIMESTAMP_STRUCT res; - CheckSingleResult0(request, 
SQL_C_TIMESTAMP, &res, 0, 0); + CheckSingleResult0(request, SQL_C_TYPE_TIMESTAMP, &res, 0, 0); using ignite::impl::binary::BinaryUtils; Timestamp actual = common::MakeTimestampGmt(res.year, res.month, res.day, res.hour, res.minute, res.second, res.fraction); @@ -331,7 +331,7 @@ namespace ignite { SQL_TIME_STRUCT res; - CheckSingleResult0(request, SQL_C_TIME, &res, 0, 0); + CheckSingleResult0(request, SQL_C_TYPE_TIME, &res, 0, 0); using ignite::impl::binary::BinaryUtils; Time actual = common::MakeTimeGmt(res.hour, res.minute, res.second); diff --git a/modules/platforms/cpp/odbc-test/src/sql_types_test.cpp b/modules/platforms/cpp/odbc-test/src/sql_types_test.cpp index e2fbdf6a58754a..65feef06a6da95 100644 --- a/modules/platforms/cpp/odbc-test/src/sql_types_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/sql_types_test.cpp @@ -304,4 +304,128 @@ BOOST_AUTO_TEST_CASE(TestTimeInsert) BOOST_REQUIRE_EQUAL(out.timeField.GetSeconds(), expected.GetSeconds()); } +void FetchAndCheckDate(SQLHSTMT stmt, const std::string& req, SQLSMALLINT dataType) +{ + std::vector req0(req.begin(), req.end()); + req0.push_back(0); + + SQLExecDirect(stmt, &req0[0], SQL_NTS); + + SQL_DATE_STRUCT res; + + memset(&res, 0, sizeof(res)); + + SQLLEN resLen = 0; + SQLRETURN ret = SQLBindCol(stmt, 1, dataType, &res, 0, &resLen); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + ret = SQLFetch(stmt); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(res.day, 25); + BOOST_CHECK_EQUAL(res.month, 10); + BOOST_CHECK_EQUAL(res.year, 2020); +} + +BOOST_AUTO_TEST_CASE(TestFetchLiteralDate) +{ + FetchAndCheckDate(stmt, "select DATE '2020-10-25'", SQL_C_TYPE_DATE); +} + +BOOST_AUTO_TEST_CASE(TestFetchLiteralDateLegacy) +{ + FetchAndCheckDate(stmt, "select DATE '2020-10-25'", SQL_C_DATE); +} + +BOOST_AUTO_TEST_CASE(TestFetchFieldDateAsDate) +{ + TestType val1; + val1.dateField = common::MakeDateGmt(2020, 10, 25); 
+ + testCache.Put(1, val1); + + FetchAndCheckDate(stmt, "select CAST (dateField as DATE) from TestType", SQL_C_TYPE_DATE); +} + +BOOST_AUTO_TEST_CASE(TestFetchFieldDateAsDateLegacy) +{ + TestType val1; + val1.dateField = common::MakeDateGmt(2020, 10, 25); + + testCache.Put(1, val1); + + FetchAndCheckDate(stmt, "select CAST (dateField as DATE) from TestType", SQL_C_DATE); +} + +BOOST_AUTO_TEST_CASE(TestFetchFieldDateAsIs) +{ + TestType val1; + val1.dateField = common::MakeDateGmt(2020, 10, 25); + + testCache.Put(1, val1); + + FetchAndCheckDate(stmt, "select dateField from TestType", SQL_C_TYPE_DATE); +} + +void FetchAndCheckTime(SQLHSTMT stmt, const std::string& req, SQLSMALLINT dataType) +{ + std::vector req0(req.begin(), req.end()); + req0.push_back(0); + + SQLExecDirect(stmt, &req0[0], SQL_NTS); + + SQL_TIME_STRUCT res; + + memset(&res, 0, sizeof(res)); + + SQLLEN resLen = 0; + SQLRETURN ret = SQLBindCol(stmt, 1, dataType, &res, 0, &resLen); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + ret = SQLFetch(stmt); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(res.hour, 12); + BOOST_CHECK_EQUAL(res.minute, 42); + BOOST_CHECK_EQUAL(res.second, 13); +} + +BOOST_AUTO_TEST_CASE(TestFetchLiteralTime) +{ + FetchAndCheckTime(stmt, "select TIME '12:42:13'", SQL_C_TYPE_TIME); +} + +BOOST_AUTO_TEST_CASE(TestFetchLiteralTimeLegacy) +{ + FetchAndCheckTime(stmt, "select TIME '12:42:13'", SQL_C_TYPE_TIME); +} + +BOOST_AUTO_TEST_CASE(TestFetchFieldTimeAsIs) +{ + TestType val1; + val1.timeField = common::MakeTimeGmt(12, 42, 13); + + testCache.Put(1, val1); + + FetchAndCheckTime(stmt, "select timeField from TestType", SQL_C_TYPE_TIME); +} + +BOOST_AUTO_TEST_CASE(TestFetchFieldTimeAsIsLegacy) +{ + TestType val1; + val1.timeField = common::MakeTimeGmt(12, 42, 13); + + testCache.Put(1, val1); + + FetchAndCheckTime(stmt, "select timeField from TestType", SQL_C_TIME); +} + 
BOOST_AUTO_TEST_SUITE_END() diff --git a/modules/platforms/cpp/odbc-test/src/streaming_test.cpp b/modules/platforms/cpp/odbc-test/src/streaming_test.cpp index 1d74338a8ef944..d863e3763d8644 100644 --- a/modules/platforms/cpp/odbc-test/src/streaming_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/streaming_test.cpp @@ -115,7 +115,7 @@ struct StreamingTestSuiteFixture : odbc::OdbcTestSuite for (int32_t i = begin; i < end; ++i) { key = i; - std::string val = getTestString(i); + std::string val = GetTestString(i); strncpy(strField, val.c_str(), sizeof(strField)); strFieldLen = SQL_NTS; @@ -240,7 +240,7 @@ struct StreamingTestSuiteFixture : odbc::OdbcTestSuite BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); BOOST_CHECK_EQUAL(i, keyVal); - BOOST_CHECK_EQUAL(getTestString(i), std::string(strField, static_cast(strFieldLen))); + BOOST_CHECK_EQUAL(GetTestString(i), std::string(strField, static_cast(strFieldLen))); } // Resetting parameters. diff --git a/modules/platforms/cpp/odbc-test/src/test_server.cpp b/modules/platforms/cpp/odbc-test/src/test_server.cpp new file mode 100644 index 00000000000000..cc6f877b3634c5 --- /dev/null +++ b/modules/platforms/cpp/odbc-test/src/test_server.cpp @@ -0,0 +1,182 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include + +#ifdef _MSC_VER +# pragma warning(push) +# pragma warning(disable : 4355) +#endif //_MSC_VER + +#ifndef _WIN32_WINNT +#define _WIN32_WINNT 0x0601 +#endif // _WIN32_WINNT + +#include +#include +#include + +#ifdef _MSC_VER +# pragma warning(pop) +#endif //_MSC_VER + +#include +#include + +#include "test_server.h" + +namespace ignite +{ + +TestServerSession::TestServerSession(boost::asio::io_service& service, const std::vector< std::vector >& responses) : + socket(service), + responses(responses), + requestsResponded(0) +{ + // No-op. +} + +void TestServerSession::Start() +{ + ReadNextRequest(); +} + +void TestServerSession::ReadNextRequest() +{ + requests.push_back(std::vector()); + + std::vector& newRequest = requests.back(); + newRequest.resize(4); + + async_read(socket, boost::asio::buffer(newRequest.data(), newRequest.size()), + boost::bind(&TestServerSession::HandleRequestSizeReceived, this, + boost::asio::placeholders::error, + boost::asio::placeholders::bytes_transferred)); +} + +void TestServerSession::HandleRequestSizeReceived(const boost::system::error_code& error, size_t bytesTransferred) +{ + if (error || bytesTransferred != 4) + { + socket.close(); + + return; + } + + std::vector& newRequest = requests.back(); + impl::interop::InteropUnpooledMemory mem(4); + mem.Length(4); + + memcpy(mem.Data(), newRequest.data(), newRequest.size()); + int32_t size = impl::binary::BinaryUtils::ReadInt32(mem, 0); + + newRequest.resize(4 + size); + + async_read(socket, boost::asio::buffer(newRequest.data() + 4, size), + boost::bind(&TestServerSession::HandleRequestReceived, this, + boost::asio::placeholders::error, + boost::asio::placeholders::bytes_transferred)); +} + +void TestServerSession::HandleRequestReceived(const boost::system::error_code& error, size_t bytesTransferred) +{ + if (error || !bytesTransferred || requestsResponded 
== responses.size()) + { + socket.close(); + + return; + } + + const std::vector& response = responses.at(requestsResponded); + + async_write(socket, boost::asio::buffer(response.data(), response.size()), + boost::bind(&TestServerSession::HandleResponseSent, this, + boost::asio::placeholders::error, + boost::asio::placeholders::bytes_transferred)); + + ++requestsResponded; +} + +void TestServerSession::HandleResponseSent(const boost::system::error_code& error, size_t bytesTransferred) +{ + if (error || !bytesTransferred) + { + socket.close(); + + return; + } + + ReadNextRequest(); +} + + +TestServer::TestServer(uint16_t port) : + acceptor(service, boost::asio::ip::tcp::endpoint(boost::asio::ip::tcp::v4(), port)) +{ + // No-op. +} + +TestServer::~TestServer() +{ + Stop(); +} + +void TestServer::Start() +{ + if (!serverThread) + { + StartAccept(); + serverThread.reset(new boost::thread(boost::bind(&boost::asio::io_service::run, &service))); + } +} + +void TestServer::Stop() +{ + if (serverThread) + { + service.stop(); + serverThread->join(); + serverThread.reset(); + } +} + +void TestServer::StartAccept() +{ + using namespace boost::asio; + + boost::shared_ptr newSession; + newSession.reset(new TestServerSession(service, responses)); + + acceptor.async_accept(newSession->GetSocket(), + boost::bind(&TestServer::HandleAccept, this, newSession, placeholders::error)); +} + +void TestServer::HandleAccept(boost::shared_ptr session, const boost::system::error_code& error) +{ + if (!error) + { + session->Start(); + + sessions.push_back(session); + } + + StartAccept(); +} + +} // namespace ignite diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/diagnostic/diagnosable_adapter.h b/modules/platforms/cpp/odbc/include/ignite/odbc/diagnostic/diagnosable_adapter.h index d8cfeb71dd8487..255cd421bbcb7c 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/diagnostic/diagnosable_adapter.h +++ 
b/modules/platforms/cpp/odbc/include/ignite/odbc/diagnostic/diagnosable_adapter.h @@ -103,6 +103,13 @@ namespace ignite */ virtual void AddStatusRecord(SqlState::Type sqlState, const std::string& message); + /** + * Add new status record with SqlState::SHY000_GENERAL_ERROR state. + * + * @param message Message. + */ + virtual void AddStatusRecord(const std::string& message); + /** * Add new status record. * @@ -129,4 +136,4 @@ namespace ignite } } -#endif //_IGNITE_ODBC_DIAGNOSTIC_DIAGNOSABLE_ADAPTER \ No newline at end of file +#endif //_IGNITE_ODBC_DIAGNOSTIC_DIAGNOSABLE_ADAPTER diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/message.h b/modules/platforms/cpp/odbc/include/ignite/odbc/message.h index 79e78c3e25c825..aa5a2c0d01e096 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/message.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/message.h @@ -71,7 +71,9 @@ namespace ignite QUERY_MORE_RESULTS = 9, - STREAMING_BATCH = 10 + STREAMING_BATCH = 10, + + META_RESULTSET = 11 }; }; @@ -309,6 +311,39 @@ namespace ignite std::string column; }; + /** + * Query get result set metadata request. + */ + class QueryGetResultsetMetaRequest + { + public: + /** + * Constructor. + * + * @param schema Schema. + * @param sqlQuery SQL query itself. + */ + QueryGetResultsetMetaRequest(const std::string& schema, const std::string& sqlQuery); + + /** + * Destructor. + */ + ~QueryGetResultsetMetaRequest(); + + /** + * Write request using provided writer. + * @param writer Writer. + */ + void Write(impl::binary::BinaryWriterImpl& writer, const ProtocolVersion&) const; + + private: + /** Schema. */ + std::string schema; + + /** SQL query. */ + std::string sqlQuery; + }; + /** * Query get tables metadata request. */ @@ -885,6 +920,42 @@ namespace ignite meta::ColumnMetaVector meta; }; + /** + * Query get resultset metadata response. + */ + class QueryGetResultsetMetaResponse : public Response + { + public: + /** + * Constructor. 
+ */ + QueryGetResultsetMetaResponse(); + + /** + * Destructor. + */ + virtual ~QueryGetResultsetMetaResponse(); + + /** + * Get column metadata. + * @return Column metadata. + */ + const meta::ColumnMetaVector& GetMeta() const + { + return meta; + } + + private: + /** + * Read response using provided reader. + * @param reader Reader. + */ + virtual void ReadOnSuccess(impl::binary::BinaryReaderImpl& reader, const ProtocolVersion&); + + /** Columns metadata. */ + meta::ColumnMetaVector meta; + }; + /** * Query get table metadata response. */ diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/batch_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/batch_query.h index af319abf398dfb..05603d63a1a77a 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/batch_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/batch_query.h @@ -46,7 +46,7 @@ namespace ignite * @param params SQL params. * @param timeout Timeout in seconds. */ - BatchQuery(diagnostic::Diagnosable& diag, Connection& connection, const std::string& sql, + BatchQuery(diagnostic::DiagnosableAdapter& diag, Connection& connection, const std::string& sql, const app::ParameterSet& params, int32_t& timeout); /** @@ -66,7 +66,7 @@ namespace ignite * * @return Column metadata. */ - virtual const meta::ColumnMetaVector& GetMeta() const; + virtual const meta::ColumnMetaVector* GetMeta(); /** * Fetch next result row to application buffers. diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/column_metadata_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/column_metadata_query.h index 354393e9cd6fe6..fe760a9b31ca71 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/column_metadata_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/column_metadata_query.h @@ -45,7 +45,7 @@ namespace ignite * @param table Table search pattern. * @param column Column search pattern. 
*/ - ColumnMetadataQuery(diagnostic::Diagnosable& diag, + ColumnMetadataQuery(diagnostic::DiagnosableAdapter& diag, Connection& connection, const std::string& schema, const std::string& table, const std::string& column); @@ -66,7 +66,7 @@ namespace ignite * * @return Column metadata. */ - virtual const meta::ColumnMetaVector& GetMeta() const; + virtual const meta::ColumnMetaVector* GetMeta(); /** * Fetch next result row to application buffers. @@ -153,4 +153,4 @@ namespace ignite } } -#endif //_IGNITE_ODBC_QUERY_COLUMN_METADATA_QUERY \ No newline at end of file +#endif //_IGNITE_ODBC_QUERY_COLUMN_METADATA_QUERY diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/data_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/data_query.h index 8c630c4d2912f3..ea3ef244ce4c70 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/data_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/data_query.h @@ -46,7 +46,7 @@ namespace ignite * @param params SQL params. * @param timeout Timeout. */ - DataQuery(diagnostic::Diagnosable& diag, Connection& connection, const std::string& sql, + DataQuery(diagnostic::DiagnosableAdapter& diag, Connection& connection, const std::string& sql, const app::ParameterSet& params, int32_t& timeout); /** @@ -66,7 +66,7 @@ namespace ignite * * @return Column metadata. */ - virtual const meta::ColumnMetaVector& GetMeta() const; + virtual const meta::ColumnMetaVector* GetMeta(); /** * Fetch next result row to application buffers. @@ -133,6 +133,14 @@ namespace ignite */ bool IsClosedRemotely() const; + /** + * Make query prepare request and use response to set internal + * state. + * + * @return Result. + */ + SqlResult::Type MakeRequestPrepare(); + /** * Make query execute request and use response to set internal * state. @@ -162,6 +170,13 @@ namespace ignite */ SqlResult::Type MakeRequestMoreResults(); + /** + * Make result set metadata request. + * + * @return Result. 
+ */ + SqlResult::Type MakeRequestResultsetMeta(); + /** * Process column conversion operation result. * * @param convRes Conversion result. * @param rowIdx Row index. * @param columnIdx Column index. * @return General SQL result. */ @@ -171,7 +186,17 @@ namespace ignite * @return General SQL result. */ SqlResult::Type ProcessConversionResult(app::ConversionResult::Type convRes, int32_t rowIdx, - int32_t columnIdx); + int32_t columnIdx);; + + /** + * Set result set metadata for the query. + * + * Stores the provided columns metadata and marks + * the result set metadata as available. + * + * @param value Columns metadata. + */ + void SetResultsetMeta(const meta::ColumnMetaVector& value); /** * Close query. @@ -189,7 +214,10 @@ namespace ignite /** Parameter bindings. */ const app::ParameterSet& params; - /** Columns metadata. */ + /** Result set metadata is available */ + bool resultMetaAvailable; + + /** Result set metadata. */ meta::ColumnMetaVector resultMeta; /** Cursor. */ diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/foreign_keys_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/foreign_keys_query.h index 81e8093680e182..307decf89492e8 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/foreign_keys_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/foreign_keys_query.h @@ -45,7 +45,7 @@ namespace ignite * @param foreignSchema Foreign key schema name. * @param foreignTable Foreign key table name. */ - ForeignKeysQuery(diagnostic::Diagnosable& diag, Connection& connection, + ForeignKeysQuery(diagnostic::DiagnosableAdapter& diag, Connection& connection, const std::string& primaryCatalog, const std::string& primarySchema, const std::string& primaryTable, const std::string& foreignCatalog, const std::string& foreignSchema, const std::string& foreignTable); @@ -67,7 +67,7 @@ namespace ignite * * @return Column metadata. */ - virtual const meta::ColumnMetaVector& GetMeta() const; + virtual const meta::ColumnMetaVector* GetMeta(); /** * Fetch next result row to application buffers. 
diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/internal_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/internal_query.h index 450420b850fc34..d6979997e85e85 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/internal_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/internal_query.h @@ -47,7 +47,7 @@ namespace ignite * @param sql SQL query. * @param cmd Parsed command. */ - InternalQuery(diagnostic::Diagnosable& diag, const std::string& sql, std::auto_ptr cmd) : + InternalQuery(diagnostic::DiagnosableAdapter& diag, const std::string& sql, std::auto_ptr cmd) : Query(diag, QueryType::INTERNAL), sql(sql), cmd(cmd) @@ -70,7 +70,7 @@ namespace ignite */ virtual SqlResult::Type Execute() { - diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, "Internal error."); + diag.AddStatusRecord("Internal error."); return SqlResult::AI_ERROR; } @@ -118,11 +118,9 @@ namespace ignite * * @return Column metadata. */ - virtual const meta::ColumnMetaVector& GetMeta() const + virtual const meta::ColumnMetaVector* GetMeta() { - static const meta::ColumnMetaVector empty; - - return empty; + return 0; } /** diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/primary_keys_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/primary_keys_query.h index 3650fcf36addac..51c233be5ab868 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/primary_keys_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/primary_keys_query.h @@ -43,7 +43,7 @@ namespace ignite * @param schema Schema name. * @param table Table name. */ - PrimaryKeysQuery(diagnostic::Diagnosable& diag, + PrimaryKeysQuery(diagnostic::DiagnosableAdapter& diag, Connection& connection, const std::string& catalog, const std::string& schema, const std::string& table); @@ -64,7 +64,7 @@ namespace ignite * * @return Column metadata. 
*/ - virtual const meta::ColumnMetaVector& GetMeta() const; + virtual const meta::ColumnMetaVector* GetMeta(); /** * Fetch next result row to application buffers. diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/query.h index 22503a1410f08d..4e64a21b0c509c 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/query.h @@ -22,7 +22,7 @@ #include -#include "ignite/odbc/diagnostic/diagnosable.h" +#include "ignite/odbc/diagnostic/diagnosable_adapter.h" #include "ignite/odbc/meta/column_meta.h" #include "ignite/odbc/common_types.h" #include "ignite/odbc/row.h" @@ -120,7 +120,7 @@ namespace ignite * * @return Column metadata. */ - virtual const meta::ColumnMetaVector& GetMeta() const = 0; + virtual const meta::ColumnMetaVector* GetMeta() = 0; /** * Check if data is available. @@ -157,7 +157,7 @@ namespace ignite /** * Constructor. */ - Query(diagnostic::Diagnosable& diag, QueryType::Type type) : + Query(diagnostic::DiagnosableAdapter& diag, QueryType::Type type) : diag(diag), type(type) { @@ -165,7 +165,7 @@ namespace ignite } /** Diagnostics collector. */ - diagnostic::Diagnosable& diag; + diagnostic::DiagnosableAdapter& diag; /** Query type. */ QueryType::Type type; diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/special_columns_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/special_columns_query.h index 919febfd86c7a1..d6b244bcb92906 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/special_columns_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/special_columns_query.h @@ -44,7 +44,7 @@ namespace ignite * @param nullable Determines whether to return special columns * that can have a NULL value. 
*/ - SpecialColumnsQuery(diagnostic::Diagnosable& diag, int16_t type, + SpecialColumnsQuery(diagnostic::DiagnosableAdapter& diag, int16_t type, const std::string& catalog, const std::string& schema, const std::string& table, int16_t scope, int16_t nullable); @@ -89,7 +89,7 @@ namespace ignite * * @return Column metadata. */ - virtual const meta::ColumnMetaVector& GetMeta() const; + virtual const meta::ColumnMetaVector* GetMeta(); /** * Check if data is available. diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/streaming_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/streaming_query.h index cf87e80966cb9a..285d3fb9fc2b89 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/streaming_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/streaming_query.h @@ -44,7 +44,7 @@ namespace ignite * @param params SQL params. */ StreamingQuery( - diagnostic::Diagnosable& diag, + diagnostic::DiagnosableAdapter& diag, Connection& connection, const app::ParameterSet& params); @@ -65,7 +65,7 @@ namespace ignite * * @return Column metadata. */ - virtual const meta::ColumnMetaVector& GetMeta() const; + virtual const meta::ColumnMetaVector* GetMeta(); /** * Fetch next result row to application buffers. diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/table_metadata_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/table_metadata_query.h index 776b747b65accb..fa9b720aeb7eb1 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/table_metadata_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/table_metadata_query.h @@ -46,7 +46,7 @@ namespace ignite * @param table Table search pattern. * @param tableType Table type search pattern. 
*/ - TableMetadataQuery(diagnostic::Diagnosable& diag, Connection& connection, + TableMetadataQuery(diagnostic::DiagnosableAdapter& diag, Connection& connection, const std::string& catalog, const std::string& schema, const std::string& table, const std::string& tableType); @@ -67,7 +67,7 @@ namespace ignite * * @return Column metadata. */ - virtual const meta::ColumnMetaVector& GetMeta() const; + virtual const meta::ColumnMetaVector* GetMeta(); /** * Fetch next result row to application buffers. diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/type_info_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/type_info_query.h index 3f2e76c7b4aa2b..b9638208ef750f 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/type_info_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/type_info_query.h @@ -38,7 +38,7 @@ namespace ignite * @param diag Diagnostics collector. * @param sqlType SQL type. */ - TypeInfoQuery(diagnostic::Diagnosable& diag, int16_t sqlType); + TypeInfoQuery(diagnostic::DiagnosableAdapter& diag, int16_t sqlType); /** * Destructor. @@ -57,7 +57,7 @@ namespace ignite * * @return Column metadata. */ - virtual const meta::ColumnMetaVector& GetMeta() const; + virtual const meta::ColumnMetaVector* GetMeta(); /** * Fetch next result row to application buffers. diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/statement.h b/modules/platforms/cpp/odbc/include/ignite/odbc/statement.h index 56eea6c89fc94b..37c91b470e4cf9 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/statement.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/statement.h @@ -255,7 +255,7 @@ namespace ignite * * @return Column metadata. */ - const meta::ColumnMetaVector* GetMeta() const; + const meta::ColumnMetaVector* GetMeta(); /** * Check if data is available. @@ -296,28 +296,28 @@ namespace ignite * * @param ptr Rows fetched buffer pointer. 
*/ - void SetRowsFetchedPtr(size_t* ptr); + void SetRowsFetchedPtr(SQLINTEGER* ptr); /** * Get rows fetched buffer pointer. * * @return Rows fetched buffer pointer. */ - size_t* GetRowsFetchedPtr(); + SQLINTEGER* GetRowsFetchedPtr(); /** * Set row statuses array pointer. * * @param ptr Row statuses array pointer. */ - void SetRowStatusesPtr(uint16_t* ptr); + void SetRowStatusesPtr(SQLUSMALLINT* ptr); /** * Get row statuses array pointer. * * @return Row statuses array pointer. */ - uint16_t* GetRowStatusesPtr(); + SQLUSMALLINT* GetRowStatusesPtr(); /** * Select next parameter data for which is required. @@ -670,6 +670,13 @@ namespace ignite */ SqlResult::Type UpdateParamsMeta(); + /** + * Convert SQLRESULT to SQL_ROW_RESULT. + * + * @return Operation result. + */ + uint16_t SqlResultToRowResult(SqlResult::Type value); + /** * Constructor. * Called by friend classes. @@ -687,18 +694,18 @@ namespace ignite /** Underlying query. */ std::auto_ptr currentQuery; - /** Row bind type. */ - SqlUlen rowBindType; - /** Buffer to store number of rows fetched by the last fetch. */ - size_t* rowsFetched; + SQLINTEGER* rowsFetched; /** Array to store statuses of rows fetched by the last fetch. */ - uint16_t* rowStatuses; + SQLUSMALLINT* rowStatuses; /** Offset added to pointers to change binding of column data. */ int* columnBindOffset; + /** Row array size. */ + SqlUlen rowArraySize; + /** Parameters. 
*/ app::ParameterSet parameters; diff --git a/modules/platforms/cpp/odbc/src/app/application_data_buffer.cpp b/modules/platforms/cpp/odbc/src/app/application_data_buffer.cpp index 2f1fbcb217fe64..35af8cfe7d052a 100644 --- a/modules/platforms/cpp/odbc/src/app/application_data_buffer.cpp +++ b/modules/platforms/cpp/odbc/src/app/application_data_buffer.cpp @@ -1707,28 +1707,36 @@ namespace ignite return buflen; case OdbcNativeType::AI_SIGNED_SHORT: + return static_cast(sizeof(SQLSMALLINT)); + case OdbcNativeType::AI_UNSIGNED_SHORT: - return static_cast(sizeof(short)); + return static_cast(sizeof(SQLUSMALLINT)); case OdbcNativeType::AI_SIGNED_LONG: + return static_cast(sizeof(SQLUINTEGER)); + case OdbcNativeType::AI_UNSIGNED_LONG: - return static_cast(sizeof(long)); + return static_cast(sizeof(SQLINTEGER)); case OdbcNativeType::AI_FLOAT: - return static_cast(sizeof(float)); + return static_cast(sizeof(SQLREAL)); case OdbcNativeType::AI_DOUBLE: - return static_cast(sizeof(double)); + return static_cast(sizeof(SQLDOUBLE)); - case OdbcNativeType::AI_BIT: case OdbcNativeType::AI_SIGNED_TINYINT: + return static_cast(sizeof(SQLSCHAR)); + + case OdbcNativeType::AI_BIT: case OdbcNativeType::AI_UNSIGNED_TINYINT: - return static_cast(sizeof(char)); + return static_cast(sizeof(SQLCHAR)); case OdbcNativeType::AI_SIGNED_BIGINT: - case OdbcNativeType::AI_UNSIGNED_BIGINT: return static_cast(sizeof(SQLBIGINT)); + case OdbcNativeType::AI_UNSIGNED_BIGINT: + return static_cast(sizeof(SQLUBIGINT)); + case OdbcNativeType::AI_TDATE: return static_cast(sizeof(SQL_DATE_STRUCT)); diff --git a/modules/platforms/cpp/odbc/src/connection.cpp b/modules/platforms/cpp/odbc/src/connection.cpp index a8a67f0b0037c2..a5beb0ca224f8a 100644 --- a/modules/platforms/cpp/odbc/src/connection.cpp +++ b/modules/platforms/cpp/odbc/src/connection.cpp @@ -149,8 +149,7 @@ namespace ignite { LOG_MSG("Can not load OpenSSL library: " << err.GetText()); - AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, - "Can not load 
OpenSSL library (did you set OPENSSL_HOME environment variable?)."); + AddStatusRecord("Can not load OpenSSL library (did you set OPENSSL_HOME environment variable?)"); return SqlResult::AI_ERROR; } @@ -176,7 +175,7 @@ namespace ignite if (!config.IsHostSet() && config.IsAddressesSet() && config.GetAddresses().empty()) { - AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, "No valid address to connect."); + AddStatusRecord("No valid address to connect."); return SqlResult::AI_ERROR; } @@ -211,7 +210,9 @@ namespace ignite { AddStatusRecord(SqlState::S08003_NOT_CONNECTED, "Connection is not open."); - return SqlResult::AI_ERROR; + // It is important to return SUCCESS_WITH_INFO and not ERROR here, as if we return an error, Windows + // Driver Manager may decide that connection is not valid anymore which results in memory leak. + return SqlResult::AI_SUCCESS_WITH_INFO; } Close(); @@ -430,7 +431,7 @@ namespace ignite } catch (const IgniteError& err) { - AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, err.GetText()); + AddStatusRecord(err.GetText()); return SqlResult::AI_ERROR; } @@ -471,7 +472,7 @@ namespace ignite } catch (const IgniteError& err) { - AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, err.GetText()); + AddStatusRecord(err.GetText()); return SqlResult::AI_ERROR; } @@ -681,7 +682,7 @@ namespace ignite if (!rsp.GetError().empty()) constructor << "Additional info: " << rsp.GetError() << " "; - constructor << "Current version of the protocol, used by the server node is " + constructor << "Current version of the protocol, used by the server node is " << rsp.GetCurrentVer().ToString() << ", " << "driver protocol version introduced in version " << protocolVersion.ToString() << "."; diff --git a/modules/platforms/cpp/odbc/src/cursor.cpp b/modules/platforms/cpp/odbc/src/cursor.cpp index b41f5b1f3c8957..cee18d80e3c50c 100644 --- a/modules/platforms/cpp/odbc/src/cursor.cpp +++ b/modules/platforms/cpp/odbc/src/cursor.cpp @@ -21,8 +21,11 @@ namespace ignite { namespace 
odbc { - Cursor::Cursor(int64_t queryId) : queryId(queryId), currentPage(), - currentPagePos(0), currentRow() + Cursor::Cursor(int64_t queryId) : + queryId(queryId), + currentPage(), + currentPagePos(0), + currentRow() { // No-op. } diff --git a/modules/platforms/cpp/odbc/src/diagnostic/diagnosable_adapter.cpp b/modules/platforms/cpp/odbc/src/diagnostic/diagnosable_adapter.cpp index 6de071615c9c62..eea7649086d076 100644 --- a/modules/platforms/cpp/odbc/src/diagnostic/diagnosable_adapter.cpp +++ b/modules/platforms/cpp/odbc/src/diagnostic/diagnosable_adapter.cpp @@ -48,6 +48,11 @@ namespace ignite AddStatusRecord(sqlState, message, 0, 0); } + void DiagnosableAdapter::AddStatusRecord(const std::string& message) + { + AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, message); + } + void DiagnosableAdapter::AddStatusRecord(const OdbcError& err) { AddStatusRecord(err.GetStatus(), err.GetErrorMessage(), 0, 0); diff --git a/modules/platforms/cpp/odbc/src/message.cpp b/modules/platforms/cpp/odbc/src/message.cpp index 00e0ae2d24cfe2..946529de3d6644 100644 --- a/modules/platforms/cpp/odbc/src/message.cpp +++ b/modules/platforms/cpp/odbc/src/message.cpp @@ -224,6 +224,26 @@ namespace ignite writer.WriteObject(column); } + QueryGetResultsetMetaRequest::QueryGetResultsetMetaRequest(const std::string &schema, const std::string &sqlQuery) : + schema(schema), + sqlQuery(sqlQuery) + { + // No-op. + } + + QueryGetResultsetMetaRequest::~QueryGetResultsetMetaRequest() + { + // No-op. 
+ } + + void QueryGetResultsetMetaRequest::Write(impl::binary::BinaryWriterImpl &writer, const ProtocolVersion &) const + { + writer.WriteInt8(RequestType::META_RESULTSET); + + writer.WriteObject(schema); + writer.WriteObject(sqlQuery); + } + QueryGetTablesMetaRequest::QueryGetTablesMetaRequest(const std::string& catalog, const std::string& schema, const std::string& table, const std::string& tableTypes): catalog(catalog), @@ -478,6 +498,21 @@ namespace ignite meta::ReadColumnMetaVector(reader, meta, ver); } + QueryGetResultsetMetaResponse::QueryGetResultsetMetaResponse() + { + // No-op. + } + + QueryGetResultsetMetaResponse::~QueryGetResultsetMetaResponse() + { + // No-op. + } + + void QueryGetResultsetMetaResponse::ReadOnSuccess(impl::binary::BinaryReaderImpl &reader, const ProtocolVersion& ver) + { + meta::ReadColumnMetaVector(reader, meta, ver); + } + QueryGetTablesMetaResponse::QueryGetTablesMetaResponse() { // No-op. diff --git a/modules/platforms/cpp/odbc/src/query/batch_query.cpp b/modules/platforms/cpp/odbc/src/query/batch_query.cpp index c687672c2d9e9c..8b26e0d59e39ba 100644 --- a/modules/platforms/cpp/odbc/src/query/batch_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/batch_query.cpp @@ -27,7 +27,7 @@ namespace ignite { namespace query { - BatchQuery::BatchQuery(diagnostic::Diagnosable& diag, Connection& connection, const std::string& sql, + BatchQuery::BatchQuery(diagnostic::DiagnosableAdapter& diag, Connection& connection, const std::string& sql, const app::ParameterSet& params, int32_t& timeout) : Query(diag, QueryType::BATCH), connection(connection), @@ -75,9 +75,9 @@ namespace ignite return res; } - const meta::ColumnMetaVector& BatchQuery::GetMeta() const + const meta::ColumnMetaVector* BatchQuery::GetMeta() { - return resultMeta; + return &resultMeta; } SqlResult::Type BatchQuery::FetchNextRow(app::ColumnBindingMap&) @@ -170,7 +170,7 @@ namespace ignite } catch (const IgniteError& err) { - diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, 
err.GetText()); + diag.AddStatusRecord(err.GetText()); return SqlResult::AI_ERROR; } diff --git a/modules/platforms/cpp/odbc/src/query/column_metadata_query.cpp b/modules/platforms/cpp/odbc/src/query/column_metadata_query.cpp index 649fa3f9e84a1c..fda92fe34a5904 100644 --- a/modules/platforms/cpp/odbc/src/query/column_metadata_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/column_metadata_query.cpp @@ -75,7 +75,7 @@ namespace ignite { namespace query { - ColumnMetadataQuery::ColumnMetadataQuery(diagnostic::Diagnosable& diag, + ColumnMetadataQuery::ColumnMetadataQuery(diagnostic::DiagnosableAdapter& diag, Connection& connection, const std::string& schema, const std::string& table, const std::string& column) : Query(diag, QueryType::COLUMN_METADATA), @@ -135,9 +135,9 @@ namespace ignite return result; } - const meta::ColumnMetaVector& ColumnMetadataQuery::GetMeta() const + const meta::ColumnMetaVector* ColumnMetadataQuery::GetMeta() { - return columnsMeta; + return &columnsMeta; } SqlResult::Type ColumnMetadataQuery::FetchNextRow(app::ColumnBindingMap & columnBindings) @@ -311,7 +311,7 @@ namespace ignite } catch (const IgniteError& err) { - diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, err.GetText()); + diag.AddStatusRecord(err.GetText()); return SqlResult::AI_ERROR; } diff --git a/modules/platforms/cpp/odbc/src/query/data_query.cpp b/modules/platforms/cpp/odbc/src/query/data_query.cpp index 4ba354d0cecae3..54723a1093ba90 100644 --- a/modules/platforms/cpp/odbc/src/query/data_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/data_query.cpp @@ -28,12 +28,13 @@ namespace ignite { namespace query { - DataQuery::DataQuery(diagnostic::Diagnosable& diag, Connection& connection, const std::string& sql, + DataQuery::DataQuery(diagnostic::DiagnosableAdapter& diag, Connection& connection, const std::string& sql, const app::ParameterSet& params, int32_t& timeout) : Query(diag, QueryType::DATA), connection(connection), sql(sql), params(params), + 
resultMetaAvailable(false), resultMeta(), cursor(), rowsAffected(), @@ -57,9 +58,17 @@ namespace ignite return MakeRequestExecute(); } - const meta::ColumnMetaVector & DataQuery::GetMeta() const + const meta::ColumnMetaVector* DataQuery::GetMeta() { - return resultMeta; + if (!resultMetaAvailable) + { + MakeRequestResultsetMeta(); + + if (!resultMetaAvailable) + return 0; + } + + return &resultMeta; } SqlResult::Type DataQuery::FetchNextRow(app::ColumnBindingMap& columnBindings) @@ -96,7 +105,7 @@ namespace ignite if (!row) { - diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, "Unknown error."); + diag.AddStatusRecord("Unknown error."); return SqlResult::AI_ERROR; } @@ -164,8 +173,6 @@ namespace ignite { cursor.reset(); - resultMeta.clear(); - rowsAffectedIdx = 0; rowsAffected.clear(); @@ -182,7 +189,11 @@ namespace ignite int64_t DataQuery::AffectedRows() const { int64_t affected = rowsAffectedIdx < rowsAffected.size() ? rowsAffected[rowsAffectedIdx] : 0; - return affected < 0 ? 0 : affected; + + if (affected >= 0) + return affected; + + return connection.GetConfiguration().GetPageSize(); } SqlResult::Type DataQuery::NextResultSet() @@ -242,7 +253,7 @@ namespace ignite } catch (const IgniteError& err) { - diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, err.GetText()); + diag.AddStatusRecord(err.GetText()); return SqlResult::AI_ERROR; } @@ -256,21 +267,12 @@ namespace ignite return SqlResult::AI_ERROR; } - resultMeta.assign(rsp.GetMeta().begin(), rsp.GetMeta().end()); - rowsAffected = rsp.GetAffectedRows(); + SetResultsetMeta(rsp.GetMeta()); LOG_MSG("Query id: " << rsp.GetQueryId()); LOG_MSG("Affected Rows list size: " << rowsAffected.size()); - for (size_t i = 0; i < resultMeta.size(); ++i) - { - LOG_MSG("\n[" << i << "] SchemaName: " << resultMeta[i].GetSchemaName() - << "\n[" << i << "] TypeName: " << resultMeta[i].GetTableName() - << "\n[" << i << "] ColumnName: " << resultMeta[i].GetColumnName() - << "\n[" << i << "] ColumnType: " << 
static_cast(resultMeta[i].GetDataType())); - } - cursor.reset(new Cursor(rsp.GetQueryId())); rowsAffectedIdx = 0; @@ -295,7 +297,7 @@ namespace ignite } catch (const IgniteError& err) { - diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, err.GetText()); + diag.AddStatusRecord(err.GetText()); return SqlResult::AI_ERROR; } @@ -333,7 +335,7 @@ namespace ignite } catch (const IgniteError& err) { - diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, err.GetText()); + diag.AddStatusRecord(err.GetText()); return SqlResult::AI_ERROR; } @@ -374,7 +376,7 @@ namespace ignite } catch (const IgniteError& err) { - diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, err.GetText()); + diag.AddStatusRecord(err.GetText()); return SqlResult::AI_ERROR; } @@ -397,6 +399,53 @@ namespace ignite return SqlResult::AI_SUCCESS; } + SqlResult::Type DataQuery::MakeRequestResultsetMeta() + { + const std::string& schema = connection.GetSchema(); + + QueryGetResultsetMetaRequest req(schema, sql); + QueryGetResultsetMetaResponse rsp; + + try + { + // Setting connection timeout to 1 second more than query timeout itself. + int32_t connectionTimeout = timeout ? 
timeout + 1 : 0; + bool success = connection.SyncMessage(req, rsp, connectionTimeout); + + if (!success) + { + diag.AddStatusRecord(SqlState::SHYT00_TIMEOUT_EXPIRED, "Query timeout expired"); + + return SqlResult::AI_ERROR; + } + } + catch (const OdbcError& err) + { + diag.AddStatusRecord(err); + + return SqlResult::AI_ERROR; + } + catch (const IgniteError& err) + { + diag.AddStatusRecord(err.GetText()); + + return SqlResult::AI_ERROR; + } + + if (rsp.GetStatus() != ResponseStatus::SUCCESS) + { + LOG_MSG("Error: " << rsp.GetError()); + + diag.AddStatusRecord(ResponseStatusToSqlState(rsp.GetStatus()), rsp.GetError()); + + return SqlResult::AI_ERROR; + } + + SetResultsetMeta(rsp.GetMeta()); + + return SqlResult::AI_SUCCESS; + } + SqlResult::Type DataQuery::ProcessConversionResult(app::ConversionResult::Type convRes, int32_t rowIdx, int32_t columnIdx) { @@ -456,6 +505,21 @@ namespace ignite return SqlResult::AI_ERROR; } + + void DataQuery::SetResultsetMeta(const meta::ColumnMetaVector& value) + { + resultMeta.assign(value.begin(), value.end()); + resultMetaAvailable = true; + + for (size_t i = 0; i < resultMeta.size(); ++i) + { + meta::ColumnMeta& meta = resultMeta.at(i); + LOG_MSG("\n[" << i << "] SchemaName: " << meta.GetSchemaName() + << "\n[" << i << "] TypeName: " << meta.GetTableName() + << "\n[" << i << "] ColumnName: " << meta.GetColumnName() + << "\n[" << i << "] ColumnType: " << static_cast(meta.GetDataType())); + } + } } } } diff --git a/modules/platforms/cpp/odbc/src/query/foreign_keys_query.cpp b/modules/platforms/cpp/odbc/src/query/foreign_keys_query.cpp index 47ed89b2bec21a..2520b2006cde05 100644 --- a/modules/platforms/cpp/odbc/src/query/foreign_keys_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/foreign_keys_query.cpp @@ -28,7 +28,7 @@ namespace ignite { namespace query { - ForeignKeysQuery::ForeignKeysQuery(diagnostic::Diagnosable& diag, Connection& connection, + ForeignKeysQuery::ForeignKeysQuery(diagnostic::DiagnosableAdapter& diag, 
Connection& connection, const std::string& primaryCatalog, const std::string& primarySchema, const std::string& primaryTable, const std::string& foreignCatalog, const std::string& foreignSchema, const std::string& foreignTable) : @@ -81,9 +81,9 @@ namespace ignite return SqlResult::AI_SUCCESS; } - const meta::ColumnMetaVector & ForeignKeysQuery::GetMeta() const + const meta::ColumnMetaVector* ForeignKeysQuery::GetMeta() { - return columnsMeta; + return &columnsMeta; } SqlResult::Type ForeignKeysQuery::FetchNextRow(app::ColumnBindingMap&) diff --git a/modules/platforms/cpp/odbc/src/query/primary_keys_query.cpp b/modules/platforms/cpp/odbc/src/query/primary_keys_query.cpp index bb6f90872969c1..d179f3831e89ac 100644 --- a/modules/platforms/cpp/odbc/src/query/primary_keys_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/primary_keys_query.cpp @@ -55,7 +55,7 @@ namespace ignite { namespace query { - PrimaryKeysQuery::PrimaryKeysQuery(diagnostic::Diagnosable& diag, + PrimaryKeysQuery::PrimaryKeysQuery(diagnostic::DiagnosableAdapter& diag, Connection& connection, const std::string& catalog, const std::string& schema, const std::string& table) : Query(diag, QueryType::PRIMARY_KEYS), @@ -103,9 +103,9 @@ namespace ignite return SqlResult::AI_SUCCESS; } - const meta::ColumnMetaVector & PrimaryKeysQuery::GetMeta() const + const meta::ColumnMetaVector* PrimaryKeysQuery::GetMeta() { - return columnsMeta; + return &columnsMeta; } SqlResult::Type PrimaryKeysQuery::FetchNextRow(app::ColumnBindingMap & columnBindings) diff --git a/modules/platforms/cpp/odbc/src/query/special_columns_query.cpp b/modules/platforms/cpp/odbc/src/query/special_columns_query.cpp index 3f176f275511d5..88eca7169a60d3 100644 --- a/modules/platforms/cpp/odbc/src/query/special_columns_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/special_columns_query.cpp @@ -26,7 +26,7 @@ namespace ignite { namespace query { - SpecialColumnsQuery::SpecialColumnsQuery(diagnostic::Diagnosable& diag, + 
SpecialColumnsQuery::SpecialColumnsQuery(diagnostic::DiagnosableAdapter& diag, int16_t type, const std::string& catalog, const std::string& schema, const std::string& table, int16_t scope, int16_t nullable) : Query(diag, QueryType::SPECIAL_COLUMNS), @@ -71,9 +71,9 @@ namespace ignite return SqlResult::AI_SUCCESS; } - const meta::ColumnMetaVector& SpecialColumnsQuery::GetMeta() const + const meta::ColumnMetaVector* SpecialColumnsQuery::GetMeta() { - return columnsMeta; + return &columnsMeta; } SqlResult::Type SpecialColumnsQuery::FetchNextRow(app::ColumnBindingMap&) diff --git a/modules/platforms/cpp/odbc/src/query/streaming_query.cpp b/modules/platforms/cpp/odbc/src/query/streaming_query.cpp index dd9302f772e9a3..4bc19b166421d1 100644 --- a/modules/platforms/cpp/odbc/src/query/streaming_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/streaming_query.cpp @@ -29,7 +29,7 @@ namespace ignite namespace query { StreamingQuery::StreamingQuery( - diagnostic::Diagnosable& diag, + diagnostic::DiagnosableAdapter& diag, Connection& connection, const app::ParameterSet& params) : Query(diag, QueryType::STREAMING), @@ -49,11 +49,9 @@ namespace ignite return connection.GetStreamingContext().Execute(sql, params); } - const meta::ColumnMetaVector& StreamingQuery::GetMeta() const + const meta::ColumnMetaVector* StreamingQuery::GetMeta() { - static meta::ColumnMetaVector empty; - - return empty; + return 0; } SqlResult::Type StreamingQuery::FetchNextRow(app::ColumnBindingMap&) diff --git a/modules/platforms/cpp/odbc/src/query/table_metadata_query.cpp b/modules/platforms/cpp/odbc/src/query/table_metadata_query.cpp index 53fe49d2ccf4be..d76576d418efb9 100644 --- a/modules/platforms/cpp/odbc/src/query/table_metadata_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/table_metadata_query.cpp @@ -54,7 +54,7 @@ namespace ignite { namespace query { - TableMetadataQuery::TableMetadataQuery(diagnostic::Diagnosable& diag, + 
TableMetadataQuery::TableMetadataQuery(diagnostic::DiagnosableAdapter& diag, Connection& connection, const std::string& catalog,const std::string& schema, const std::string& table, const std::string& tableType) : Query(diag, QueryType::TABLE_METADATA), @@ -108,9 +108,9 @@ namespace ignite return result; } - const meta::ColumnMetaVector& TableMetadataQuery::GetMeta() const + const meta::ColumnMetaVector* TableMetadataQuery::GetMeta() { - return columnsMeta; + return &columnsMeta; } SqlResult::Type TableMetadataQuery::FetchNextRow(app::ColumnBindingMap& columnBindings) @@ -237,7 +237,7 @@ namespace ignite } catch (const IgniteError& err) { - diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, err.GetText()); + diag.AddStatusRecord(err.GetText()); return SqlResult::AI_ERROR; } diff --git a/modules/platforms/cpp/odbc/src/query/type_info_query.cpp b/modules/platforms/cpp/odbc/src/query/type_info_query.cpp index c47161bebc6aed..3cc5787a3e5485 100644 --- a/modules/platforms/cpp/odbc/src/query/type_info_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/type_info_query.cpp @@ -118,7 +118,7 @@ namespace ignite { namespace query { - TypeInfoQuery::TypeInfoQuery(diagnostic::Diagnosable& diag, int16_t sqlType) : + TypeInfoQuery::TypeInfoQuery(diagnostic::DiagnosableAdapter& diag, int16_t sqlType) : Query(diag, QueryType::TYPE_INFO), columnsMeta(), executed(false), @@ -191,9 +191,9 @@ namespace ignite return SqlResult::AI_SUCCESS; } - const meta::ColumnMetaVector & TypeInfoQuery::GetMeta() const + const meta::ColumnMetaVector* TypeInfoQuery::GetMeta() { - return columnsMeta; + return &columnsMeta; } SqlResult::Type TypeInfoQuery::FetchNextRow(app::ColumnBindingMap & columnBindings) diff --git a/modules/platforms/cpp/odbc/src/statement.cpp b/modules/platforms/cpp/odbc/src/statement.cpp index 6e1a9c5feb3bda..9253030f839a85 100644 --- a/modules/platforms/cpp/odbc/src/statement.cpp +++ b/modules/platforms/cpp/odbc/src/statement.cpp @@ -46,10 +46,10 @@ namespace ignite 
connection(parent), columnBindings(), currentQuery(), - rowBindType(SQL_BIND_BY_COLUMN), rowsFetched(0), rowStatuses(0), columnBindOffset(0), + rowArraySize(1), parameters(), timeout(0) { @@ -137,11 +137,7 @@ namespace ignite const meta::ColumnMetaVector* meta = GetMeta(); if (!meta) - { - AddStatusRecord(SqlState::SHY010_SEQUENCE_ERROR, "Query is not executed."); - return SqlResult::AI_ERROR; - } res = static_cast(meta->size()); @@ -209,7 +205,7 @@ namespace ignite if (!buffer && !resLen) { AddStatusRecord(SqlState::SHY009_INVALID_USE_OF_NULL_POINTER, - "ParameterValuePtr and StrLen_or_IndPtr are both null pointers."); + "ParameterValuePtr and StrLen_or_IndPtr are both null pointers"); return SqlResult::AI_ERROR; } @@ -238,34 +234,59 @@ namespace ignite LOG_MSG("SQL_ATTR_ROW_ARRAY_SIZE: " << val); - if (val != 1) + if (val < 1) { - AddStatusRecord(SqlState::SHYC00_OPTIONAL_FEATURE_NOT_IMPLEMENTED, - "Fetching of more than one row by call is not supported."); + AddStatusRecord(SqlState::SHY092_OPTION_TYPE_OUT_OF_RANGE, + "Array size value can not be less than 1"); return SqlResult::AI_ERROR; } + rowArraySize = val; + break; } case SQL_ATTR_ROW_BIND_TYPE: { - rowBindType = reinterpret_cast(value); + SqlUlen rowBindType = reinterpret_cast(value); + + if (rowBindType != SQL_BIND_BY_COLUMN) + { + AddStatusRecord(SqlState::SHYC00_OPTIONAL_FEATURE_NOT_IMPLEMENTED, + "Only binding by column is currently supported"); + + return SqlResult::AI_ERROR; + } break; } case SQL_ATTR_ROWS_FETCHED_PTR: { - SetRowsFetchedPtr(reinterpret_cast(value)); + SetRowsFetchedPtr(reinterpret_cast(value)); break; } case SQL_ATTR_ROW_STATUS_PTR: { - SetRowStatusesPtr(reinterpret_cast(value)); + SetRowStatusesPtr(reinterpret_cast(value)); + + break; + } + + case SQL_ATTR_PARAM_BIND_TYPE: + { + SqlUlen paramBindType = reinterpret_cast(value); + + if (paramBindType != SQL_PARAM_BIND_BY_COLUMN) + { + AddStatusRecord(SqlState::SHYC00_OPTIONAL_FEATURE_NOT_IMPLEMENTED, + "Only binding by column is 
currently supported"); + + return SqlResult::AI_ERROR; + } break; } @@ -358,7 +379,7 @@ namespace ignite { if (!buf) { - AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, "Data buffer is NULL."); + AddStatusRecord("Data buffer is NULL."); return SqlResult::AI_ERROR; } @@ -384,7 +405,7 @@ namespace ignite { SqlUlen* val = reinterpret_cast(buf); - *val = rowBindType; + *val = SQL_BIND_BY_COLUMN; break; } @@ -425,6 +446,15 @@ namespace ignite break; } + case SQL_ATTR_PARAM_BIND_TYPE: + { + SqlUlen* val = reinterpret_cast(buf); + + *val = SQL_PARAM_BIND_BY_COLUMN; + + break; + } + case SQL_ATTR_PARAM_BIND_OFFSET_PTR: { SQLULEN** val = reinterpret_cast(buf); @@ -889,7 +919,7 @@ namespace ignite { case SQL_DROP: { - AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, "Deprecated, call SQLFreeHandle instead"); + AddStatusRecord("Deprecated, call SQLFreeHandle instead"); return SqlResult::AI_ERROR; } @@ -960,7 +990,9 @@ namespace ignite if (orientation != SQL_FETCH_NEXT) { - AddStatusRecord(SqlState::SHY106_FETCH_TYPE_OUT_OF_RANGE, "The value specified for the argument FetchOrientation was not SQL_FETCH_NEXT."); + AddStatusRecord(SqlState::SHYC00_OPTIONAL_FEATURE_NOT_IMPLEMENTED, + "Only SQL_FETCH_NEXT FetchOrientation type is supported"); + return SqlResult::AI_ERROR; } @@ -979,8 +1011,7 @@ namespace ignite if (!currentQuery.get()) { - AddStatusRecord(SqlState::S24000_INVALID_CURSOR_STATE, - "Cursor is not in the open state."); + AddStatusRecord(SqlState::S24000_INVALID_CURSOR_STATE, "Cursor is not in the open state"); return SqlResult::AI_ERROR; } @@ -991,26 +1022,44 @@ namespace ignite it->second.SetByteOffset(*columnBindOffset); } - SqlResult::Type res = currentQuery->FetchNextRow(columnBindings); + SQLINTEGER fetched = 0; + SQLINTEGER errors = 0; - if (res == SqlResult::AI_SUCCESS) + for (SqlUlen i = 0; i < rowArraySize; ++i) { - if (rowsFetched) - *rowsFetched = 1; + for (app::ColumnBindingMap::iterator it = columnBindings.begin(); it != columnBindings.end(); ++it) + 
it->second.SetElementOffset(i); + + SqlResult::Type res = currentQuery->FetchNextRow(columnBindings); + + if (res == SqlResult::AI_SUCCESS || res == SqlResult::AI_SUCCESS_WITH_INFO) + ++fetched; + else if (res != SqlResult::AI_NO_DATA) + ++errors; if (rowStatuses) - rowStatuses[0] = SQL_ROW_SUCCESS; + rowStatuses[i] = SqlResultToRowResult(res); } - return res; + if (rowsFetched) + *rowsFetched = fetched < 0 ? static_cast(rowArraySize) : fetched; + + if (fetched > 0) + return errors == 0 ? SqlResult::AI_SUCCESS : SqlResult::AI_SUCCESS_WITH_INFO; + + return errors == 0 ? SqlResult::AI_NO_DATA : SqlResult::AI_ERROR; } - const meta::ColumnMetaVector* Statement::GetMeta() const + const meta::ColumnMetaVector* Statement::GetMeta() { if (!currentQuery.get()) + { + AddStatusRecord(SqlState::SHY010_SEQUENCE_ERROR, "Query is not executed."); + return 0; + } - return ¤tQuery->GetMeta(); + return currentQuery->GetMeta(); } bool Statement::DataAvailable() const @@ -1042,20 +1091,15 @@ namespace ignite strbuf, buflen, reslen, numbuf)); } - SqlResult::Type Statement::InternalGetColumnAttribute(uint16_t colIdx, - uint16_t attrId, char* strbuf, int16_t buflen, int16_t* reslen, - SqlLen* numbuf) + SqlResult::Type Statement::InternalGetColumnAttribute(uint16_t colIdx, uint16_t attrId, char* strbuf, + int16_t buflen, int16_t* reslen, SqlLen* numbuf) { const meta::ColumnMetaVector *meta = GetMeta(); if (!meta) - { - AddStatusRecord(SqlState::SHY010_SEQUENCE_ERROR, "Query is not executed."); - return SqlResult::AI_ERROR; - } - if (colIdx > meta->size() + 1 || colIdx < 1) + if (colIdx > meta->size() || colIdx < 1) { AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, "Column index is out of range.", 0, colIdx); @@ -1119,22 +1163,22 @@ namespace ignite return SqlResult::AI_SUCCESS; } - void Statement::SetRowsFetchedPtr(size_t* ptr) + void Statement::SetRowsFetchedPtr(SQLINTEGER* ptr) { rowsFetched = ptr; } - size_t* Statement::GetRowsFetchedPtr() + SQLINTEGER* Statement::GetRowsFetchedPtr() { 
return rowsFetched; } - void Statement::SetRowStatusesPtr(uint16_t* ptr) + void Statement::SetRowStatusesPtr(SQLUSMALLINT* ptr) { rowStatuses = ptr; } - uint16_t * Statement::GetRowStatusesPtr() + SQLUSMALLINT * Statement::GetRowStatusesPtr() { return rowStatuses; } @@ -1309,7 +1353,7 @@ namespace ignite } catch (const IgniteError& err) { - AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, err.GetText()); + AddStatusRecord(err.GetText()); return SqlResult::AI_ERROR; } @@ -1332,6 +1376,24 @@ namespace ignite return SqlResult::AI_SUCCESS; } + + uint16_t Statement::SqlResultToRowResult(SqlResult::Type value) + { + switch (value) + { + case SqlResult::AI_NO_DATA: + return SQL_ROW_NOROW; + + case SqlResult::AI_SUCCESS: + return SQL_ROW_SUCCESS; + + case SqlResult::AI_SUCCESS_WITH_INFO: + return SQL_ROW_SUCCESS_WITH_INFO; + + default: + return SQL_ROW_ERROR; + } + } } } diff --git a/modules/platforms/cpp/odbc/src/streaming/streaming_context.cpp b/modules/platforms/cpp/odbc/src/streaming/streaming_context.cpp index b9ee94ad2cd0a5..54d5f881247995 100644 --- a/modules/platforms/cpp/odbc/src/streaming/streaming_context.cpp +++ b/modules/platforms/cpp/odbc/src/streaming/streaming_context.cpp @@ -123,7 +123,7 @@ namespace ignite } catch (const IgniteError& err) { - connection->AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, err.GetText()); + connection->AddStatusRecord(err.GetText()); return SqlResult::AI_ERROR; } diff --git a/modules/platforms/cpp/odbc/src/type_traits.cpp b/modules/platforms/cpp/odbc/src/type_traits.cpp index 25eff51d309c17..1310c67bca9b1e 100644 --- a/modules/platforms/cpp/odbc/src/type_traits.cpp +++ b/modules/platforms/cpp/odbc/src/type_traits.cpp @@ -336,12 +336,15 @@ namespace ignite case SQL_C_BINARY: return OdbcNativeType::AI_BINARY; + case SQL_C_DATE: case SQL_C_TYPE_DATE: return OdbcNativeType::AI_TDATE; + case SQL_C_TIME: case SQL_C_TYPE_TIME: return OdbcNativeType::AI_TTIME; + case SQL_C_TIMESTAMP: case SQL_C_TYPE_TIMESTAMP: return 
OdbcNativeType::AI_TTIMESTAMP; diff --git a/modules/platforms/cpp/thin-client/include/ignite/impl/thin/transactions/transactions_proxy.h b/modules/platforms/cpp/thin-client/include/ignite/impl/thin/transactions/transactions_proxy.h index ce0802cf56286c..64ad200d3de884 100644 --- a/modules/platforms/cpp/thin-client/include/ignite/impl/thin/transactions/transactions_proxy.h +++ b/modules/platforms/cpp/thin-client/include/ignite/impl/thin/transactions/transactions_proxy.h @@ -65,7 +65,10 @@ namespace ignite /** * Destructor. */ - ~TransactionProxy() {}; + ~TransactionProxy() + { + // No-op. + }; /** * Commit the transaction. diff --git a/modules/platforms/cpp/thin-client/src/impl/transactions/transaction_impl.h b/modules/platforms/cpp/thin-client/src/impl/transactions/transaction_impl.h index dedf0bc60f6fe8..b6f9aa13a00d60 100644 --- a/modules/platforms/cpp/thin-client/src/impl/transactions/transaction_impl.h +++ b/modules/platforms/cpp/thin-client/src/impl/transactions/transaction_impl.h @@ -117,12 +117,7 @@ namespace ignite /** * Sets close flag to tx. */ - void Closed(); - - /** - * @return Current transaction. - */ - static SP_TransactionImpl GetCurrent(); + void SetClosed(); /** * Starts transaction. @@ -143,10 +138,10 @@ namespace ignite ignite::common::concurrent::SharedPointer > label); protected: /** Checks current thread state. */ - static void txThreadCheck(const TransactionImpl& tx); + void ThreadCheck(); /** Completes tc and clear state from storage. */ - static void txThreadEnd(TransactionImpl& tx); + void ThreadEnd(); private: /** Transactions implementation. */ @@ -155,9 +150,6 @@ namespace ignite /** Current transaction Id. */ int32_t txId; - /** Thread local instance of the transaction. */ - static ignite::common::concurrent::ThreadLocalInstance threadTx; - /** Concurrency. 
*/ int concurrency; diff --git a/modules/platforms/cpp/thin-client/src/impl/transactions/transactions_impl.cpp b/modules/platforms/cpp/thin-client/src/impl/transactions/transactions_impl.cpp index 1cd52e33af5726..d78593276fe99d 100644 --- a/modules/platforms/cpp/thin-client/src/impl/transactions/transactions_impl.cpp +++ b/modules/platforms/cpp/thin-client/src/impl/transactions/transactions_impl.cpp @@ -32,11 +32,10 @@ namespace ignite { namespace transactions { - ThreadLocalInstance TransactionImpl::threadTx; - TransactionsImpl::TransactionsImpl(const SP_DataRouter& router) : router(router) { + // No-op. } template @@ -68,7 +67,7 @@ namespace ignite int32_t txSize, SharedPointer > label) { - SP_TransactionImpl tx = threadTx.Get(); + SP_TransactionImpl tx = txs.GetCurrent(); TransactionImpl* ptr = tx.Get(); @@ -87,42 +86,40 @@ namespace ignite tx = SP_TransactionImpl(new TransactionImpl(txs, curTxId, concurrency, isolation, timeout, txSize)); - threadTx.Set(tx); + txs.SetCurrent(tx); return tx; } - SP_TransactionImpl TransactionImpl::GetCurrent() + bool TransactionImpl::IsClosed() const + { + return closed; + } + + SP_TransactionImpl TransactionsImpl::GetCurrent() { SP_TransactionImpl tx = threadTx.Get(); TransactionImpl* ptr = tx.Get(); - if (ptr) + if (ptr && ptr->IsClosed()) { - if (ptr->IsClosed()) - { - tx = SP_TransactionImpl(); + threadTx.Remove(); - threadTx.Remove(); - } - } - else - { tx = SP_TransactionImpl(); } return tx; } - bool TransactionImpl::IsClosed() const + void TransactionsImpl::SetCurrent(const SP_TransactionImpl& impl) { - return closed; + threadTx.Set(impl); } - SP_TransactionImpl TransactionsImpl::GetCurrent() + void TransactionsImpl::ResetCurrent() { - return TransactionImpl::GetCurrent(); + threadTx.Remove(); } int32_t TransactionsImpl::TxCommit(int32_t txId) @@ -154,25 +151,25 @@ namespace ignite void TransactionImpl::Commit() { - txThreadCheck(*this); + ThreadCheck(); txs.TxCommit(txId); - txThreadEnd(*this); + ThreadEnd(); } void 
TransactionImpl::Rollback() { - txThreadCheck(*this); + ThreadCheck(); txs.TxRollback(txId); - txThreadEnd(*this); + ThreadEnd(); } void TransactionImpl::Close() { - txThreadCheck(*this); + ThreadCheck(); if (IsClosed()) { @@ -181,31 +178,31 @@ namespace ignite txs.TxClose(txId); - txThreadEnd(*this); + ThreadEnd(); } - void TransactionImpl::Closed() + void TransactionImpl::SetClosed() { closed = true; } - void TransactionImpl::txThreadEnd(TransactionImpl& tx) + void TransactionImpl::ThreadEnd() { - tx.Closed(); + this->SetClosed(); - threadTx.Set(0); + txs.ResetCurrent(); } - void TransactionImpl::txThreadCheck(const TransactionImpl& inTx) + void TransactionImpl::ThreadCheck() { - SP_TransactionImpl tx = threadTx.Get(); + SP_TransactionImpl tx = txs.GetCurrent(); TransactionImpl* ptr = tx.Get(); if (!ptr) throw IgniteError(IgniteError::IGNITE_ERR_TX_THIS_THREAD, TX_ALREADY_CLOSED); - if (ptr->TxId() != inTx.TxId()) + if (ptr->TxId() != this->TxId()) throw IgniteError(IgniteError::IGNITE_ERR_TX_THIS_THREAD, TX_DIFFERENT_THREAD); } } diff --git a/modules/platforms/cpp/thin-client/src/impl/transactions/transactions_impl.h b/modules/platforms/cpp/thin-client/src/impl/transactions/transactions_impl.h index e8d3df9559bfbe..278545d3f4ed96 100644 --- a/modules/platforms/cpp/thin-client/src/impl/transactions/transactions_impl.h +++ b/modules/platforms/cpp/thin-client/src/impl/transactions/transactions_impl.h @@ -101,11 +101,23 @@ namespace ignite * Get active transaction for the current thread. * * @return Active transaction implementation for current thread - * or null pointer if there is no active transaction for - * the thread. + * or null pointer if there is no active transaction for the thread. */ SP_TransactionImpl GetCurrent(); + /** + * Set active transaction for the current thread. + * + * @param impl Active transaction implementation for current thread + * or null pointer if there is no active transaction for the thread. 
+ */ + void SetCurrent(const SP_TransactionImpl& impl); + + /** + * Reset active transaction for the current thread. + */ + void ResetCurrent(); + /** * Synchronously send message and receive response. * @@ -115,10 +127,14 @@ namespace ignite */ template void SendTxMessage(const ReqT& req, RspT& rsp); + private: /** Data router. */ SP_DataRouter router; + /** Thread local instance of the transaction. */ + ignite::common::concurrent::ThreadLocalInstance threadTx; + IGNITE_NO_COPY_ASSIGNMENT(TransactionsImpl); }; } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.DotNetCore.csproj b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.DotNetCore.csproj index ef97e9ab2dea8e..884009a4ccf720 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.DotNetCore.csproj +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.DotNetCore.csproj @@ -89,6 +89,7 @@ + PreserveNewest diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj index 0f67ed407b7548..b28268d01a47c3 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj @@ -112,6 +112,7 @@ + @@ -439,6 +440,9 @@ + + PreserveNewest + PreserveNewest diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Affinity/AffinityTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Affinity/AffinityTest.cs index 73a1c1f8da8be7..37bc53be75728a 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Affinity/AffinityTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Affinity/AffinityTest.cs @@ -38,15 +38,9 @@ public sealed class AffinityTest [TestFixtureSetUp] public void StartGrids() { - for (int i = 0; i < 3; i++) + 
for (var i = 0; i < 3; i++) { - var cfg = new IgniteConfiguration(TestUtils.GetTestConfiguration()) - { - SpringConfigUrl = Path.Combine("Config", "native-client-test-cache-affinity.xml"), - IgniteInstanceName = "grid-" + i - }; - - Ignition.Start(cfg); + Ignition.Start(GetConfig(i, client: i == 2)); } } @@ -75,6 +69,21 @@ public void TestAffinity() Assert.AreEqual(node.Id, aff.MapKeyToNode(new AffinityTestKey(i, 1)).Id); } + /// + /// Tests that affinity can be retrieved from client node right after the cache has been started on server node. + /// + [Test] + public void TestAffinityRetrievalForNewCache() + { + var server = Ignition.GetIgnite("grid-0"); + var client = Ignition.GetIgnite("grid-2"); + + var serverCache = server.CreateCache(TestUtils.TestName); + var clientAff = client.GetAffinity(serverCache.Name); + + Assert.IsNotNull(clientAff); + } + /// /// Test affinity with binary flag. /// @@ -101,18 +110,20 @@ public void TestAffinityBinary() /// /// Tests that works when used on a property of a type that is /// specified as or and - /// configured in a Spring XML file. + /// configured in a Spring XML file. /// [Test] public void TestAffinityKeyMappedWithQueryEntitySpringXml() { - TestAffinityKeyMappedWithQueryEntity0(Ignition.GetIgnite("grid-0"), "cache1"); - TestAffinityKeyMappedWithQueryEntity0(Ignition.GetIgnite("grid-1"), "cache1"); + foreach (var ignite in Ignition.GetAll()) + { + TestAffinityKeyMappedWithQueryEntity0(ignite, "cache1"); + } } /// /// Tests that works when used on a property of a type that is - /// specified as or . + /// specified as or . /// [Test] public void TestAffinityKeyMappedWithQueryEntity() @@ -194,7 +205,20 @@ public override int GetHashCode() return _id; } } - + + /// + /// Gets Ignite config. 
+ /// + private static IgniteConfiguration GetConfig(int idx, bool client = false) + { + return new IgniteConfiguration(TestUtils.GetTestConfiguration()) + { + SpringConfigUrl = Path.Combine("Config", "native-client-test-cache-affinity.xml"), + IgniteInstanceName = "grid-" + idx, + ClientMode = client + }; + } + /// /// Query entity key. /// @@ -204,12 +228,12 @@ private class QueryEntityKey /** */ [QuerySqlField] public string Data { get; set; } - + /** */ [AffinityKeyMapped] public long AffinityKey { get; set; } } - + /// /// Query entity key. /// @@ -219,7 +243,7 @@ private class QueryEntityValue /** */ [QuerySqlField] public string Name { get; set; } - + /** */ [AffinityKeyMapped] public long AffKey { get; set; } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheNodeFilterTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheNodeFilterTest.cs new file mode 100644 index 00000000000000..2c94775f6f447f --- /dev/null +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheNodeFilterTest.cs @@ -0,0 +1,274 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +namespace Apache.Ignite.Core.Tests.Cache +{ + using System; + using System.Collections.Generic; + using System.IO; + using System.Linq; + using Apache.Ignite.Core.Cache.Configuration; + using Apache.Ignite.Core.Cluster; + using NUnit.Framework; + + /// + /// Cache node filter tests. + /// + [TestFixture] + public class CacheNodeFilterTest + { + /** */ + private const string AttrKey2 = "attr2"; + + /** */ + private const int AttrVal2 = 3; + + /** */ + private const string AttrKey3 = "my-key"; + + /** */ + private const string AttrVal3 = "my-val"; + + /** Grid instances. */ + private IIgnite _grid1, _grid2, _grid3; + + /// + /// Fixture setup. + /// + [TestFixtureSetUp] + public void TestFixtureSetUp() + { + var springConfig = new IgniteConfiguration(TestUtils.GetTestConfiguration()) + { + SpringConfigUrl = Path.Combine("Config", "cache-attribute-node-filter.xml"), + IgniteInstanceName = "springGrid" + }; + _grid1 = Ignition.Start(springConfig); + + _grid2 = Ignition.Start(GetTestConfiguration("Ignite2", + new Dictionary + { + {AttrKey2, AttrVal2} + })); + + _grid3 = Ignition.Start(GetTestConfiguration("Ignite3", + new Dictionary + { + {AttrKey2, AttrVal2}, + {AttrKey3, AttrVal3} + })); + } + + /// + /// Fixture tear down. + /// + [TestFixtureTearDown] + public void TestFixtureTearDown() + { + Ignition.StopAll(true); + } + + /// + /// Gets a test configuration. + /// + /// Grid name. + /// User attributes. + /// + private IgniteConfiguration GetTestConfiguration(string gridName, Dictionary userAttributes) + { + IgniteConfiguration cfg = TestUtils.GetTestConfiguration(name: gridName); + cfg.UserAttributes = userAttributes; + return cfg; + } + + /// + /// Tests attribute node filter with a custom user attribute name + /// and null value always matches. 
+ /// + [Test] + public void TestUserAttributeWithNullValueMatchesAllNodes() + { + const int replicatedPartitionsCount = 512; + + var cacheCfg = new CacheConfiguration + { + Name = Guid.NewGuid().ToString(), + NodeFilter = new AttributeNodeFilter("my.custom.attr", null), + CacheMode = CacheMode.Replicated, + }; + var cache = _grid1.CreateCache(cacheCfg); + + var affinity = _grid1.GetAffinity(cache.Name); + + Assert.AreEqual(3, _grid1.GetCluster().ForDataNodes(cache.Name).GetNodes().Count); + + var parts1 = affinity.GetAllPartitions(_grid1.GetCluster().GetLocalNode()); + var parts2 = affinity.GetAllPartitions(_grid2.GetCluster().GetLocalNode()); + var parts3 = affinity.GetAllPartitions(_grid3.GetCluster().GetLocalNode()); + + Assert.AreEqual(replicatedPartitionsCount, parts1.Length); + Assert.AreEqual(parts1, parts2); + Assert.AreEqual(parts2, parts3); + } + + /// + /// Tests attribute node filter matches the specified attribute. + /// + [Test] + public void TestAttributeNodeFilterMatchesCustomNode() + { + const int itemsCount = 10; + + var cacheCfg = new CacheConfiguration + { + Name = Guid.NewGuid().ToString(), + NodeFilter = new AttributeNodeFilter(AttrKey2, AttrVal2), + CacheMode = CacheMode.Replicated, + }; + var cache = _grid1.CreateCache(cacheCfg); + + for (int i = 0; i < itemsCount; i++) + { + cache.Put(i, i); + } + + Assert.AreEqual(2, _grid1.GetCluster().ForDataNodes(cache.Name).GetNodes().Count); + + Assert.AreEqual(0, cache.GetLocalEntries().Count()); + + var cache2 = _grid2.GetCache(cache.Name); + var cache3 = _grid2.GetCache(cache.Name); + + Assert.AreEqual(itemsCount, cache2.GetLocalEntries().Count()); + Assert.AreEqual(itemsCount, cache3.GetLocalEntries().Count()); + } + + /// + /// Tests node filter with multiple attributes matches single node. 
+ /// + [Test] + public void TestNodeFilterWithMultipleUserAttributes() + { + var cacheCfg = new CacheConfiguration + { + Name = Guid.NewGuid().ToString(), + NodeFilter = new AttributeNodeFilter + { + Attributes = new Dictionary + { + {AttrKey2, AttrVal2}, + {AttrKey3, AttrVal3} + } + }, + CacheMode = CacheMode.Replicated, + }; + var cache = _grid1.CreateCache(cacheCfg); + + ICollection dataNodes = _grid1.GetCluster().ForDataNodes(cache.Name).GetNodes(); + Assert.AreEqual(1, dataNodes.Count); + Assert.AreEqual(_grid3.GetCluster().GetLocalNode(), dataNodes.Single()); + } + + /// + /// Tests Java and .NET nodes can utilize the same + /// attribute node filter configuration. + /// + [Test] + public void TestSpringAttributeNodeFilter() + { + var cache = _grid1.GetCache("cache"); + Assert.AreEqual(2, _grid1.GetCluster().ForDataNodes(cache.Name).GetNodes().Count); + + var nodeFilter = cache.GetConfiguration().NodeFilter as AttributeNodeFilter; + Assert.IsNotNull(nodeFilter); + + Assert.AreEqual(1, nodeFilter.Attributes.Count); + + var expected = new KeyValuePair(AttrKey3, AttrVal3); + Assert.AreEqual(expected, nodeFilter.Attributes.Single()); + } + + /// + /// Tests that java node filter is not being read on .NET side. + /// + [Test] + public void TestJavaNodeFilterIsNotAccessedByNetConfig() + { + var cache = _grid1.GetCache("cacheWithJavaFilter"); + + Assert.IsNull(cache.GetConfiguration().NodeFilter); + } + + /// + /// Tests that custom node filter is not supported. + /// + [Test] + public void TestCustomFilterIsNotSupported() + { + var cacheCfg = new CacheConfiguration + { + Name = Guid.NewGuid().ToString(), + CacheMode = CacheMode.Replicated, + NodeFilter = new CustomFilter() + }; + + TestDelegate action = () => { _grid1.CreateCache(cacheCfg); }; + + var ex = Assert.Throws(action); + Assert.AreEqual("Unsupported CacheConfiguration.NodeFilter: " + + "'CustomFilter'. 
" + + "Only predefined implementations are supported: " + + "'AttributeNodeFilter'", ex.Message); + } + + /// + /// Tests that attribute node filter with Null + /// Attributes value is not supported. + /// + [Test] + public void TestAttributeFilterWithNullValues() + { + TestDelegate action = () => + { + var _ = new CacheConfiguration + { + NodeFilter = new AttributeNodeFilter + { + Attributes = null + }, + }; + }; + + var ex = Assert.Throws(action); + StringAssert.Contains("value", ex.Message); + } + + /// + /// Custom node filter. + /// + public class CustomFilter : IClusterNodeFilter + { + /// + /// + /// + public bool Invoke(IClusterNode node) + { + return true; + } + } + } +} \ No newline at end of file diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/DataRegionMetricsTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/DataRegionMetricsTest.cs index d0b7332cec6aed..4c26d0ab6edc0b 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/DataRegionMetricsTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/DataRegionMetricsTest.cs @@ -73,7 +73,8 @@ public void TestMemoryMetrics() RegionWithMetrics, RegionWithMetricsAndPersistence, "sysMemPlc", - "TxLog" + "TxLog", + "volatileDsMemPlc" }, names, string.Join(", ", names)); @@ -96,11 +97,15 @@ public void TestMemoryMetrics() memMetrics.PhysicalMemoryPages * (memMetrics.PageSize + PageOverhead)); Assert.Greater(memMetrics.OffHeapSize, memMetrics.PhysicalMemoryPages); Assert.Greater(memMetrics.OffheapUsedSize, memMetrics.PhysicalMemoryPages); - + var sysMetrics = metrics[4]; Assert.AreEqual("sysMemPlc", sysMetrics.Name); AssertMetricsAreEmpty(sysMetrics); + var volatileMetrics = metrics[6]; + Assert.AreEqual("volatileDsMemPlc", volatileMetrics.Name); + AssertMetricsAreEmpty(volatileMetrics); + // Metrics by name. // In-memory region. 
emptyMetrics = ignite.GetDataRegionMetrics(RegionNoMetrics); @@ -120,6 +125,10 @@ public void TestMemoryMetrics() Assert.AreEqual("sysMemPlc", sysMetrics.Name); AssertMetricsAreEmpty(sysMetrics); + volatileMetrics = ignite.GetDataRegionMetrics("volatileDsMemPlc"); + Assert.AreEqual("volatileDsMemPlc", volatileMetrics.Name); + AssertMetricsAreEmpty(volatileMetrics); + // Invalid name. Assert.IsNull(ignite.GetDataRegionMetrics("boo")); } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/MemoryMetricsTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/MemoryMetricsTest.cs index 352c1e9dfaa610..a2b1c872eee392 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/MemoryMetricsTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/MemoryMetricsTest.cs @@ -44,7 +44,7 @@ public void TestMemoryMetrics() // Verify metrics. var metrics = ignite.GetMemoryMetrics().OrderBy(x => x.Name).ToArray(); - Assert.AreEqual(4, metrics.Length); // two defined plus system and plus TxLog. + Assert.AreEqual(5, metrics.Length); // two defined plus system, TxLog and volatile. var emptyMetrics = metrics[0]; Assert.AreEqual(MemoryPolicyNoMetrics, emptyMetrics.Name); @@ -62,6 +62,14 @@ public void TestMemoryMetrics() Assert.AreEqual("sysMemPlc", sysMetrics.Name); AssertMetricsAreEmpty(sysMetrics); + var txLogMetrics = metrics[3]; + Assert.AreEqual("TxLog", txLogMetrics.Name); + AssertMetricsAreEmpty(txLogMetrics); + + var volatileMetrics = metrics[4]; + Assert.AreEqual("volatileDsMemPlc", volatileMetrics.Name); + AssertMetricsAreEmpty(volatileMetrics); + // Metrics by name. 
emptyMetrics = ignite.GetMemoryMetrics(MemoryPolicyNoMetrics); Assert.AreEqual(MemoryPolicyNoMetrics, emptyMetrics.Name); diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheQueriesTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheQueriesTest.cs index 54180350b6cee0..2d1b157b4af5d8 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheQueriesTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheQueriesTest.cs @@ -70,7 +70,7 @@ public void StartGrids() }); } } - + /// /// Gets the name mapper. /// @@ -89,7 +89,7 @@ public void StopGrids() } /// - /// + /// /// [SetUp] public void BeforeTest() @@ -98,7 +98,7 @@ public void BeforeTest() } /// - /// + /// /// [TearDown] public void AfterTest() @@ -127,7 +127,7 @@ private static IIgnite GetIgnite() } /// - /// + /// /// /// private static ICache Cache() @@ -348,7 +348,7 @@ private void CheckEnumeratorQuery(SqlQuery qry) /// Check SQL query. /// [Test] - public void TestSqlQuery([Values(true, false)] bool loc, [Values(true, false)] bool keepBinary, + public void TestSqlQuery([Values(true, false)] bool loc, [Values(true, false)] bool keepBinary, [Values(true, false)] bool distrJoin) { var cache = Cache(); @@ -377,7 +377,7 @@ public void TestSqlQuery([Values(true, false)] bool loc, [Values(true, false)] b /// Check SQL fields query. 
/// [Test] - public void TestSqlFieldsQuery([Values(true, false)] bool loc, [Values(true, false)] bool distrJoin, + public void TestSqlFieldsQuery([Values(true, false)] bool loc, [Values(true, false)] bool distrJoin, [Values(true, false)] bool enforceJoinOrder, [Values(true, false)] bool lazy) { int cnt = MaxItemCnt; @@ -609,7 +609,7 @@ private static void CheckScanQuery(bool loc, bool keepBinary) // Exception exp = PopulateCache(cache, loc, cnt, x => x < 50); qry = new ScanQuery(new ScanQueryFilter {ThrowErr = true}); - + var ex = Assert.Throws(() => ValidateQueryResults(cache, qry, exp, keepBinary)); Assert.AreEqual(ScanQueryFilter.ErrMessage, ex.Message); } @@ -658,7 +658,7 @@ private void CheckScanQueryPartitions(bool loc, bool keepBinary) ValidateQueryResults(cache, qry, exp0, keepBinary); } - + } /// @@ -791,7 +791,7 @@ public void TestCustomKeyValueFieldNames() cache[1] = new QueryPerson("John", 33); row = cache.Query(new SqlFieldsQuery("select * from QueryPerson")).GetAll()[0]; - + Assert.AreEqual(3, row.Count); Assert.AreEqual(33, row[0]); Assert.AreEqual(1, row[1]); @@ -861,7 +861,7 @@ public void TestFieldNames() var names = cur.FieldNames; Assert.AreEqual(new[] {"AGE", "NAME" }, names); - + cur.Dispose(); Assert.AreSame(names, cur.FieldNames); @@ -878,7 +878,7 @@ public void TestFieldNames() qry.Sql = "SELECT 1, AGE FROM QueryPerson"; cur = cache.Query(qry); cur.Dispose(); - + Assert.AreEqual(new[] { "1", "AGE" }, cur.FieldNames); } @@ -933,6 +933,36 @@ public void TestFieldsMetadata() ); } + /// + /// Tests argument propagation and validation. 
+ /// + [Test] + public void TestPartitionsValidation() + { + var cache = Cache(); + var qry = new SqlFieldsQuery("SELECT * FROM QueryPerson") { Partitions = new int[0] }; + + var ex = Assert.Throws(() => cache.Query(qry).GetAll()); + StringAssert.EndsWith("Partitions must not be empty.", ex.Message); + + qry.Partitions = new[] {-1, -2}; + ex = Assert.Throws(() => cache.Query(qry).GetAll()); + StringAssert.EndsWith("Illegal partition", ex.Message); + } + + /// + /// Tests argument propagation and validation. + /// + [Test] + public void TestUpdateBatchSizeValidation() + { + var cache = Cache(); + var qry = new SqlFieldsQuery("SELECT * FROM QueryPerson") { UpdateBatchSize = -1 }; + + var ex = Assert.Throws(() => cache.Query(qry).GetAll()); + StringAssert.EndsWith("updateBatchSize cannot be lower than 1", ex.Message); + } + /// /// Validates fields metadata collection /// @@ -1045,7 +1075,7 @@ private static void ValidateQueryResults(ICache cache, QueryBa /// /// Asserts that all expected entries have been received. /// - private static void AssertMissingExpectedKeys(ICollection exp, ICache cache, + private static void AssertMissingExpectedKeys(ICollection exp, ICache cache, IList> all) { if (exp.Count == 0) @@ -1058,7 +1088,7 @@ private static void AssertMissingExpectedKeys(ICollection exp, ICache + /// Tests that is false by default + /// and expiration events are not delivered. 
+ /// + /// - Create a cache with expiry policy + /// - Start a continuous query with default settings + /// - Check that Created events are delivered, but Expired events are not + /// + [Test] + public void TestIncludeExpiredIsFalseByDefaultAndExpiredEventsAreSkipped() + { + var cache = cache1.WithExpiryPolicy(new ExpiryPolicy(TimeSpan.FromMilliseconds(100), null, null)); + var cb = new Listener(); + + var qry = new ContinuousQuery(cb); + Assert.IsFalse(qry.IncludeExpired); + + using (cache.QueryContinuous(qry)) + { + cache[1] = Entry(1); + + TestUtils.WaitForTrueCondition(() => !cache.ContainsKey(1)); + + cache[2] = Entry(2); + } + + var events = CB_EVTS.SelectMany(e => e.entries).ToList(); + Assert.AreEqual(2, events.Count); + + Assert.AreEqual(CacheEntryEventType.Created, events[0].EventType); + Assert.AreEqual(CacheEntryEventType.Created, events[1].EventType); + } + + /// + /// Tests that enabling causes + /// events to be delivered. + /// + /// - Create a cache with expiry policy + /// - Start a continuous query with set to true + /// - Check that Expired events are delivered + /// + [Test] + public void TestExpiredEventsAreDeliveredWhenIncludeExpiredIsTrue() + { + var cache = cache1.WithExpiryPolicy(new ExpiryPolicy(TimeSpan.FromMilliseconds(100), null, null)); + var cb = new Listener(); + + var qry = new ContinuousQuery(cb) + { + IncludeExpired = true + }; + + using (cache.QueryContinuous(qry)) + { + cache[1] = Entry(2); + + TestUtils.WaitForTrueCondition(() => CB_EVTS.Count == 2, 5000); + } + + var events = CB_EVTS.SelectMany(e => e.entries).ToList(); + + Assert.AreEqual(2, events.Count); + Assert.AreEqual(CacheEntryEventType.Created, events[0].EventType); + Assert.AreEqual(CacheEntryEventType.Expired, events[1].EventType); + + Assert.IsTrue(events[1].HasValue); + Assert.IsTrue(events[1].HasOldValue); + Assert.AreEqual(2, ((BinarizableEntry)events[1].Value).val); + Assert.AreEqual(2, ((BinarizableEntry)events[1].Value).val); + Assert.AreEqual(1, 
events[1].Key); + } + /// /// Test binarizable filter logic. /// @@ -1051,6 +1124,8 @@ private static ICacheEntryEvent CreateEvent(ICacheEntryEve return new CacheEntryCreateEvent(e.Key, e.Value); case CacheEntryEventType.Updated: return new CacheEntryUpdateEvent(e.Key, e.OldValue, e.Value); + case CacheEntryEventType.Expired: + return new CacheEntryExpireEvent(e.Key, e.OldValue); default: return new CacheEntryRemoveEvent(e.Key, e.OldValue); } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Continuous/ContinuousQueryTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Continuous/ContinuousQueryTest.cs index 5148dccee443a7..f188baef56f92e 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Continuous/ContinuousQueryTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Continuous/ContinuousQueryTest.cs @@ -21,7 +21,6 @@ namespace Apache.Ignite.Core.Tests.Cache.Query.Continuous using System; using System.Collections.Concurrent; using System.Collections.Generic; - using System.Threading; using Apache.Ignite.Core.Cache; using Apache.Ignite.Core.Cache.Event; using Apache.Ignite.Core.Cache.Query.Continuous; @@ -69,7 +68,7 @@ private static void PutEntry(ICache cache) cache.Put(entry.Id, entry); // Wait for events. 
- Thread.Sleep(100); + TestUtils.WaitForTrueCondition(() => Listener.Events.Count == 2); ICacheEntryEvent e; @@ -100,7 +99,7 @@ private class Data private class Listener : ICacheEntryEventListener { - public static readonly ConcurrentStack> Events + public static readonly ConcurrentStack> Events = new ConcurrentStack>(); public void OnEvent(IEnumerable> evts) diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Introspection.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Introspection.cs index 77f79f47d9c607..a55eeb8bfb242b 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Introspection.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Introspection.cs @@ -56,7 +56,9 @@ public void TestIntrospection() ReplicatedOnly = true, #pragma warning restore 618 Colocated = true, - Lazy = true + Lazy = true, + UpdateBatchSize = 12, + EnableDistributedJoins = true }).Where(x => x.Key > 10).ToCacheQueryable(); Assert.AreEqual(cache.Name, query.CacheName); @@ -76,7 +78,6 @@ public void TestIntrospection() Assert.IsTrue(fq.Local); Assert.AreEqual(PersonCount - 11, cache.Query(fq).GetAll().Count); Assert.AreEqual(999, fq.PageSize); - Assert.IsFalse(fq.EnableDistributedJoins); Assert.IsTrue(fq.EnforceJoinOrder); #pragma warning disable 618 Assert.IsTrue(fq.ReplicatedOnly); @@ -84,22 +85,30 @@ public void TestIntrospection() Assert.IsTrue(fq.Colocated); Assert.AreEqual(TimeSpan.FromSeconds(2.5), fq.Timeout); Assert.IsTrue(fq.Lazy); + Assert.IsTrue(fq.EnableDistributedJoins); + Assert.AreEqual(12, fq.UpdateBatchSize); + Assert.IsNull(fq.Partitions); var str = query.ToString(); Assert.AreEqual(GetSqlEscapeAll() ? 
"CacheQueryable [CacheName=person_org, TableName=Person, Query=SqlFieldsQuery " + "[Sql=select _T0._KEY, _T0._VAL from PERSON_ORG_SCHEMA.\"Person\" as _T0 where " + "(_T0.\"_KEY\" > ?), Arguments=[10], " + - "Local=True, PageSize=999, EnableDistributedJoins=False, EnforceJoinOrder=True, " + - "Timeout=00:00:02.5000000, ReplicatedOnly=True, Colocated=True, Schema=, Lazy=True]]" + "Local=True, PageSize=999, EnableDistributedJoins=True, EnforceJoinOrder=True, " + + "Timeout=00:00:02.5000000, Partitions=[], UpdateBatchSize=12, " + + "Colocated=True, Schema=, Lazy=True]]" : "CacheQueryable [CacheName=person_org, TableName=Person, Query=SqlFieldsQuery " + "[Sql=select _T0._KEY, _T0._VAL from PERSON_ORG_SCHEMA.Person as _T0 where " + "(_T0._KEY > ?), Arguments=[10], " + - "Local=True, PageSize=999, EnableDistributedJoins=False, EnforceJoinOrder=True, " + - "Timeout=00:00:02.5000000, ReplicatedOnly=True, Colocated=True, Schema=, Lazy=True]]", str); + "Local=True, PageSize=999, EnableDistributedJoins=True, EnforceJoinOrder=True, " + + "Timeout=00:00:02.5000000, Partitions=[], UpdateBatchSize=12, " + + "Colocated=True, Schema=, Lazy=True]]", str); // Check fields query - var fieldsQuery = cache.AsCacheQueryable().Select(x => x.Value.Name).ToCacheQueryable(); + var fieldsQuery = cache + .AsCacheQueryable(new QueryOptions {Partitions = new[] {1, 2}}) + .Select(x => x.Value.Name) + .ToCacheQueryable(); Assert.AreEqual(cache.Name, fieldsQuery.CacheName); #pragma warning disable 618 // Type or member is obsolete @@ -117,17 +126,18 @@ public void TestIntrospection() Assert.IsFalse(fq.EnableDistributedJoins); Assert.IsFalse(fq.EnforceJoinOrder); Assert.IsFalse(fq.Lazy); + Assert.AreEqual(new[] {1, 2}, fq.Partitions); str = fieldsQuery.ToString(); Assert.AreEqual(GetSqlEscapeAll() ? 
"CacheQueryable [CacheName=person_org, TableName=Person, Query=SqlFieldsQuery " + "[Sql=select _T0.\"Name\" from PERSON_ORG_SCHEMA.\"Person\" as _T0, Arguments=[], Local=False, " + "PageSize=1024, EnableDistributedJoins=False, EnforceJoinOrder=False, " + - "Timeout=00:00:00, ReplicatedOnly=False, Colocated=False, Schema=, Lazy=False]]" + "Timeout=00:00:00, Partitions=[1, 2], UpdateBatchSize=1, Colocated=False, Schema=, Lazy=False]]" : "CacheQueryable [CacheName=person_org, TableName=Person, Query=SqlFieldsQuery " + "[Sql=select _T0.NAME from PERSON_ORG_SCHEMA.Person as _T0, Arguments=[], Local=False, " + "PageSize=1024, EnableDistributedJoins=False, EnforceJoinOrder=False, " + - "Timeout=00:00:00, ReplicatedOnly=False, Colocated=False, Schema=, Lazy=False]]", str); + "Timeout=00:00:00, Partitions=[1, 2], UpdateBatchSize=1, Colocated=False, Schema=, Lazy=False]]", str); // Check distributed joins flag propagation var distrQuery = cache.AsCacheQueryable(new QueryOptions { EnableDistributedJoins = true }) @@ -144,13 +154,13 @@ public void TestIntrospection() "(((_T0.\"_KEY\" > ?) and (_T0.\"age1\" > ?)) " + "and (_T0.\"Name\" like \'%\' || ? || \'%\') ), Arguments=[10, 20, x], Local=False, " + "PageSize=1024, EnableDistributedJoins=True, EnforceJoinOrder=False, " + - "Timeout=00:00:00, ReplicatedOnly=False, Colocated=False, Schema=, Lazy=False]]" + "Timeout=00:00:00, Partitions=[], UpdateBatchSize=1, Colocated=False, Schema=, Lazy=False]]" : "CacheQueryable [CacheName=person_org, TableName=Person, Query=SqlFieldsQuery " + "[Sql=select _T0._KEY, _T0._VAL from PERSON_ORG_SCHEMA.Person as _T0 where " + "(((_T0._KEY > ?) and (_T0.AGE1 > ?)) " + "and (_T0.NAME like \'%\' || ? 
|| \'%\') ), Arguments=[10, 20, x], Local=False, " + "PageSize=1024, EnableDistributedJoins=True, EnforceJoinOrder=False, " + - "Timeout=00:00:00, ReplicatedOnly=False, Colocated=False, Schema=, Lazy=False]]", str); + "Timeout=00:00:00, Partitions=[], UpdateBatchSize=1, Colocated=False, Schema=, Lazy=False]]", str); } } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Misc.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Misc.cs index 6d26131b82af88..15bcfa6f186ee2 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Misc.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Misc.cs @@ -28,10 +28,12 @@ namespace Apache.Ignite.Core.Tests.Cache.Query.Linq { using System; using System.Linq; + using System.Linq.Expressions; using Apache.Ignite.Core.Cache; using Apache.Ignite.Core.Cache.Configuration; using Apache.Ignite.Linq; using NUnit.Framework; + using NUnit.Framework.Constraints; /// /// Tests LINQ. @@ -353,5 +355,30 @@ public void TestTimeout() Assert.IsTrue(ex.ToString().Contains("QueryCancelledException: The query was cancelled while executing.")); } + + /// + /// Tests that is not supported. + /// + [Test] + public void TestInvokeThrowsNotSupportedException() + { + var constraint = new ReusableConstraint(Is.TypeOf() + .And.Message.StartsWith("The LINQ expression '") + .And.Message.Contains("Invoke") + .And.Message.Contains( + "could not be translated. 
Either rewrite the query in a form that can be translated, or switch to client evaluation explicitly by inserting a call to either AsEnumerable() or ToList().")); + + Func, bool> filter = entry => false; + // ReSharper disable once ReturnValueOfPureMethodIsNotUsed + Assert.Throws(constraint, () => GetPersonCache().AsCacheQueryable() + .Where(x => filter(x)) + .ToList()); + + Func, int> selector = x => x.Key; + // ReSharper disable once ReturnValueOfPureMethodIsNotUsed + Assert.Throws(constraint, () => GetPersonCache().AsCacheQueryable() + .Select(x => selector(x)) + .FirstOrDefault()); + } } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/ContinuousQueryTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/ContinuousQueryTest.cs index fad419111e0ac1..960ede5164b9c4 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/ContinuousQueryTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/ContinuousQueryTest.cs @@ -26,6 +26,7 @@ namespace Apache.Ignite.Core.Tests.Client.Cache using Apache.Ignite.Core.Binary; using Apache.Ignite.Core.Cache; using Apache.Ignite.Core.Cache.Event; + using Apache.Ignite.Core.Cache.Expiry; using Apache.Ignite.Core.Cache.Query.Continuous; using Apache.Ignite.Core.Client; using Apache.Ignite.Core.Client.Cache; @@ -638,6 +639,76 @@ public void TestCustomTimeIntervalCausesIncompleteBatches() }); } + /// + /// Tests that is false by default + /// and expiration events are not delivered. 
+ /// + /// - Create a cache with expiry policy + /// - Start a continuous query with default settings + /// - Check that Created events are delivered, but Expired events are not + /// + [Test] + public void TestIncludeExpiredIsFalseByDefaultAndExpiredEventsAreSkipped() + { + var cache = Client.GetOrCreateCache(TestUtils.TestName) + .WithExpiryPolicy(new ExpiryPolicy(TimeSpan.FromMilliseconds(100), null, null)); + + var events = new ConcurrentQueue>(); + var qry = new ContinuousQueryClient(new DelegateListener(events.Enqueue)); + Assert.IsFalse(qry.IncludeExpired); + + using (cache.QueryContinuous(qry)) + { + cache[1] = 2; + + TestUtils.WaitForTrueCondition(() => !cache.ContainsKey(1), 5000); + + cache[2] = 3; + } + + Assert.AreEqual(2, events.Count); + Assert.AreEqual(CacheEntryEventType.Created, events.First().EventType); + Assert.AreEqual(CacheEntryEventType.Created, events.Last().EventType); + } + + /// + /// Tests that enabling causes + /// events to be delivered. + /// + /// - Create a cache with expiry policy + /// - Start a continuous query with set to true + /// - Check that Expired events are delivered + /// + [Test] + public void TestExpiredEventsAreDeliveredWhenIncludeExpiredIsTrue() + { + var cache = Client.GetOrCreateCache(TestUtils.TestName) + .WithExpiryPolicy(new ExpiryPolicy(TimeSpan.FromMilliseconds(100), null, null)); + + var events = new ConcurrentQueue>(); + var qry = new ContinuousQueryClient(new DelegateListener(events.Enqueue)) + { + IncludeExpired = true + }; + + using (cache.QueryContinuous(qry)) + { + cache[1] = 2; + + TestUtils.WaitForTrueCondition(() => events.Count == 2, 5000); + } + + Assert.AreEqual(2, events.Count); + Assert.AreEqual(CacheEntryEventType.Created, events.First().EventType); + Assert.AreEqual(CacheEntryEventType.Expired, events.Last().EventType); + + Assert.IsTrue(events.Last().HasValue); + Assert.IsTrue(events.Last().HasOldValue); + Assert.AreEqual(2, events.Last().Value); + Assert.AreEqual(2, 
events.Last().OldValue); + Assert.AreEqual(1, events.Last().Key); + } + /// /// Tests batching behavior. /// diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/SqlQueryTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/SqlQueryTest.cs index 9584af82590d75..7bbf522fed75e9 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/SqlQueryTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/SqlQueryTest.cs @@ -80,7 +80,7 @@ public void TestSqlQueryDistributedJoins() var qry = new SqlQuery(typeof(Person), string.Format("from \"{0}\".Person, \"{1}\".Person as p2 where Person.Id = 11 - p2.Id", CacheName, CacheName2)); - + Assert.Greater(Count, cache.Query(qry).Count()); // Distributed join fixes the problem. @@ -136,7 +136,7 @@ public void TestFieldsQueryDistributedJoins() // Non-distributed join returns incomplete results. var qry = new SqlFieldsQuery(string.Format( - "select p2.Name from \"{0}\".Person, \"{1}\".Person as p2 where Person.Id = 11 - p2.Id", + "select p2.Name from \"{0}\".Person, \"{1}\".Person as p2 where Person.Id = 11 - p2.Id", CacheName, CacheName2)); Assert.Greater(Count, cache.Query(qry).Count()); @@ -228,5 +228,35 @@ public void TestDml() Assert.AreEqual(1, res[0][0]); Assert.AreEqual("baz", cache[-10].Name); } + + /// + /// Tests argument propagation and validation. + /// + [Test] + public void TestPartitionsValidation() + { + var cache = GetClientCache(); + var qry = new SqlFieldsQuery("SELECT * FROM Person") { Partitions = new int[0] }; + + var ex = Assert.Throws(() => cache.Query(qry).GetAll()); + StringAssert.EndsWith("Partitions must not be empty.", ex.Message); + + qry.Partitions = new[] {-1, -2}; + ex = Assert.Throws(() => cache.Query(qry).GetAll()); + StringAssert.EndsWith("Illegal partition", ex.Message); + } + + /// + /// Tests argument propagation and validation. 
+ /// + [Test] + public void TestUpdateBatchSizeValidation() + { + var cache = GetClientCache(); + var qry = new SqlFieldsQuery("SELECT * FROM Person") { UpdateBatchSize = -1 }; + + var ex = Assert.Throws(() => cache.Query(qry).GetAll()); + StringAssert.EndsWith("updateBatchSize cannot be lower than 1", ex.Message); + } } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/cache-attribute-node-filter.xml b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/cache-attribute-node-filter.xml new file mode 100644 index 00000000000000..8b1c5430358067 --- /dev/null +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/cache-attribute-node-filter.xml @@ -0,0 +1,81 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 127.0.0.1:47500 + + + + + + + + + \ No newline at end of file diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml index e9e5ff461b66fd..32eed223a00a94 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml @@ -91,6 +91,13 @@ + + + + + null + + diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs index 7941175af4d46c..1211d3832ff5cc 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs @@ -39,6 +39,7 @@ namespace Apache.Ignite.Core.Tests using Apache.Ignite.Core.Cache.Eviction; using Apache.Ignite.Core.Cache.Expiry; using Apache.Ignite.Core.Cache.Store; + using Apache.Ignite.Core.Cluster; using Apache.Ignite.Core.Ssl; using Apache.Ignite.Core.Common; using Apache.Ignite.Core.Communication.Tcp; 
@@ -159,6 +160,14 @@ public void TestPredefinedXml() Assert.IsNotNull(nearCfg); Assert.AreEqual(7, nearCfg.NearStartSize); + var nodeFilter = (AttributeNodeFilter)cacheCfg.NodeFilter; + Assert.IsNotNull(nodeFilter); + var attributes = nodeFilter.Attributes.ToList(); + Assert.AreEqual(3, nodeFilter.Attributes.Count); + Assert.AreEqual(new KeyValuePair("myNode", "true"), attributes[0]); + Assert.AreEqual(new KeyValuePair("foo", null), attributes[1]); + Assert.AreEqual(new KeyValuePair("baz", null), attributes[2]); + var plc = nearCfg.EvictionPolicy as FifoEvictionPolicy; Assert.IsNotNull(plc); Assert.AreEqual(10, plc.BatchSize); diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj b/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj index 99bf6fd202bc73..58ea5f7aadb570 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj @@ -71,6 +71,7 @@ + @@ -100,6 +101,8 @@ + + @@ -159,7 +162,6 @@ - diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/CacheConfiguration.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/CacheConfiguration.cs index 82eb16c8018055..56681cf7457fed 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/CacheConfiguration.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/CacheConfiguration.cs @@ -34,6 +34,7 @@ namespace Apache.Ignite.Core.Cache.Configuration using Apache.Ignite.Core.Cache.Eviction; using Apache.Ignite.Core.Cache.Expiry; using Apache.Ignite.Core.Cache.Store; + using Apache.Ignite.Core.Cluster; using Apache.Ignite.Core.Common; using Apache.Ignite.Core.Configuration; using Apache.Ignite.Core.Impl; @@ -249,7 +250,7 @@ public CacheConfiguration(string name, params QueryEntity[] queryEntities) : thi /// Initializes a new instance of the class, /// performing a deep copy of specified cache configuration. 
/// - /// The other configuration to perfrom deep copy from. + /// The other configuration to perform deep copy from. public CacheConfiguration(CacheConfiguration other) { if (other != null) @@ -340,6 +341,8 @@ private void Read(BinaryReader reader) AffinityFunction = AffinityFunctionSerializer.Read(reader); ExpiryPolicyFactory = ExpiryPolicySerializer.ReadPolicyFactory(reader); + NodeFilter = reader.ReadBoolean() ? new AttributeNodeFilter(reader) : null; + KeyConfiguration = reader.ReadCollectionRaw(r => new CacheKeyConfiguration(r)); if (reader.ReadBoolean()) @@ -448,6 +451,26 @@ internal void Write(BinaryWriter writer) AffinityFunctionSerializer.Write(writer, AffinityFunction); ExpiryPolicySerializer.WritePolicyFactory(writer, ExpiryPolicyFactory); + if (NodeFilter != null) + { + writer.WriteBoolean(true); + + var attributeNodeFilter = NodeFilter as AttributeNodeFilter; + if (attributeNodeFilter == null) + { + throw new NotSupportedException(string.Format( + "Unsupported CacheConfiguration.NodeFilter: '{0}'. " + + "Only predefined implementations are supported: '{1}'", + NodeFilter.GetType().Name, typeof(AttributeNodeFilter).Name)); + } + + attributeNodeFilter.Write(writer); + } + else + { + writer.WriteBoolean(false); + } + writer.WriteCollectionRaw(KeyConfiguration); if (PlatformCacheConfiguration != null) @@ -949,5 +972,12 @@ public string MemoryPolicyName /// [IgniteExperimental] public PlatformCacheConfiguration PlatformCacheConfiguration { get; set; } + + /// + /// Gets or sets the cluster node filter. Cache will be started only on nodes that match the filter. + /// + /// Only predefined implementations are supported: . 
+ /// + public IClusterNodeFilter NodeFilter { get; set; } } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Event/CacheEntryEventType.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Event/CacheEntryEventType.cs index 8339257710b9f8..f712070b0a1dd3 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Event/CacheEntryEventType.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Event/CacheEntryEventType.cs @@ -36,6 +36,11 @@ public enum CacheEntryEventType /// /// An event type indicating that the cache entry was removed. /// - Removed + Removed, + + /// + /// An event type indicating that the cache entry was removed by expiration policy. + /// + Expired } } \ No newline at end of file diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Query/Continuous/ContinuousQuery.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Query/Continuous/ContinuousQuery.cs index 4c471dc860d6fd..b24dae6d063361 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Query/Continuous/ContinuousQuery.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Query/Continuous/ContinuousQuery.cs @@ -166,6 +166,17 @@ public ContinuousQuery(ICacheEntryEventListener lsnr, ICacheEntryEventFi /// Defaults to false. /// public bool Local { get; set; } + + /// + /// Gets or sets a value indicating whether to notify about events. + /// + /// If true, then the remote listener will get notifications about expired cache entries. + /// Otherwise, only , , and + /// events will be passed to the listener. + /// + /// Defaults to false. + /// + public bool IncludeExpired { get; set; } /// /// Validate continuous query state. 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Query/SqlFieldsQuery.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Query/SqlFieldsQuery.cs index 07f7b1a9a42ad9..40c1622e22c66c 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Query/SqlFieldsQuery.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Query/SqlFieldsQuery.cs @@ -32,6 +32,9 @@ public class SqlFieldsQuery : IQueryBaseInternal /// Default page size. public const int DefaultPageSize = 1024; + /// Default value for . + public const int DefaultUpdateBatchSize = 1; + /// /// Constructor. /// @@ -55,6 +58,7 @@ public SqlFieldsQuery(string sql, bool loc, params object[] args) Arguments = args; PageSize = DefaultPageSize; + UpdateBatchSize = DefaultUpdateBatchSize; } /// @@ -152,6 +156,21 @@ public SqlFieldsQuery(string sql, bool loc, params object[] args) /// public bool Lazy { get; set; } + /// + /// Gets or sets partitions for the query. + /// + /// The query will be executed only on nodes which are primary for specified partitions. + /// + [SuppressMessage("Microsoft.Performance", "CA1819:PropertiesShouldNotReturnArrays")] + public int[] Partitions { get; set; } + + /// + /// Gets or sets batch size for update queries. + /// + /// Default is 1 (. + /// + public int UpdateBatchSize { get; set; } + /// /// Returns a that represents this instance. /// @@ -160,15 +179,19 @@ public SqlFieldsQuery(string sql, bool loc, params object[] args) /// public override string ToString() { - var args = string.Join(", ", Arguments.Select(x => x == null ? "null" : x.ToString())); + var args = Arguments == null + ? "" + : string.Join(", ", Arguments.Select(x => x == null ? "null" : x.ToString())); + + var parts = Partitions == null + ? 
"" + : string.Join(", ", Partitions.Select(x => x.ToString())); return string.Format("SqlFieldsQuery [Sql={0}, Arguments=[{1}], Local={2}, PageSize={3}, " + - "EnableDistributedJoins={4}, EnforceJoinOrder={5}, Timeout={6}, ReplicatedOnly={7}" + - ", Colocated={8}, Schema={9}, Lazy={10}]", Sql, args, Local, -#pragma warning disable 618 - PageSize, EnableDistributedJoins, EnforceJoinOrder, Timeout, ReplicatedOnly, -#pragma warning restore 618 - Colocated, Schema, Lazy); + "EnableDistributedJoins={4}, EnforceJoinOrder={5}, Timeout={6}, Partitions=[{7}], " + + "UpdateBatchSize={8}, Colocated={9}, Schema={10}, Lazy={11}]", Sql, args, Local, + PageSize, EnableDistributedJoins, EnforceJoinOrder, Timeout, parts, + UpdateBatchSize, Colocated, Schema, Lazy); } /** */ @@ -197,6 +220,8 @@ internal void Write(BinaryWriter writer) #pragma warning restore 618 writer.WriteBoolean(Colocated); writer.WriteString(Schema); // Schema + writer.WriteIntArray(Partitions); + writer.WriteInt(UpdateBatchSize); } /** */ diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Client/Cache/Query/Continuous/ContinuousQueryClient.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Client/Cache/Query/Continuous/ContinuousQueryClient.cs index 6d5d6f349c079a..e678930b9dbd91 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Client/Cache/Query/Continuous/ContinuousQueryClient.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Client/Cache/Query/Continuous/ContinuousQueryClient.cs @@ -97,5 +97,16 @@ public ContinuousQueryClient(ICacheEntryEventListener listener) : this() /// sent only when buffer is full. /// public TimeSpan TimeInterval { get; set; } + + /// + /// Gets or sets a value indicating whether to notify about events. + /// + /// If true, then the remote listener will get notifications about expired cache entries. + /// Otherwise, only , , and + /// events will be passed to the listener. + /// + /// Defaults to false. 
+ /// + public bool IncludeExpired { get; set; } } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cluster/AttributeNodeFilter.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cluster/AttributeNodeFilter.cs new file mode 100644 index 00000000000000..4c01c0ea5c102d --- /dev/null +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cluster/AttributeNodeFilter.cs @@ -0,0 +1,129 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +namespace Apache.Ignite.Core.Cluster +{ + using System; + using System.Collections.Generic; + using System.Diagnostics; + using System.Diagnostics.CodeAnalysis; + using Apache.Ignite.Core.Binary; + using Apache.Ignite.Core.Impl.Common; + + /// + /// Attribute node filter. + /// + /// The filter will evaluate to true if a node has all specified attributes with corresponding values. + /// + /// You can set node attributes using property. + /// + public sealed class AttributeNodeFilter : IClusterNodeFilter + { + /** */ + private IDictionary _attributes; + + /// + /// Attributes dictionary match. 
+ /// + [SuppressMessage("Microsoft.Usage", "CA2227:CollectionPropertiesShouldBeReadOnly")] + public IDictionary Attributes + { + get { return _attributes; } + set + { + if (value == null) + { + throw new ArgumentNullException("value"); + } + + _attributes = value; + } + } + + /// + /// Initializes a new instance of . + /// + public AttributeNodeFilter() + { + // No-op. + } + + /// + /// Initializes a new instance of . + /// + /// Attribute name. + /// Attribute value. + public AttributeNodeFilter(string attrName, object attrValue) + { + IgniteArgumentCheck.NotNullOrEmpty(attrName, "attrName"); + + Attributes = new Dictionary(1) + { + {attrName, attrValue} + }; + } + + /** */ + public bool Invoke(IClusterNode node) + { + throw new NotSupportedException("Should not be called from .NET side."); + } + + /// + /// Initializes a new instance of from a binary reader. + /// + /// Reader. + internal AttributeNodeFilter(IBinaryRawReader reader) + { + IgniteArgumentCheck.NotNull(reader, "reader"); + + int count = reader.ReadInt(); + + Debug.Assert(count > 0); + + Attributes = new Dictionary(count); + + while (count > 0) + { + string attrKey = reader.ReadString(); + object attrVal = reader.ReadObject(); + + Debug.Assert(attrKey != null); + + Attributes[attrKey] = attrVal; + + count--; + } + } + + /// + /// Writes the instance to a writer. + /// + /// Writer. + internal void Write(IBinaryRawWriter writer) + { + writer.WriteInt(Attributes.Count); + + // Does not preserve ordering, it's fine. 
+ foreach (KeyValuePair attr in Attributes) + { + writer.WriteString(attr.Key); + writer.WriteObject(attr.Value); + } + } + } +} diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd b/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd index 0ab59ec78bea8c..314b839312e450 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd +++ b/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd @@ -623,6 +623,18 @@ + + + Node filter to match selected nodes. Only predefined AttributeNodeFilter is supported. + + + + + Assembly-qualified type name. + + + + Cache key configuration collection. diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryHashCodeUtils.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryHashCodeUtils.cs index de09fdd6236c31..6c39425191f10f 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryHashCodeUtils.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryHashCodeUtils.cs @@ -114,6 +114,7 @@ public static unsafe int GetHashCode(T val, Marshaller marsh, IDictionary(T val, Marshaller marsh, IDictionary affinityKeyFieldIds) { using (var stream = new BinaryHeapStream(128)) diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheAffinityImpl.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheAffinityImpl.cs index 869518eb529096..a49074b219f68e 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheAffinityImpl.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheAffinityImpl.cs @@ -20,7 +20,6 @@ namespace Apache.Ignite.Core.Impl.Cache using System; using System.Collections.Generic; using Apache.Ignite.Core.Cache; - using Apache.Ignite.Core.Cache.Affinity; using Apache.Ignite.Core.Cluster; using Apache.Ignite.Core.Impl.Binary; using Apache.Ignite.Core.Impl.Binary.IO; @@ -76,12 +75,9 @@ internal class 
CacheAffinityImpl : PlatformTargetAdapter, ICacheAffinity /** */ private const int OpPartitions = 15; - /** */ - private const int OpIsAssignmentValid = 16; - /** */ private readonly bool _keepBinary; - + /** Grid. */ private readonly IIgniteInternal _ignite; @@ -115,7 +111,7 @@ public int GetPartition(TK key) public bool IsPrimary(IClusterNode n, TK key) { IgniteArgumentCheck.NotNull(n, "n"); - + IgniteArgumentCheck.NotNull(key, "key"); return DoOutOp(OpIsPrimary, n.Id, key) == True; @@ -220,19 +216,6 @@ public IList MapPartitionToPrimaryAndBackups(int part) return DoOutInOp(OpMapPartitionToPrimaryAndBackups, w => w.WriteObject(part), r => ReadNodes(r)); } - /// - /// Checks whether given partition is still assigned to the same node as in specified version. - /// - internal bool IsAssignmentValid(AffinityTopologyVersion version, int partition) - { - return DoOutOp(OpIsAssignmentValid, (IBinaryStream s) => - { - s.WriteLong(version.Version); - s.WriteInt(version.MinorVersion); - s.WriteInt(partition); - }) != 0; - } - /** */ protected override T Unmarshal(IBinaryStream stream) { @@ -283,4 +266,4 @@ private Dictionary ReadDictionary(IBinaryStream reader, Func + /// Affinity manager. + /// + internal class CacheAffinityManager : PlatformTargetAdapter + { + /** */ + private const int OpIsAssignmentValid = 1; + + /// + /// Initializes a new instance of class. + /// + /// Target. + internal CacheAffinityManager(IPlatformTargetInternal target) : base(target) + { + // No-op. + } + + /// + /// Checks whether given partition is still assigned to the same node as in specified version. 
+ /// + internal bool IsAssignmentValid(AffinityTopologyVersion version, int partition) + { + return DoOutOp(OpIsAssignmentValid, (IBinaryStream s) => + { + s.WriteLong(version.Version); + s.WriteInt(version.MinorVersion); + s.WriteInt(partition); + }) != 0; + } + } +} diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Event/CacheEntryExpireEvent.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Event/CacheEntryExpireEvent.cs new file mode 100644 index 00000000000000..2b4acc6da4e1c6 --- /dev/null +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Event/CacheEntryExpireEvent.cs @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +namespace Apache.Ignite.Core.Impl.Cache.Event +{ + using Apache.Ignite.Core.Cache.Event; + + /// + /// Cache entry expire event. + /// + internal class CacheEntryExpireEvent : ICacheEntryEvent + { + /** Key.*/ + private readonly TK _key; + + /** Old value.*/ + private readonly TV _oldVal; + + /// + /// Constructor. + /// + /// Key. + /// Old value. 
+ public CacheEntryExpireEvent(TK key, TV oldVal) + { + _key = key; + _oldVal = oldVal; + } + + /** */ + public TK Key + { + get { return _key; } + } + + /** */ + public TV Value + { + get { return _oldVal; } + } + + /** */ + public TV OldValue + { + get { return _oldVal; } + } + + /** */ + public bool HasValue + { + get { return true; } + } + + /** */ + public bool HasOldValue + { + get { return true; } + } + + /** */ + public CacheEntryEventType EventType + { + get { return CacheEntryEventType.Expired; } + } + } +} \ No newline at end of file diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Platform/PlatformCache.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Platform/PlatformCache.cs index d18aad9b50c526..c7bb09fafe522b 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Platform/PlatformCache.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Platform/PlatformCache.cs @@ -32,8 +32,8 @@ namespace Apache.Ignite.Core.Impl.Cache.Platform internal sealed class PlatformCache : IPlatformCache { /** Affinity. */ - private readonly CacheAffinityImpl _affinity; - + private readonly CacheAffinityManager _affinity; + /** Keep binary flag. */ private readonly bool _keepBinary; @@ -44,7 +44,7 @@ internal sealed class PlatformCache : IPlatformCache private readonly Func _affinityTopologyVersionFunc; /** Underlying map. */ - private readonly ConcurrentDictionary> _map = + private readonly ConcurrentDictionary> _map = new ConcurrentDictionary>(); /** Stopped flag. */ @@ -52,9 +52,9 @@ internal sealed class PlatformCache : IPlatformCache /// /// Initializes a new instance of the class. - /// Called via reflection from . + /// Called via reflection from . 
/// - public PlatformCache(Func affinityTopologyVersionFunc, CacheAffinityImpl affinity, bool keepBinary) + public PlatformCache(Func affinityTopologyVersionFunc, CacheAffinityManager affinity, bool keepBinary) { _affinityTopologyVersionFunc = affinityTopologyVersionFunc; _affinity = affinity; @@ -78,7 +78,7 @@ public bool TryGetValue(TKey key, out TVal val) PlatformCacheEntry entry; var key0 = (TK) (object) key; - + if (_map.TryGetValue(key0, out entry)) { if (IsValid(entry)) @@ -106,7 +106,7 @@ public int GetSize(int? partition) } var count = 0; - + foreach (var e in _map) { if (!IsValid(e.Value)) @@ -118,7 +118,7 @@ public int GetSize(int? partition) { continue; } - + count++; } @@ -179,7 +179,7 @@ public void Stop() _stopped = true; Clear(); } - + /** */ public void Clear() { @@ -227,19 +227,19 @@ public IEnumerable> GetEntries(int? partitio /// When primary node changes for a key, GridNearCacheEntry stops receiving updates for that key, /// because reader ("subscription") on new primary is not yet established. /// - /// This method is similar to GridNearCacheEntry.valid(). + /// This method is similar to GridNearCacheEntry.valid(). /// /// Entry to validate. /// Value type. /// True if entry is valid and can be returned to the user; false otherwise. private bool IsValid(PlatformCacheEntry entry) { - // See comments on _affinityTopologyVersionFunc about boxed copy approach. + // See comments on _affinityTopologyVersionFunc about boxed copy approach. var currentVerBoxed = _affinityTopologyVersionFunc(); var entryVerBoxed = entry.Version; - + Debug.Assert(currentVerBoxed != null); - + if (ReferenceEquals(currentVerBoxed, entryVerBoxed)) { // Happy path: true on stable topology. @@ -267,7 +267,7 @@ private bool IsValid(PlatformCacheEntry entry) return valid; } - + /// /// Gets boxed affinity version. Reuses existing boxing copy to reduce allocations. 
/// diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Platform/PlatformCacheManager.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Platform/PlatformCacheManager.cs index f1fe492a06c96b..e7f913010fbd49 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Platform/PlatformCacheManager.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Platform/PlatformCacheManager.cs @@ -38,12 +38,12 @@ internal class PlatformCacheManager /// Holds thread-local key/val pair to be used for updating platform cache. /// internal static readonly ThreadLocal ThreadLocalPair = new ThreadLocal(); - + /// /// Platform caches per cache id. /// Multiple instances can point to the same Ignite cache, - /// and share one instance. - /// + /// and share one instance. + /// private readonly CopyOnWriteConcurrentDictionary _caches = new CopyOnWriteConcurrentDictionary(); @@ -56,9 +56,9 @@ private readonly CopyOnWriteConcurrentDictionary _caches /// Current topology version. Store as object for atomic updates. /// private volatile object _affinityTopologyVersion; - + /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// Ignite. public PlatformCacheManager(IIgniteInternal ignite) @@ -77,7 +77,7 @@ public IPlatformCache GetOrCreatePlatformCache(CacheConfiguration cacheConfigura Debug.Assert(cacheConfiguration != null); var cacheId = BinaryUtils.GetCacheId(cacheConfiguration.Name); - + return _caches.GetOrAdd(cacheId, _ => CreatePlatformCache(cacheConfiguration)); } @@ -89,15 +89,15 @@ public IPlatformCache TryGetPlatformCache(int cacheId) IPlatformCache platformCache; return _caches.TryGetValue(cacheId, out platformCache) ? platformCache : null; } - + /// /// Reads cache entry from a stream and updates the platform cache. 
/// public void Update(int cacheId, IBinaryStream stream, Marshaller marshaller) { - var cache = _caches.GetOrAdd(cacheId, + var cache = _caches.GetOrAdd(cacheId, _ => CreatePlatformCache(_ignite.GetCacheConfiguration(cacheId))); - + cache.Update(stream, marshaller); } @@ -133,7 +133,7 @@ public void OnAffinityTopologyVersionChanged(AffinityTopologyVersion affinityTop { _affinityTopologyVersion = affinityTopologyVersion; } - + /// /// Creates platform cache. /// @@ -141,9 +141,9 @@ private IPlatformCache CreatePlatformCache(CacheConfiguration cacheConfiguration { var platformCfg = cacheConfiguration.PlatformCacheConfiguration; Debug.Assert(platformCfg != null); - + Func affinityTopologyVersionFunc = () => _affinityTopologyVersion; - var affinity = _ignite.GetAffinity(cacheConfiguration.Name); + var affinity = _ignite.GetAffinityManager(cacheConfiguration.Name); var keepBinary = platformCfg.KeepBinary; TypeResolver resolver = null; @@ -164,7 +164,7 @@ private IPlatformCache CreatePlatformCache(CacheConfiguration cacheConfiguration if (resolved == null) { throw new InvalidOperationException(string.Format( - "Can not create .NET Platform Cache: {0}.{1} is invalid. Failed to resolve type: '{2}'", + "Can not create .NET Platform Cache: {0}.{1} is invalid. Failed to resolve type: '{2}'", typeof(PlatformCacheConfiguration).Name, fieldName, typeName)); } @@ -174,16 +174,16 @@ private IPlatformCache CreatePlatformCache(CacheConfiguration cacheConfiguration var keyType = resolve(platformCfg.KeyTypeName, "KeyTypeName"); var valType = resolve(platformCfg.ValueTypeName, "ValueTypeName"); var cacheType = typeof(PlatformCache<,>).MakeGenericType(keyType, valType); - + var platformCache = Activator.CreateInstance( - cacheType, - affinityTopologyVersionFunc, + cacheType, + affinityTopologyVersionFunc, affinity, keepBinary); - + return (IPlatformCache) platformCache; } - + /// /// Handles client disconnect. 
/// diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Query/Continuous/ContinuousQueryHandleImpl.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Query/Continuous/ContinuousQueryHandleImpl.cs index 6c9012bd1d1b7e..7918d5b6835c7a 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Query/Continuous/ContinuousQueryHandleImpl.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Query/Continuous/ContinuousQueryHandleImpl.cs @@ -108,6 +108,7 @@ public ContinuousQueryHandleImpl(ContinuousQuery qry, Marshaller marsh, { writer.WriteLong(_hnd); writer.WriteBoolean(qry.Local); + writer.WriteBoolean(qry.IncludeExpired); writer.WriteBoolean(_filter != null); var javaFilter = _filter as PlatformJavaObjectFactoryProxy; diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Query/Continuous/ContinuousQueryUtils.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Query/Continuous/ContinuousQueryUtils.cs index fc93c48473e7db..c34a25ee4233e7 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Query/Continuous/ContinuousQueryUtils.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Query/Continuous/ContinuousQueryUtils.cs @@ -94,6 +94,8 @@ private static ICacheEntryEvent ReadEvent0(BinaryReader reader) return new CacheEntryUpdateEvent(key, oldVal, val); case 2: return new CacheEntryRemoveEvent(key, oldVal); + case 3: + return new CacheEntryExpireEvent(key, oldVal); default: throw new NotSupportedException(eventType.ToString()); } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheClient.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheClient.cs index 2f27bcfc3a8e07..bfb435ede7dfb4 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheClient.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheClient.cs @@ -331,7 +331,7 @@ public CacheResult GetAndReplace(TK key, TV val) 
IgniteArgumentCheck.NotNull(val, "val"); _ignite.Transactions.StartTxIfNeeded(); - + return DoOutInOpAffinity(ClientOp.CacheGetAndReplace, key, val, UnmarshalCacheResult); } @@ -951,6 +951,22 @@ private static void WriteSqlFieldsQuery(IBinaryRawWriter writer, SqlFieldsQuery writer.WriteBoolean(qry.Lazy); writer.WriteTimeSpanAsLong(qry.Timeout); writer.WriteBoolean(includeColumns); + + if (qry.Partitions != null) + { + writer.WriteInt(qry.Partitions.Length); + + foreach (var part in qry.Partitions) + { + writer.WriteInt(part); + } + } + else + { + writer.WriteInt(-1); + } + + writer.WriteInt(qry.UpdateBatchSize); } /// @@ -1085,7 +1101,7 @@ private void WriteContinuousQuery(ClientRequestContext ctx, ContinuousQueryClien var w = ctx.Writer; w.WriteInt(continuousQuery.BufferSize); w.WriteLong((long) continuousQuery.TimeInterval.TotalMilliseconds); - w.WriteBoolean(false); // Include expired. + w.WriteBoolean(continuousQuery.IncludeExpired); if (continuousQuery.Filter == null) { diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/ClientBitmaskFeature.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/ClientBitmaskFeature.cs index 56303d59c9a277..5b47c20f26a179 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/ClientBitmaskFeature.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/ClientBitmaskFeature.cs @@ -19,6 +19,7 @@ namespace Apache.Ignite.Core.Impl.Client { /// /// Client feature ids. Values represent the index in the bit array. + /// Unsupported flags must be commented out. 
/// internal enum ClientBitmaskFeature { @@ -26,6 +27,9 @@ internal enum ClientBitmaskFeature ExecuteTaskByName = 1, // ClusterStates = 2, ClusterGroupGetNodesEndpoints = 3, - ClusterGroups = 4 + ClusterGroups = 4, + ServiceInvoke = 5, // The flag is not necessary and exists for legacy reasons + // DefaultQueryTimeout = 6, // IGNITE-13692 + QueryPartitionsBatchSize = 7 } -} \ No newline at end of file +} diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/IgniteClient.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/IgniteClient.cs index 7dee984368df89..6f5ec93d2f5f53 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/IgniteClient.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/IgniteClient.cs @@ -229,6 +229,12 @@ public CacheAffinityImpl GetAffinity(string cacheName) throw GetClientNotSupportedException(); } + /** */ + public CacheAffinityManager GetAffinityManager(string cacheName) + { + throw GetClientNotSupportedException(); + } + /** */ public CacheConfiguration GetCacheConfiguration(int cacheId) { diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/IIgniteInternal.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/IIgniteInternal.cs index e62b89c1de1db6..d727908ef03f12 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/IIgniteInternal.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/IIgniteInternal.cs @@ -92,6 +92,13 @@ internal interface IIgniteInternal /// Cache data affinity service. CacheAffinityImpl GetAffinity(string cacheName); + /// + /// Gets internal affinity manager for a given cache. + /// + /// Cache name. + /// Cache affinity manager. + CacheAffinityManager GetAffinityManager(string cacheName); + /// /// Gets cache name by id. 
/// diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Ignite.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Ignite.cs index 9dfa40b480779c..640f47219c3c73 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Ignite.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Ignite.cs @@ -35,6 +35,7 @@ namespace Apache.Ignite.Core.Impl using Apache.Ignite.Core.DataStructures; using Apache.Ignite.Core.Events; using Apache.Ignite.Core.Impl.Binary; + using Apache.Ignite.Core.Impl.Binary.IO; using Apache.Ignite.Core.Impl.Cache; using Apache.Ignite.Core.Impl.Cache.Platform; using Apache.Ignite.Core.Impl.Cluster; @@ -101,7 +102,8 @@ private enum Op SetBaselineAutoAdjustTimeout = 35, GetCacheConfig = 36, GetThreadLocal = 37, - GetOrCreateLock = 38 + GetOrCreateLock = 38, + GetAffinityManager = 39, } /** */ @@ -139,7 +141,7 @@ private enum Op new ConcurrentDictionary(); /** Client reconnect task completion source. */ - private volatile TaskCompletionSource _clientReconnectTaskCompletionSource = + private volatile TaskCompletionSource _clientReconnectTaskCompletionSource = new TaskCompletionSource(); /** Plugin processor. 
*/ @@ -189,7 +191,7 @@ public Ignite(IgniteConfiguration cfg, string name, IPlatformTargetInternal proc SetCompactFooter(); _pluginProcessor = new PluginProcessor(this); - + _platformCacheManager = new PlatformCacheManager(this); } @@ -470,7 +472,7 @@ public ICache GetOrCreateCache(CacheConfiguration configuration, public ICache GetOrCreateCache(CacheConfiguration configuration, NearCacheConfiguration nearConfiguration, PlatformCacheConfiguration platformCacheConfiguration) { - return GetOrCreateCache(configuration, nearConfiguration, platformCacheConfiguration, + return GetOrCreateCache(configuration, nearConfiguration, platformCacheConfiguration, Op.GetOrCreateCacheFromConfig); } @@ -491,7 +493,7 @@ public ICache CreateCache(CacheConfiguration configuration) } /** */ - public ICache CreateCache(CacheConfiguration configuration, + public ICache CreateCache(CacheConfiguration configuration, NearCacheConfiguration nearConfiguration) { return CreateCache(configuration, nearConfiguration, null); @@ -501,14 +503,14 @@ public ICache CreateCache(CacheConfiguration configuration, public ICache CreateCache(CacheConfiguration configuration, NearCacheConfiguration nearConfiguration, PlatformCacheConfiguration platformCacheConfiguration) { - return GetOrCreateCache(configuration, nearConfiguration, platformCacheConfiguration, + return GetOrCreateCache(configuration, nearConfiguration, platformCacheConfiguration, Op.CreateCacheFromConfig); } /// /// Gets or creates the cache. 
/// - private ICache GetOrCreateCache(CacheConfiguration configuration, + private ICache GetOrCreateCache(CacheConfiguration configuration, NearCacheConfiguration nearConfiguration, PlatformCacheConfiguration platformCacheConfiguration, Op op) { IgniteArgumentCheck.NotNull(configuration, "configuration"); @@ -646,10 +648,21 @@ CacheAffinityImpl IIgniteInternal.GetAffinity(string cacheName) IgniteArgumentCheck.NotNull(cacheName, "cacheName"); var aff = DoOutOpObject((int) Op.GetAffinity, w => w.WriteString(cacheName)); - + return new CacheAffinityImpl(aff, false); } + /** */ + public CacheAffinityManager GetAffinityManager(string cacheName) + { + IgniteArgumentCheck.NotNull(cacheName, "cacheName"); + + var mgr = DoOutOpObject((int) Op.GetAffinityManager, + (IBinaryStream s) => s.WriteInt(BinaryUtils.GetCacheId(cacheName))); + + return new CacheAffinityManager(mgr); + } + /** */ public ICacheAffinity GetAffinity(string cacheName) { @@ -918,7 +931,7 @@ public void DisableWal(string cacheName) public void EnableWal(string cacheName) { IgniteArgumentCheck.NotNull(cacheName, "cacheName"); - + DoOutOp((int) Op.EnableWal, w => w.WriteString(cacheName)); } @@ -933,7 +946,7 @@ public bool IsWalEnabled(string cacheName) /** */ public void SetTxTimeoutOnPartitionMapExchange(TimeSpan timeout) { - DoOutOp((int) Op.SetTxTimeoutOnPartitionMapExchange, + DoOutOp((int) Op.SetTxTimeoutOnPartitionMapExchange, (BinaryWriter w) => w.WriteLong((long) timeout.TotalMilliseconds)); } @@ -1005,7 +1018,7 @@ public IIgniteLock GetOrCreateLock(string name) { Name = name }; - + return GetOrCreateLock(configuration, true); } @@ -1014,7 +1027,7 @@ public IIgniteLock GetOrCreateLock(LockConfiguration configuration, bool create) { IgniteArgumentCheck.NotNull(configuration, "configuration"); IgniteArgumentCheck.NotNullOrEmpty(configuration.Name, "configuration.Name"); - + // Create a copy to ignore modifications from outside. 
var cfg = new LockConfiguration(configuration); @@ -1025,7 +1038,7 @@ public IIgniteLock GetOrCreateLock(LockConfiguration configuration, bool create) w.WriteBoolean(configuration.IsFair); w.WriteBoolean(create); }); - + return target == null ? null : new IgniteLock(target, cfg); } @@ -1123,13 +1136,13 @@ public void UpdateNodeInfo(long memPtr) internal ITransactions GetTransactionsWithLabel(string label) { Debug.Assert(label != null); - + var platformTargetInternal = DoOutOpObject((int) Op.GetTransactions, s => { var w = BinaryUtils.Marshaller.StartMarshal(s); w.WriteString(label); }); - + return new TransactionsImpl(this, platformTargetInternal, GetLocalNode().Id, label); } @@ -1163,7 +1176,7 @@ internal void OnClientDisconnected() // Raise events. _clientReconnectTaskCompletionSource = new TaskCompletionSource(); - + var handler = ClientDisconnected; if (handler != null) handler.Invoke(this, EventArgs.Empty); @@ -1176,7 +1189,7 @@ internal void OnClientDisconnected() internal void OnClientReconnected(bool clusterRestarted) { _marsh.OnClientReconnected(clusterRestarted); - + _clientReconnectTaskCompletionSource.TrySetResult(clusterRestarted); var handler = ClientReconnected; diff --git a/modules/platforms/dotnet/Apache.Ignite.DotNetCore.sln.DotSettings b/modules/platforms/dotnet/Apache.Ignite.DotNetCore.sln.DotSettings index 59bf2e5dd4a9be..c39aa648d77630 100644 --- a/modules/platforms/dotnet/Apache.Ignite.DotNetCore.sln.DotSettings +++ b/modules/platforms/dotnet/Apache.Ignite.DotNetCore.sln.DotSettings @@ -8,6 +8,7 @@ DO_NOT_SHOW True False + True True True True diff --git a/modules/platforms/dotnet/Apache.Ignite.Linq/Apache.Ignite.Linq.csproj b/modules/platforms/dotnet/Apache.Ignite.Linq/Apache.Ignite.Linq.csproj index 60a2f24ea7cd05..d71d1f0dbd6474 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Linq/Apache.Ignite.Linq.csproj +++ b/modules/platforms/dotnet/Apache.Ignite.Linq/Apache.Ignite.Linq.csproj @@ -15,7 +15,7 @@ true bin\Debug\ - DEBUG;TRACE + 
DEBUG;TRACE;CODE_ANALYSIS full AnyCPU prompt diff --git a/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheFieldsQueryExecutor.cs b/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheFieldsQueryExecutor.cs index 43d8e64c8454a4..780dd4e6dd4b38 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheFieldsQueryExecutor.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheFieldsQueryExecutor.cs @@ -38,7 +38,7 @@ internal class CacheFieldsQueryExecutor : IQueryExecutor { /** */ private readonly ICacheInternal _cache; - + /** */ private readonly QueryOptions _options; @@ -210,7 +210,9 @@ internal SqlFieldsQuery GetFieldsQuery(string text, object[] args) Colocated = _options.Colocated, Local = _options.Local, Arguments = args, - Lazy = _options.Lazy + Lazy = _options.Lazy, + UpdateBatchSize = _options.UpdateBatchSize, + Partitions = _options.Partitions }; } @@ -297,4 +299,4 @@ private static Func GetCompiledCtor(ConstructorInfo }); } } -} \ No newline at end of file +} diff --git a/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheQueryExpressionVisitor.cs b/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheQueryExpressionVisitor.cs index cc14260bc280b7..91bde9a0a9d218 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheQueryExpressionVisitor.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheQueryExpressionVisitor.cs @@ -334,6 +334,7 @@ protected override Expression VisitMember(MemberExpression expression) return expression; } + /// /// Gets the name of the field from a member expression, with quotes when necessary. 
/// @@ -515,9 +516,8 @@ protected override Expression VisitNew(NewExpression expression) [SuppressMessage("Microsoft.Design", "CA1062:Validate arguments of public methods")] protected override Expression VisitInvocation(InvocationExpression expression) { - VisitArguments(expression.Arguments); - - return expression; + throw new NotSupportedException("The LINQ expression '" + expression + + "' could not be translated. Either rewrite the query in a form that can be translated, or switch to client evaluation explicitly by inserting a call to either AsEnumerable() or ToList()."); } /** */ diff --git a/modules/platforms/dotnet/Apache.Ignite.Linq/QueryOptions.cs b/modules/platforms/dotnet/Apache.Ignite.Linq/QueryOptions.cs index 994baf26483405..3249835df53b75 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Linq/QueryOptions.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Linq/QueryOptions.cs @@ -19,6 +19,7 @@ namespace Apache.Ignite.Linq { using System; using System.ComponentModel; + using System.Diagnostics.CodeAnalysis; using Apache.Ignite.Core.Cache.Configuration; using Apache.Ignite.Core.Cache.Query; @@ -30,16 +31,20 @@ public class QueryOptions /// Default page size. public const int DefaultPageSize = SqlFieldsQuery.DefaultPageSize; + /// Default value for . + public const int DefaultUpdateBatchSize = SqlFieldsQuery.DefaultUpdateBatchSize; + /// /// Initializes a new instance of the class. /// public QueryOptions() { PageSize = DefaultPageSize; + UpdateBatchSize = DefaultUpdateBatchSize; } /// - /// Local flag. When set query will be executed only on local node, so only local + /// Local flag. When set query will be executed only on local node, so only local /// entries will be returned as query result. /// /// Defaults to false. @@ -53,7 +58,7 @@ public QueryOptions() public int PageSize { get; set; } /// - /// Gets or sets the name of the table. + /// Gets or sets the name of the table. /// /// Table name is equal to short class name of a cache value. 
/// When a cache has only one type of values, or only one defined, @@ -124,5 +129,20 @@ public QueryOptions() /// consumption at the cost of moderate performance hit. /// public bool Lazy { get; set; } + + /// + /// Gets or sets partitions for the query. + /// + /// The query will be executed only on nodes which are primary for specified partitions. + /// + [SuppressMessage("Microsoft.Performance", "CA1819:PropertiesShouldNotReturnArrays")] + public int[] Partitions { get; set; } + + /// + /// Gets or sets batch size for update queries. + /// + /// Default is 1 (. + /// + public int UpdateBatchSize { get; set; } } } diff --git a/modules/rest-http/src/main/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/GridJettyRestHandler.java b/modules/rest-http/src/main/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/GridJettyRestHandler.java index 42b8a305eab2c2..421bf782c43ae8 100644 --- a/modules/rest-http/src/main/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/GridJettyRestHandler.java +++ b/modules/rest-http/src/main/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/GridJettyRestHandler.java @@ -464,7 +464,7 @@ private void processRequest(String act, HttpServletRequest req, HttpServletRespo if (sesTok != null) cmdRes.setSessionToken(U.byteArray2HexString(sesTok)); - res.setStatus(HttpServletResponse.SC_OK); + res.setStatus(cmdRes.getSuccessStatus() == GridRestResponse.SERVICE_UNAVAILABLE ? 
HttpServletResponse.SC_SERVICE_UNAVAILABLE : HttpServletResponse.SC_OK); } catch (Throwable e) { res.setStatus(HttpServletResponse.SC_OK); @@ -720,7 +720,8 @@ private void processRequest(String act, HttpServletRequest req, HttpServletRespo case DATA_REGION_METRICS: case DATA_STORAGE_METRICS: case NAME: - case VERSION: { + case VERSION: + case PROBE: { restReq = new GridRestRequest(); break; diff --git a/modules/spring/src/test/config/enc/enc-cache-client.xml b/modules/spring/src/test/config/enc/enc-cache-client.xml index ba4068a4606a7c..6ebef07a3a84d3 100644 --- a/modules/spring/src/test/config/enc/enc-cache-client.xml +++ b/modules/spring/src/test/config/enc/enc-cache-client.xml @@ -23,7 +23,7 @@ http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd"> - + diff --git a/modules/spring/src/test/java/org/apache/ignite/encryption/SpringEncryptedCacheRestartTest.java b/modules/spring/src/test/java/org/apache/ignite/encryption/SpringEncryptedCacheRestartTest.java index bad97a504032a2..8866ab64fe440b 100644 --- a/modules/spring/src/test/java/org/apache/ignite/encryption/SpringEncryptedCacheRestartTest.java +++ b/modules/spring/src/test/java/org/apache/ignite/encryption/SpringEncryptedCacheRestartTest.java @@ -24,6 +24,7 @@ import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.IgnitionEx; import org.apache.ignite.internal.encryption.EncryptedCacheRestartTest; +import org.apache.ignite.internal.managers.encryption.GroupKey; import org.apache.ignite.internal.processors.cache.IgniteInternalCache; import org.apache.ignite.internal.util.IgniteUtils; import org.apache.ignite.internal.util.typedef.T2; @@ -81,13 +82,21 @@ public void testEncryptionKeysEqualsOnThirdNodeJoin() throws Exception { int grpId = CU.cacheGroupId(enc.name(), enc.configuration().getGroupName()); - KeystoreEncryptionKey key0 = (KeystoreEncryptionKey)g.get1().context().encryption().groupKey(grpId); - KeystoreEncryptionKey key1 = 
(KeystoreEncryptionKey)g.get2().context().encryption().groupKey(grpId); - KeystoreEncryptionKey key2 = (KeystoreEncryptionKey)g2.context().encryption().groupKey(grpId); + GroupKey grpKey0 = g.get1().context().encryption().groupKey(grpId); + GroupKey grpKey1 = g.get2().context().encryption().groupKey(grpId); + GroupKey grpKey2 = g2.context().encryption().groupKey(grpId); - assertNotNull(cacheName, key0); - assertNotNull(cacheName, key1); - assertNotNull(cacheName, key2); + assertNotNull(cacheName, grpKey0); + assertNotNull(cacheName, grpKey1); + assertNotNull(cacheName, grpKey2); + + KeystoreEncryptionKey key0 = (KeystoreEncryptionKey)grpKey0.key(); + KeystoreEncryptionKey key1 = (KeystoreEncryptionKey)grpKey1.key(); + KeystoreEncryptionKey key2 = (KeystoreEncryptionKey)grpKey2.key(); + + assertNotNull(cacheName, key0.key()); + assertNotNull(cacheName, key1.key()); + assertNotNull(cacheName, key2.key()); assertNotNull(cacheName, key0.key()); assertNotNull(cacheName, key1.key()); @@ -121,15 +130,23 @@ public void testCreateEncryptedCacheGroup() throws Exception { assertNotNull(encrypted2); - KeystoreEncryptionKey key = (KeystoreEncryptionKey)g0.context().encryption().groupKey( + GroupKey grpKey = g0.context().encryption().groupKey( CU.cacheGroupId(encrypted.name(), encrypted.configuration().getGroupName())); + assertNotNull(grpKey); + + KeystoreEncryptionKey key = (KeystoreEncryptionKey)grpKey.key(); + assertNotNull(key); assertNotNull(key.key()); - KeystoreEncryptionKey key2 = (KeystoreEncryptionKey)g0.context().encryption().groupKey( + GroupKey grpKey2 = g0.context().encryption().groupKey( CU.cacheGroupId(encrypted2.name(), encrypted2.configuration().getGroupName())); + assertNotNull(grpKey2); + + KeystoreEncryptionKey key2 = (KeystoreEncryptionKey)grpKey2.key(); + assertNotNull(key2); assertNotNull(key2.key()); diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpi.java 
b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpi.java index 5cdfa581c18178..ec2dc805f6aa7f 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpi.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpi.java @@ -31,6 +31,7 @@ import org.apache.ignite.internal.IgniteFeatures; import org.apache.ignite.internal.managers.discovery.IgniteDiscoverySpi; import org.apache.ignite.internal.managers.discovery.IgniteDiscoverySpiInternalListener; +import org.apache.ignite.internal.processors.metric.MetricRegistry; import org.apache.ignite.internal.util.tostring.GridToStringExclude; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.internal.A; @@ -41,6 +42,7 @@ import org.apache.ignite.resources.LoggerResource; import org.apache.ignite.spi.IgniteSpiAdapter; import org.apache.ignite.spi.IgniteSpiConfiguration; +import org.apache.ignite.spi.IgniteSpiContext; import org.apache.ignite.spi.IgniteSpiException; import org.apache.ignite.spi.IgniteSpiMBeanAdapter; import org.apache.ignite.spi.IgniteSpiMultipleInstancesSupport; @@ -62,6 +64,7 @@ import static org.apache.ignite.IgniteSystemProperties.IGNITE_CONSISTENT_ID_BY_HOST_WITHOUT_PORT; import static org.apache.ignite.IgniteSystemProperties.getBoolean; +import static org.apache.ignite.internal.managers.discovery.GridDiscoveryManager.DISCO_METRICS; /** * Zookeeper Discovery Spi. 
@@ -489,6 +492,17 @@ public DiscoverySpiNodeAuthenticator getAuthenticator() { } } + /** {@inheritDoc} */ + @Override protected void onContextInitialized0(IgniteSpiContext spiCtx) throws IgniteSpiException { + super.onContextInitialized0(spiCtx); + + MetricRegistry discoReg = (MetricRegistry)getSpiContext().getOrCreateMetricRegistry(DISCO_METRICS); + + stats.registerMetrics(discoReg); + + discoReg.register("Coordinator", () -> impl.getCoordinator(), UUID.class, "Coordinator ID"); + } + /** {@inheritDoc} */ @Override public void setInternalListener(IgniteDiscoverySpiInternalListener lsnr) { if (impl != null) @@ -595,7 +609,7 @@ public ZookeeperDiscoverySpiMBeanImpl(IgniteSpiAdapter spiAdapter) { /** {@inheritDoc} */ @Override public long getNodesLeft() { - return 0; + return stats.leftNodesCnt(); } /** {@inheritDoc} */ diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractCallabck.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractCallback.java similarity index 92% rename from modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractCallabck.java rename to modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractCallback.java index b80a9ddbf129ab..427a81c27eac40 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractCallabck.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractCallback.java @@ -22,12 +22,12 @@ /** * */ -abstract class ZkAbstractCallabck { +abstract class ZkAbstractCallback { /** */ final ZkRuntimeState rtState; /** */ - private final ZookeeperDiscoveryImpl impl; + final ZookeeperDiscoveryImpl impl; /** */ private final GridSpinBusyLock busyLock; @@ -36,7 +36,7 @@ abstract class ZkAbstractCallabck { * @param rtState Runtime state. * @param impl Discovery impl. 
*/ - ZkAbstractCallabck(ZkRuntimeState rtState, ZookeeperDiscoveryImpl impl) { + ZkAbstractCallback(ZkRuntimeState rtState, ZookeeperDiscoveryImpl impl) { this.rtState = rtState; this.impl = impl; diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractChildrenCallback.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractChildrenCallback.java index 2292e350560416..dc680f329df45f 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractChildrenCallback.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractChildrenCallback.java @@ -24,7 +24,7 @@ /** * */ -abstract class ZkAbstractChildrenCallback extends ZkAbstractCallabck implements AsyncCallback.Children2Callback { +abstract class ZkAbstractChildrenCallback extends ZkAbstractCallback implements AsyncCallback.Children2Callback { /** * @param rtState Runtime state. * @param impl Discovery impl. diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractWatcher.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractWatcher.java index 9098d0520a52ab..37e65e5b90457e 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractWatcher.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractWatcher.java @@ -23,7 +23,7 @@ /** * */ -abstract class ZkAbstractWatcher extends ZkAbstractCallabck implements Watcher { +abstract class ZkAbstractWatcher extends ZkAbstractCallback implements Watcher { /** * @param rtState Runtime state. * @param impl Discovery impl. 
diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDiscoveryEventData.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDiscoveryEventData.java index d667a17f6643cd..2bc49e52525228 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDiscoveryEventData.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDiscoveryEventData.java @@ -30,7 +30,7 @@ abstract class ZkDiscoveryEventData implements Serializable { static final byte ZK_EVT_NODE_JOIN = 1; /** */ - static final byte ZK_EVT_NODE_FAILED = 2; + static final byte ZK_EVT_NODE_LEFT = 2; /** */ static final byte ZK_EVT_CUSTOM_EVT = 3; @@ -59,7 +59,7 @@ abstract class ZkDiscoveryEventData implements Serializable { * @param topVer Topology version. */ ZkDiscoveryEventData(long evtId, byte evtType, long topVer) { - assert evtType == ZK_EVT_NODE_JOIN || evtType == ZK_EVT_NODE_FAILED || evtType == ZK_EVT_CUSTOM_EVT : evtType; + assert evtType == ZK_EVT_NODE_JOIN || evtType == ZK_EVT_NODE_LEFT || evtType == ZK_EVT_CUSTOM_EVT : evtType; this.evtId = evtId; this.evtType = evtType; diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDiscoveryNodeFailEventData.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDiscoveryNodeLeaveEventData.java similarity index 53% rename from modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDiscoveryNodeFailEventData.java rename to modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDiscoveryNodeLeaveEventData.java index c76158ff090a77..77d1157aa4ba80 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDiscoveryNodeFailEventData.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDiscoveryNodeLeaveEventData.java @@ -20,36 +20,59 @@ /** * */ -class 
ZkDiscoveryNodeFailEventData extends ZkDiscoveryEventData { +class ZkDiscoveryNodeLeaveEventData extends ZkDiscoveryEventData { /** */ private static final long serialVersionUID = 0L; /** */ - private long failedNodeInternalId; + private final long leftNodeInternalId; + + /** */ + private final boolean failed; /** * @param evtId Event ID. * @param topVer Topology version. - * @param failedNodeInternalId Failed node ID. + * @param leftNodeInternalId Left node ID. */ - ZkDiscoveryNodeFailEventData(long evtId, long topVer, long failedNodeInternalId) { - super(evtId, ZK_EVT_NODE_FAILED, topVer); + ZkDiscoveryNodeLeaveEventData(long evtId, long topVer, long leftNodeInternalId) { + this(evtId, topVer, leftNodeInternalId, false); + } - this.failedNodeInternalId = failedNodeInternalId; + /** + * @param evtId Event ID. + * @param topVer Topology version. + * @param leftNodeInternalId Left node ID. + */ + ZkDiscoveryNodeLeaveEventData(long evtId, long topVer, long leftNodeInternalId, boolean failed) { + super(evtId, ZK_EVT_NODE_LEFT, topVer); + + this.leftNodeInternalId = leftNodeInternalId; + + this.failed = failed; + } + + /** + * @return Left node ID. + */ + long leftNodeInternalId() { + return leftNodeInternalId; } /** - * @return Failed node ID. + * + * @return {@code true} if failed.
*/ - long failedNodeInternalId() { - return failedNodeInternalId; + boolean failed() { + return failed; } /** {@inheritDoc} */ @Override public String toString() { - return "ZkDiscoveryNodeFailEventData [" + + return "ZkDiscoveryNodeLeaveEventData [" + "evtId=" + eventId() + ", topVer=" + topologyVersion() + - ", nodeId=" + failedNodeInternalId + ']'; + ", nodeId=" + leftNodeInternalId + + ", failed=" + failed + ']'; } } diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkIgnitePaths.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkIgnitePaths.java index 4e542549adef01..02e9d36a946570 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkIgnitePaths.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkIgnitePaths.java @@ -44,6 +44,9 @@ public class ZkIgnitePaths { /** Directory to store acknowledge messages for custom events. */ private static final String CUSTOM_EVTS_ACKS_DIR = "ca"; + /** Directory to store node's stopped flags. */ + private static final String STOPPED_NODES_FLAGS_DIR = "sf"; + /** Directory to store EPHEMERAL znodes for alive cluster nodes. */ static final String ALIVE_NODES_DIR = "n"; @@ -71,6 +74,9 @@ public class ZkIgnitePaths { /** */ final String customEvtsAcksDir; + /** */ + final String stoppedNodesFlagsDir; + /** * @param zkRootPath Base Zookeeper directory for all Ignite nodes. */ @@ -83,6 +89,7 @@ public class ZkIgnitePaths { customEvtsDir = zkPath(CUSTOM_EVTS_DIR); customEvtsPartsDir = zkPath(CUSTOM_EVTS_PARTS_DIR); customEvtsAcksDir = zkPath(CUSTOM_EVTS_ACKS_DIR); + stoppedNodesFlagsDir = zkPath(STOPPED_NODES_FLAGS_DIR); } /** @@ -90,7 +97,7 @@ public class ZkIgnitePaths { * @return Full path. */ private String zkPath(String path) { - return clusterDir + "/" + path; + return join(clusterDir, path); } /** @@ -99,7 +106,7 @@ private String zkPath(String path) { * @return Path. 
*/ String joiningNodeDataPath(UUID nodeId, UUID prefixId) { - return joinDataDir + '/' + prefixId + ":" + nodeId.toString(); + return join(joinDataDir, prefixId + ":" + nodeId.toString()); } /** @@ -109,7 +116,7 @@ String joiningNodeDataPath(UUID nodeId, UUID prefixId) { static long aliveInternalId(String path) { int idx = path.lastIndexOf('|'); - return Integer.parseInt(path.substring(idx + 1)); + return Long.parseLong(path.substring(idx + 1)); } /** @@ -123,7 +130,7 @@ String aliveNodePathForCreate(String prefix, ZookeeperClusterNode node) { if (node.isClient()) flags |= CLIENT_NODE_FLAG_MASK; - return aliveNodesDir + "/" + prefix + ":" + node.id() + ":" + encodeFlags(flags) + "|"; + return join(aliveNodesDir, prefix + ":" + node.id() + ":" + encodeFlags(flags) + "|"); } /** @@ -155,6 +162,26 @@ static UUID aliveNodeId(String path) { return UUID.fromString(idStr); } + /** + * @param node Leaving node. + * @return Stopped node path. + */ + String nodeStoppedFlag(ZookeeperClusterNode node) { + String path = node.id().toString() + '|' + node.internalId(); + + return join(stoppedNodesFlagsDir, path); + } + + /** + * @param path Leaving flag path. + * @return Stopped node internal id. + */ + static long stoppedFlagNodeInternalId(String path) { + int idx = path.lastIndexOf('|'); + + return Long.parseLong(path.substring(idx + 1)); + } + /** * @param path Event zk path. * @return Event sequence number. @@ -212,7 +239,7 @@ static int customEventPartsCount(String path) { * @return Path. */ String createCustomEventPath(String prefix, UUID nodeId, int partCnt) { - return customEvtsDir + "/" + prefix + ":" + nodeId + ":" + String.format("%04d", partCnt) + '|'; + return join(customEvtsDir, prefix + ":" + nodeId + ":" + String.format("%04d", partCnt) + '|'); } /** @@ -221,7 +248,7 @@ String createCustomEventPath(String prefix, UUID nodeId, int partCnt) { * @return Path. 
*/ String customEventPartsBasePath(String prefix, UUID nodeId) { - return customEvtsPartsDir + "/" + prefix + ":" + nodeId + ":"; + return join(customEvtsPartsDir, prefix + ":" + nodeId + ":"); } /** @@ -239,7 +266,7 @@ String customEventPartPath(String prefix, UUID nodeId, int part) { * @return Event zk path. */ String joinEventDataPathForJoined(long evtId) { - return evtsPath + "/fj-" + evtId; + return join(evtsPath,"fj-" + evtId); } /** @@ -247,7 +274,7 @@ String joinEventDataPathForJoined(long evtId) { * @return Event zk path. */ String joinEventSecuritySubjectPath(long topVer) { - return evtsPath + "/s-" + topVer; + return join(evtsPath, "s-" + topVer); } /** @@ -257,7 +284,7 @@ String joinEventSecuritySubjectPath(long topVer) { String ackEventDataPath(long origEvtId) { assert origEvtId != 0; - return customEvtsAcksDir + "/" + String.valueOf(origEvtId); + return join(customEvtsAcksDir, String.valueOf(origEvtId)); } /** @@ -265,7 +292,7 @@ String ackEventDataPath(long origEvtId) { * @return Future path. */ String distributedFutureBasePath(UUID id) { - return evtsPath + "/f-" + id; + return join(evtsPath, "f-" + id); } /** @@ -273,7 +300,7 @@ String distributedFutureBasePath(UUID id) { * @return Future path. */ String distributedFutureResultPath(UUID id) { - return evtsPath + "/fr-" + id; + return join(evtsPath, "fr-" + id); } /** @@ -305,6 +332,14 @@ private static byte aliveFlags(String path) { return (byte)(Integer.parseInt(flagsStr, 16) - 128); } + /** + * @param paths Paths to join. + * @return Paths joined with separator. + */ + public static String join(String... paths) { + return String.join(PATH_SEPARATOR, paths); + } + /** * Validate the provided znode path string. 
* diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkRunnable.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkRunnable.java index 965bdc0f458510..1be63e02c3e0ec 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkRunnable.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkRunnable.java @@ -20,7 +20,7 @@ /** * Zk Runnable. */ -public abstract class ZkRunnable extends ZkAbstractCallabck implements Runnable { +public abstract class ZkRunnable extends ZkAbstractCallback implements Runnable { /** * @param rtState Runtime state. * @param impl Discovery impl. diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkRuntimeState.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkRuntimeState.java index 0aac6db91e3366..547c185642d3b0 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkRuntimeState.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkRuntimeState.java @@ -39,7 +39,7 @@ class ZkRuntimeState { final boolean reconnect; /** */ - ZookeeperClient zkClient; + volatile ZookeeperClient zkClient; /** */ long internalOrder; diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperClient.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperClient.java index 7e1bb9af29a9cc..e98bc01199a918 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperClient.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperClient.java @@ -441,6 +441,32 @@ String createIfNeeded(String path, byte[] data, CreateMode createMode) } } + /** + * @param path Path. + * @param data Data. + * @param createMode Create mode. + * @return Created path. 
+ * @throws KeeperException In case of zookeeper error. + * @throws InterruptedException If interrupted. + */ + String createIfNeededNoRetry(String path, byte[] data, CreateMode createMode) + throws KeeperException, InterruptedException { + assert !createMode.isSequential() : createMode; + + if (data == null) + data = EMPTY_BYTES; + + try { + return zk.create(path, data, ZK_ACL, createMode); + } + catch (KeeperException.NodeExistsException e) { + if (log.isDebugEnabled()) + log.debug("Node already exists: " + path); + + return path; + } + } + /** * @param checkPrefix Unique prefix to check in case of retry. * @param parentPath Parent node path. diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryImpl.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryImpl.java index e9196f2663c1b6..279aa1d80ea07f 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryImpl.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryImpl.java @@ -54,6 +54,7 @@ import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CommunicationFailureResolver; import org.apache.ignite.events.EventType; +import org.apache.ignite.events.NodeValidationFailedEvent; import org.apache.ignite.internal.IgniteClientDisconnectedCheckedException; import org.apache.ignite.internal.IgniteFeatures; import org.apache.ignite.internal.IgniteFutureTimeoutCheckedException; @@ -101,6 +102,7 @@ import static org.apache.ignite.events.EventType.EVT_CLIENT_NODE_RECONNECTED; import static org.apache.ignite.events.EventType.EVT_NODE_FAILED; import static org.apache.ignite.events.EventType.EVT_NODE_JOINED; +import static org.apache.ignite.events.EventType.EVT_NODE_LEFT; import static org.apache.ignite.events.EventType.EVT_NODE_SEGMENTED; import static 
org.apache.ignite.internal.IgniteNodeAttributes.ATTR_IGNITE_INSTANCE_NAME; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_SECURITY_CREDENTIALS; @@ -191,7 +193,7 @@ public class ZookeeperDiscoveryImpl { private IgniteThreadPoolExecutor utilityPool; /** */ - private ZkRuntimeState rtState; + private volatile ZkRuntimeState rtState; /** */ private volatile ConnectionState connState = ConnectionState.STARTED; @@ -296,7 +298,9 @@ public ClusterNode localNode() { @Nullable public ZookeeperClusterNode node(UUID nodeId) { assert nodeId != null; - return rtState.top.nodesById.get(nodeId); + ZkRuntimeState rtState0 = rtState; + + return rtState0 != null ? rtState0.top.nodesById.get(nodeId) : null; } /** @@ -306,7 +310,9 @@ public ClusterNode localNode() { @Nullable public ZookeeperClusterNode node(long nodeOrder) { assert nodeOrder > 0 : nodeOrder; - return rtState.top.nodesByOrder.get(nodeOrder); + ZkRuntimeState rtState0 = rtState; + + return rtState0 != null ? rtState0.top.nodesByOrder.get(nodeOrder) : null; } /** @@ -854,7 +860,8 @@ private void initZkNodes() throws InterruptedException { zkPaths.customEvtsDir, zkPaths.customEvtsPartsDir, zkPaths.customEvtsAcksDir, - zkPaths.aliveNodesDir}; + zkPaths.aliveNodesDir, + zkPaths.stoppedNodesFlagsDir}; List dirs = new ArrayList<>(); @@ -1009,7 +1016,7 @@ private void startJoin(ZkRuntimeState rtState, @Nullable ZkRuntimeState prevStat final int OVERHEAD = 5; // TODO ZK: https://issues.apache.org/jira/browse/IGNITE-8193 - String joinDataPath = zkPaths.joinDataDir + "/" + prefix + ":" + locNode.id(); + String joinDataPath = ZkIgnitePaths.join(zkPaths.joinDataDir, prefix + ":" + locNode.id()); if (zkClient.needSplitNodeData(joinDataPath, joinDataBytes, OVERHEAD)) { List parts = zkClient.splitNodeData(joinDataPath, joinDataBytes, OVERHEAD); @@ -1379,7 +1386,7 @@ private void checkIsCoordinator(final List aliveNodes) throws Exception PreviousNodeWatcher watcher = new ServerPreviousNodeWatcher(rtState); -
rtState.zkClient.existsAsync(zkPaths.aliveNodesDir + "/" + prevE.getValue(), watcher, watcher); + rtState.zkClient.existsAsync(ZkIgnitePaths.join(zkPaths.aliveNodesDir, prevE.getValue()), watcher, watcher); } } @@ -1478,7 +1485,7 @@ private void checkClientsStatus(final List aliveNodes) throws Exception PreviousNodeWatcher watcher = new ClientPreviousNodeWatcher(rtState); - rtState.zkClient.existsAsync(zkPaths.aliveNodesDir + "/" + watchPath, watcher, watcher); + rtState.zkClient.existsAsync(ZkIgnitePaths.join(zkPaths.aliveNodesDir, watchPath), watcher, watcher); } } @@ -1512,6 +1519,16 @@ private void generateNoServersEvent(ZkDiscoveryEventsData evtsData, Stat evtsSta * @throws Exception If failed. */ private void previousCoordinatorCleanup(ZkDiscoveryEventsData lastEvts) throws Exception { + for (String stoppedFlagPath : rtState.zkClient.getChildren(zkPaths.stoppedNodesFlagsDir)) { + long leftIntId = ZkIgnitePaths.stoppedFlagNodeInternalId(stoppedFlagPath); + + if (!rtState.top.nodesByInternalId.containsKey(leftIntId)) { + rtState.zkClient.deleteIfExistsAsync( + ZkIgnitePaths.join(zkPaths.stoppedNodesFlagsDir, stoppedFlagPath) + ); + } + } + for (ZkDiscoveryEventData evtData : lastEvts.evts.values()) { if (evtData instanceof ZkDiscoveryCustomEventData) { ZkDiscoveryCustomEventData evtData0 = (ZkDiscoveryCustomEventData)evtData; @@ -1620,7 +1637,7 @@ private void onBecomeCoordinator(List aliveNodes) throws Exception { private void watchAliveNodeData(String alivePath) { assert rtState.locNodeZkPath != null; - String path = zkPaths.aliveNodesDir + "/" + alivePath; + String path = ZkIgnitePaths.join(zkPaths.aliveNodesDir, alivePath); if (!path.equals(rtState.locNodeZkPath)) rtState.zkClient.getDataAsync(path, rtState.aliveNodeDataWatcher, rtState.aliveNodeDataWatcher); @@ -1642,6 +1659,11 @@ private void generateTopologyEvents(List aliveNodes) throws Exception { rtState.updateAlives = false; } + Set stoppedNodes = new HashSet<>(); + + for (String stoppedFlagPath : 
rtState.zkClient.getChildren(zkPaths.stoppedNodesFlagsDir)) stoppedNodes.add(ZkIgnitePaths.stoppedFlagNodeInternalId(stoppedFlagPath)); + TreeMap alives = new TreeMap<>(); for (String child : aliveNodes) { @@ -1670,7 +1692,7 @@ private void generateTopologyEvents(List aliveNodes) throws Exception { failedNodes.add(failedNode); - generateNodeFail(curTop, failedNode); + generateNodeLeave(curTop, failedNode, !stoppedNodes.contains(failedNode.internalId())); newEvts++; @@ -2031,15 +2053,11 @@ private void processJoinError(String aliveNodePath, String joinDataPath = zkPaths.joiningNodeDataPath(nodeId, prefixId); client.setData(joinDataPath, marshalZip(joinErr), -1); - - client.deleteIfExists(zkPaths.aliveNodesDir + "/" + aliveNodePath, -1); } - else { - if (log.isInfoEnabled()) + else if (log.isInfoEnabled()) log.info("Ignore join data, node was failed by previous coordinator: " + aliveNodePath); - client.deleteIfExists(zkPaths.aliveNodesDir + "/" + aliveNodePath, -1); - } + client.deleteIfExists(ZkIgnitePaths.join(zkPaths.aliveNodesDir, aliveNodePath), -1); } /** @@ -2075,6 +2093,8 @@ private ZkNodeValidateResult validateJoiningNode(ZkJoiningNodeData joiningNodeDa } if (err != null) { + spi.getSpiContext().recordEvent(new NodeValidationFailedEvent(locNode, node, err)); + LT.warn(log, err.message()); res.err = err.sendMessage(); @@ -2180,25 +2200,35 @@ else if (log.isDebugEnabled()) { /** * @param curTop Current topology. - * @param failedNode Failed node. + * @param leftNode Left node. + * @param failed Whether node failed or stopped.
*/ - private void generateNodeFail(TreeMap curTop, ZookeeperClusterNode failedNode) { - Object rmvd = curTop.remove(failedNode.order()); + private void generateNodeLeave( + TreeMap curTop, + ZookeeperClusterNode leftNode, + boolean failed + ) { + Object rmvd = curTop.remove(leftNode.order()); assert rmvd != null; rtState.evtsData.topVer++; rtState.evtsData.evtIdGen++; - ZkDiscoveryNodeFailEventData evtData = new ZkDiscoveryNodeFailEventData( + ZkDiscoveryNodeLeaveEventData evtData = new ZkDiscoveryNodeLeaveEventData( rtState.evtsData.evtIdGen, rtState.evtsData.topVer, - failedNode.internalId()); + leftNode.internalId(), + failed + ); rtState.evtsData.addEvent(curTop.values(), evtData); - if (log.isInfoEnabled()) - log.info("Generated NODE_FAILED event [evt=" + evtData + ']'); + if (log.isInfoEnabled()) { + String evtName = failed ? "NODE_FAILED" : "NODE_LEFT"; + + log.info("Generated " + evtName + " event [evt=" + evtData + ']'); + } } /** @@ -2275,8 +2305,6 @@ private void addJoinedNode( joinCtx.addJoinedNode(nodeEvtData, commonData); rtState.evtsData.onNodeJoin(joinedNode); - - stats.onNodeJoined(); } /** @@ -2389,12 +2417,14 @@ private void cleanupPreviousClusterData(long startInternalOrder) throws Exceptio batch.addAll(client.getChildrenPaths(zkPaths.customEvtsAcksDir)); + batch.addAll(client.getChildrenPaths(zkPaths.stoppedNodesFlagsDir)); + client.deleteAll(batch, -1); if (startInternalOrder > 0) { for (String alive : client.getChildren(zkPaths.aliveNodesDir)) { if (ZkIgnitePaths.aliveInternalId(alive) < startInternalOrder) - client.deleteIfExists(zkPaths.aliveNodesDir + "/" + alive, -1); + client.deleteIfExists(ZkIgnitePaths.join(zkPaths.aliveNodesDir, alive), -1); } } @@ -2423,7 +2453,7 @@ private byte[] readCustomEventData(ZookeeperClient zkClient, String evtPath, UUI return readMultipleParts(zkClient, partsBasePath, partCnt); } else - return zkClient.getData(zkPaths.customEvtsDir + "/" + evtPath); + return 
zkClient.getData(ZkIgnitePaths.join(zkPaths.customEvtsDir, evtPath)); } /** @@ -2594,7 +2624,7 @@ private void deleteAliveNode(long internalId) throws Exception { for (String child : rtState.zkClient.getChildren(zkPaths.aliveNodesDir)) { if (ZkIgnitePaths.aliveInternalId(child) == internalId) { // Need use sync delete to do not process again join of this node again. - rtState.zkClient.deleteIfExists(zkPaths.aliveNodesDir + "/" + child, -1); + rtState.zkClient.deleteIfExists(ZkIgnitePaths.join(zkPaths.aliveNodesDir, child), -1); return; } @@ -2623,7 +2653,7 @@ private void deleteCustomEventDataAsync(ZookeeperClient zkClient, String evtPath } } - zkClient.deleteIfExistsAsync(zkPaths.customEvtsDir + "/" + evtPath); + zkClient.deleteIfExistsAsync(ZkIgnitePaths.join(zkPaths.customEvtsDir, evtPath)); } /** @@ -2690,13 +2720,13 @@ private void processNewEvents(final ZkDiscoveryEventsData evtsData) throws Excep break; } - case ZkDiscoveryEventData.ZK_EVT_NODE_FAILED: { + case ZkDiscoveryEventData.ZK_EVT_NODE_LEFT: { if (!rtState.joined) break; evtProcessed = true; - notifyNodeFail((ZkDiscoveryNodeFailEventData)evtData); + notifyNodeLeave((ZkDiscoveryNodeLeaveEventData)evtData); break; } @@ -2792,9 +2822,12 @@ private void processNewEvents(final ZkDiscoveryEventsData evtsData) throws Excep throw e; } - if (rtState.joined) + if (rtState.joined) { rtState.evtsData = evtsData; + stats.onTopologyChanged(rtState.evtsData.topVer); + } + if (rtState.crd) handleProcessedEvents("procEvt"); else @@ -3204,7 +3237,7 @@ private void deleteAliveNodes(@Nullable GridLongList internalIds) throws Excepti String alive = alives.get(i); if (internalIds.contains(ZkIgnitePaths.aliveInternalId(alive))) - rtState.zkClient.deleteIfExistsAsync(zkPaths.aliveNodesDir + "/" + alive); + rtState.zkClient.deleteIfExistsAsync(ZkIgnitePaths.join(zkPaths.aliveNodesDir, alive)); } } @@ -3527,13 +3560,15 @@ private void notifyNodeJoin(ZkJoinedNodeEvtData joinedEvtData, ZkJoiningNodeData null ) ).get(); + + 
stats.onNodeJoined(); } /** * @param evtData Event data. */ - private void notifyNodeFail(final ZkDiscoveryNodeFailEventData evtData) { - notifyNodeFail(evtData.failedNodeInternalId(), evtData.topologyVersion()); + private void notifyNodeLeave(final ZkDiscoveryNodeLeaveEventData evtData) { + notifyNodeLeave(evtData.leftNodeInternalId(), evtData.topologyVersion(), evtData.failed()); } /** @@ -3541,11 +3576,23 @@ private void notifyNodeFail(final ZkDiscoveryNodeFailEventData evtData) { * @param topVer Topology version. */ private void notifyNodeFail(long nodeInternalOrder, long topVer) { - final ZookeeperClusterNode failedNode = rtState.top.removeNode(nodeInternalOrder); + notifyNodeLeave(nodeInternalOrder, topVer, true); + } + + /** + * @param nodeInternalOrder Node order. + * @param topVer Topology version. + * @param failed {@code true} if node failed, {@code false} otherwise. + */ + private void notifyNodeLeave(long nodeInternalOrder, long topVer, boolean failed) { + final ZookeeperClusterNode leftNode = rtState.top.removeNode(nodeInternalOrder); + + assert leftNode != null && !leftNode.isLocal() : leftNode; - assert failedNode != null && !failedNode.isLocal() : failedNode; + if (!failed && rtState.crd) + rtState.zkClient.deleteIfExistsAsync(zkPaths.nodeStoppedFlag(leftNode)); - PingFuture pingFut = pingFuts.get(failedNode.order()); + PingFuture pingFut = pingFuts.get(leftNode.order()); if (pingFut != null) pingFut.onDone(false); @@ -3554,9 +3601,9 @@ private void notifyNodeFail(long nodeInternalOrder, long topVer) { lsnr.onDiscovery( new DiscoveryNotification( - EVT_NODE_FAILED, + failed ? 
EVT_NODE_FAILED : EVT_NODE_LEFT, topVer, - failedNode, + leftNode, topSnapshot, Collections.emptyMap(), null, @@ -3564,7 +3611,10 @@ private void notifyNodeFail(long nodeInternalOrder, long topVer) { ) ).get(); - stats.onNodeFailed(); + if (failed) + stats.onNodeFailed(); + else + stats.onNodeLeft(); } /** @@ -3680,11 +3730,11 @@ private void handleProcessedEvents(String ctx) throws Exception { break; } - case ZkDiscoveryEventData.ZK_EVT_NODE_FAILED: { + case ZkDiscoveryEventData.ZK_EVT_NODE_LEFT: { if (log.isDebugEnabled()) - log.debug("All nodes processed node fail [evtData=" + evtData + ']'); + log.debug("All nodes processed node left [evtData=" + evtData + ']'); - break; // Do not need addition cleanup. + break; } } @@ -3899,7 +3949,7 @@ void runInWorkerThread(Runnable c) { * */ public void stop() { - stop0(new IgniteSpiException("Node stopped")); + stop0(null); } /** @@ -3913,6 +3963,14 @@ private void stop0(Throwable e) { if (rtState.zkClient != null && rtState.locNodeZkPath != null && rtState.zkClient.connected()) { try { + if (e == null && rtState.joined) { + rtState.zkClient.createIfNeededNoRetry( + zkPaths.nodeStoppedFlag(locNode), + null, + PERSISTENT + ); + } + rtState.zkClient.deleteIfExistsNoRetry(rtState.locNodeZkPath, -1); } catch (Exception err) { @@ -4582,7 +4640,12 @@ enum ConnectionState { /** */ public UUID getCoordinator() { - return rtState.top.nodesByOrder.values().stream() + ZkRuntimeState rtState0 = rtState; + + if (rtState0 == null) + return null; + + return rtState0.top.nodesByOrder.values().stream() .filter(n -> !n.isClient() && !n.isDaemon()) .map(ZookeeperClusterNode::id) .findFirst() @@ -4591,12 +4654,14 @@ public UUID getCoordinator() { /** */ public String getSpiState() { - return rtState.zkClient.state(); + return connState.toString(); } /** */ public String getZkSessionId() { - if (rtState.zkClient != null && rtState.zkClient.zk() != null) + ZkRuntimeState rtState0 = rtState; + + if (rtState0 != null && rtState0.zkClient != null) 
return Long.toHexString(rtState.zkClient.zk().getSessionId()); else return null; diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryStatistics.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryStatistics.java index cc95dd3fe9f0ee..faf00d60434da5 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryStatistics.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryStatistics.java @@ -16,6 +16,9 @@ */ package org.apache.ignite.spi.discovery.zk.internal; +import org.apache.ignite.internal.processors.metric.MetricRegistry; +import org.apache.ignite.internal.processors.metric.impl.AtomicLongMetric; +import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; import org.apache.ignite.internal.util.typedef.internal.S; /** @@ -23,42 +26,74 @@ */ public class ZookeeperDiscoveryStatistics { /** */ - private long joinedNodesCnt; + private final LongAdderMetric joinedNodesCnt = new LongAdderMetric("JoinedNodes", "Joined nodes count"); /** */ - private long failedNodesCnt; + private final LongAdderMetric failedNodesCnt = new LongAdderMetric("FailedNodes", "Failed nodes count"); + + /** */ + private final LongAdderMetric leftNodesCnt = new LongAdderMetric("LeftNodes", "Left nodes count"); /** Communication error count. */ - private long commErrCnt; + private final LongAdderMetric commErrCnt = new LongAdderMetric("CommunicationErrors", "Communication errors count"); + + /** Current topology version */ + private final AtomicLongMetric topVer = new AtomicLongMetric("CurrentTopologyVersion", "Current topology version"); + + /** + * @param discoReg Discovery metric registry. 
+ */ + public void registerMetrics(MetricRegistry discoReg) { + discoReg.register(joinedNodesCnt); + discoReg.register(failedNodesCnt); + discoReg.register(leftNodesCnt); + discoReg.register(commErrCnt); + discoReg.register(topVer); + } /** */ public long joinedNodesCnt() { - return joinedNodesCnt; + return joinedNodesCnt.value(); } /** */ public long failedNodesCnt() { - return failedNodesCnt; + return failedNodesCnt.value(); + } + + /** */ + public long leftNodesCnt() { + return leftNodesCnt.value(); } /** */ public long commErrorCount() { - return commErrCnt; + return commErrCnt.value(); } /** */ public void onNodeJoined() { - joinedNodesCnt++; + joinedNodesCnt.increment(); } /** */ public void onNodeFailed() { - failedNodesCnt++; + failedNodesCnt.increment(); + } + + /** */ + public void onNodeLeft() { + leftNodesCnt.increment(); } /** */ public void onCommunicationError() { - commErrCnt++; + commErrCnt.increment(); + } + + /** */ + public void onTopologyChanged(long topVer) { + this.topVer.value(topVer); } /** {@inheritDoc} */ diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite1.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite1.java index d5be8817925567..03d6a43904a925 100644 --- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite1.java +++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite1.java @@ -24,6 +24,7 @@ import org.apache.ignite.spi.discovery.zk.internal.ZookeeperDiscoveryConcurrentStartAndStartStopTest; import org.apache.ignite.spi.discovery.zk.internal.ZookeeperDiscoveryCustomEventsTest; import org.apache.ignite.spi.discovery.zk.internal.ZookeeperDiscoveryMiscTest; +import org.apache.ignite.spi.discovery.zk.internal.ZookeeperDiscoveryRandomStopOrFailConcurrentTest; import 
org.apache.ignite.spi.discovery.zk.internal.ZookeeperDiscoverySegmentationAndConnectionRestoreTest; import org.apache.ignite.spi.discovery.zk.internal.ZookeeperDiscoverySpiSaslFailedAuthTest; import org.apache.ignite.spi.discovery.zk.internal.ZookeeperDiscoverySpiSaslSuccessfulAuthTest; @@ -43,6 +44,7 @@ ZookeeperValidatePathsTest.class, ZookeeperDiscoverySegmentationAndConnectionRestoreTest.class, ZookeeperDiscoveryConcurrentStartAndStartStopTest.class, + ZookeeperDiscoveryRandomStopOrFailConcurrentTest.class, ZookeeperDiscoveryTopologyChangeAndReconnectTest.class, ZookeeperDiscoveryCommunicationFailureTest.class, ZookeeperDiscoveryClientDisconnectTest.class, diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite4.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite4.java index a9146cc3d38f3f..ac73c29885d273 100644 --- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite4.java +++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite4.java @@ -18,6 +18,7 @@ package org.apache.ignite.spi.discovery.zk; import org.apache.ignite.internal.ClusterNodeMetricsUpdateTest; +import org.apache.ignite.internal.IgniteNodeValidationFailedEventTest; import org.apache.ignite.internal.processors.cache.distributed.dht.IgniteCachePutRetryAtomicSelfTest; import org.apache.ignite.internal.processors.cache.distributed.dht.IgniteCachePutRetryTransactionalSelfTest; import org.apache.ignite.internal.processors.cache.distributed.near.GridCacheAtomicMultiNodeFullApiSelfTest; @@ -42,7 +43,8 @@ GridCacheReplicatedAtomicMultiNodeFullApiSelfTest.class, IgniteCacheReplicatedQuerySelfTest.class, DistributedMetaStorageTest.class, - DistributedMetaStoragePersistentTest.class + DistributedMetaStoragePersistentTest.class, + IgniteNodeValidationFailedEventTest.class }) public class 
ZookeeperDiscoverySpiTestSuite4 { /** */ diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryConcurrentStartAndStartStopTest.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryConcurrentStartAndStartStopTest.java index 1572af532bd256..cea59751eacecc 100644 --- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryConcurrentStartAndStartStopTest.java +++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryConcurrentStartAndStartStopTest.java @@ -180,7 +180,9 @@ private void concurrentStartStop(final int initNodes) throws Exception { }, NODES, "stop-node"); for (int j = 0; j < NODES; j++) - expEvts[j] = ZookeeperDiscoverySpiTestHelper.failEvent(++topVer); + expEvts[j] = ZookeeperDiscoverySpiTestHelper.leftEvent(++topVer, false); + + helper.checkEvents(ignite(0), evts, expEvts); checkEventsConsistency(); } @@ -199,6 +201,8 @@ public void testClusterRestart() throws Exception { startGridsMultiThreaded(3, false); + checkZkNodesCleanup(); + waitForTopology(3); } diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryMiscTest.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryMiscTest.java index c644a4b1ff5a9b..57aae67fe445af 100644 --- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryMiscTest.java +++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryMiscTest.java @@ -33,6 +33,7 @@ import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.SecurityCredentialsAttrFilterPredicate; +import org.apache.ignite.internal.processors.metric.MetricRegistry; import 
org.apache.ignite.internal.processors.security.SecurityContext; import org.apache.ignite.internal.util.lang.gridfunc.PredicateMapView; import org.apache.ignite.internal.util.typedef.G; @@ -47,11 +48,14 @@ import org.apache.ignite.spi.discovery.DiscoverySpiNodeAuthenticator; import org.apache.ignite.spi.discovery.zk.ZookeeperDiscoverySpi; import org.apache.ignite.spi.discovery.zk.ZookeeperDiscoverySpiMBean; +import org.apache.ignite.spi.metric.LongMetric; +import org.apache.ignite.spi.metric.ObjectMetric; import org.apache.ignite.testframework.GridTestUtils; import org.junit.Test; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_IGNITE_INSTANCE_NAME; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_SECURITY_SUBJECT_V2; +import static org.apache.ignite.internal.managers.discovery.GridDiscoveryManager.DISCO_METRICS; /** * Tests for Zookeeper SPI discovery. @@ -217,23 +221,44 @@ public void testDefaultConsistentId() throws Exception { */ @Test public void testMbean() throws Exception { - startGrids(3); + int cnt = 3; + + startGrids(cnt); UUID crdNodeId = grid(0).localNode().id(); try { - for (int i = 0; i < 3; i++) { + for (int i = 0; i < cnt; i++) { IgniteEx grid = grid(i); ZookeeperDiscoverySpiMBean bean = getMxBean(grid.context().igniteInstanceName(), "SPIs", ZookeeperDiscoverySpi.class, ZookeeperDiscoverySpiMBean.class); + MetricRegistry discoReg = grid.context().metric().registry(DISCO_METRICS); + assertNotNull(bean); assertEquals(String.valueOf(grid.cluster().node(crdNodeId)), bean.getCoordinatorNodeFormatted()); assertEquals(String.valueOf(grid.cluster().localNode()), bean.getLocalNodeFormatted()); assertEquals(zkCluster.getConnectString(), bean.getZkConnectionString()); assertEquals((long)grid.configuration().getFailureDetectionTimeout(), bean.getZkSessionTimeout()); + + assertEquals(grid.cluster().topologyVersion(), + discoReg.findMetric("CurrentTopologyVersion").value()); + + 
assertEquals(grid.cluster().node(crdNodeId).id(), + discoReg.<ObjectMetric<UUID>>findMetric("Coordinator").value()); + + assertEquals(cnt - i - 1, bean.getNodesJoined()); + assertEquals(cnt - i - 1, discoReg.<LongMetric>findMetric("JoinedNodes").value()); + + Arrays.asList("LeftNodes", "FailedNodes", "CommunicationErrors").forEach(name -> { + assertEquals(0, discoReg.<LongMetric>findMetric(name).value()); + }); + + assertEquals(0, bean.getNodesLeft()); + assertEquals(0, bean.getNodesFailed()); + assertEquals(0, bean.getCommErrorProcNum()); } } finally { @@ -255,6 +280,8 @@ public void testMbeanGetCoordinator() throws Exception { stopGrid(0); + waitForTopology(2); + assertEquals(mbean.getCoordinator(), srv2.localNode().id()); assertEquals(mbean.getCoordinatorNodeFormatted(), String.valueOf(srv2.localNode())); } diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryRandomStopOrFailConcurrentTest.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryRandomStopOrFailConcurrentTest.java new file mode 100644 index 00000000000000..0f9935b2ec6a8a --- /dev/null +++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryRandomStopOrFailConcurrentTest.java @@ -0,0 +1,264 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.spi.discovery.zk.internal; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.Random; +import java.util.UUID; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.apache.ignite.Ignite; +import org.apache.ignite.cluster.ClusterState; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.util.typedef.G; +import org.apache.ignite.spi.discovery.DiscoverySpiMBean; +import org.apache.ignite.spi.discovery.zk.ZookeeperDiscoverySpi; +import org.apache.ignite.spi.discovery.zk.ZookeeperDiscoverySpiMBean; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.zookeeper.ZkTestClientCnxnSocketNIO; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +/** + * + */ +@RunWith(Parameterized.class) +public class ZookeeperDiscoveryRandomStopOrFailConcurrentTest extends ZookeeperDiscoverySpiTestBase { + /** */ + private static final int NUM_CLIENTS = 10; + + /** */ + private static final int NUM_SERVERS = 10; + + /** */ + private static final int ZK_SESSION_TIMEOUT = 5_000; + + /** */ + @Parameterized.Parameters(name = "stop mode = {0}, with crd = {1}") + public static Collection<Object[]> parameters() { + List<Object[]> params = new ArrayList<>(); + + for (StopMode stopMode: StopMode.values()) { + params.add(new Object[] {stopMode, true}); + params.add(new Object[] {stopMode, false}); + } + + return params; + } + + /** */ + @Parameterized.Parameter(0) + public StopMode stopMode; + + /** */ + @Parameterized.Parameter(1) + public boolean killCrd; + + /** */ + private final 
AtomicLong nodesLeft = new AtomicLong(0); + + /** */ + private final AtomicLong nodesFailed = new AtomicLong(0); + + /** */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + cfg.setClusterStateOnStart(ClusterState.INACTIVE); + + return cfg; + } + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + sesTimeout = ZK_SESSION_TIMEOUT; + + testSockNio = true; + + clientReconnectDisabled = true; + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + for (Ignite g: G.allGrids()) { + ZkTestClientCnxnSocketNIO cnxn = ZkTestClientCnxnSocketNIO.forNode(g); + + if (cnxn != null) + cnxn.allowConnect(); + } + + super.afterTest(); + } + + /** {@inheritDoc} */ + @Override protected void waitForTopology(int expSize) throws Exception { + assertTrue(GridTestUtils.waitForCondition(() -> grid(0).cluster().nodes().size() == expSize, 30_000)); + } + + /** + * @throws Exception If failed. 
+ */ + @Test + public void testStopOrFailConcurrently() throws Exception { + IgniteEx client = startServersAndClients(NUM_SERVERS, NUM_CLIENTS); + + int crd = getCoordinatorIndex(); + + List<Integer> srvToStop = IntStream.range(1, NUM_SERVERS + 1) + .filter(j -> j != crd) + .boxed() + .collect(Collectors.collectingAndThen(Collectors.toList(), list -> { + Collections.shuffle(list); + + return list.subList(0, NUM_SERVERS / 2); + })); + + if (killCrd) + srvToStop.set(0, crd); + + List<Integer> cliToStop = IntStream.range(NUM_SERVERS + 1, NUM_CLIENTS + NUM_SERVERS) + .boxed() + .collect(Collectors.collectingAndThen(Collectors.toList(), list -> { + Collections.shuffle(list); + + return list.subList(0, NUM_CLIENTS / 2); + })); + + srvToStop.addAll(cliToStop); + + stopOrKillMultithreaded(srvToStop); + + waitForTopology(NUM_CLIENTS + NUM_SERVERS - srvToStop.size()); + + checkStopFlagsDeleted(10_000); + + DiscoverySpiMBean mBean = getMbean(client); + + GridTestUtils.waitForCondition(() -> nodesLeft.get() == mBean.getNodesLeft(), 10_000); + GridTestUtils.waitForCondition(() -> nodesFailed.get() == mBean.getNodesFailed(), 10_000); + } + + /** */ + private void checkStopFlagsDeleted(long timeout) throws Exception { + ZookeeperClient zkClient = new ZookeeperClient(getTestResources().getLogger(), + zkCluster.getConnectString(), + 30_000, + null); + + ZkIgnitePaths paths = new ZkIgnitePaths(ZookeeperDiscoverySpiTestHelper.IGNITE_ZK_ROOT); + + GridTestUtils.waitForCondition(() -> { + try { + return zkClient.getChildren(paths.stoppedNodesFlagsDir).isEmpty(); + } + catch (Exception e) { + if (e instanceof InterruptedException) + Thread.currentThread().interrupt(); + + throw new RuntimeException("Failed to wait for stopped nodes flags", e); + } + }, timeout); + } + + /** */ + private void stopOrKillMultithreaded(final List<Integer> stopIndices) throws Exception { + log.info("Stopping or killing nodes by idx: " + stopIndices.toString()); + + final StopMode mode = stopMode; + + 
GridTestUtils.runMultiThreaded((idx) -> { + try { + Random rnd = ThreadLocalRandom.current(); + + int nodeIdx = stopIndices.get(idx); + + if (mode == StopMode.FAIL_ONLY || (mode == StopMode.RANDOM && rnd.nextBoolean())) { + ZkTestClientCnxnSocketNIO c0 = ZkTestClientCnxnSocketNIO.forNode(grid(nodeIdx)); + + c0.closeSocket(true); + + nodesFailed.incrementAndGet(); + } + else { + stopGrid(nodeIdx); + + nodesLeft.incrementAndGet(); + } + } + catch (Exception e) { + e.printStackTrace(); + + fail(e.getMessage()); + } + }, stopIndices.size(), "stop-node"); + } + + /** */ + private int getCoordinatorIndex() { + UUID crdId = getMbean(grid(0)).getCoordinator(); + + Optional<Integer> crdIdx = grid(0).cluster().nodes().stream().filter(n -> n.id().equals(crdId)) + .map(n -> getTestIgniteInstanceIndex((String)n.consistentId())).findAny(); + + assertTrue(crdIdx.isPresent()); + + return crdIdx.get(); + } + + /** */ + private DiscoverySpiMBean getMbean(IgniteEx grid) { + ZookeeperDiscoverySpiMBean bean = getMxBean(grid.context().igniteInstanceName(), "SPIs", + ZookeeperDiscoverySpi.class, ZookeeperDiscoverySpiMBean.class); + + assertNotNull(bean); + + return bean; + } + + /** */ + private IgniteEx startServersAndClients(int numServers, int numClients) throws Exception { + startGridsMultiThreaded(1, numServers); + startClientGridsMultiThreaded(numServers + 1, numClients - 1); + + IgniteEx res = startClientGrid(0); + + waitForTopology(numClients + numServers); + + // Set initial value of counters from MBean. 
+ nodesLeft.addAndGet(getMbean(res).getNodesLeft()); + nodesFailed.addAndGet(getMbean(res).getNodesFailed()); + + return res; + } + + enum StopMode { + STOP_ONLY, + FAIL_ONLY, + RANDOM + } +} diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySegmentationAndConnectionRestoreTest.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySegmentationAndConnectionRestoreTest.java index 49e39a881909b4..d33932b53121d4 100644 --- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySegmentationAndConnectionRestoreTest.java +++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySegmentationAndConnectionRestoreTest.java @@ -391,7 +391,7 @@ private void connectionRestore_NonCoordinator(boolean failWhenDisconnected) thro closeZkClient(spi); - helper.checkEvents(node0, evts, ZookeeperDiscoverySpiTestHelper.failEvent(4)); + helper.checkEvents(node0, evts, ZookeeperDiscoverySpiTestHelper.leftEvent(4, true)); } c1.allowConnect(); @@ -399,7 +399,7 @@ private void connectionRestore_NonCoordinator(boolean failWhenDisconnected) thro helper.checkEvents(ignite(1), evts, ZookeeperDiscoverySpiTestHelper.joinEvent(3)); if (failWhenDisconnected) { - helper.checkEvents(ignite(1), evts, ZookeeperDiscoverySpiTestHelper.failEvent(4)); + helper.checkEvents(ignite(1), evts, ZookeeperDiscoverySpiTestHelper.leftEvent(4, true)); IgnitionEx.stop(getTestIgniteInstanceName(2), true, ShutdownPolicy.IMMEDIATE, true); } diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiTestBase.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiTestBase.java index d23aa97dba9c8e..bed11bd696835a 100644 --- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiTestBase.java +++ 
b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiTestBase.java @@ -80,7 +80,10 @@ import org.apache.ignite.spi.discovery.zk.ZookeeperDiscoverySpiTestUtil; import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.ZKUtil; import org.apache.zookeeper.ZkTestClientCnxnSocketNIO; +import org.apache.zookeeper.ZooKeeper; import org.jetbrains.annotations.Nullable; import static java.util.concurrent.TimeUnit.SECONDS; @@ -566,6 +569,93 @@ void stopZkCluster() { } } + /** + * @throws Exception If failed. + */ + protected void checkZkNodesCleanup() throws Exception { + final ZookeeperClient zkClient = new ZookeeperClient(getTestResources().getLogger(), + zkCluster.getConnectString(), + 30_000, + null); + + final String basePath = ZookeeperDiscoverySpiTestHelper.IGNITE_ZK_ROOT + "/"; + + final String aliveDir = basePath + ZkIgnitePaths.ALIVE_NODES_DIR + "/"; + + try { + List znodes = listSubTree(zkClient.zk(), ZookeeperDiscoverySpiTestHelper.IGNITE_ZK_ROOT); + + boolean foundAlive = false; + + for (String znode : znodes) { + if (znode.startsWith(aliveDir)) { + foundAlive = true; + + break; + } + } + + assertTrue(foundAlive); // Sanity check to make sure we check correct directory. + + assertTrue("Failed to wait for unused znodes cleanup", GridTestUtils.waitForCondition(new GridAbsPredicate() { + @Override public boolean apply() { + try { + List znodes = listSubTree(zkClient.zk(), ZookeeperDiscoverySpiTestHelper.IGNITE_ZK_ROOT); + + for (String znode : znodes) { + if (znode.startsWith(aliveDir) || znode.length() < basePath.length()) + continue; + + znode = znode.substring(basePath.length()); + + if (!znode.contains("/")) // Ignore roots. 
+ continue; + + // TODO ZK: https://issues.apache.org/jira/browse/IGNITE-8193 + if (znode.startsWith("jd/")) + continue; + + log.info("Found unexpected znode: " + znode); + + return false; + } + + return true; + } + catch (Exception e) { + error("Unexpected error: " + e, e); + + fail("Unexpected error: " + e); + } + + return false; + } + }, 10_000)); + } + finally { + zkClient.close(); + } + } + + /** + * @param zk ZooKeeper client. + * @param root Root path. + * @return All children znodes for given path. + * @throws Exception If failed/ + */ + private List listSubTree(ZooKeeper zk, String root) throws Exception { + for (int i = 0; i < 30; i++) { + try { + return ZKUtil.listSubTreeBFS(zk, root); + } + catch (KeeperException.NoNodeException e) { + info("NoNodeException when get znodes, will retry: " + e); + } + } + + throw new Exception("Failed to get znodes: " + root); + } + /** */ private CacheConfiguration getCacheConfiguration() { CacheConfiguration ccfg = new CacheConfiguration(DEFAULT_CACHE_NAME); diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiTestHelper.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiTestHelper.java index 32e3855b0ca0a7..be5f2e6bfdda05 100644 --- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiTestHelper.java +++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiTestHelper.java @@ -135,8 +135,10 @@ static DiscoveryEvent joinEvent(long topVer) { * @param topVer Topology version. * @return Expected event instance. */ - static DiscoveryEvent failEvent(long topVer) { - DiscoveryEvent expEvt = new DiscoveryEvent(null, null, EventType.EVT_NODE_FAILED, null); + static DiscoveryEvent leftEvent(long topVer, boolean fail) { + int eventType = fail ? 
EventType.EVT_NODE_FAILED : EventType.EVT_NODE_LEFT; + + DiscoveryEvent expEvt = new DiscoveryEvent(null, null, eventType, null); expEvt.topologySnapshot(topVer, null); diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryTopologyChangeAndReconnectTest.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryTopologyChangeAndReconnectTest.java index ba17a2fdca42be..f38baa7fc0355d 100644 --- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryTopologyChangeAndReconnectTest.java +++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryTopologyChangeAndReconnectTest.java @@ -41,7 +41,6 @@ import org.apache.ignite.internal.processors.cache.GridCacheAbstractFullApiSelfTest; import org.apache.ignite.internal.processors.query.DummyQueryIndexing; import org.apache.ignite.internal.processors.query.GridQueryProcessor; -import org.apache.ignite.internal.util.lang.GridAbsPredicate; import org.apache.ignite.internal.util.typedef.G; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteInClosure; @@ -49,8 +48,6 @@ import org.apache.ignite.spi.IgniteSpiException; import org.apache.ignite.spi.discovery.zk.ZookeeperDiscoverySpi; import org.apache.ignite.testframework.GridTestUtils; -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.ZKUtil; import org.apache.zookeeper.ZkTestClientCnxnSocketNIO; import org.apache.zookeeper.ZooKeeper; import org.junit.Ignore; @@ -237,74 +234,6 @@ public void testRandomTopologyChanges() throws Exception { randomTopologyChanges(false, false); } - /** - * @throws Exception If failed. 
- */ - private void checkZkNodesCleanup() throws Exception { - final ZookeeperClient zkClient = new ZookeeperClient(getTestResources().getLogger(), - zkCluster.getConnectString(), - 30_000, - null); - - final String basePath = ZookeeperDiscoverySpiTestHelper.IGNITE_ZK_ROOT + "/"; - - final String aliveDir = basePath + ZkIgnitePaths.ALIVE_NODES_DIR + "/"; - - try { - List znodes = listSubTree(zkClient.zk(), ZookeeperDiscoverySpiTestHelper.IGNITE_ZK_ROOT); - - boolean foundAlive = false; - - for (String znode : znodes) { - if (znode.startsWith(aliveDir)) { - foundAlive = true; - - break; - } - } - - assertTrue(foundAlive); // Sanity check to make sure we check correct directory. - - assertTrue("Failed to wait for unused znodes cleanup", GridTestUtils.waitForCondition(new GridAbsPredicate() { - @Override public boolean apply() { - try { - List znodes = listSubTree(zkClient.zk(), ZookeeperDiscoverySpiTestHelper.IGNITE_ZK_ROOT); - - for (String znode : znodes) { - if (znode.startsWith(aliveDir) || znode.length() < basePath.length()) - continue; - - znode = znode.substring(basePath.length()); - - if (!znode.contains("/")) // Ignore roots. - continue; - - // TODO ZK: https://issues.apache.org/jira/browse/IGNITE-8193 - if (znode.startsWith("jd/")) - continue; - - log.info("Found unexpected znode: " + znode); - - return false; - } - - return true; - } - catch (Exception e) { - error("Unexpected error: " + e, e); - - fail("Unexpected error: " + e); - } - - return false; - } - }, 10_000)); - } - finally { - zkClient.close(); - } - } - /** * @throws Exception If failed. */ @@ -766,25 +695,6 @@ else if (evt.type() == EVT_CLIENT_NODE_RECONNECTED) { client.events().stopLocalListen(p); } - /** - * @param zk ZooKeeper client. - * @param root Root path. - * @return All children znodes for given path. 
- * @throws Exception If failed/ - */ - private List listSubTree(ZooKeeper zk, String root) throws Exception { - for (int i = 0; i < 30; i++) { - try { - return ZKUtil.listSubTreeBFS(zk, root); - } - catch (KeeperException.NoNodeException e) { - info("NoNodeException when get znodes, will retry: " + e); - } - } - - throw new Exception("Failed to get znodes: " + root); - } - /** * @param cacheName Cache name. * @return Configuration. diff --git a/modules/zookeeper/src/test/java/org/apache/zookeeper/ZkTestClientCnxnSocketNIO.java b/modules/zookeeper/src/test/java/org/apache/zookeeper/ZkTestClientCnxnSocketNIO.java index 2b741a19260143..47fe0acb31fca4 100644 --- a/modules/zookeeper/src/test/java/org/apache/zookeeper/ZkTestClientCnxnSocketNIO.java +++ b/modules/zookeeper/src/test/java/org/apache/zookeeper/ZkTestClientCnxnSocketNIO.java @@ -114,7 +114,8 @@ public ZkTestClientCnxnSocketNIO(ZKClientConfig clientCfg) throws IOException { * */ public void allowConnect() { - assert blockConnectLatch != null && blockConnectLatch.getCount() == 1 : blockConnectLatch; + if (blockConnectLatch == null || blockConnectLatch.getCount() == 0) + return; log.info("ZkTestClientCnxnSocketNIO allowConnect [node=" + nodeName + ']'); diff --git a/packaging/deb/control b/packaging/deb/control index 40467c0f5a744b..0c258f8ab9f7a4 100644 --- a/packaging/deb/control +++ b/packaging/deb/control @@ -8,7 +8,7 @@ Package: apache-ignite Architecture: all Section: misc Priority: optional -Depends: openjdk-8-jdk | oracle-java8-installer, systemd, passwd +Depends: openjdk-8-jdk | openjdk-11-jdk | default-jdk | java-sdk, systemd, passwd Description: Apache Ignite In-Memory Computing, Database and Caching Platform Ignite™ is a memory-centric distributed database, caching, and processing platform for transactional, analytical, and streaming workloads, delivering diff --git a/parent/pom.xml b/parent/pom.xml index 4291becd7901f3..ab52dbb6bd1992 100644 --- a/parent/pom.xml +++ b/parent/pom.xml @@ -107,7 
+107,7 @@ 1.5.0 3.5.0 3.1.1 - 8.35 + 8.37 3.4.6 8.0.13 1.1.2 @@ -226,6 +226,35 @@ + + org.apache.maven.plugins + maven-checkstyle-plugin + ${maven.checkstyle.plugin.version} + + true + + ${project.build.sourceDirectory} + ${project.build.testSourceDirectory} + + true + true + true + true + ${project.build.directory}/checkstyle-result.xml + ../checkstyle/checkstyle.xml + ../checkstyle/checkstyle-suppressions.xml + true + **/generated/**/* + + + + com.puppycrawl.tools + checkstyle + ${checkstyle.puppycrawl.version} + + + + net.alchim31.maven scala-maven-plugin @@ -736,6 +765,20 @@ + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + style + + check + + validate + + + @@ -747,34 +790,9 @@ org.apache.maven.plugins maven-checkstyle-plugin - ${maven.checkstyle.plugin.version} - - - style - - check - - validate - - true - true - true - true - ${project.build.directory}/checkstyle-result.xml - ../checkstyle/checkstyle.xml - ../checkstyle/checkstyle-suppressions.xml - true - **/generated/**/* - - - - - - com.puppycrawl.tools - checkstyle - ${checkstyle.puppycrawl.version} - - + + false +