From 652b3fc54800532a8ff44674fde907f4f113935b Mon Sep 17 00:00:00 2001 From: Andrey Gura Date: Thu, 30 Jan 2020 16:42:55 +0300 Subject: [PATCH 001/110] IGNITE-13619 Thread dumps on failure processor invocation should be enabled by default --- .../processors/failure/FailureProcessor.java | 33 ++++++++++++++----- ...lureProcessorThreadDumpThrottlingTest.java | 2 +- 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/failure/FailureProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/failure/FailureProcessor.java index 9dfdd1c1a80f3..2eab2c552f0ac 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/failure/FailureProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/failure/FailureProcessor.java @@ -48,10 +48,11 @@ public class FailureProcessor extends GridProcessorAdapter { public static final int DFLT_FAILURE_HANDLER_RESERVE_BUFFER_SIZE = 64 * 1024; /** Value of the system property that enables threads dumping on failure. */ - private final boolean igniteDumpThreadsOnFailure = IgniteSystemProperties.getBoolean(IGNITE_DUMP_THREADS_ON_FAILURE); + private final boolean igniteDumpThreadsOnFailure = + IgniteSystemProperties.getBoolean(IGNITE_DUMP_THREADS_ON_FAILURE, true); /** Timeout for throttling of thread dumps generation. */ - long dumpThreadsTrottlingTimeout; + private long dumpThreadsTrottlingTimeout; /** Ignored failure log message. */ static final String IGNORED_FAILURE_LOG_MSG = "Possible failure suppressed accordingly to a configured handler "; @@ -61,7 +62,7 @@ public class FailureProcessor extends GridProcessorAdapter { "Will be handled accordingly to configured handler "; /** Thread dump per failure type timestamps. */ - private Map threadDumpPerFailureTypeTime; + private final Map threadDumpPerFailureTypeTs; /** Ignite. 
*/ private final Ignite ignite; @@ -83,6 +84,8 @@ public FailureProcessor(GridKernalContext ctx) { ignite = ctx.grid(); + Map threadDumpPerFailureTypeTs = null; + if (igniteDumpThreadsOnFailure) { dumpThreadsTrottlingTimeout = IgniteSystemProperties.getLong( @@ -91,12 +94,14 @@ public FailureProcessor(GridKernalContext ctx) { ); if (dumpThreadsTrottlingTimeout > 0) { - threadDumpPerFailureTypeTime = new EnumMap<>(FailureType.class); + threadDumpPerFailureTypeTs = new EnumMap<>(FailureType.class); for (FailureType type : FailureType.values()) - threadDumpPerFailureTypeTime.put(type, 0L); + threadDumpPerFailureTypeTs.put(type, 0L); } } + + this.threadDumpPerFailureTypeTs = threadDumpPerFailureTypeTs; } /** {@inheritDoc} */ @@ -124,7 +129,8 @@ public boolean nodeStopping() { } /** - * This method is used to initialize local failure handler if {@link IgniteConfiguration} don't contain configured one. + * This method is used to initialize local failure handler if {@link IgniteConfiguration} + * doesn't contain configured one. * * @return Default {@link FailureHandler} implementation. */ @@ -202,7 +208,16 @@ public synchronized boolean process(FailureContext failureCtx, FailureHandler hn } /** - * Defines whether thread dump should be throttled for givn failure type or not. + * Returns timeout for throttling of thread dumps generation. + * + * @return Timeout for throttling of thread dumps generation. + */ + long dumpThreadsTrottlingTimeout() { + return dumpThreadsTrottlingTimeout; + } + + /** + * Defines whether thread dump should be throttled for given failure type or not. * * @param type Failure type. * @return {@code True} if thread dump generation should be throttled fro given failure type. 
@@ -213,14 +228,14 @@ private boolean throttleThreadDump(FailureType type) { long curr = U.currentTimeMillis(); - Long last = threadDumpPerFailureTypeTime.get(type); + Long last = threadDumpPerFailureTypeTs.get(type); assert last != null : "Unknown failure type " + type; boolean throttle = curr - last < dumpThreadsTrottlingTimeout; if (!throttle) - threadDumpPerFailureTypeTime.put(type, curr); + threadDumpPerFailureTypeTs.put(type, curr); else { if (log.isInfoEnabled()) { log.info("Thread dump is hidden due to throttling settings. " + diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/failure/FailureProcessorThreadDumpThrottlingTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/failure/FailureProcessorThreadDumpThrottlingTest.java index 027895a2c837a..d694e5ff8b371 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/failure/FailureProcessorThreadDumpThrottlingTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/failure/FailureProcessorThreadDumpThrottlingTest.java @@ -196,7 +196,7 @@ public void testDefaultThrottlingTimeout() throws Exception { IgniteEx ignite = ignite(0); assertEquals( - ignite.context().failure().dumpThreadsTrottlingTimeout, + ignite.context().failure().dumpThreadsTrottlingTimeout(), ignite.configuration().getFailureDetectionTimeout().longValue() ); } From 287eb66ce4c838c054768cd90047d66309603ffe Mon Sep 17 00:00:00 2001 From: Denis Mekhanikov Date: Tue, 27 Oct 2020 10:24:41 +0300 Subject: [PATCH 002/110] IGNITE-12794 Fix "Unexpected row key" assertion during scan query - Fixes #7541. 
Signed-off-by: Aleksey Plekhanov --- .../processors/cache/GridCacheMapEntry.java | 3 +- ...canQueryConcurrentUpdatesAbstractTest.java | 210 ++++++++++++++++++ .../query/ScanQueryConcurrentUpdatesTest.java | 54 +++++ .../ScanQueryConcurrentSqlUpdatesTest.java | 87 ++++++++ .../IgniteBinaryCacheQueryTestSuite.java | 4 + 5 files changed, 357 insertions(+), 1 deletion(-) create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentUpdatesAbstractTest.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentUpdatesTest.java create mode 100644 modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentSqlUpdatesTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java index de4edb0949e98..b38b8944f6cfe 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java @@ -564,7 +564,8 @@ protected GridDhtLocalPartition localPartition() { checkObsolete(); if (isStartVersion() && ((flags & IS_UNSWAPPED_MASK) == 0)) { - assert row == null || row.key() == key : "Unexpected row key"; + assert row == null || Objects.equals(row.key(), key) : + "Unexpected row key [row.key=" + row.key() + ", cacheEntry.key=" + key + "]"; CacheDataRow read = row == null ? 
cctx.offheap().read(this) : row; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentUpdatesAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentUpdatesAbstractTest.java new file mode 100644 index 0000000000000..502a628d3a4e9 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentUpdatesAbstractTest.java @@ -0,0 +1,210 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.query; + +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.query.ScanQuery; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; + +import javax.cache.Cache; +import javax.cache.expiry.Duration; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * A base for tests that check the behaviour of scan queries run on a data set that is modified concurrently. + * Actual tests should implement a way of cache creation, modification and destruction. + */ +public abstract class ScanQueryConcurrentUpdatesAbstractTest extends GridCommonAbstractTest { + /** {@inheritDoc} */ + @Override protected void beforeTestsStarted() throws Exception { + startGrids(4); + } + + /** {@inheritDoc} */ + @Override protected void afterTestsStopped() throws Exception { + stopAllGrids(); + } + + /** + * Creates a cache with given parameters. + * + * @param cacheName Name of the cache. + * @param cacheMode Cache mode. + * @param expiration {@link Duration} for {@link javax.cache.expiry.ExpiryPolicy}. If {@code null}, then + * {@link javax.cache.expiry.ExpiryPolicy} won't be configured. + * + * @return Instance of the created cache. + */ + protected abstract IgniteCache createCache(String cacheName, CacheMode cacheMode, + Duration expiration); + + /** + * Performs modification of a provided cache. Records with keys in range {@code 0..(recordsNum - 1)} are updated. + * + * @param cache Cache to update. + * @param recordsNum Number of records to update. + */ + protected abstract void updateCache(IgniteCache cache, int recordsNum); + + /** + * Destroys the provided cache. + * + * @param cache Cache to destroy. 
+ */ + protected abstract void destroyCache(IgniteCache cache); + + /** + * Tests behaviour of scan queries with concurrent modification. + * + * @param cache Cache to test. + * @param recordsNum Number of records to load to the cache. + */ + private void testStableDataset(IgniteCache cache, int recordsNum) { + int iterations = 1000; + + AtomicBoolean finished = new AtomicBoolean(); + + try { + updateCache(cache, recordsNum); + GridTestUtils.runAsync(() -> { + while (!finished.get()) + updateCache(cache, recordsNum); + }); + + for (int i = 0; i < iterations; i++) { + List> res = cache.query(new ScanQuery()).getAll(); + + assertEquals("Unexpected query result size.", recordsNum, res.size()); + + for (Cache.Entry e : res) + assertEquals(e.getKey(), e.getValue()); + } + } + finally { + finished.set(true); + destroyCache(cache); + } + } + + /** + * Tests behaviour of scan queries with entries expired and modified concurrently. + * + * @param cache Cache to test. + */ + private void testExpiringDataset(IgniteCache cache) { + int iterations = 100; + int recordsNum = 100; + + try { + for (int i = 0; i < iterations; i++) { + updateCache(cache, recordsNum); + + long updateTime = U.currentTimeMillis(); + + List> res = cache.query(new ScanQuery()).getAll(); + + assertTrue("Query result set is too big: " + res.size(), res.size() <= recordsNum); + + for (Cache.Entry e : res) + assertEquals(e.getKey(), e.getValue()); + + while (U.currentTimeMillis() == updateTime) + doSleep(10L); + } + } + finally { + destroyCache(cache); + } + } + + /** */ + @Test + public void testReplicatedOneRecordLongExpiry() { + testStableDataset(createCache("replicated_long_expiry", + CacheMode.REPLICATED, Duration.ONE_HOUR), 1); + } + + /** */ + @Test + public void testReplicatedManyRecordsLongExpiry() { + testStableDataset(createCache("replicated_long_expiry", + CacheMode.REPLICATED, Duration.ONE_HOUR), 1000); + } + + /** */ + @Test + public void testReplicatedOneRecordNoExpiry() { + 
testStableDataset(createCache("replicated_no_expiry", + CacheMode.REPLICATED, null), 1); + } + + /** */ + @Test + public void testReplicatedManyRecordsNoExpiry() { + testStableDataset(createCache("replicated_no_expiry", + CacheMode.REPLICATED, null), 1000); + } + + /** */ + @Test + public void testPartitionedOneRecordLongExpiry() { + testStableDataset(createCache("partitioned_long_expiry", + CacheMode.PARTITIONED, Duration.ONE_HOUR), 1); + } + + /** */ + @Test + public void testPartitionedManyRecordsLongExpiry() { + testStableDataset(createCache("partitioned_long_expiry", + CacheMode.PARTITIONED, Duration.ONE_HOUR), 1000); + } + + /** */ + @Test + public void testPartitionedOneRecordNoExpiry() { + testStableDataset(createCache("partitioned_no_expiry", + CacheMode.PARTITIONED, null), 1); + } + + /** */ + @Test + public void testPartitionedManyRecordsNoExpiry() { + testStableDataset(createCache("partitioned_no_expiry", + CacheMode.PARTITIONED, null), 1000); + } + + /** */ + @Test + public void testPartitionedShortExpiry() { + testExpiringDataset(createCache("partitioned_short_expiry", + CacheMode.PARTITIONED, new Duration(TimeUnit.MILLISECONDS, 1))); + } + + /** */ + @Test + public void testReplicatedShortExpiry() { + testExpiringDataset(createCache("partitioned_short_expiry", + CacheMode.REPLICATED, new Duration(TimeUnit.MILLISECONDS, 1))); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentUpdatesTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentUpdatesTest.java new file mode 100644 index 0000000000000..17dd603f26eaa --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentUpdatesTest.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.query; + +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.configuration.CacheConfiguration; + +import javax.cache.expiry.CreatedExpiryPolicy; +import javax.cache.expiry.Duration; + +/** + * {@link ScanQueryConcurrentUpdatesAbstractTest} with caches created, updates and destroyed using Java API. 
+ */ +public class ScanQueryConcurrentUpdatesTest extends ScanQueryConcurrentUpdatesAbstractTest { + /** {@inheritDoc} */ + @Override protected IgniteCache createCache(String cacheName, CacheMode cacheMode, + Duration expiration) { + CacheConfiguration cacheCfg = new CacheConfiguration<>(cacheName); + cacheCfg.setCacheMode(cacheMode); + if (expiration != null) { + cacheCfg.setExpiryPolicyFactory(CreatedExpiryPolicy.factoryOf(expiration)); + cacheCfg.setEagerTtl(true); + } + + return grid(0).createCache(cacheCfg); + } + + /** {@inheritDoc} */ + @Override protected void updateCache(IgniteCache cache, int recordsNum) { + for (int i = 0; i < recordsNum; i++) + cache.put(i, i); + } + + /** {@inheritDoc} */ + @Override protected void destroyCache(IgniteCache cache) { + cache.destroy(); + } +} diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentSqlUpdatesTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentSqlUpdatesTest.java new file mode 100644 index 0000000000000..bb6753541fc6f --- /dev/null +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentSqlUpdatesTest.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.query; + +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.QueryEntity; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.internal.IgniteEx; + +import javax.cache.expiry.CreatedExpiryPolicy; +import javax.cache.expiry.Duration; + +/** + * {@link ScanQueryConcurrentUpdatesAbstractTest} with caches created, updates and destroyed using SQL DDL queries. + */ +public class ScanQueryConcurrentSqlUpdatesTest extends ScanQueryConcurrentUpdatesAbstractTest { + /** + * A name for a cache that will be used to execute DDL queries. + */ + private static final String DUMMY_CACHE_NAME = "dummy"; + + /** {@inheritDoc} */ + @Override protected IgniteCache createCache(String cacheName, CacheMode cacheMode, + Duration expiration) { + CacheConfiguration cacheCfg = new CacheConfiguration<>(cacheName); + cacheCfg.setCacheMode(cacheMode); + if (expiration != null) { + cacheCfg.setExpiryPolicyFactory(CreatedExpiryPolicy.factoryOf(expiration)); + cacheCfg.setEagerTtl(true); + } + + IgniteEx ignite = grid(0); + ignite.addCacheConfiguration(cacheCfg); + + ignite.getOrCreateCache(DUMMY_CACHE_NAME).query(new SqlFieldsQuery("CREATE TABLE " + cacheName + " " + + "(key int primary key, val int) " + + "WITH \"template=" + cacheName + ",wrap_value=false\"")); + + return ignite.cache("SQL_PUBLIC_" + cacheName.toUpperCase()); + } + + /** {@inheritDoc} */ + @Override protected void updateCache(IgniteCache cache, int recordsNum) { + String tblName = tableName(cache); + + for (int i = 0; i < recordsNum; i++) { + cache.query(new SqlFieldsQuery( + "INSERT INTO " + tblName + " (key, val) " + + "VALUES (" + i + ", " + i + ")")); + } + } + + /** {@inheritDoc} */ + @Override protected void 
destroyCache(IgniteCache cache) { + grid(0).cache(DUMMY_CACHE_NAME).query(new SqlFieldsQuery("DROP TABLE " + tableName(cache))); + } + + /** + * @param cache Cache to determine a table name for. + * @return Name of the table corresponding to the provided cache. + */ + @SuppressWarnings("unchecked") + private String tableName(IgniteCache cache) { + CacheConfiguration cacheCfg = + (CacheConfiguration) cache.getConfiguration(CacheConfiguration.class); + QueryEntity qe = cacheCfg.getQueryEntities().iterator().next(); + + return qe.getTableName(); + } +} diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite.java index 09a2abf0c3f19..457d2cd42f7de 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite.java +++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite.java @@ -183,6 +183,8 @@ import org.apache.ignite.internal.processors.cache.query.IndexingSpiQuerySelfTest; import org.apache.ignite.internal.processors.cache.query.IndexingSpiQueryTxSelfTest; import org.apache.ignite.internal.processors.cache.query.IndexingSpiQueryWithH2IndexingSelfTest; +import org.apache.ignite.internal.processors.cache.query.ScanQueryConcurrentSqlUpdatesTest; +import org.apache.ignite.internal.processors.cache.query.ScanQueryConcurrentUpdatesTest; import org.apache.ignite.internal.processors.cache.transaction.DmlInsideTransactionTest; import org.apache.ignite.internal.processors.client.ClientConnectorConfigurationValidationSelfTest; import org.apache.ignite.internal.processors.database.baseline.IgniteStableBaselineBinObjFieldsQuerySelfTest; @@ -536,6 +538,8 @@ IgniteCheckClusterStateBeforeExecuteQueryTest.class, OptimizedMarshallerIndexNameTest.class, SqlSystemViewsSelfTest.class, + ScanQueryConcurrentUpdatesTest.class, + 
ScanQueryConcurrentSqlUpdatesTest.class, GridIndexRebuildSelfTest.class, GridIndexRebuildTest.class, From 6f3d6336e1a1e068632cce05aa1973905d5873ce Mon Sep 17 00:00:00 2001 From: Pavel Pereslegin Date: Tue, 27 Oct 2020 14:18:42 +0300 Subject: [PATCH 003/110] IGNITE-12843 TDE - Phase-3. Cache key rotation - Fixes #7941. Signed-off-by: Aleksey Plekhanov --- .../org/apache/ignite/IgniteEncryption.java | 16 + .../DataStorageConfiguration.java | 24 + .../EncryptionConfiguration.java | 108 ++ .../ignite/internal/IgniteFeatures.java | 7 +- .../encryption/CacheGroupEncryptionKeys.java | 376 ++++++ .../encryption/CacheGroupPageScanner.java | 479 ++++++++ .../ChangeCacheEncryptionRequest.java | 108 ++ .../encryption/EncryptionMXBeanImpl.java | 6 + .../encryption/GridEncryptionManager.java | 754 +++++++++--- .../managers/encryption/GroupKey.java | 85 ++ .../encryption/GroupKeyChangeProcess.java | 356 ++++++ .../encryption/GroupKeyEncrypted.java | 57 + .../encryption/ReencryptStateUtils.java | 46 + .../wal/IgniteWriteAheadLogManager.java | 5 + .../wal/record/MasterKeyChangeRecord.java | 3 + .../wal/record/MasterKeyChangeRecordV2.java | 70 ++ .../wal/record/ReencryptionStartRecord.java | 52 + .../pagemem/wal/record/WALRecord.java | 20 +- .../delta/MetaPageUpdateIndexDataRecord.java | 109 ++ .../MetaPageUpdatePartitionDataRecordV3.java | 123 ++ .../cache/CacheGroupMetricsImpl.java | 50 + .../processors/cache/GridCacheUtils.java | 6 +- .../processors/cache/mvcc/txlog/TxLog.java | 3 +- .../GridCacheDatabaseSharedManager.java | 20 +- .../persistence/GridCacheOffheapManager.java | 140 ++- .../persistence/file/EncryptedFileIO.java | 70 +- .../cache/persistence/tree/io/PageMetaIO.java | 3 +- .../persistence/tree/io/PageMetaIOV2.java | 106 ++ .../tree/io/PagePartitionMetaIO.java | 20 +- .../tree/io/PagePartitionMetaIOV2.java | 17 +- .../tree/io/PagePartitionMetaIOV3.java | 123 ++ .../wal/FileWriteAheadLogManager.java | 7 + .../serializer/RecordDataV1Serializer.java | 146 ++- 
.../serializer/RecordDataV2Serializer.java | 5 +- .../internal/util/BasicRateLimiter.java | 153 +++ .../util/distributed/DistributedProcess.java | 12 +- .../ignite/mxbean/EncryptionMXBean.java | 12 + .../encryption/AbstractEncryptionTest.java | 272 ++++- .../encryption/CacheGroupKeyChangeTest.java | 1063 +++++++++++++++++ .../CacheGroupReencryptionTest.java | 867 ++++++++++++++ .../EncryptedCacheBigEntryTest.java | 9 +- .../encryption/EncryptedCacheCreateTest.java | 8 +- .../encryption/EncryptedCacheDestroyTest.java | 10 +- .../EncryptedCacheGroupCreateTest.java | 14 +- .../encryption/EncryptedCacheRestartTest.java | 10 +- .../encryption/EncryptionMXBeanTest.java | 24 + .../encryption/MasterKeyChangeTest.java | 9 +- .../persistence/pagemem/NoOpWALManager.java | 5 + .../internal/util/BasicRateLimiterTest.java | 108 ++ .../junits/common/GridCommonAbstractTest.java | 9 +- .../testframework/wal/record/RecordUtils.java | 51 +- .../IgniteBasicWithPersistenceTestSuite.java | 5 + .../testsuites/IgniteUtilSelfTestSuite.java | 5 +- .../IgnitePdsWithIndexingTestSuite.java | 4 +- .../SpringEncryptedCacheRestartTest.java | 33 +- 55 files changed, 5871 insertions(+), 332 deletions(-) create mode 100644 modules/core/src/main/java/org/apache/ignite/configuration/EncryptionConfiguration.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupEncryptionKeys.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupPageScanner.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/ChangeCacheEncryptionRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GroupKey.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GroupKeyChangeProcess.java create mode 100644 
modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GroupKeyEncrypted.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/ReencryptStateUtils.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/MasterKeyChangeRecordV2.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/ReencryptionStartRecord.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdateIndexDataRecord.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdatePartitionDataRecordV3.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageMetaIOV2.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PagePartitionMetaIOV3.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/util/BasicRateLimiter.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/encryption/CacheGroupKeyChangeTest.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/encryption/CacheGroupReencryptionTest.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/util/BasicRateLimiterTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteEncryption.java b/modules/core/src/main/java/org/apache/ignite/IgniteEncryption.java index 68439b7516f70..debc7797b9b2c 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteEncryption.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteEncryption.java @@ -17,6 +17,7 @@ package org.apache.ignite; +import java.util.Collection; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import 
org.apache.ignite.lang.IgniteFuture; @@ -70,4 +71,19 @@ public interface IgniteEncryption { * @return Future for this operation. */ public IgniteFuture changeMasterKey(String masterKeyName); + + /** + * Starts cache group encryption key change process. + *

+ * NOTE: Node join is rejected during rotation of cache group encryption key. Background re-encryption of + * existing data in the specified cache group(s) begins after the encryption key(s) is changed. During + * re-encryption, node join is not rejected, the cluster remains fully functional, it is fault-tolerant operation + * that automatically continues after restart. Secondary rotation of the encryption key of a cache group is only + * possible after background re-encryption of existing data in this cache group is completed. + * + * @param cacheOrGrpNames Cache or group names. + * @return Future which will be completed when new encryption key(s) are set for writing on all nodes in the cluster + * and re-encryption of existing cache data is initiated. + */ + public IgniteFuture changeCacheGroupKey(Collection cacheOrGrpNames); } diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java index 3b70891a0aa5e..2a1927b79fdaa 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java @@ -314,6 +314,9 @@ public class DataStorageConfiguration implements Serializable { /** Default warm-up configuration. */ @Nullable private WarmUpConfiguration dfltWarmUpCfg; + /** Encryption configuration. */ + private EncryptionConfiguration encCfg = new EncryptionConfiguration(); + /** * Creates valid durable memory configuration with all default values. */ @@ -1118,6 +1121,27 @@ public DataStorageConfiguration setWalPageCompressionLevel(Integer walPageCompre return this; } + /** + * Gets encryyption configuration. + * + * @return Encryption configuration. + */ + public EncryptionConfiguration getEncryptionConfiguration() { + return encCfg; + } + + /** + * Sets encryption configuration. 
+ * + * @param encCfg Encryption configuration. + * @return {@code this} for chaining. + */ + public DataStorageConfiguration setEncryptionConfiguration(EncryptionConfiguration encCfg) { + this.encCfg = encCfg; + + return this; + } + /** * Sets default warm-up configuration. * diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/EncryptionConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/EncryptionConfiguration.java new file mode 100644 index 0000000000000..79e205eb5386f --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/configuration/EncryptionConfiguration.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.configuration; + +import java.io.Serializable; +import org.apache.ignite.internal.util.typedef.internal.A; + +/** + * Encryption configuration. + */ +public class EncryptionConfiguration implements Serializable { + /** */ + private static final long serialVersionUID = 0L; + + /** Default re-encryption rate limit. The value is {@code 0}, which means that scan speed is not limited. 
*/ + public static final double DFLT_REENCRYPTION_RATE_MBPS = 0.0; + + /** Default number of pages that is scanned during reencryption under checkpoint lock. The value is {@code 100}. */ + public static final int DFLT_REENCRYPTION_BATCH_SIZE = 100; + + /** Re-encryption rate limit in megabytes per second (set {@code 0} for unlimited scanning). */ + private double reencryptionRateLimit = DFLT_REENCRYPTION_RATE_MBPS; + + /** The number of pages that is scanned during re-encryption under checkpoint lock. */ + private int reencryptionBatchSize = DFLT_REENCRYPTION_BATCH_SIZE; + + /** + * Creates valid encryption configuration with all default values. + */ + public EncryptionConfiguration() { + // No-op. + } + + /** + * Constructs the copy of the configuration. + * + * @param cfg Configuration to copy. + */ + public EncryptionConfiguration(EncryptionConfiguration cfg) { + assert cfg != null; + + reencryptionBatchSize = cfg.getReencryptionBatchSize(); + reencryptionRateLimit = cfg.getReencryptionRateLimit(); + } + + /** + * Gets re-encryption rate limit. + * + * @return Re-encryption rate limit in megabytes per second. + */ + public double getReencryptionRateLimit() { + return reencryptionRateLimit; + } + + /** + * Sets re-encryption rate limit. + * + * @param reencryptionRateLimit Re-encryption rate limit in megabytes per second. + * @return {@code this} for chaining. + */ + public EncryptionConfiguration setReencryptionRateLimit(double reencryptionRateLimit) { + A.ensure(reencryptionRateLimit >= 0, + "Reencryption rate limit (" + reencryptionRateLimit + ") must be non-negative."); + + this.reencryptionRateLimit = reencryptionRateLimit; + + return this; + } + + /** + * Gets the number of pages that is scanned during re-encryption under checkpoint lock. + * + * @return The number of pages that is scanned during re-encryption under checkpoint lock. 
+ */ + public int getReencryptionBatchSize() { + return reencryptionBatchSize; + } + + /** + * Sets the number of pages that is scanned during re-encryption under checkpoint lock. + * + * @param reencryptionBatchSize The number of pages that is scanned during re-encryption under checkpoint lock. + * @return {@code this} for chaining. + */ + public EncryptionConfiguration setReencryptionBatchSize(int reencryptionBatchSize) { + A.ensure(reencryptionBatchSize > 0, + "Reencryption batch size(" + reencryptionBatchSize + ") must be positive."); + + this.reencryptionBatchSize = reencryptionBatchSize; + + return this; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteFeatures.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteFeatures.java index 11ef19886aa16..e1f09e5b7f59a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteFeatures.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteFeatures.java @@ -18,6 +18,8 @@ package org.apache.ignite.internal; import java.util.BitSet; +import java.util.Collection; +import org.apache.ignite.IgniteEncryption; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.cluster.ClusterState; import org.apache.ignite.internal.managers.discovery.IgniteDiscoverySpi; @@ -128,7 +130,10 @@ public enum IgniteFeatures { SPECIFIED_SEQ_PK_KEYS(45), /** Compatibility support for new fields which are configured split. */ - SPLITTED_CACHE_CONFIGURATIONS_V2(46); + SPLITTED_CACHE_CONFIGURATIONS_V2(46), + + /** Cache encryption key change. See {@link IgniteEncryption#changeCacheGroupKey(Collection)}. */ + CACHE_GROUP_KEY_CHANGE(47); /** * Unique feature identifier. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupEncryptionKeys.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupEncryptionKeys.java new file mode 100644 index 0000000000000..03b884bf58172 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupEncryptionKeys.java @@ -0,0 +1,376 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.managers.encryption; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.CopyOnWriteArrayList; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.spi.encryption.EncryptionSpi; +import org.jetbrains.annotations.Nullable; + +/** + * Serves for managing encryption keys and related datastructure located in the heap. + */ +class CacheGroupEncryptionKeys { + /** Group encryption keys. */ + private final Map> grpKeys = new ConcurrentHashMap<>(); + + /** + * WAL segments encrypted with previous encryption keys prevent keys from being deleted + * until the associated segment is deleted. + */ + private final Collection trackedWalSegments = new ConcurrentLinkedQueue<>(); + + /** Encryption spi. */ + private final EncryptionSpi encSpi; + + /** + * @param encSpi Encryption spi. + */ + CacheGroupEncryptionKeys(EncryptionSpi encSpi) { + this.encSpi = encSpi; + } + + /** + * Returns group encryption key, that was set for writing. + * + * @param grpId Cache group ID. + * @return Group encryption key with ID, that was set for writing. + */ + @Nullable GroupKey getActiveKey(int grpId) { + List keys = grpKeys.get(grpId); + + if (F.isEmpty(keys)) + return null; + + return keys.get(0); + } + + /** + * Returns group encryption key with specified ID. + * + * @param grpId Cache group ID. + * @param keyId Encryption key ID. + * @return Group encryption key. 
+ */ + @Nullable GroupKey getKey(int grpId, int keyId) { + List keys = grpKeys.get(grpId); + + if (keys == null) + return null; + + for (GroupKey groupKey : keys) { + if (groupKey.unsignedId() == keyId) + return groupKey; + } + + return null; + } + + /** + * Gets the existing encryption key IDs for the specified cache group. + * + * @param grpId Cache group ID. + * @return List of the key IDs. + */ + @Nullable List keyIds(int grpId) { + List keys = grpKeys.get(grpId); + + if (keys == null) + return null; + + List keyIds = new ArrayList<>(keys.size()); + + for (GroupKey groupKey : keys) + keyIds.add(groupKey.unsignedId()); + + return keyIds; + } + + /** + * @return Cache group IDs for which encryption keys are stored. + */ + Set groupIds() { + return grpKeys.keySet(); + } + + /** + * @return Local encryption keys. + */ + @Nullable HashMap getAll() { + if (F.isEmpty(grpKeys)) + return null; + + HashMap keys = U.newHashMap(grpKeys.size()); + + for (Map.Entry> entry : grpKeys.entrySet()) { + int grpId = entry.getKey(); + GroupKey grpKey = entry.getValue().get(0); + + keys.put(grpId, new GroupKeyEncrypted(grpKey.unsignedId(), encSpi.encryptKey(grpKey.key()))); + } + + return keys; + } + + /** + * @param grpId Cache group ID. + * + * @return Local encryption keys used for specified cache group. + */ + @Nullable List getAll(int grpId) { + List grpKeys = this.grpKeys.get(grpId); + + if (F.isEmpty(grpKeys)) + return null; + + List encryptedKeys = new ArrayList<>(grpKeys.size()); + + for (GroupKey grpKey : grpKeys) + encryptedKeys.add(new GroupKeyEncrypted(grpKey.unsignedId(), encSpi.encryptKey(grpKey.key()))); + + return encryptedKeys; + } + + /** + * Sets new encryption key for writing. + * + * @param grpId Cache group ID. + * @param keyId ID of the existing encryption key to be set for writing.. + * @return Previous encryption key used for writing. 
+ */
+    GroupKey changeActiveKey(int grpId, int keyId) {
+        List<GroupKey> keys = grpKeys.get(grpId);
+
+        assert !F.isEmpty(keys) : "grpId=" + grpId;
+
+        GroupKey prevKey = keys.get(0);
+
+        assert prevKey.unsignedId() != keyId : "keyId=" + keyId;
+
+        GroupKey newKey = null;
+
+        for (ListIterator<GroupKey> itr = keys.listIterator(keys.size()); itr.hasPrevious(); ) {
+            GroupKey key = itr.previous();
+
+            if (key.unsignedId() != keyId)
+                continue;
+
+            newKey = key;
+
+            break;
+        }
+
+        assert newKey != null : "exp=" + keyId + ", act=" + keys;
+
+        keys.add(0, newKey);
+
+        // Remove the duplicate key(s) from the tail of the list.
+        keys.subList(1, keys.size()).removeIf(k -> k.unsignedId() == keyId);
+
+        return prevKey;
+    }
+
+    /**
+     * Adds new encryption key.
+     *
+     * @param grpId Cache group ID.
+     * @param newEncKey New encrypted key for writing.
+     * @return {@code True} If a key has been added, {@code False} if the specified key is already present.
+     */
+    boolean addKey(int grpId, GroupKeyEncrypted newEncKey) {
+        List<GroupKey> keys = grpKeys.computeIfAbsent(grpId, v -> new CopyOnWriteArrayList<>());
+
+        GroupKey grpKey = new GroupKey(newEncKey.id(), encSpi.decryptKey(newEncKey.key()));
+
+        if (!keys.contains(grpKey))
+            return keys.add(grpKey);
+
+        return false;
+    }
+
+    /**
+     * @param grpId Cache group ID.
+     * @param encryptedKeys Encrypted keys.
+     */
+    void setGroupKeys(int grpId, List<GroupKeyEncrypted> encryptedKeys) {
+        List<GroupKey> keys = new CopyOnWriteArrayList<>();
+
+        for (GroupKeyEncrypted grpKey : encryptedKeys)
+            keys.add(new GroupKey(grpKey.id(), encSpi.decryptKey(grpKey.key())));
+
+        grpKeys.put(grpId, keys);
+    }
+
+    /**
+     * Remove encryption keys associated with the specified cache group.
+     *
+     * @param grpId Cache group ID.
+     * @return List of encryption keys of the removed cache group.
+     */
+    List<GroupKey> remove(int grpId) {
+        return grpKeys.remove(grpId);
+    }
+
+    /**
+     * @param grpId Cache group ID.
+     * @param ids Key IDs for deletion.
+     * @return {@code True} if the keys have been deleted.
+ */
+    boolean removeKeysById(int grpId, Set<Integer> ids) {
+        List<GroupKey> keys = grpKeys.get(grpId);
+
+        if (F.isEmpty(keys))
+            return false;
+
+        return keys.subList(1, keys.size()).removeIf(key -> ids.contains(key.unsignedId()));
+    }
+
+    /**
+     * Remove unused keys.
+     *
+     * @param grpId Cache group ID.
+     * @return Removed key IDs.
+     */
+    Set<Integer> removeUnusedKeys(int grpId) {
+        List<GroupKey> keys = grpKeys.get(grpId);
+        Set<Integer> rmvKeyIds = U.newHashSet(keys.size() - 1);
+
+        rmvKeyIds.addAll(F.viewReadOnly(keys.subList(1, keys.size()), GroupKey::unsignedId));
+
+        for (TrackedWalSegment segment : trackedWalSegments) {
+            if (segment.grpId != grpId)
+                continue;
+
+            rmvKeyIds.remove(segment.keyId);
+        }
+
+        if (keys.removeIf(key -> rmvKeyIds.contains(key.unsignedId())))
+            return rmvKeyIds;
+
+        return Collections.emptySet();
+    }
+
+    /**
+     * @return A collection of tracked (encrypted with previous encryption keys) WAL segments.
+     */
+    Collection<TrackedWalSegment> trackedWalSegments() {
+        return Collections.unmodifiableCollection(trackedWalSegments);
+    }
+
+    /**
+     * @param segments WAL segments, mapped to cache group encryption key IDs.
+     */
+    void trackedWalSegments(Collection<TrackedWalSegment> segments) {
+        trackedWalSegments.addAll(segments);
+    }
+
+    /**
+     * Associate WAL segment index with the specified key ID
+     * to prevent deletion of that encryption key before deleting the segment.
+     *
+     * @param grpId Cache group ID.
+     * @param keyId Encryption key ID.
+     * @param walIdx WAL segment index.
+     */
+    void reserveWalKey(int grpId, int keyId, long walIdx) {
+        trackedWalSegments.add(new TrackedWalSegment(walIdx, grpId, keyId));
+    }
+
+    /**
+     * @param grpId Cache group ID.
+     * @param keyId Encryption key ID.
+     * @return Wal segment index or null if there is no segment associated with the specified cache group ID and key ID.
+ */ + @Nullable Long reservedSegment(int grpId, int keyId) { + for (TrackedWalSegment segment : trackedWalSegments) { + if (segment.grpId != grpId) + continue; + + if (segment.keyId == keyId) + return segment.idx; + } + + return null; + } + + /** + * Remove all of the segments that are not greater than the specified index. + * + * @param walIdx WAL segment index. + * @return Map of group IDs with key IDs that were associated with removed WAL segments. + */ + Map> releaseWalKeys(long walIdx) { + Map> rmvKeys = new HashMap<>(); + Iterator iter = trackedWalSegments.iterator(); + + while (iter.hasNext()) { + TrackedWalSegment segment = iter.next(); + + if (segment.idx > walIdx) + break; + + iter.remove(); + + rmvKeys.computeIfAbsent(segment.grpId, v -> new HashSet<>()).add(segment.keyId); + } + + return rmvKeys; + } + + /** + * A WAL segment encrypted with a specific encryption key ID. + */ + protected static final class TrackedWalSegment implements Serializable { + /** */ + private static final long serialVersionUID = 0L; + + /** WAL segment index. */ + private final long idx; + + /** Cache group ID. */ + private final int grpId; + + /** Encryption key ID. */ + private final int keyId; + + /** + * @param idx WAL segment index. + * @param grpId Cache group ID. + * @param keyId Encryption key ID. + */ + public TrackedWalSegment(long idx, int grpId, int keyId) { + this.idx = idx; + this.grpId = grpId; + this.keyId = keyId; + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupPageScanner.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupPageScanner.java new file mode 100644 index 0000000000000..dc0a29b05eb30 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupPageScanner.java @@ -0,0 +1,479 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.managers.encryption; + +import java.util.Collection; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.locks.ReentrantLock; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.NodeStoppingException; +import org.apache.ignite.internal.managers.communication.GridIoPolicy; +import org.apache.ignite.internal.pagemem.PageIdAllocator; +import org.apache.ignite.internal.pagemem.PageIdUtils; +import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; +import 
org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointListener; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; +import org.apache.ignite.internal.processors.cache.persistence.partstate.GroupPartitionId; +import org.apache.ignite.internal.util.BasicRateLimiter; +import org.apache.ignite.internal.util.GridConcurrentHashSet; +import org.apache.ignite.internal.util.future.GridFinishedFuture; +import org.apache.ignite.internal.util.future.GridFutureAdapter; +import org.apache.ignite.internal.util.lang.IgniteInClosureX; +import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.thread.IgniteThreadPoolExecutor; +import org.apache.ignite.thread.OomExceptionHandler; + +import static org.apache.ignite.internal.util.IgniteUtils.MB; + +/** + * Cache group page stores scanner. + * Scans a range of pages and marks them as dirty to re-encrypt them with the last encryption key on disk. + */ +public class CacheGroupPageScanner implements CheckpointListener { + /** Thread prefix for scanning tasks. */ + private static final String REENCRYPT_THREAD_PREFIX = "reencrypt"; + + /** Kernal context. */ + private final GridKernalContext ctx; + + /** Logger. */ + private final IgniteLogger log; + + /** Lock. */ + private final ReentrantLock lock = new ReentrantLock(); + + /** Mapping of cache group ID to group scanning task. */ + private final Map grps = new ConcurrentHashMap<>(); + + /** Collection of groups waiting for a checkpoint. */ + private final Collection cpWaitGrps = new ConcurrentLinkedQueue<>(); + + /** Page scanning speed limiter. */ + private final BasicRateLimiter limiter; + + /** Single-threaded executor to run cache group scan task. */ + private final ThreadPoolExecutor singleExecSvc; + + /** Number of pages that is scanned during reencryption under checkpoint lock. */ + private final int batchSize; + + /** Stop flag. 
*/ + private boolean stopped; + + /** + * @param ctx Grid kernal context. + */ + public CacheGroupPageScanner(GridKernalContext ctx) { + this.ctx = ctx; + + log = ctx.log(getClass()); + + DataStorageConfiguration dsCfg = ctx.config().getDataStorageConfiguration(); + + if (ctx.clientNode() || !CU.isPersistenceEnabled(dsCfg)) { + batchSize = -1; + limiter = null; + singleExecSvc = null; + + return; + } + + double rateLimit = dsCfg.getEncryptionConfiguration().getReencryptionRateLimit(); + + limiter = rateLimit > 0 ? new BasicRateLimiter(rateLimit * MB / + (dsCfg.getPageSize() == 0 ? DataStorageConfiguration.DFLT_PAGE_SIZE : dsCfg.getPageSize())) : null; + + batchSize = dsCfg.getEncryptionConfiguration().getReencryptionBatchSize(); + + singleExecSvc = new IgniteThreadPoolExecutor(REENCRYPT_THREAD_PREFIX, + ctx.igniteInstanceName(), + 1, + 1, + IgniteConfiguration.DFLT_THREAD_KEEP_ALIVE_TIME, + new LinkedBlockingQueue<>(), + GridIoPolicy.SYSTEM_POOL, + new OomExceptionHandler(ctx)); + + singleExecSvc.allowCoreThreadTimeOut(true); + } + + /** {@inheritDoc} */ + @Override public void onCheckpointBegin(Context cpCtx) { + // No-op. + } + + /** {@inheritDoc} */ + @Override public void beforeCheckpointBegin(Context cpCtx) { + Set completeCandidates = new HashSet<>(); + + cpWaitGrps.removeIf(completeCandidates::add); + + cpCtx.finishedStateFut().listen( + f -> { + // Retry if error occurs. + if (f.error() != null || f.isCancelled()) { + cpWaitGrps.addAll(completeCandidates); + + return; + } + + lock.lock(); + + try { + for (GroupScanTask grpScanTask : completeCandidates) { + grps.remove(grpScanTask.groupId()); + + grpScanTask.onDone(); + + if (log.isInfoEnabled()) + log.info("Cache group reencryption is finished [grpId=" + grpScanTask.groupId() + "]"); + } + + if (!grps.isEmpty()) + return; + + ((GridCacheDatabaseSharedManager)ctx.cache().context().database()). 
+ removeCheckpointListener(this); + } + finally { + lock.unlock(); + } + } + ); + } + + /** {@inheritDoc} */ + @Override public void onMarkCheckpointBegin(Context ctx) { + // No-op. + } + + /** + * Schedule scanning partitions. + * + * @param grpId Cache group ID. + */ + public IgniteInternalFuture schedule(int grpId) throws IgniteCheckedException { + CacheGroupContext grp = ctx.cache().cacheGroup(grpId); + + if (grp == null || !grp.affinityNode()) { + if (log.isInfoEnabled()) + log.info("Skip reencryption, cache group doesn't exist on the local node [grp=" + grpId + "]"); + + return new GridFinishedFuture<>(); + } + + lock.lock(); + + try { + if (stopped) + throw new NodeStoppingException("Operation has been cancelled (node is stopping)."); + + if (grps.isEmpty()) + ((GridCacheDatabaseSharedManager)ctx.cache().context().database()).addCheckpointListener(this); + + GroupScanTask prevState = grps.get(grpId); + + if (prevState != null && !prevState.isDone()) { + if (log.isDebugEnabled()) + log.debug("Reencryption already scheduled [grpId=" + grpId + "]"); + + return prevState; + } + + Set parts = new HashSet<>(); + + forEachPageStore(grp, new IgniteInClosureX() { + @Override public void applyx(Integer partId) { + if (ctx.encryption().getEncryptionState(grpId, partId) == 0) { + if (log.isDebugEnabled()) + log.debug("Skipping partition reencryption [grp=" + grpId + ", p=" + partId + "]"); + + return; + } + + parts.add(partId); + } + }); + + GroupScanTask grpScan = new GroupScanTask(grp, parts); + + singleExecSvc.submit(grpScan); + + if (log.isInfoEnabled()) + log.info("Scheduled reencryption [grpId=" + grpId + "]"); + + grps.put(grpId, grpScan); + + return grpScan; + } + finally { + lock.unlock(); + } + } + + /** + * @param grpId Cache group ID. + * @return Future that will be completed when all partitions have been scanned and pages have been written to disk. 
+ */
+    public IgniteInternalFuture<Void> statusFuture(int grpId) {
+        GroupScanTask grpScanTask = grps.get(grpId);
+
+        return grpScanTask == null ? new GridFinishedFuture<>() : grpScanTask;
+    }
+
+    /**
+     * Shutdown scanning and disable new tasks scheduling.
+     */
+    public void stop() throws IgniteCheckedException {
+        lock.lock();
+
+        try {
+            stopped = true;
+
+            for (GroupScanTask grpScanTask : grps.values())
+                grpScanTask.cancel();
+
+            if (singleExecSvc != null)
+                singleExecSvc.shutdownNow();
+        }
+        finally {
+            lock.unlock();
+        }
+    }
+
+    /**
+     * Stop scanning the specified partition.
+     *
+     * @param grpId Cache group ID.
+     * @param partId Partition ID.
+     * @return {@code True} if reencryption was cancelled.
+     */
+    public boolean excludePartition(int grpId, int partId) {
+        GroupScanTask grpScanTask = grps.get(grpId);
+
+        if (grpScanTask == null)
+            return false;
+
+        return grpScanTask.excludePartition(partId);
+    }
+
+    /**
+     * Collect current number of pages in the specified cache group.
+     *
+     * @param grp Cache group.
+     * @return Partitions with current page count.
+     * @throws IgniteCheckedException If failed.
+     */
+    public long[] pagesCount(CacheGroupContext grp) throws IgniteCheckedException {
+        // The last element of the array is used to store the status of the index partition.
+        long[] partStates = new long[grp.affinity().partitions() + 1];
+
+        ctx.cache().context().database().checkpointReadLock();
+
+        try {
+            forEachPageStore(grp, new IgniteInClosureX<Integer>() {
+                @Override public void applyx(Integer partId) throws IgniteCheckedException {
+                    int pagesCnt = ctx.cache().context().pageStore().pages(grp.groupId(), partId);
+
+                    partStates[Math.min(partId, partStates.length - 1)] = pagesCnt;
+                }
+            });
+        }
+        finally {
+            ctx.cache().context().database().checkpointReadUnlock();
+        }
+
+        return partStates;
+    }
+
+    /**
+     * @param grp Cache group.
+     * @param hnd Partition handler.
+ */ + private void forEachPageStore(CacheGroupContext grp, IgniteInClosureX hnd) throws IgniteCheckedException { + int parts = grp.affinity().partitions(); + + IgnitePageStoreManager pageStoreMgr = ctx.cache().context().pageStore(); + + for (int p = 0; p < parts; p++) { + if (!pageStoreMgr.exists(grp.groupId(), p)) + continue; + + hnd.applyx(p); + } + + hnd.applyx(PageIdAllocator.INDEX_PARTITION); + } + + /** + * Cache group partition scanning task. + */ + private class GroupScanTask extends GridFutureAdapter implements Runnable { + /** Cache group ID. */ + private final CacheGroupContext grp; + + /** Partition IDs. */ + private final Set parts; + + /** Page memory. */ + private final PageMemoryEx pageMem; + + /** + * @param grp Cache group. + */ + public GroupScanTask(CacheGroupContext grp, Set parts) { + this.grp = grp; + this.parts = new GridConcurrentHashSet<>(parts); + + pageMem = (PageMemoryEx)grp.dataRegion().pageMemory(); + } + + /** {@inheritDoc} */ + @Override public synchronized boolean cancel() throws IgniteCheckedException { + return onCancelled(); + } + + /** + * Stop reencryption of the specified partition. + * + * @param partId Partition ID. + * @return {@code True} if reencryption was cancelled. + */ + public synchronized boolean excludePartition(int partId) { + return parts.remove(partId); + } + + /** + * @return Cache group ID. + */ + public int groupId() { + return grp.groupId(); + } + + /** {@inheritDoc} */ + @Override public void run() { + try { + for (int partId : parts) { + long state = ctx.encryption().getEncryptionState(grp.groupId(), partId); + + if (state == 0) + continue; + + scanPartition(partId, ReencryptStateUtils.pageIndex(state), ReencryptStateUtils.pageCount(state)); + + if (isDone()) + return; + } + + boolean added = cpWaitGrps.add(this); + + assert added; + } + catch (Throwable t) { + if (X.hasCause(t, NodeStoppingException.class)) + onCancelled(); + else + onDone(t); + } + } + + /** + * @param partId Partition ID. 
+ * @param off Start page offset. + * @param cnt Count of pages to scan. + */ + private void scanPartition(int partId, int off, int cnt) throws IgniteCheckedException { + if (log.isDebugEnabled()) { + log.debug("Partition reencryption is started [grpId=" + grp.groupId() + + ", p=" + partId + ", remain=" + (cnt - off) + ", total=" + cnt + "]"); + } + + while (off < cnt) { + int pagesCnt = Math.min(batchSize, cnt - off); + + if (limiter != null) + limiter.acquire(pagesCnt); + + synchronized (this) { + if (isDone() || !parts.contains(partId)) + break; + + ctx.cache().context().database().checkpointReadLock(); + + try { + off += scanPages(partId, off, pagesCnt); + } + finally { + ctx.cache().context().database().checkpointReadUnlock(); + } + } + + ctx.encryption().setEncryptionState(grp, partId, off, cnt); + } + + if (log.isDebugEnabled()) { + log.debug("Partition reencryption is finished " + + "[grpId=" + grp.groupId() + + ", p=" + partId + + ", remain=" + (cnt - off) + + ", total=" + cnt + "]"); + } + } + + /** + * @param off Start page offset. + * @param cnt Count of pages to scan. + * @return Count of scanned pages. + * @throws IgniteCheckedException If failed. 
+ */ + private int scanPages(int partId, int off, int cnt) throws IgniteCheckedException { + int grpId = grp.groupId(); + byte flag = GroupPartitionId.getFlagByPartId(partId); + + for (int pageIdx = off; pageIdx < off + cnt; pageIdx++) { + long pageId = PageIdUtils.pageId(partId, flag, pageIdx); + long page = pageMem.acquirePage(grpId, pageId); + + try { + if (pageMem.isDirty(grpId, pageId, page)) + continue; + + pageMem.writeLock(grpId, pageId, page, true); + pageMem.writeUnlock(grpId, pageId, page, null, true); + } + finally { + pageMem.releasePage(grpId, pageId, page); + } + } + + return cnt; + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/ChangeCacheEncryptionRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/ChangeCacheEncryptionRequest.java new file mode 100644 index 0000000000000..54d7405df98bb --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/ChangeCacheEncryptionRequest.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.managers.encryption; + +import java.io.Serializable; +import java.util.Objects; +import java.util.UUID; + +/** + * Change cache group encryption key request. + */ +@SuppressWarnings("AssignmentOrReturnOfFieldWithMutableType") +public class ChangeCacheEncryptionRequest implements Serializable { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** Request ID. */ + private final UUID reqId = UUID.randomUUID(); + + /** Cache group IDs. */ + private final int[] grpIds; + + /** Encryption keys. */ + private final byte[][] keys; + + /** Key identifiers. */ + private final byte[] keyIds; + + /** Master key digest. */ + private final byte[] masterKeyDigest; + + /** + * @param grpIds Cache group IDs. + * @param keys Encryption keys. + * @param keyIds Key identifiers. + * @param masterKeyDigest Master key digest. + */ + public ChangeCacheEncryptionRequest(int[] grpIds, byte[][] keys, byte[] keyIds, byte[] masterKeyDigest) { + this.grpIds = grpIds; + this.keys = keys; + this.keyIds = keyIds; + this.masterKeyDigest = masterKeyDigest; + } + + /** + * @return Request ID. + */ + public UUID requestId() { + return reqId; + } + + /** + * @return Cache group IDs. + */ + public int[] groupIds() { + return grpIds; + } + + /** + * @return Encryption keys. + */ + public byte[][] keys() { + return keys; + } + + /** + * @return Key identifiers. + */ + public byte[] keyIds() { return keyIds; } + + /** + * @return Master key digest. 
+ */ + public byte[] masterKeyDigest() { + return masterKeyDigest; + } + + /** {@inheritDoc} */ + @Override public boolean equals(Object o) { + if (this == o) + return true; + + if (o == null || getClass() != o.getClass()) + return false; + + return Objects.equals(reqId, ((ChangeCacheEncryptionRequest)o).reqId); + } + + /** {@inheritDoc} */ + @Override public int hashCode() { + return Objects.hash(reqId); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/EncryptionMXBeanImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/EncryptionMXBeanImpl.java index 027022286bd1a..f2e31a6f5d5b3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/EncryptionMXBeanImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/EncryptionMXBeanImpl.java @@ -17,6 +17,7 @@ package org.apache.ignite.internal.managers.encryption; +import java.util.Collections; import org.apache.ignite.internal.GridKernalContextImpl; import org.apache.ignite.mxbean.EncryptionMXBean; @@ -41,4 +42,9 @@ public EncryptionMXBeanImpl(GridKernalContextImpl ctx) { @Override public void changeMasterKey(String masterKeyName) { encryptionMgr.changeMasterKey(masterKeyName).get(); } + + /** {@inheritDoc} */ + @Override public void changeCacheGroupKey(String cacheOrGrpName) { + encryptionMgr.changeCacheGroupKey(Collections.singleton(cacheOrGrpName)).get(); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java index 78590d330cbf2..368331368659e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java @@ -40,6 +40,7 @@ import 
org.apache.ignite.IgniteException; import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.configuration.WALMode; import org.apache.ignite.failure.FailureContext; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.IgniteFeatures; @@ -47,7 +48,9 @@ import org.apache.ignite.internal.managers.GridManagerAdapter; import org.apache.ignite.internal.managers.communication.GridMessageListener; import org.apache.ignite.internal.managers.eventstorage.DiscoveryEventListener; -import org.apache.ignite.internal.pagemem.wal.record.MasterKeyChangeRecord; +import org.apache.ignite.internal.pagemem.wal.record.MasterKeyChangeRecordV2; +import org.apache.ignite.internal.pagemem.wal.record.ReencryptionStartRecord; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; import org.apache.ignite.internal.processors.cache.CacheGroupDescriptor; import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetastorageLifecycleListener; import org.apache.ignite.internal.processors.cache.persistence.metastorage.ReadOnlyMetastorage; @@ -61,6 +64,7 @@ import org.apache.ignite.internal.util.future.IgniteFutureImpl; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.T2; +import org.apache.ignite.internal.util.typedef.internal.A; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteFuture; @@ -72,8 +76,6 @@ import org.apache.ignite.spi.discovery.DiscoveryDataBag; import org.apache.ignite.spi.discovery.DiscoveryDataBag.GridDiscoveryData; import org.apache.ignite.spi.discovery.DiscoveryDataBag.JoiningNodeDiscoveryData; -import org.apache.ignite.spi.discovery.DiscoverySpi; -import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import org.apache.ignite.spi.encryption.EncryptionSpi; import org.jetbrains.annotations.Nullable; 
@@ -83,6 +85,7 @@ import static org.apache.ignite.failure.FailureType.CRITICAL_ERROR; import static org.apache.ignite.internal.GridComponent.DiscoveryDataExchangeType.ENCRYPTION_MGR; import static org.apache.ignite.internal.GridTopic.TOPIC_GEN_ENC_KEY; +import static org.apache.ignite.internal.IgniteFeatures.CACHE_GROUP_KEY_CHANGE; import static org.apache.ignite.internal.IgniteFeatures.MASTER_KEY_CHANGE; import static org.apache.ignite.internal.managers.communication.GridIoPolicy.SYSTEM_POOL; import static org.apache.ignite.internal.util.distributed.DistributedProcess.DistributedProcessType.MASTER_KEY_CHANGE_FINISH; @@ -115,6 +118,8 @@ *

    *
  • 1. If new key for group doesn't exists locally it added to local store.
  • *
  • 2. If new key for group exists locally, then received key skipped.
  • + *
  • 3. If a cache group is encrypted with a different (previous) encryption key, then background + * re-encryption of this group with a new key is started.
  • *
* * @@ -136,6 +141,22 @@ public class GridEncryptionManager extends GridManagerAdapter imp */ private static final IgniteProductVersion CACHE_ENCRYPTION_SINCE = IgniteProductVersion.fromString("2.7.0"); + /** Prefix for a master key name. */ + public static final String MASTER_KEY_NAME_PREFIX = "encryption-master-key-name"; + + /** Prefix for a encryption group key in meta store, which contains encryption keys with identifiers. */ + public static final String ENCRYPTION_KEYS_PREFIX = "grp-encryption-keys-"; + + /** Initial identifier for cache group encryption key. */ + public static final int INITIAL_KEY_ID = 0; + + /** The name on the meta store key, that contains wal segments encrypted using previous encryption keys. */ + private static final String REENCRYPTED_WAL_SEGMENTS = "reencrypted-wal-segments"; + + /** Prefix for a encryption group key in meta store. */ + @Deprecated + private static final String ENCRYPTION_KEY_PREFIX = "grp-encryption-key-"; + /** Synchronization mutex. */ private final Object metaStorageMux = new Object(); @@ -154,14 +175,8 @@ public class GridEncryptionManager extends GridManagerAdapter imp /** Flag to enable/disable write to metastore on cluster state change. */ private volatile boolean writeToMetaStoreEnabled; - /** Prefix for a encryption group key in meta store. */ - public static final String ENCRYPTION_KEY_PREFIX = "grp-encryption-key-"; - - /** Prefix for a master key name. */ - public static final String MASTER_KEY_NAME_PREFIX = "encryption-master-key-name"; - - /** Group encryption keys. */ - private final ConcurrentHashMap grpEncKeys = new ConcurrentHashMap<>(); + /** Cache group encryption keys. */ + private CacheGroupEncryptionKeys grpKeys; /** Pending generate encryption key futures. */ private ConcurrentMap genEncKeyFuts = new ConcurrentHashMap<>(); @@ -182,7 +197,7 @@ public class GridEncryptionManager extends GridManagerAdapter imp private volatile boolean recoveryMasterKeyName; /** Master key change future. 
Not {@code null} on request initiator. */ - private MasterKeyChangeFuture masterKeyChangeFut; + private KeyChangeFuture masterKeyChangeFut; /** Pending master key request or {@code null} if there is no ongoing master key change process. */ private volatile MasterKeyChangeRequest masterKeyChangeRequest; @@ -194,10 +209,25 @@ public class GridEncryptionManager extends GridManagerAdapter imp * Master key change prepare process. Checks that all server nodes have the same new master key and then starts * finish process. */ - private DistributedProcess prepareMKChangeProc; + private DistributedProcess prepareMKChangeProc; /** Process to perform the master key change. Changes master key and reencrypt group keys. */ - private DistributedProcess performMKChangeProc; + private DistributedProcess performMKChangeProc; + + /** + * A two-phase distributed process that rotates the encryption keys of specified cache groups and initiates + * re-encryption of those cache groups. + */ + private GroupKeyChangeProcess grpKeyChangeProc; + + /** Cache groups for which encryption key was changed, and they must be re-encrypted. */ + private final Map reencryptGroups = new ConcurrentHashMap<>(); + + /** Cache groups for which encryption key was changed on node join. */ + private final Map reencryptGroupsForced = new ConcurrentHashMap<>(); + + /** Cache group page stores scanner. */ + private CacheGroupPageScanner pageScanner; /** * @param ctx Kernel context. 
@@ -283,11 +313,17 @@ public GridEncryptionManager(GridKernalContext ctx) { performMKChangeProc = new DistributedProcess<>(ctx, MASTER_KEY_CHANGE_FINISH, this::performMasterKeyChange, this::finishPerformMasterKeyChange); + + grpKeys = new CacheGroupEncryptionKeys(getSpi()); + pageScanner = new CacheGroupPageScanner(ctx); + grpKeyChangeProc = new GroupKeyChangeProcess(ctx, grpKeys); } /** {@inheritDoc} */ @Override public void stop(boolean cancel) throws IgniteCheckedException { stopSpi(); + + pageScanner.stop(); } /** {@inheritDoc} */ @@ -340,7 +376,7 @@ public GridEncryptionManager(GridKernalContext ctx) { * Callback for local join. */ public void onLocalJoin() { - if (!isCoordinator()) + if (!U.isLocalNodeCoordinator(ctx.discovery())) return; //We can't store keys before node join to cluster(on statically configured cache registration). @@ -350,7 +386,7 @@ public void onLocalJoin() { //And sends that keys to every joining node. synchronized (metaStorageMux) { //Keys read from meta storage. - HashMap knownEncKeys = knownEncryptionKeys(); + HashMap knownEncKeys = grpKeys.getAll(); //Generated(not saved!) keys for a new caches. //Configured statically in config, but doesn't stored on the disk. @@ -362,7 +398,7 @@ public void onLocalJoin() { //We can store keys to the disk, because we are on a coordinator. for (Map.Entry entry : newEncKeys.entrySet()) { - groupKey(entry.getKey(), entry.getValue()); + addGroupKey(entry.getKey(), new GroupKeyEncrypted(INITIAL_KEY_ID, entry.getValue())); U.quietAndInfo(log, "Added encryption key on local join [grpId=" + entry.getKey() + "]"); } @@ -392,6 +428,12 @@ public void onLocalJoin() { if (res != null) return res; + if (grpKeyChangeProc.inProgress()) { + return new IgniteNodeValidationResult(ctx.localNodeId(), + "Cache group key change is in progress! Node join is rejected. [node=" + node.id() + "]", + "Cache group key change is in progress! 
Node join is rejected."); + } + NodeEncryptionKeys nodeEncKeys = (NodeEncryptionKeys)discoData.joiningNodeData(); if (!discoData.hasJoiningNodeData() || nodeEncKeys == null) { @@ -406,23 +448,49 @@ public void onLocalJoin() { "Master key digest differs! Node join is rejected."); } + if (!IgniteFeatures.nodeSupports(node, CACHE_GROUP_KEY_CHANGE)) { + return new IgniteNodeValidationResult(ctx.localNodeId(), + "Joining node doesn't support multiple encryption keys for single group [node=" + node.id() + "]", + "Joining node doesn't support multiple encryption keys for single group."); + } + if (F.isEmpty(nodeEncKeys.knownKeys)) { U.quietAndInfo(log, "Joining node doesn't have stored group keys [node=" + node.id() + "]"); return null; } - for (Map.Entry entry : nodeEncKeys.knownKeys.entrySet()) { - Serializable locEncKey = grpEncKeys.get(entry.getKey()); + assert !F.isEmpty(nodeEncKeys.knownKeysWithIds); + + for (Map.Entry> entry : nodeEncKeys.knownKeysWithIds.entrySet()) { + int grpId = entry.getKey(); + + GroupKey locEncKey = groupKey(grpId); if (locEncKey == null) continue; - Serializable rmtKey = getSpi().decryptKey(entry.getValue()); + List rmtKeys = entry.getValue(); - if (F.eq(locEncKey, rmtKey)) + if (rmtKeys == null) continue; + GroupKeyEncrypted rmtKeyEncrypted = null; + + for (GroupKeyEncrypted rmtKey0 : rmtKeys) { + if (rmtKey0.id() != locEncKey.unsignedId()) + continue; + + rmtKeyEncrypted = rmtKey0; + + break; + } + + if (rmtKeyEncrypted == null || F.eq(locEncKey.key(), getSpi().decryptKey(rmtKeyEncrypted.key()))) + continue; + + // The remote node should not rotate the cache key to the current one + // until the old key (with an identifier that is currently active in the cluster) is removed. return new IgniteNodeValidationResult(ctx.localNodeId(), "Cache key differs! Node join is rejected. [node=" + node.id() + ", grp=" + entry.getKey() + "]", "Cache key differs! 
Node join is rejected."); @@ -436,10 +504,14 @@ public void onLocalJoin() { if (dataBag.isJoiningNodeClient()) return; - HashMap knownEncKeys = knownEncryptionKeys(); + Set grpIds = grpKeys.groupIds(); - HashMap newKeys = - newEncryptionKeys(knownEncKeys == null ? Collections.EMPTY_SET : knownEncKeys.keySet()); + HashMap> knownEncKeys = U.newHashMap(grpIds.size()); + + for (int grpId : grpIds) + knownEncKeys.put(grpId, grpKeys.getAll(grpId)); + + HashMap newKeys = newEncryptionKeys(grpIds); if (log.isInfoEnabled()) { String knownGrps = F.isEmpty(knownEncKeys) ? null : F.concat(knownEncKeys.keySet(), ","); @@ -467,9 +539,9 @@ public void onLocalJoin() { for (Map.Entry entry : nodeEncryptionKeys.newKeys.entrySet()) { if (groupKey(entry.getKey()) == null) { U.quietAndInfo(log, "Store group key received from joining node [node=" + - data.joiningNodeId() + ", grp=" + entry.getKey() + "]"); + data.joiningNodeId() + ", grp=" + entry.getKey() + "]"); - groupKey(entry.getKey(), entry.getValue()); + addGroupKey(entry.getKey(), new GroupKeyEncrypted(INITIAL_KEY_ID, entry.getValue())); } else { U.quietAndInfo(log, "Skip group key received from joining node. Already exists. [node=" + @@ -483,16 +555,18 @@ public void onLocalJoin() { if (dataBag.isJoiningNodeClient() || dataBag.commonDataCollectedFor(ENCRYPTION_MGR.ordinal())) return; - HashMap knownEncKeys = knownEncryptionKeys(); + HashMap knownEncKeys = grpKeys.getAll(); HashMap newKeys = newEncryptionKeys(knownEncKeys == null ? 
Collections.EMPTY_SET : knownEncKeys.keySet()); - if (knownEncKeys == null) - knownEncKeys = newKeys; - else if (newKeys != null) { + if (!F.isEmpty(newKeys)) { + if (knownEncKeys == null) + knownEncKeys = new HashMap<>(); + for (Map.Entry entry : newKeys.entrySet()) { - byte[] old = knownEncKeys.putIfAbsent(entry.getKey(), entry.getValue()); + GroupKeyEncrypted old = + knownEncKeys.putIfAbsent(entry.getKey(), new GroupKeyEncrypted(INITIAL_KEY_ID, entry.getValue())); assert old == null; } @@ -506,55 +580,93 @@ else if (newKeys != null) { if (ctx.clientNode()) return; - Map encKeysFromCluster = (Map)data.commonData(); + Map encKeysFromCluster = (Map)data.commonData(); if (F.isEmpty(encKeysFromCluster)) return; - for (Map.Entry entry : encKeysFromCluster.entrySet()) { - if (groupKey(entry.getKey()) == null) { - U.quietAndInfo(log, "Store group key received from coordinator [grp=" + entry.getKey() + "]"); + for (Map.Entry entry : encKeysFromCluster.entrySet()) { + int grpId = entry.getKey(); - groupKey(entry.getKey(), entry.getValue()); - } - else { + GroupKeyEncrypted rmtKey; + + if (entry.getValue() instanceof GroupKeyEncrypted) + rmtKey = (GroupKeyEncrypted)entry.getValue(); + else + rmtKey = new GroupKeyEncrypted(INITIAL_KEY_ID, (byte[])entry.getValue()); + + GroupKey locGrpKey = groupKey(grpId); + + if (locGrpKey != null && locGrpKey.unsignedId() == rmtKey.id()) { U.quietAndInfo(log, "Skip group key received from coordinator. Already exists. 
[grp=" + - entry.getKey() + "]"); + grpId + ", keyId=" + rmtKey.id() + "]"); + + continue; } + + U.quietAndInfo(log, "Store group key received from coordinator [grp=" + grpId + + ", keyId=" + rmtKey.id() + "]"); + + grpKeys.addKey(grpId, rmtKey); + + if (locGrpKey == null) + continue; + + GroupKey prevKey = grpKeys.changeActiveKey(grpId, rmtKey.id()); + + if (ctx.config().getDataStorageConfiguration().getWalMode() != WALMode.NONE) + grpKeys.reserveWalKey(grpId, prevKey.unsignedId(), ctx.cache().context().wal().currentSegment()); + + reencryptGroupsForced.put(grpId, rmtKey.id()); } } /** * Returns group encryption key. * - * @param grpId Group id. - * @return Group encryption key. + * @param grpId Cache group ID. + * @return Group encryption key with identifier, that was set for writing. */ - @Nullable public Serializable groupKey(int grpId) { - if (grpEncKeys.isEmpty()) - return null; - - return grpEncKeys.get(grpId); + @Nullable public GroupKey groupKey(int grpId) { + return grpKeys.getActiveKey(grpId); } /** - * Store group encryption key. + * Returns group encryption key with specified identifier. * - * @param grpId Group id. - * @param encGrpKey Encrypted group key. + * @param grpId Cache group ID. + * @param keyId Encryption key ID. + * @return Group encryption key. */ - public void groupKey(int grpId, byte[] encGrpKey) { - assert !grpEncKeys.containsKey(grpId); + @Nullable public GroupKey groupKey(int grpId, int keyId) { + return grpKeys.getKey(grpId, keyId); + } - Serializable encKey = withMasterKeyChangeReadLock(() -> getSpi().decryptKey(encGrpKey)); + /** + * Gets the existing encryption key IDs for the specified cache group. + * + * @param grpId Cache group ID. + * @return List of the key identifiers. + */ + @Nullable public List groupKeyIds(int grpId) { + return grpKeys.keyIds(grpId); + } + /** + * Adds new cache group encryption key. + * + * @param grpId Cache group ID. + * @param key Encryption key. 
+ */ + void addGroupKey(int grpId, GroupKeyEncrypted key) { synchronized (metaStorageMux) { - if (log.isDebugEnabled()) - log.debug("Key added. [grp=" + grpId + "]"); - - grpEncKeys.put(grpId, encKey); + try { + grpKeys.addKey(grpId, key); - writeToMetaStore(grpId, encGrpKey); + writeGroupKeysToMetaStore(grpId, grpKeys.getAll(grpId)); + } catch (IgniteCheckedException e) { + throw new IgniteException("Failed to write cache group encryption key [grpId=" + grpId + ']', e); + } } } @@ -588,7 +700,7 @@ public void groupKey(int grpId, byte[] encGrpKey) { digest = masterKeyDigest(masterKeyName); } catch (Exception e) { return new IgniteFinishedFutureImpl<>(new IgniteException("Master key change was rejected. " + - "Unable to get the master key digest.")); + "Unable to get the master key digest.", e)); } MasterKeyChangeRequest request = new MasterKeyChangeRequest(UUID.randomUUID(), encryptKeyName(masterKeyName), @@ -611,7 +723,7 @@ public void groupKey(int grpId, byte[] encGrpKey) { "The previous change was not completed.")); } - masterKeyChangeFut = new MasterKeyChangeFuture(request.requestId()); + masterKeyChangeFut = new KeyChangeFuture(request.requestId()); prepareMKChangeProc.start(request.requestId(), request); @@ -627,22 +739,107 @@ public void groupKey(int grpId, byte[] encGrpKey) { return withMasterKeyChangeReadLock(() -> getSpi().getMasterKeyName()); } + /** {@inheritDoc} */ + @Override public IgniteFuture changeCacheGroupKey(Collection cacheOrGrpNames) { + A.notEmpty(cacheOrGrpNames, "cacheOrGrpNames"); + + synchronized (opsMux) { + if (stopped) { + return new IgniteFinishedFutureImpl<>(new IgniteException("Cache group key change was rejected. " + + "Node is stopping.")); + } + + return grpKeyChangeProc.start(cacheOrGrpNames); + } + } + + /** + * @param grpIds Cache group IDs. + * @param keyIds Encryption key IDs. + * @param keys Encryption keys. + * @throws IgniteCheckedException If failed. 
+ */ + protected void changeCacheGroupKeyLocal(int[] grpIds, byte[] keyIds, byte[][] keys) throws IgniteCheckedException { + Map encryptionStatus = U.newHashMap(grpIds.length); + + for (int i = 0; i < grpIds.length; i++) + encryptionStatus.put(grpIds[i], keyIds[i]); + + WALPointer ptr = ctx.cache().context().wal().log(new ReencryptionStartRecord(encryptionStatus)); + + if (ptr != null) + ctx.cache().context().wal().flush(ptr, false); + + for (int i = 0; i < grpIds.length; i++) { + int grpId = grpIds[i]; + int newKeyId = keyIds[i] & 0xff; + + withMasterKeyChangeReadLock(() -> { + synchronized (metaStorageMux) { + // Set new key as key for writing. Note that we cannot pass the encrypted key here because the master + // key may have changed in which case we will not be able to decrypt the cache encryption key. + GroupKey prevGrpKey = grpKeys.changeActiveKey(grpId, newKeyId); + + writeGroupKeysToMetaStore(grpId, grpKeys.getAll(grpId)); + + if (ptr == null) + return null; + + grpKeys.reserveWalKey(grpId, prevGrpKey.unsignedId(), ctx.cache().context().wal().currentSegment()); + + writeTrackedWalIdxsToMetaStore(); + } + + return null; + }); + + CacheGroupContext grp = ctx.cache().cacheGroup(grpId); + + if (grp != null && grp.affinityNode()) + reencryptGroups.put(grpId, pageScanner.pagesCount(grp)); + + if (log.isInfoEnabled()) + log.info("New encryption key for group was added [grpId=" + grpId + ", keyId=" + newKeyId + "]"); + } + + startReencryption(encryptionStatus.keySet()); + } + + /** + * @param grpId Cache group ID. + * @return Future that will be completed when reencryption of the specified group is finished. + */ + public IgniteInternalFuture reencryptionFuture(int grpId) { + return pageScanner.statusFuture(grpId); + } + + /** + * @param grpId Cache group ID. + * @return {@code True} If the specified cache group is currently being re-encrypted. 
+ */ + public boolean reencryptionInProgress(int grpId) { + // The method guarantees not only the completion of the re-encryption, but also that the clearing of + // unused keys is complete. + return reencryptGroups.containsKey(grpId); + } + /** - * Removes encryption key. + * Removes encryption key(s). * - * @param grpId Group id. + * @param grpId Cache group ID. */ private void removeGroupKey(int grpId) { synchronized (metaStorageMux) { ctx.cache().context().database().checkpointReadLock(); try { - grpEncKeys.remove(grpId); + if (grpKeys.remove(grpId) == null) + return; - metaStorage.remove(ENCRYPTION_KEY_PREFIX + grpId); + metaStorage.remove(ENCRYPTION_KEYS_PREFIX + grpId); if (log.isDebugEnabled()) - log.debug("Key removed. [grp=" + grpId + "]"); + log.debug("Key(s) removed. [grp=" + grpId + "]"); } catch (IgniteCheckedException e) { U.error(log, "Failed to clear meta storage", e); @@ -655,19 +852,41 @@ private void removeGroupKey(int grpId) { /** * Callback for cache group start event. - * @param grpId Group id. + * + * @param grpId Cache group ID. * @param encKey Encryption key */ public void beforeCacheGroupStart(int grpId, @Nullable byte[] encKey) { if (encKey == null || ctx.clientNode()) return; - groupKey(grpId, encKey); + withMasterKeyChangeReadLock(() -> { + addGroupKey(grpId, new GroupKeyEncrypted(INITIAL_KEY_ID, encKey)); + + return null; + }); + } + + /** + * Callback is called before invalidate page memory. + * + * @param grpId Cache group ID. + */ + public void onCacheGroupStop(int grpId) { + try { + reencryptionFuture(grpId).cancel(); + } + catch (IgniteCheckedException e) { + log.warning("Unable to cancel reencryption [grpId=" + grpId + "]", e); + } + + reencryptGroups.remove(grpId); } /** * Callback for cache group destroy event. - * @param grpId Group id. + * + * @param grpId Cache group ID. 
*/ public void onCacheGroupDestroyed(int grpId) { if (groupKey(grpId) == null) @@ -676,6 +895,59 @@ public void onCacheGroupDestroyed(int grpId) { removeGroupKey(grpId); } + /** + * @param grp Cache group. + * @param partId Partition ID. + */ + public void onDestroyPartitionStore(CacheGroupContext grp, int partId) { + if (pageScanner.excludePartition(grp.groupId(), partId)) + setEncryptionState(grp, partId, 0, 0); + } + + /** + * Callback when WAL segment is removed. + * + * @param segmentIdx WAL segment index. + */ + public void onWalSegmentRemoved(long segmentIdx) { + withMasterKeyChangeReadLock(() -> { + synchronized (metaStorageMux) { + Map> rmvKeys = grpKeys.releaseWalKeys(segmentIdx); + + if (F.isEmpty(rmvKeys)) + return null; + + try { + writeTrackedWalIdxsToMetaStore(); + + for (Map.Entry> entry : rmvKeys.entrySet()) { + Integer grpId = entry.getKey(); + + if (reencryptGroups.containsKey(grpId)) + continue; + + Set keyIds = entry.getValue(); + + if (!grpKeys.removeKeysById(grpId, keyIds)) + continue; + + writeGroupKeysToMetaStore(grpId, grpKeys.getAll(grpId)); + + if (log.isInfoEnabled()) { + log.info("Previous encryption keys have been removed [grpId=" + grpId + + ", keyIds=" + keyIds + "]"); + } + } + } + catch (IgniteCheckedException e) { + log.error("Unable to remove encryption keys from metastore.", e); + } + } + + return null; + }); + } + /** {@inheritDoc} */ @Override public void onReadyForRead(ReadOnlyMetastorage metastorage) { try { @@ -690,17 +962,35 @@ public void onCacheGroupDestroyed(int grpId) { } } - metastorage.iterate(ENCRYPTION_KEY_PREFIX, (key, val) -> { - Integer grpId = Integer.valueOf(key.replace(ENCRYPTION_KEY_PREFIX, "")); + metastorage.iterate(ENCRYPTION_KEYS_PREFIX, (key, val) -> { + int grpId = Integer.parseInt(key.replace(ENCRYPTION_KEYS_PREFIX, "")); - byte[] encGrpKey = (byte[])val; + if (grpKeys.groupIds().contains(grpId)) + return; - grpEncKeys.computeIfAbsent(grpId, k -> getSpi().decryptKey(encGrpKey)); + 
grpKeys.setGroupKeys(grpId, (List)val); }, true); - if (!grpEncKeys.isEmpty()) { - U.quietAndInfo(log, "Encryption keys loaded from metastore. [grps=" + - F.concat(grpEncKeys.keySet(), ",") + ", masterKeyName=" + getSpi().getMasterKeyName() + ']'); + // Try to read keys in previous format. + if (grpKeys.groupIds().isEmpty()) { + metastorage.iterate(ENCRYPTION_KEY_PREFIX, (key, val) -> { + int grpId = Integer.parseInt(key.replace(ENCRYPTION_KEY_PREFIX, "")); + + GroupKeyEncrypted grpKey = new GroupKeyEncrypted(INITIAL_KEY_ID, (byte[])val); + + grpKeys.setGroupKeys(grpId, Collections.singletonList(grpKey)); + }, true); + } + + Serializable savedSegments = metastorage.read(REENCRYPTED_WAL_SEGMENTS); + + if (savedSegments != null) + grpKeys.trackedWalSegments((Collection)savedSegments); + + if (grpKeys.groupIds().isEmpty()) { + U.quietAndInfo(log, "Encryption keys loaded from metastore. " + + "[grps=" + F.concat(grpKeys.groupIds(), ",") + + ", masterKeyName=" + getSpi().getMasterKeyName() + ']'); } } catch (IgniteCheckedException e) { @@ -730,20 +1020,47 @@ public void onCacheGroupDestroyed(int grpId) { /** {@inheritDoc} */ @Override public void onReadyForReadWrite(ReadWriteMetastorage metaStorage) throws IgniteCheckedException { - synchronized (metaStorageMux) { - this.metaStorage = metaStorage; + withMasterKeyChangeReadLock(() -> { + synchronized (metaStorageMux) { + this.metaStorage = metaStorage; - writeToMetaStoreEnabled = true; + writeToMetaStoreEnabled = true; - if (recoveryMasterKeyName) - writeKeysToWal(); + if (recoveryMasterKeyName) + writeKeysToWal(); - writeKeysToMetaStore(restoredFromWAL || recoveryMasterKeyName); + writeKeysToMetaStore(restoredFromWAL || recoveryMasterKeyName); - restoredFromWAL = false; + restoredFromWAL = false; + + recoveryMasterKeyName = false; + } - recoveryMasterKeyName = false; + return null; + }); + + for (Map.Entry entry : reencryptGroupsForced.entrySet()) { + int grpId = entry.getKey(); + + if 
(reencryptGroups.containsKey(grpId)) + continue; + + if (entry.getValue() != groupKey(grpId).unsignedId()) + continue; + + CacheGroupContext grp = ctx.cache().cacheGroup(grpId); + + if (grp == null || !grp.affinityNode()) + continue; + + long[] offsets = pageScanner.pagesCount(grp); + + reencryptGroups.put(grpId, offsets); } + + reencryptGroupsForced.clear(); + + startReencryption(reencryptGroups.keySet()); } /** {@inheritDoc} */ @@ -767,6 +1084,37 @@ public void onCacheGroupDestroyed(int grpId) { } } + /** + * Set reencryption status for partition. + * + * @param grp Cache group. + * @param partId Partition ID. + * @param idx Index of the last reencrypted page. + * @param total Total pages to be reencrypted. + */ + public void setEncryptionState(CacheGroupContext grp, int partId, int idx, int total) { + // The last element of the array is used to store the status of the index partition. + long[] states = reencryptGroups.computeIfAbsent(grp.groupId(), v -> new long[grp.affinity().partitions() + 1]); + + states[Math.min(partId, states.length - 1)] = ReencryptStateUtils.state(idx, total); + } + + /** + * Get reencryption status for partition. + * + * @param grpId Cache group ID. + * @param partId Parttiion ID. + * @return Index and count of pages to be reencrypted. + */ + public long getEncryptionState(int grpId, int partId) { + long[] states = reencryptGroups.get(grpId); + + if (states == null) + return 0; + + return states[Math.min(partId, states.length - 1)]; + } + /** * @param keyCnt Count of keys to generate. * @return Future that will contain results of generation. @@ -811,6 +1159,51 @@ private void sendGenerateEncryptionKeyRequest(GenerateEncryptionKeyFuture fut) t ctx.io().sendToGridTopic(rndNode.id(), TOPIC_GEN_ENC_KEY, req, SYSTEM_POOL); } + /** + * @param grpIds Cache group IDs. + * @throws IgniteCheckedException If failed. 
+ */ + private void startReencryption(Collection grpIds) throws IgniteCheckedException { + for (int grpId : grpIds) { + IgniteInternalFuture fut = pageScanner.schedule(grpId); + + fut.listen(f -> { + if (f.isCancelled() || f.error() != null) { + log.warning("Reencryption " + + (f.isCancelled() ? "cancelled" : "failed") + " [grp=" + grpId + "]", f.error()); + + return; + } + + withMasterKeyChangeReadLock(() -> { + synchronized (metaStorageMux) { + cleanupKeys(grpId); + + reencryptGroups.remove(grpId); + } + + return null; + }); + }); + } + } + + /** + * @param grpId Cache group ID. + * @throws IgniteCheckedException If failed. + */ + private void cleanupKeys(int grpId) throws IgniteCheckedException { + Set rmvKeyIds = grpKeys.removeUnusedKeys(grpId); + + if (rmvKeyIds.isEmpty()) + return; + + writeGroupKeysToMetaStore(grpId, grpKeys.getAll(grpId)); + + if (log.isInfoEnabled()) + log.info("Previous encryption keys were removed [grpId=" + grpId + ", keyIds=" + rmvKeyIds + "]"); + } + /** * Writes all unsaved grpEncKeys to metaStorage. * @@ -821,11 +1214,55 @@ private void writeKeysToMetaStore(boolean writeAll) throws IgniteCheckedExceptio if (writeAll) metaStorage.write(MASTER_KEY_NAME_PREFIX, getSpi().getMasterKeyName()); - for (Map.Entry entry : grpEncKeys.entrySet()) { - if (!writeAll && metaStorage.read(ENCRYPTION_KEY_PREFIX + entry.getKey()) != null) + if (!reencryptGroupsForced.isEmpty()) + writeTrackedWalIdxsToMetaStore(); + + for (Integer grpId : grpKeys.groupIds()) { + if (!writeAll && !reencryptGroupsForced.containsKey(grpId) && + metaStorage.read(ENCRYPTION_KEYS_PREFIX + grpId) != null) continue; - writeToMetaStore(entry.getKey(), getSpi().encryptKey(entry.getValue())); + writeGroupKeysToMetaStore(grpId, grpKeys.getAll(grpId)); + } + } + + /** + * Writes cache group encryption keys to metastore. + * + * @param grpId Cache group ID. 
+ */ + private void writeGroupKeysToMetaStore(int grpId, List keys) throws IgniteCheckedException { + assert Thread.holdsLock(metaStorageMux); + + if (metaStorage == null || !writeToMetaStoreEnabled || stopped) + return; + + ctx.cache().context().database().checkpointReadLock(); + + try { + metaStorage.write(ENCRYPTION_KEYS_PREFIX + grpId, (Serializable)keys); + } + finally { + ctx.cache().context().database().checkpointReadUnlock(); + } + } + + /** + * Writes tracked (encrypted with previous encryption keys) WAL segments to metastore. + */ + private void writeTrackedWalIdxsToMetaStore() throws IgniteCheckedException { + assert Thread.holdsLock(metaStorageMux); + + if (metaStorage == null || !writeToMetaStoreEnabled || stopped) + return; + + ctx.cache().context().database().checkpointReadLock(); + + try { + metaStorage.write(REENCRYPTED_WAL_SEGMENTS, (Serializable)grpKeys.trackedWalSegments()); + } + finally { + ctx.cache().context().database().checkpointReadUnlock(); } } @@ -850,29 +1287,6 @@ public void checkEncryptedCacheSupported() throws IgniteCheckedException { return ENCRYPTION_MGR; } - /** - * Writes encryption key to metastore. - * - * @param grpId Group id. - * @param encGrpKey Group encryption key. - */ - private void writeToMetaStore(int grpId, byte[] encGrpKey) { - if (metaStorage == null || !writeToMetaStoreEnabled) - return; - - ctx.cache().context().database().checkpointReadLock(); - - try { - metaStorage.write(ENCRYPTION_KEY_PREFIX + grpId, encGrpKey); - } - catch (IgniteCheckedException e) { - throw new IgniteException("Failed to write cache group encryption key [grpId=" + grpId + ']', e); - } - finally { - ctx.cache().context().database().checkpointReadUnlock(); - } - } - /** * @param knownKeys Saved keys set. * @return New keys for local cache groups. @@ -897,28 +1311,13 @@ private void writeToMetaStore(int grpId, byte[] encGrpKey) { return newKeys; } - /** - * @return Local encryption keys. 
- */ - @Nullable private HashMap knownEncryptionKeys() { - if (F.isEmpty(grpEncKeys)) - return null; - - HashMap knownKeys = new HashMap<>(); - - for (Map.Entry entry : grpEncKeys.entrySet()) - knownKeys.put(entry.getKey(), getSpi().encryptKey(entry.getValue())); - - return knownKeys; - } - /** * Generates required count of encryption keys. * * @param keyCnt Keys count. * @return Tuple of collection with newly generated encryption keys and master key digest. */ - private T2, byte[]> createKeys(int keyCnt) { + T2, byte[]> createKeys(int keyCnt) { return withMasterKeyChangeReadLock(() -> { if (keyCnt == 0) return new T2<>(Collections.emptyList(), getSpi().masterKeyDigest()); @@ -974,12 +1373,14 @@ private void doChangeMasterKey(String name) { /** Writes the record with the master key name and all keys to WAL. */ private void writeKeysToWal() throws IgniteCheckedException { - Map reencryptedKeys = new HashMap<>(); + List> reencryptedKeys = new ArrayList<>(); - for (Map.Entry entry : grpEncKeys.entrySet()) - reencryptedKeys.put(entry.getKey(), getSpi().encryptKey(entry.getValue())); + for (int grpId : grpKeys.groupIds()) { + for (GroupKeyEncrypted grpKey : grpKeys.getAll(grpId)) + reencryptedKeys.add(new T2<>(grpId, grpKey)); + } - MasterKeyChangeRecord rec = new MasterKeyChangeRecord(getSpi().getMasterKeyName(), reencryptedKeys); + MasterKeyChangeRecordV2 rec = new MasterKeyChangeRecordV2(getSpi().getMasterKeyName(), reencryptedKeys); WALPointer ptr = ctx.cache().context().wal().log(rec); @@ -991,7 +1392,7 @@ private void writeKeysToWal() throws IgniteCheckedException { * * @param rec Record. 
*/ - public void applyKeys(MasterKeyChangeRecord rec) { + public void applyKeys(MasterKeyChangeRecordV2 rec) { assert !writeToMetaStoreEnabled && !ctx.state().clusterState().active(); log.info("Master key name loaded from WAL [masterKeyName=" + rec.getMasterKeyName() + ']'); @@ -999,8 +1400,13 @@ public void applyKeys(MasterKeyChangeRecord rec) { try { getSpi().setMasterKeyName(rec.getMasterKeyName()); - for (Map.Entry entry : rec.getGrpKeys().entrySet()) - grpEncKeys.put(entry.getKey(), getSpi().decryptKey(entry.getValue())); + Map> keysMap = new HashMap<>(); + + for (T2 entry : rec.getGrpKeys()) + keysMap.computeIfAbsent(entry.getKey(), v -> new ArrayList<>()).add(entry.getValue()); + + for (Map.Entry> entry : keysMap.entrySet()) + grpKeys.setGroupKeys(entry.getKey(), entry.getValue()); restoredFromWAL = true; } catch (IgniteSpiException e) { @@ -1008,13 +1414,25 @@ public void applyKeys(MasterKeyChangeRecord rec) { } } + /** + * Start reencryption using logical WAL record. + * + * @param rec Reencryption start logical record. + */ + public void applyReencryptionStartRecord(ReencryptionStartRecord rec) { + assert !writeToMetaStoreEnabled; + + for (Map.Entry e : rec.groups().entrySet()) + reencryptGroupsForced.put(e.getKey(), e.getValue() & 0xff); + } + /** * Prepares master key change. Checks master key consistency. * * @param req Request. * @return Result future. */ - private IgniteInternalFuture prepareMasterKeyChange(MasterKeyChangeRequest req) { + private IgniteInternalFuture prepareMasterKeyChange(MasterKeyChangeRequest req) { if (masterKeyChangeRequest != null) { return new GridFinishedFuture<>(new IgniteException("Master key change was rejected. 
" + "The previous change was not completed.")); @@ -1044,7 +1462,7 @@ private IgniteInternalFuture prepareMasterKeyChange(Maste ctx.localNodeId() + ']', e)); } - return new GridFinishedFuture<>(new MasterKeyChangeResult()); + return new GridFinishedFuture<>(new EmptyResult()); } /** @@ -1054,14 +1472,14 @@ private IgniteInternalFuture prepareMasterKeyChange(Maste * @param res Results. * @param err Errors. */ - private void finishPrepareMasterKeyChange(UUID id, Map res, Map err) { + private void finishPrepareMasterKeyChange(UUID id, Map res, Map err) { if (!err.isEmpty()) { if (masterKeyChangeRequest != null && masterKeyChangeRequest.requestId().equals(id)) masterKeyChangeRequest = null; completeMasterKeyChangeFuture(id, err); } - else if (isCoordinator()) + else if (U.isLocalNodeCoordinator(ctx.discovery())) performMKChangeProc.start(id, masterKeyChangeRequest); } @@ -1071,7 +1489,7 @@ else if (isCoordinator()) * @param req Request. * @return Result future. */ - private IgniteInternalFuture performMasterKeyChange(MasterKeyChangeRequest req) { + private IgniteInternalFuture performMasterKeyChange(MasterKeyChangeRequest req) { if (masterKeyChangeRequest == null || !masterKeyChangeRequest.equals(req)) return new GridFinishedFuture<>(new IgniteException("Unknown master key change was rejected.")); @@ -1089,7 +1507,7 @@ private IgniteInternalFuture performMasterKeyChange(Maste masterKeyDigest = req.digest(); - return new GridFinishedFuture<>(new MasterKeyChangeResult()); + return new GridFinishedFuture<>(new EmptyResult()); } /** @@ -1099,7 +1517,7 @@ private IgniteInternalFuture performMasterKeyChange(Maste * @param res Results. * @param err Errors. */ - private void finishPerformMasterKeyChange(UUID id, Map res, Map err) { + private void finishPerformMasterKeyChange(UUID id, Map res, Map err) { completeMasterKeyChangeFuture(id, err); } @@ -1130,29 +1548,16 @@ private void completeMasterKeyChangeFuture(UUID reqId, Map err) * @param msg Error message. 
*/ private void cancelFutures(String msg) { + assert Thread.holdsLock(opsMux); + for (GenerateEncryptionKeyFuture fut : genEncKeyFuts.values()) fut.onDone(new IgniteFutureCancelledException(msg)); if (masterKeyChangeFut != null && !masterKeyChangeFut.isDone()) masterKeyChangeFut.onDone(new IgniteFutureCancelledException(msg)); - } - - /** - * Checks whether local node is coordinator. Nodes that are leaving or failed - * (but are still in topology) are removed from search. - * - * @return {@code true} if local node is coordinator. - */ - private boolean isCoordinator() { - DiscoverySpi spi = ctx.discovery().getInjectedDiscoverySpi(); - - if (spi instanceof TcpDiscoverySpi) - return ((TcpDiscoverySpi)spi).isLocalNodeCoordinator(); - else { - ClusterNode crd = U.oldest(ctx.discovery().aliveServerNodes(), null); - return crd != null && F.eq(ctx.localNodeId(), crd.id()); - } + if (grpKeyChangeProc != null) + grpKeyChangeProc.cancel(msg); } /** @return {@code True} if the master key change process in progress. */ @@ -1167,7 +1572,7 @@ public boolean isMasterKeyChangeInProgress() { * * @return Digest of last changed master key or {@code null} if master key was not changed. */ - public byte[] masterKeyDigest() { + @Nullable public byte[] masterKeyDigest() { return masterKeyDigest; } @@ -1175,7 +1580,7 @@ public byte[] masterKeyDigest() { * @param c Callable to run with master key change read lock. * @return Computed result. */ - private T withMasterKeyChangeReadLock(Callable c) { + T withMasterKeyChangeReadLock(Callable c) { masterKeyChangeLock.readLock().lock(); try { @@ -1339,24 +1744,38 @@ byte[] digest() { } /** */ - private static class MasterKeyChangeResult implements Serializable { + protected static class EmptyResult implements Serializable { /** Serial version uid. 
*/ private static final long serialVersionUID = 0L; } /** */ - public static class NodeEncryptionKeys implements Serializable { + protected static class NodeEncryptionKeys implements Serializable { /** */ private static final long serialVersionUID = 0L; /** */ - NodeEncryptionKeys(Map knownKeys, Map newKeys, byte[] masterKeyDigest) { - this.knownKeys = knownKeys; + NodeEncryptionKeys( + HashMap> knownKeysWithIds, + Map newKeys, + byte[] masterKeyDigest + ) { this.newKeys = newKeys; this.masterKeyDigest = masterKeyDigest; + + if (F.isEmpty(knownKeysWithIds)) + return; + + // To be able to join the old cluster. + knownKeys = U.newHashMap(knownKeysWithIds.size()); + + for (Map.Entry> entry : knownKeysWithIds.entrySet()) + knownKeys.put(entry.getKey(), entry.getValue().get(0).key()); + + this.knownKeysWithIds = knownKeysWithIds; } - /** Known i.e. stored in {@code ReadWriteMetastorage} keys from node. */ + /** Known i.e. stored in {@code ReadWriteMetastorage} keys from node (in compatible format). */ Map knownKeys; /** New keys i.e. keys for a local statically configured caches. */ @@ -1364,6 +1783,9 @@ public static class NodeEncryptionKeys implements Serializable { /** Master key digest. */ byte[] masterKeyDigest; + + /** Known i.e. stored in {@code ReadWriteMetastorage} keys from node. */ + Map> knownKeysWithIds; } /** */ @@ -1423,13 +1845,13 @@ public int keyCount() { } } - /** Master key change future. */ - private static class MasterKeyChangeFuture extends GridFutureAdapter { + /** Key change future. */ + protected static class KeyChangeFuture extends GridFutureAdapter { /** Request ID. */ private final UUID id; /** @param id Request ID. 
*/ - private MasterKeyChangeFuture(UUID id) { + KeyChangeFuture(UUID id) { this.id = id; } @@ -1440,7 +1862,7 @@ public UUID id() { /** {@inheritDoc} */ @Override public String toString() { - return S.toString(MasterKeyChangeFuture.class, this); + return S.toString(KeyChangeFuture.class, this); } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GroupKey.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GroupKey.java new file mode 100644 index 0000000000000..5182a18b952a2 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GroupKey.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.managers.encryption; + +import java.io.Serializable; +import java.util.Objects; + +/** + * Cache group encryption key with identifier. + */ +public class GroupKey { + /** Encryption key ID. */ + private final int id; + + /** Encryption key. */ + private final Serializable key; + + /** + * @param id Encryption key ID. + * @param key Encryption key. 
+ */ + public GroupKey(int id, Serializable key) { + this.id = id; + this.key = key; + } + + /** + * @return Encryption key ID. + */ + public byte id() { + return (byte)id; + } + + /** + * @return Unsigned encryption key ID. + */ + public int unsignedId() { + return id & 0xff; + } + + /** + * @return Encryption key. + */ + public Serializable key() { + return key; + } + + /** {@inheritDoc} */ + @Override public boolean equals(Object o) { + if (this == o) + return true; + + if (o == null || getClass() != o.getClass()) + return false; + + GroupKey grpKey = (GroupKey)o; + + return id == grpKey.id && Objects.equals(key, grpKey.key); + } + + /** {@inheritDoc} */ + @Override public int hashCode() { + return Objects.hash(id, key); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return "GroupKey [id=" + id + ']'; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GroupKeyChangeProcess.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GroupKeyChangeProcess.java new file mode 100644 index 0000000000000..3e12351de27e6 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GroupKeyChangeProcess.java @@ -0,0 +1,356 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.managers.encryption; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.IgniteFeatures; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.managers.encryption.GridEncryptionManager.EmptyResult; +import org.apache.ignite.internal.managers.encryption.GridEncryptionManager.KeyChangeFuture; +import org.apache.ignite.internal.processors.cache.CacheGroupDescriptor; +import org.apache.ignite.internal.processors.cache.DynamicCacheDescriptor; +import org.apache.ignite.internal.util.distributed.DistributedProcess; +import org.apache.ignite.internal.util.future.GridFinishedFuture; +import org.apache.ignite.internal.util.future.GridFutureAdapter; +import org.apache.ignite.internal.util.future.IgniteFinishedFutureImpl; +import org.apache.ignite.internal.util.future.IgniteFutureImpl; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.T2; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteFuture; +import org.apache.ignite.lang.IgniteFutureCancelledException; + +import static org.apache.ignite.internal.IgniteFeatures.CACHE_GROUP_KEY_CHANGE; +import static org.apache.ignite.internal.util.distributed.DistributedProcess.DistributedProcessType.CACHE_GROUP_KEY_CHANGE_FINISH; +import static org.apache.ignite.internal.util.distributed.DistributedProcess.DistributedProcessType.CACHE_GROUP_KEY_CHANGE_PREPARE; + +/** + * A two-phase distributed process that rotates the encryption keys 
of specified cache groups and initiates + * re-encryption of those cache groups. + */ +class GroupKeyChangeProcess { + /** Grid kernal context. */ + private final GridKernalContext ctx; + + /** Cache group encyption key change prepare phase. */ + private final DistributedProcess prepareGKChangeProc; + + /** Cache group encyption key change perform phase. */ + private final DistributedProcess performGKChangeProc; + + /** Group encryption keys. */ + private final CacheGroupEncryptionKeys keys; + + /** Cache group key change future. */ + private volatile GroupKeyChangeFuture fut; + + /** Cache group key change request. */ + private volatile ChangeCacheEncryptionRequest req; + + /** + * @param ctx Grid kernal context. + * @param keys Cache group encryption keys. + */ + GroupKeyChangeProcess(GridKernalContext ctx, CacheGroupEncryptionKeys keys) { + this.ctx = ctx; + this.keys = keys; + + prepareGKChangeProc = + new DistributedProcess<>(ctx, CACHE_GROUP_KEY_CHANGE_PREPARE, this::prepare, this::finishPrepare); + performGKChangeProc = + new DistributedProcess<>(ctx, CACHE_GROUP_KEY_CHANGE_FINISH, this::perform, this::finishPerform); + } + + /** + * @return {@code True} if operation is still in progress. + */ + public boolean inProgress() { + return req != null; + } + + /** + * @param msg Error message. + */ + public void cancel(String msg) { + GridFutureAdapter keyChangeFut = fut; + + if (keyChangeFut != null && !keyChangeFut.isDone()) + keyChangeFut.onDone(new IgniteFutureCancelledException(msg)); + } + + /** + * Starts cache group encryption key change process. + * + * @param cacheOrGrpNames Cache or group names. 
+ */ + public IgniteFuture start(Collection cacheOrGrpNames) { + if (ctx.clientNode()) + throw new UnsupportedOperationException("Client and daemon nodes can not perform this operation."); + + if (!IgniteFeatures.allNodesSupports(ctx.grid().cluster().nodes(), CACHE_GROUP_KEY_CHANGE)) + throw new IllegalStateException("Not all nodes in the cluster support this operation."); + + if (!ctx.state().clusterState().state().active()) + throw new IgniteException("Operation was rejected. The cluster is inactive."); + + IgniteInternalFuture fut0 = fut; + + if (fut0 != null && !fut0.isDone()) { + return new IgniteFinishedFutureImpl<>(new IgniteException("Cache group key change was rejected. " + + "The previous change was not completed.")); + } + + int[] grpIds = new int[cacheOrGrpNames.size()]; + byte[] keyIds = new byte[grpIds.length]; + + int n = 0; + + for (String cacheOrGroupName : cacheOrGrpNames) { + CacheGroupDescriptor grpDesc = ctx.cache().cacheGroupDescriptor(CU.cacheId(cacheOrGroupName)); + + if (grpDesc == null) { + DynamicCacheDescriptor cacheDesc = ctx.cache().cacheDescriptor(cacheOrGroupName); + + if (cacheDesc == null) { + throw new IgniteException("Cache group key change was rejected. " + + "Cache or group \"" + cacheOrGroupName + "\" doesn't exists"); + } + + int grpId = cacheDesc.groupId(); + + grpDesc = ctx.cache().cacheGroupDescriptor(grpId); + + if (grpDesc.sharedGroup()) { + throw new IgniteException("Cache group key change was rejected. " + + "Cache or group \"" + cacheOrGroupName + "\" is a part of group \"" + + grpDesc.groupName() + "\". Provide group name instead of cache name for shared groups."); + } + } + + if (!grpDesc.config().isEncryptionEnabled()) { + throw new IgniteException("Cache group key change was rejected. " + + "Cache or group \"" + cacheOrGroupName + "\" is not encrypted."); + } + + if (ctx.encryption().reencryptionInProgress(grpDesc.groupId())) { + throw new IgniteException("Cache group key change was rejected. 
" + + "Cache group reencryption is in progress [grp=" + cacheOrGroupName + "]"); + } + + grpIds[n] = grpDesc.groupId(); + keyIds[n] = (byte)(ctx.encryption().groupKey(grpDesc.groupId()).unsignedId() + 1); + + n += 1; + } + + T2, byte[]> keysAndDigest = ctx.encryption().createKeys(grpIds.length); + + ChangeCacheEncryptionRequest req = new ChangeCacheEncryptionRequest( + grpIds, + keysAndDigest.get1().toArray(new byte[grpIds.length][]), + keyIds, + keysAndDigest.get2() + ); + + fut = new GroupKeyChangeFuture(req); + + prepareGKChangeProc.start(req.requestId(), req); + + return new IgniteFutureImpl<>(fut); + } + + /** + * Validates existing keys. + * + * @param req Request. + * @return Result future. + */ + private IgniteInternalFuture prepare(ChangeCacheEncryptionRequest req) { + if (ctx.clientNode()) + return new GridFinishedFuture<>(); + + if (inProgress()) { + return new GridFinishedFuture<>(new IgniteException("Cache group key change was rejected. " + + "The previous change was not completed.")); + } + + this.req = req; + + try { + for (int i = 0; i < req.groupIds().length; i++) { + int grpId = req.groupIds()[i]; + int keyId = req.keyIds()[i] & 0xff; + + if (ctx.encryption().reencryptionInProgress(grpId)) { + return new GridFinishedFuture<>(new IgniteException("Cache group key change was rejected. " + + "Cache group reencryption is in progress [grpId=" + grpId + "]")); + } + + List keyIds = ctx.encryption().groupKeyIds(grpId); + + if (keyIds == null) { + return new GridFinishedFuture<>(new IgniteException("Cache group key change was rejected." + + "Encrypted cache group not found [grpId=" + grpId + "]")); + } + + GroupKey currKey = ctx.encryption().groupKey(grpId); + + for (int locKeyId : keyIds) { + if (locKeyId != keyId) + continue; + + Long walSegment = keys.reservedSegment(grpId, keyId); + + // Can overwrite inactive key if it was added during prepare phase. 
+ if (walSegment == null && currKey.id() != (byte)keyId) + continue; + + return new GridFinishedFuture<>( + new IgniteException("Cache group key change was rejected. Cannot add new key identifier, " + + "it's already present. There existing WAL segments that encrypted with this key [" + + "grpId=" + grpId + ", newId=" + keyId + ", currId=" + currKey.unsignedId() + + ", walSegment=" + walSegment + "].")); + } + } + + return ctx.encryption().withMasterKeyChangeReadLock(() -> { + if (!Arrays.equals(ctx.config().getEncryptionSpi().masterKeyDigest(), req.masterKeyDigest())) { + return new GridFinishedFuture<>(new IgniteException("Cache group key change was rejected. " + + "Master key has been changed.")); + } + + for (int i = 0; i < req.groupIds().length; i++) { + // Save the new key as inactive, because the master key may change later + // and there will be no way to decrypt the received keys. + GroupKeyEncrypted grpKey = new GroupKeyEncrypted(req.keyIds()[i] & 0xff, req.keys()[i]); + + ctx.encryption().addGroupKey(req.groupIds()[i], grpKey); + } + + return new GridFinishedFuture<>(new EmptyResult()); + }); + + } + catch (Exception e) { + return new GridFinishedFuture<>(new IgniteException("Cache group key change was rejected [nodeId=" + + ctx.localNodeId() + ']', e)); + } + } + + /** + * Starts group key change if there are no errors. + * + * @param id Request id. + * @param res Results. + * @param err Errors. + */ + private void finishPrepare(UUID id, Map res, Map err) { + if (!err.isEmpty()) { + if (req != null && req.requestId().equals(id)) + req = null; + + completeFuture(id, err, fut); + } + else if (U.isLocalNodeCoordinator(ctx.discovery())) + performGKChangeProc.start(id, req); + } + + /** + * Sets new encrpytion key as active (for writing) and starts background reencryption. + * + * @param req Request. + * @return Result future. 
+ */ + private IgniteInternalFuture perform(ChangeCacheEncryptionRequest req) { + if (this.req == null || !this.req.equals(req)) + return new GridFinishedFuture<>(new IgniteException("Unknown cache group key change was rejected.")); + + try { + if (!ctx.state().clusterState().state().active()) + throw new IgniteException("Cache group key change was rejected. The cluster is inactive."); + + if (!ctx.clientNode()) + ctx.encryption().changeCacheGroupKeyLocal(req.groupIds(), req.keyIds(), req.keys()); + } catch (Exception e) { + return new GridFinishedFuture<>(e); + } finally { + this.req = null; + } + + return new GridFinishedFuture<>(new EmptyResult()); + } + + /** + * Finishes cache encryption key rotation. + * + * @param id Request id. + * @param res Results. + * @param err Errors. + */ + private void finishPerform(UUID id, Map res, Map err) { + completeFuture(id, err, fut); + } + + /** + * @param reqId Request id. + * @param err Exception. + * @param fut Key change future. + * @return {@code True} if future was completed by this call. + */ + private boolean completeFuture(UUID reqId, Map err, GroupKeyChangeFuture fut) { + boolean isInitiator = fut != null && fut.id().equals(reqId); + + if (!isInitiator || fut.isDone()) + return false; + + return !F.isEmpty(err) ? fut.onDone(F.firstValue(err)) : fut.onDone(); + } + + /** Cache group key change future. */ + private static class GroupKeyChangeFuture extends KeyChangeFuture { + /** Request. */ + private final ChangeCacheEncryptionRequest req; + + /** + * @param req Request. + */ + GroupKeyChangeFuture(ChangeCacheEncryptionRequest req) { + super(req.requestId()); + + this.req = req; + } + + /** @return Topology version. 
*/ + public ChangeCacheEncryptionRequest request() { + return req; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(GroupKeyChangeFuture.class, this); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GroupKeyEncrypted.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GroupKeyEncrypted.java new file mode 100644 index 0000000000000..6b2ed0543038b --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GroupKeyEncrypted.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.managers.encryption; + +import java.io.Serializable; + +/** + * Cache group encryption key with identifier. Key is encrypted. + */ +public class GroupKeyEncrypted implements Serializable { + /** Serial version UID. */ + private static final long serialVersionUID = 0L; + + /** Encryption key ID. */ + private final int id; + + /** Encryption key. */ + private final byte[] key; + + /** + * @param id Encryption key ID. + * @param key Encryption key. 
+ */ + public GroupKeyEncrypted(int id, byte[] key) { + this.id = id; + this.key = key; + } + + /** + * @return Encryption key ID. + */ + public int id() { + return id; + } + + /** + * @return Encryption key. + */ + public byte[] key() { + return key; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/ReencryptStateUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/ReencryptStateUtils.java new file mode 100644 index 0000000000000..37292d99ceee2 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/ReencryptStateUtils.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.managers.encryption; + +/** */ +public class ReencryptStateUtils { + /** + * @param idx Index of the last reencrypted page. + * @param total Total pages to be reencrypted. + * @return Reencryption status. + */ + public static long state(int idx, int total) { + return ((long)idx) << Integer.SIZE | (total & 0xffffffffL); + } + + /** + * @param state Reencryption status. + * @return Index of the last reencrypted page. 
+ */ + public static int pageIndex(long state) { + return (int)(state >> Integer.SIZE); + } + + /** + * @param state Reencryption status. + * @return Total pages to be reencrypted. + */ + public static int pageCount(long state) { + return (int)state; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java index 40e70b37acbd8..cc183bfda3f88 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java @@ -166,6 +166,11 @@ public WALIterator replay( */ public void notchLastCheckpointPtr(WALPointer ptr); + /** + * @return Current segment index. + */ + public long currentSegment(); + /** * @return Total number of segments in the WAL archive. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/MasterKeyChangeRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/MasterKeyChangeRecord.java index 583ff733d4126..dba07b8cbb23e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/MasterKeyChangeRecord.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/MasterKeyChangeRecord.java @@ -24,7 +24,10 @@ /** * Logical record that stores encryption keys. Written to the WAL on the master key change. + * + * @deprecated Replaced by MasterKeyChangeRecordV2. */ +@Deprecated public class MasterKeyChangeRecord extends WALRecord { /** Master key name. 
*/ private final String masterKeyName; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/MasterKeyChangeRecordV2.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/MasterKeyChangeRecordV2.java new file mode 100644 index 0000000000000..10145d822118d --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/MasterKeyChangeRecordV2.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.pagemem.wal.record; + +import java.util.List; +import org.apache.ignite.internal.managers.encryption.GroupKeyEncrypted; +import org.apache.ignite.internal.util.typedef.T2; + +import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.MASTER_KEY_CHANGE_RECORD_V2; + +/** + * Logical record that stores encryption keys. Written to the WAL on the master key change. + */ +public class MasterKeyChangeRecordV2 extends WALRecord { + /** Master key name. */ + private final String masterKeyName; + + /** Group keys encrypted by the master key. */ + private final List> grpKeys; + + /** + * @param masterKeyName Master key name. + * @param grpKeys Encrypted group keys. 
+ */ + public MasterKeyChangeRecordV2(String masterKeyName, List> grpKeys) { + this.masterKeyName = masterKeyName; + this.grpKeys = grpKeys; + } + + /** @return Master key name. */ + public String getMasterKeyName() { + return masterKeyName; + } + + /** @return Encrypted group keys. */ + public List> getGrpKeys() { + return grpKeys; + } + + /** {@inheritDoc} */ + @Override public RecordType type() { + return MASTER_KEY_CHANGE_RECORD_V2; + } + + /** @return Record data size. */ + public int dataSize() { + int size = /*Master key name length*/4 + masterKeyName.getBytes().length + /*list size*/4; + + for (T2 entry : grpKeys) + size += /*grpId*/4 + /*grp key size*/4 + /*grp key id size*/1 + entry.get2().key().length; + + return size; + } +} + diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/ReencryptionStartRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/ReencryptionStartRecord.java new file mode 100644 index 0000000000000..c8b08d2f2b089 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/ReencryptionStartRecord.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.pagemem.wal.record; + +import java.util.Map; + +/** + * Logical record to restart reencryption with the latest encryption key. + */ +public class ReencryptionStartRecord extends WALRecord { + /** Map of reencrypted cache groups with encryption key identifiers. */ + private final Map grps; + + /** + * @param grps Map of reencrypted cache groups with encryption key identifiers. + */ + public ReencryptionStartRecord(Map grps) { + this.grps = grps; + } + + /** + * @return Map of reencrypted cache groups with encryption key identifiers. + */ + public Map groups() { + return grps; + } + + /** {@inheritDoc} */ + @Override public RecordType type() { + return RecordType.REENCRYPTION_START_RECORD; + } + + /** @return Record data size. */ + public int dataSize() { + return 4 + ((/*grpId*/4 + /*keyId*/1) * grps.size()); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java index 8a01467c3eb2b..f07b71a82fc89 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java @@ -234,7 +234,25 @@ public enum RecordType { TRACKING_PAGE_REPAIR_DELTA(61, PHYSICAL), /** Atomic out-of-order update. */ - OUT_OF_ORDER_UPDATE(62, LOGICAL); + OUT_OF_ORDER_UPDATE(62, LOGICAL), + + /** Encrypted WAL-record. */ + ENCRYPTED_RECORD_V2(63, PHYSICAL), + + /** Ecnrypted data record. */ + ENCRYPTED_DATA_RECORD_V2(64, LOGICAL), + + /** Master key change record containing multiple keys for single cache group. */ + MASTER_KEY_CHANGE_RECORD_V2(65, LOGICAL), + + /** Logical record to restart reencryption with the latest encryption key. */ + REENCRYPTION_START_RECORD(66, LOGICAL), + + /** Partition meta page delta record includes encryption status data. 
*/ + PARTITION_META_PAGE_DELTA_RECORD_V3(67, PHYSICAL), + + /** Index meta page delta record includes encryption status data. */ + INDEX_META_PAGE_DELTA_RECORD(68, PHYSICAL); /** Index for serialization. Should be consistent throughout all versions. */ private final int idx; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdateIndexDataRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdateIndexDataRecord.java new file mode 100644 index 0000000000000..e2f83c0ef3d39 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdateIndexDataRecord.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.pagemem.wal.record.delta; + +import java.io.DataInput; +import java.io.IOException; +import java.nio.ByteBuffer; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.pagemem.PageIdUtils; +import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageMetaIOV2; +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * Meta page delta record, includes encryption status data. + */ +public class MetaPageUpdateIndexDataRecord extends PageDeltaRecord { + /** Index of the last reencrypted page. */ + private int encryptPageIdx; + + /** Total pages to be reencrypted. */ + private int encryptPageCnt; + + /** + * @param grpId Cache group ID. + * @param pageId Page ID. + * @param encryptPageIdx Index of the last reencrypted page. + * @param encryptPageCnt Total pages to be reencrypted. + */ + public MetaPageUpdateIndexDataRecord(int grpId, long pageId, int encryptPageIdx, int encryptPageCnt) { + super(grpId, pageId); + + this.encryptPageIdx = encryptPageIdx; + this.encryptPageCnt = encryptPageCnt; + } + + /** {@inheritDoc} */ + @Override public void applyDelta(PageMemory pageMem, long pageAddr) throws IgniteCheckedException { + if (PageIO.getVersion(pageAddr) < 2) + ((PageMetaIOV2)PageMetaIOV2.VERSIONS.latest()).upgradePage(pageAddr); + + PageMetaIOV2 io = (PageMetaIOV2)PageMetaIOV2.VERSIONS.forPage(pageAddr); + + io.setEncryptedPageIndex(pageAddr, encryptPageIdx); + io.setEncryptedPageCount(pageAddr, encryptPageCnt); + } + + /** {@inheritDoc} */ + @Override public RecordType type() { + return RecordType.INDEX_META_PAGE_DELTA_RECORD; + } + + /** + * @param in Input. 
+ */ + public MetaPageUpdateIndexDataRecord(DataInput in) throws IOException { + super(in.readInt(), in.readLong()); + + encryptPageIdx = in.readInt(); + encryptPageCnt = in.readInt(); + } + + /** + * @param buf Buffer. + */ + public void toBytes(ByteBuffer buf) { + buf.putInt(groupId()); + buf.putLong(pageId()); + + buf.putInt(encryptionPagesIndex()); + buf.putInt(encryptionPagesCount()); + } + + /** + * @return Index of the last reencrypted page. + */ + private int encryptionPagesIndex() { + return encryptPageIdx; + } + + /** + * @return Total pages to be reencrypted. + */ + private int encryptionPagesCount() { + return encryptPageCnt; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(MetaPageUpdateIndexDataRecord.class, this, "partId", + PageIdUtils.partId(pageId()), "super", super.toString()); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdatePartitionDataRecordV3.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdatePartitionDataRecordV3.java new file mode 100644 index 0000000000000..c7a71570560bf --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdatePartitionDataRecordV3.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.pagemem.wal.record.delta; + +import java.io.DataInput; +import java.io.IOException; +import java.nio.ByteBuffer; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.pagemem.PageIdUtils; +import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PagePartitionMetaIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PagePartitionMetaIOV3; +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * Partition meta page delta record includes encryption status data. + */ +public class MetaPageUpdatePartitionDataRecordV3 extends MetaPageUpdatePartitionDataRecordV2 { + /** Index of the last reencrypted page. */ + private int encryptedPageIdx; + + /** Total pages to be reencrypted. */ + private int encryptedPageCnt; + + /** + * @param grpId Group id. + * @param pageId Page id. + * @param updateCntr Update counter. + * @param globalRmvId Global remove id. + * @param partSize Partition size. + * @param cntrsPageId Cntrs page id. + * @param state State. + * @param allocatedIdxCandidate Allocated index candidate. + * @param link Link. + * @param encryptedPageIdx Index of the last reencrypted page. + * @param encryptedPageCnt Total pages to be reencrypted. 
+ */ + public MetaPageUpdatePartitionDataRecordV3( + int grpId, + long pageId, + long updateCntr, + long globalRmvId, + int partSize, + long cntrsPageId, + byte state, + int allocatedIdxCandidate, + long link, + int encryptedPageIdx, + int encryptedPageCnt) { + super(grpId, pageId, updateCntr, globalRmvId, partSize, cntrsPageId, state, allocatedIdxCandidate, link); + + this.encryptedPageIdx = encryptedPageIdx; + this.encryptedPageCnt = encryptedPageCnt; + } + + /** + * @param in Input. + */ + public MetaPageUpdatePartitionDataRecordV3(DataInput in) throws IOException { + super(in); + + encryptedPageIdx = in.readInt(); + encryptedPageCnt = in.readInt(); + } + + /** {@inheritDoc} */ + @Override public void applyDelta(PageMemory pageMem, long pageAddr) throws IgniteCheckedException { + super.applyDelta(pageMem, pageAddr); + + PagePartitionMetaIOV3 io = (PagePartitionMetaIOV3)PagePartitionMetaIO.VERSIONS.forPage(pageAddr); + + io.setEncryptedPageIndex(pageAddr, encryptedPageIdx); + io.setEncryptedPageCount(pageAddr, encryptedPageCnt); + } + + /** + * @return Index of the last reencrypted page. + */ + public int encryptedPageIndex() { + return encryptedPageIdx; + } + + /** + * @return Total pages to be reencrypted. 
+ */ + public int encryptedPageCount() { + return encryptedPageCnt; + } + + /** {@inheritDoc} */ + @Override public void toBytes(ByteBuffer buf) { + super.toBytes(buf); + + buf.putInt(encryptedPageIndex()); + buf.putInt(encryptedPageCount()); + } + + /** {@inheritDoc} */ + @Override public RecordType type() { + return RecordType.PARTITION_META_PAGE_DELTA_RECORD_V3; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(MetaPageUpdatePartitionDataRecordV3.class, this, "partId", + PageIdUtils.partId(pageId()), "super", super.toString()); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsImpl.java index 15a989decd166..75fdd15c85d96 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsImpl.java @@ -25,10 +25,15 @@ import java.util.Map; import java.util.Set; import java.util.UUID; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; import org.apache.ignite.cache.CacheMode; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.internal.managers.encryption.GridEncryptionManager; +import org.apache.ignite.internal.managers.encryption.ReencryptStateUtils; +import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.store.PageStore; import org.apache.ignite.internal.processors.affinity.AffinityAssignment; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; @@ -39,6 +44,7 @@ import org.apache.ignite.internal.processors.cache.persistence.DataRegion; import 
org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; +import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; import org.apache.ignite.internal.processors.metric.MetricRegistry; import org.apache.ignite.internal.processors.metric.impl.AtomicLongMetric; import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; @@ -175,6 +181,16 @@ public void onTopologyInitialized() { mreg.register("TotalAllocatedSize", this::getTotalAllocatedSize, "Total size of memory allocated for group, in bytes."); + + if (ctx.config().isEncryptionEnabled()) { + mreg.register("ReencryptionFinished", + () -> !ctx.shared().kernalContext().encryption().reencryptionInProgress(ctx.groupId()), + "The flag indicates whether reencryption is finished or not."); + + mreg.register("ReencryptionPagesLeft", + this::getPagesLeftForReencryption, + "Number of pages left for reencryption."); + } } /** */ @@ -486,6 +502,40 @@ public long getSparseStorageSize() { return sparseStorageSize == null ? 
0 : sparseStorageSize.value(); } + /** */ + public long getPagesLeftForReencryption() { + if (!ctx.shared().kernalContext().encryption().reencryptionInProgress(ctx.groupId())) + return 0; + + long pagesLeft = 0; + + FilePageStoreManager mgr = (FilePageStoreManager)ctx.shared().pageStore(); + + GridEncryptionManager encMgr = ctx.shared().kernalContext().encryption(); + + try { + for (int p = 0; p < ctx.affinity().partitions(); p++) { + PageStore pageStore = mgr.getStore(ctx.groupId(), p); + + if (!pageStore.exists()) + continue; + + long state = encMgr.getEncryptionState(ctx.groupId(), p); + + pagesLeft += ReencryptStateUtils.pageCount(state) - ReencryptStateUtils.pageIndex(state); + } + + long state = encMgr.getEncryptionState(ctx.groupId(), PageIdAllocator.INDEX_PARTITION); + + pagesLeft += ReencryptStateUtils.pageCount(state) - ReencryptStateUtils.pageIndex(state); + } + catch (IgniteCheckedException e) { + throw new IgniteException(e); + } + + return pagesLeft; + } + /** Removes all metric for cache group. */ public void remove() { if (ctx.shared().kernalContext().isStopping()) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java index cb0daedbc29f9..e260983fa7186 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java @@ -2021,9 +2021,13 @@ public static boolean isPersistenceEnabled(DataStorageConfiguration cfg) { * @return Page size without encryption overhead. */ public static int encryptedPageSize(int pageSize, EncryptionSpi encSpi) { + // If encryption is enabled, a space of one encryption block is reserved to store CRC and encryption key ID. + // If encryption is disabled, NoopEncryptionSPI with a zero encryption block size is used. 
+ assert encSpi.blockSize() >= /* CRC */ 4 + /* Key ID */ 1 || encSpi.blockSize() == 0; + return pageSize - (encSpi.encryptedSizeNoPadding(pageSize) - pageSize) - - encSpi.blockSize(); /* For CRC. */ + - encSpi.blockSize(); /* For CRC and encryption key ID. */ } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLog.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLog.java index a41790f8bd44a..8adb6b30e53a4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLog.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLog.java @@ -41,6 +41,7 @@ import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageMetaIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageMetaIOV2; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseListImpl; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler; @@ -124,7 +125,7 @@ private void init(GridKernalContext ctx) throws IgniteCheckedException { try { if (PageIO.getType(pageAddr) != PageIO.T_META) { // Initialize new page. 
- PageMetaIO io = PageMetaIO.VERSIONS.latest(); + PageMetaIO io = PageMetaIOV2.VERSIONS.latest(); io.initNewPage(pageAddr, metaId, pageMemory.pageSize()); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java index 50224f38629b5..0789a0cd9f9f6 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java @@ -85,12 +85,13 @@ import org.apache.ignite.internal.pagemem.wal.record.CheckpointRecord; import org.apache.ignite.internal.pagemem.wal.record.DataEntry; import org.apache.ignite.internal.pagemem.wal.record.DataRecord; -import org.apache.ignite.internal.pagemem.wal.record.MasterKeyChangeRecord; +import org.apache.ignite.internal.pagemem.wal.record.MasterKeyChangeRecordV2; import org.apache.ignite.internal.pagemem.wal.record.MemoryRecoveryRecord; import org.apache.ignite.internal.pagemem.wal.record.MetastoreDataRecord; import org.apache.ignite.internal.pagemem.wal.record.MvccDataEntry; import org.apache.ignite.internal.pagemem.wal.record.MvccTxRecord; import org.apache.ignite.internal.pagemem.wal.record.PageSnapshot; +import org.apache.ignite.internal.pagemem.wal.record.ReencryptionStartRecord; import org.apache.ignite.internal.pagemem.wal.record.RollbackRecord; import org.apache.ignite.internal.pagemem.wal.record.WALRecord; import org.apache.ignite.internal.pagemem.wal.record.WalRecordCacheGroupAware; @@ -168,6 +169,7 @@ import static org.apache.ignite.internal.pagemem.PageIdUtils.partId; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.CHECKPOINT_RECORD; import static 
org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.MASTER_KEY_CHANGE_RECORD; +import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.MASTER_KEY_CHANGE_RECORD_V2; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.METASTORE_DATA_RECORD; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.OWNING; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.fromOrdinal; @@ -1255,6 +1257,9 @@ private String cacheInfo(GridCacheContext cacheCtx) { grpIds.add(tup.get1().groupId()); + if (gctx.config().isEncryptionEnabled()) + cctx.kernalContext().encryption().onCacheGroupStop(gctx.groupId()); + pageMem.onCacheGroupDestroyed(tup.get1().groupId()); if (tup.get2()) @@ -2347,6 +2352,7 @@ private RestoreLogicalState applyLogicalUpdates( case MVCC_DATA_RECORD: case DATA_RECORD: case ENCRYPTED_DATA_RECORD: + case ENCRYPTED_DATA_RECORD_V2: DataRecord dataRec = (DataRecord)rec; for (DataEntry dataEntry : dataRec.writeEntries()) { @@ -2427,8 +2433,13 @@ private RestoreLogicalState applyLogicalUpdates( break; - case MASTER_KEY_CHANGE_RECORD: - cctx.kernalContext().encryption().applyKeys((MasterKeyChangeRecord)rec); + case MASTER_KEY_CHANGE_RECORD_V2: + cctx.kernalContext().encryption().applyKeys((MasterKeyChangeRecordV2)rec); + + break; + + case REENCRYPTION_START_RECORD: + cctx.kernalContext().encryption().applyReencryptionStartRecord((ReencryptionStartRecord)rec); break; @@ -3125,7 +3136,8 @@ private IgnitePredicate groupsWithEnabledWal() { * @return WAL records predicate that passes only Metastorage and encryption data records. 
*/ private IgniteBiPredicate onlyMetastorageAndEncryptionRecords() { - return (type, ptr) -> type == METASTORE_DATA_RECORD || type == MASTER_KEY_CHANGE_RECORD; + return (type, ptr) -> type == METASTORE_DATA_RECORD || + type == MASTER_KEY_CHANGE_RECORD || type == MASTER_KEY_CHANGE_RECORD_V2; } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java index 1fba65daf224a..fa4fabe05d4c3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java @@ -40,6 +40,8 @@ import org.apache.ignite.SystemProperty; import org.apache.ignite.failure.FailureContext; import org.apache.ignite.failure.FailureType; +import org.apache.ignite.internal.managers.encryption.GridEncryptionManager; +import org.apache.ignite.internal.managers.encryption.ReencryptStateUtils; import org.apache.ignite.internal.pagemem.FullPageId; import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageIdUtils; @@ -55,7 +57,8 @@ import org.apache.ignite.internal.pagemem.wal.record.RollbackRecord; import org.apache.ignite.internal.pagemem.wal.record.WALRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRecord; -import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdatePartitionDataRecordV2; +import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateIndexDataRecord; +import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdatePartitionDataRecordV3; import org.apache.ignite.internal.pagemem.wal.record.delta.PartitionDestroyRecord; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import 
org.apache.ignite.internal.processors.cache.CacheDiagnosticManager; @@ -90,9 +93,10 @@ import org.apache.ignite.internal.processors.cache.persistence.partstorage.PartitionMetaStorageImpl; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageMetaIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageMetaIOV2; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PagePartitionCountersIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PagePartitionMetaIO; -import org.apache.ignite.internal.processors.cache.persistence.tree.io.PagePartitionMetaIOV2; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PagePartitionMetaIOV3; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseListImpl; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler; @@ -362,6 +366,9 @@ private void syncMetadata(Context ctx, Executor execSvc, boolean needSnapshot) t } }); } + + if (grp.config().isEncryptionEnabled()) + saveIndexReencryptionStatus(grp.groupId()); } /** @@ -389,8 +396,10 @@ private void saveStoreMetadata( PageMemoryEx pageMem = (PageMemoryEx)grp.dataRegion().pageMemory(); IgniteWriteAheadLogManager wal = this.ctx.wal(); + GridEncryptionManager encMgr = this.ctx.kernalContext().encryption(); - if (size > 0 || updCntr > 0 || !store.partUpdateCounter().sequential()) { + if (size > 0 || updCntr > 0 || !store.partUpdateCounter().sequential() || + (grp.config().isEncryptionEnabled() && encMgr.getEncryptionState(grp.groupId(), store.partId()) > 0)) { GridDhtPartitionState state = null; // localPartition will not acquire writeLock here because create=false. 
@@ -429,7 +438,7 @@ private void saveStoreMetadata( boolean changed = false; try { - PagePartitionMetaIOV2 io = PageIO.getPageIO(partMetaPageAddr); + PagePartitionMetaIOV3 io = PageIO.getPageIO(partMetaPageAddr); long link = io.getGapsLink(partMetaPageAddr); @@ -474,6 +483,27 @@ else if (updCntrsBytes != null && link != 0) { changed |= io.setGlobalRemoveId(partMetaPageAddr, rmvId); changed |= io.setSize(partMetaPageAddr, size); + int encryptIdx = 0; + int encryptCnt = 0; + + if (grp.config().isEncryptionEnabled()) { + long reencryptState = encMgr.getEncryptionState(grpId, store.partId()); + + if (reencryptState != 0) { + encryptIdx = ReencryptStateUtils.pageIndex(reencryptState); + encryptCnt = ReencryptStateUtils.pageCount(reencryptState); + + if (encryptIdx == encryptCnt) { + encMgr.setEncryptionState(grp, store.partId(), 0, 0); + + encryptIdx = encryptCnt = 0; + } + + changed |= io.setEncryptedPageIndex(partMetaPageAddr, encryptIdx); + changed |= io.setEncryptedPageCount(partMetaPageAddr, encryptCnt); + } + } + if (state != null) changed |= io.setPartitionState(partMetaPageAddr, (byte)state.ordinal()); else @@ -541,7 +571,7 @@ else if (state == MOVING || state == RENTING) { pageCnt = io.getCandidatePageCount(partMetaPageAddr); if (changed && PageHandler.isWalDeltaRecordNeeded(pageMem, grpId, partMetaId, partMetaPage, wal, null)) - wal.log(new MetaPageUpdatePartitionDataRecordV2( + wal.log(new MetaPageUpdatePartitionDataRecordV3( grpId, partMetaId, updCntr, @@ -550,7 +580,9 @@ else if (state == MOVING || state == RENTING) { cntrsPageId, state == null ? 
-1 : (byte)state.ordinal(), pageCnt, - link + link, + encryptIdx, + encryptCnt )); } finally { @@ -949,6 +981,9 @@ private static boolean addPartition( public void destroyPartitionStore(int grpId, int partId) throws IgniteCheckedException { PageMemoryEx pageMemory = (PageMemoryEx)grp.dataRegion().pageMemory(); + if (grp.config().isEncryptionEnabled()) + ctx.kernalContext().encryption().onDestroyPartitionStore(grp, partId); + int tag = pageMemory.invalidate(grp.groupId(), partId); if (grp.walEnabled()) @@ -1019,13 +1054,13 @@ private Metas getOrAllocateCacheMetas() throws IgniteCheckedException { try { final long pageAddr = pageMem.writeLock(grpId, metaId, metaPage); - boolean allocated = false; + boolean markDirty = false; try { long metastoreRoot, reuseListRoot; if (PageIO.getType(pageAddr) != PageIO.T_META) { - PageMetaIO pageIO = PageMetaIO.VERSIONS.latest(); + PageMetaIOV2 pageIO = (PageMetaIOV2)PageMetaIOV2.VERSIONS.latest(); pageIO.initNewPage(pageAddr, metaId, pageMem.realPageSize(grpId)); @@ -1048,25 +1083,38 @@ private Metas getOrAllocateCacheMetas() throws IgniteCheckedException { )); } - allocated = true; + markDirty = true; } else { - PageMetaIO pageIO = PageIO.getPageIO(pageAddr); + if (PageMetaIO.getVersion(pageAddr) < 2) { + ((PageMetaIOV2)PageMetaIOV2.VERSIONS.latest()).upgradePage(pageAddr); + + markDirty = true; + } + + PageMetaIOV2 pageIO = PageIO.getPageIO(pageAddr); metastoreRoot = pageIO.getTreeRoot(pageAddr); reuseListRoot = pageIO.getReuseListRoot(pageAddr); + int encrPageCnt = pageIO.getEncryptedPageCount(pageAddr); + + if (encrPageCnt > 0) { + ctx.kernalContext().encryption().setEncryptionState(grp, PageIdAllocator.INDEX_PARTITION, + pageIO.getEncryptedPageIndex(pageAddr), encrPageCnt); + } + assert reuseListRoot != 0L; } return new Metas( - new RootPage(new FullPageId(metastoreRoot, grpId), allocated), - new RootPage(new FullPageId(reuseListRoot, grpId), allocated), + new RootPage(new FullPageId(metastoreRoot, grpId), markDirty), + new 
RootPage(new FullPageId(reuseListRoot, grpId), markDirty), null, null); } finally { - pageMem.writeUnlock(grpId, metaId, metaPage, null, allocated); + pageMem.writeUnlock(grpId, metaId, metaPage, null, markDirty); } } finally { @@ -1255,6 +1303,55 @@ public void findAndCleanupLostIndexesForStoppedCache(int cacheId) throws IgniteC } } + /** + * @param grpId Cache group ID. + * @throws IgniteCheckedException If failed. + */ + private void saveIndexReencryptionStatus(int grpId) throws IgniteCheckedException { + long state = ctx.kernalContext().encryption().getEncryptionState(grpId, PageIdAllocator.INDEX_PARTITION); + + if (state == 0) + return; + + PageMemoryEx pageMem = (PageMemoryEx)grp.dataRegion().pageMemory(); + + long metaPageId = pageMem.metaPageId(grpId); + long metaPage = pageMem.acquirePage(grpId, metaPageId); + + try { + boolean changed = false; + + long metaPageAddr = pageMem.writeLock(grpId, metaPageId, metaPage); + + try { + PageMetaIOV2 metaIo = PageMetaIO.getPageIO(metaPageAddr); + + int encryptIdx = ReencryptStateUtils.pageIndex(state); + int encryptCnt = ReencryptStateUtils.pageCount(state); + + if (encryptIdx == encryptCnt) { + ctx.kernalContext().encryption().setEncryptionState(grp, PageIdAllocator.INDEX_PARTITION, 0, 0); + + encryptIdx = encryptCnt = 0; + } + + changed |= metaIo.setEncryptedPageIndex(metaPageAddr, encryptIdx); + changed |= metaIo.setEncryptedPageCount(metaPageAddr, encryptCnt); + + IgniteWriteAheadLogManager wal = ctx.cache().context().wal(); + + if (changed && PageHandler.isWalDeltaRecordNeeded(pageMem, grpId, metaPageId, metaPage, wal, null)) + wal.log(new MetaPageUpdateIndexDataRecord(grpId, metaPageId, encryptIdx, encryptCnt)); + } + finally { + pageMem.writeUnlock(grpId, metaPageId, metaPage, null, changed); + } + } + finally { + pageMem.releasePage(grpId, metaPageId, metaPage); + } + } + /** * */ @@ -1955,7 +2052,7 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException try { if 
(PageIO.getType(pageAddr) != 0) { - PagePartitionMetaIOV2 io = (PagePartitionMetaIOV2)PagePartitionMetaIO.VERSIONS.latest(); + PagePartitionMetaIOV3 io = (PagePartitionMetaIOV3)PagePartitionMetaIO.VERSIONS.latest(); Map cacheSizes = null; @@ -1968,6 +2065,13 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException delegate0.restoreState(io.getSize(pageAddr), io.getUpdateCounter(pageAddr), cacheSizes, data); + int encrPageCnt = io.getEncryptedPageCount(pageAddr); + + if (encrPageCnt > 0) { + ctx.kernalContext().encryption().setEncryptionState( + grp, partId, io.getEncryptedPageIndex(pageAddr), encrPageCnt); + } + globalRemoveId().setIfGreater(io.getGlobalRemoveId(pageAddr)); } } @@ -2035,7 +2139,7 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { // Initialize new page. if (PageIO.getType(pageAddr) != PageIO.T_PART_META) { - PagePartitionMetaIOV2 io = (PagePartitionMetaIOV2)PagePartitionMetaIO.VERSIONS.latest(); + PagePartitionMetaIOV3 io = (PagePartitionMetaIOV3)PagePartitionMetaIO.VERSIONS.latest(); io.initNewPage(pageAddr, partMetaId, pageMem.realPageSize(grpId)); @@ -2069,8 +2173,8 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { int pageVer = PagePartitionMetaIO.getVersion(pageAddr); - if (pageVer < 2) { - assert pageVer == 1; + if (pageVer < 3) { + assert pageVer == 1 || pageVer == 2; if (log.isDebugEnabled()) log.info("Upgrade partition meta page version: [part=" + partId + @@ -2080,7 +2184,7 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { io = PagePartitionMetaIO.VERSIONS.latest(); - ((PagePartitionMetaIOV2)io).upgradePage(pageAddr); + ((PagePartitionMetaIOV3)io).upgradePage(pageAddr); pendingTreeRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_DATA); partMetaStoreReuseListRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_DATA); diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/EncryptedFileIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/EncryptedFileIO.java index 60f5017f2f6fa..88b619ebc0d93 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/EncryptedFileIO.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/EncryptedFileIO.java @@ -18,10 +18,10 @@ package org.apache.ignite.internal.processors.cache.persistence.file; import java.io.IOException; -import java.io.Serializable; import java.nio.ByteBuffer; import java.nio.MappedByteBuffer; import org.apache.ignite.internal.managers.encryption.GridEncryptionManager; +import org.apache.ignite.internal.managers.encryption.GroupKey; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.processors.cache.persistence.wal.crc.FastCrc; import org.apache.ignite.internal.util.typedef.internal.CU; @@ -63,11 +63,6 @@ public class EncryptedFileIO implements FileIO { */ private final EncryptionSpi encSpi; - /** - * Encryption key. - */ - private Serializable encKey; - /** * Extra bytes added by encryption. 
*/ @@ -242,12 +237,16 @@ private void encrypt(ByteBuffer srcBuf, ByteBuffer res) throws IOException { srcBuf.limit(srcBuf.position() + plainDataSize()); - encSpi.encryptNoPadding(srcBuf, key(), res); + GroupKey grpKey = encMgr.groupKey(groupId); + + encSpi.encryptNoPadding(srcBuf, grpKey.key(), res); res.rewind(); storeCRC(res); + res.put(grpKey.id()); + srcBuf.limit(srcLimit); srcBuf.position(srcBuf.position() + encryptionOverhead); } @@ -260,11 +259,31 @@ private void decrypt(ByteBuffer encrypted, ByteBuffer destBuf) throws IOExceptio assert encrypted.remaining() >= pageSize; assert encrypted.limit() >= pageSize; - checkCRC(encrypted); + int crc = FastCrc.calcCrc(encrypted, encryptedDataSize()); + + int storedCrc = 0; + + storedCrc |= (int)encrypted.get() << 24; + storedCrc |= ((int)encrypted.get() & 0xff) << 16; + storedCrc |= ((int)encrypted.get() & 0xff) << 8; + storedCrc |= encrypted.get() & 0xff; + + if (crc != storedCrc) { + throw new IOException("Content of encrypted page is broken. [StoredCrc=" + storedCrc + + ", calculatedCrc=" + crc + "]"); + } + + int keyId = encrypted.get() & 0xff; + + encrypted.position(encrypted.position() - (encryptedDataSize() + 4 /* CRC size. */ + 1 /* key identifier. */)); encrypted.limit(encryptedDataSize()); - encSpi.decryptNoPadding(encrypted, key(), destBuf); + GroupKey grpKey = encMgr.groupKey(groupId, keyId); + + assert grpKey != null : keyId; + + encSpi.decryptNoPadding(encrypted, grpKey.key(), destBuf); destBuf.put(zeroes); //Forcibly purge page buffer tail. } @@ -283,29 +302,6 @@ private void storeCRC(ByteBuffer res) { res.put((byte) crc); } - /** - * Checks encrypted data integrity. - * - * @param encrypted Encrypted data buffer. 
- */ - private void checkCRC(ByteBuffer encrypted) throws IOException { - int crc = FastCrc.calcCrc(encrypted, encryptedDataSize()); - - int storedCrc = 0; - - storedCrc |= (int)encrypted.get() << 24; - storedCrc |= ((int)encrypted.get() & 0xff) << 16; - storedCrc |= ((int)encrypted.get() & 0xff) << 8; - storedCrc |= encrypted.get() & 0xff; - - if (crc != storedCrc) { - throw new IOException("Content of encrypted page is broken. [StoredCrc=" + storedCrc + - ", calculatedCrd=" + crc + "]"); - } - - encrypted.position(encrypted.position() - (encryptedDataSize() + 4 /* CRC size. */)); - } - /** * @return Encrypted data size. */ @@ -334,16 +330,6 @@ private boolean tailIsEmpty(ByteBuffer src, int pageType) { return true; } - /** - * @return Encryption key. - */ - private Serializable key() { - if (encKey == null) - return encKey = encMgr.groupKey(groupId); - - return encKey; - } - /** {@inheritDoc} */ @Override public int write(byte[] buf, int off, int len) throws IOException { throw new UnsupportedOperationException("Encrypted File doesn't support this operation"); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageMetaIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageMetaIO.java index b25bae4ec8e93..84735c2bef6c7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageMetaIO.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageMetaIO.java @@ -56,7 +56,8 @@ public class PageMetaIO extends PageIO { /** */ public static final IOVersions VERSIONS = new IOVersions<>( - new PageMetaIO(1) + new PageMetaIO(1), + new PageMetaIOV2(2) ); /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageMetaIOV2.java 
+    /** Last reencrypted page index offset. */
+    private static final int ENCRYPT_PAGE_IDX_OFF = END_OF_PAGE_META;
+
+    /** Total pages for reencryption offset. */
+    private static final int ENCRYPT_PAGE_MAX_OFF = ENCRYPT_PAGE_IDX_OFF + 4;
+        PageUtils.putInt(pageAddr, ENCRYPT_PAGE_IDX_OFF, pageIdx);
+ */ + public void upgradePage(long pageAddr) { + assert PageIO.getType(pageAddr) == getType(); + + PageIO.setVersion(pageAddr, getVersion()); + + setEncryptedPageIndex(pageAddr, 0); + setEncryptedPageCount(pageAddr, 0); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PagePartitionMetaIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PagePartitionMetaIO.java index b58b5c0ab1a05..bb0634011e5ac 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PagePartitionMetaIO.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PagePartitionMetaIO.java @@ -48,7 +48,8 @@ public class PagePartitionMetaIO extends PageMetaIO { /** */ public static final IOVersions VERSIONS = new IOVersions<>( new PagePartitionMetaIO(1), - new PagePartitionMetaIOV2(2) + new PagePartitionMetaIOV2(2), + new PagePartitionMetaIOV3(3) ); /** {@inheritDoc} */ @@ -242,13 +243,24 @@ public boolean setGapsLink(long pageAddr, long link) { @Override protected void printPage(long pageAddr, int pageSize, GridStringBuilder sb) throws IgniteCheckedException { super.printPage(pageAddr, pageSize, sb); + sb.a(",\nPagePartitionMeta[\n"); + + printFields(pageAddr, sb); + + sb.a("\n]"); + } + + /** + * @param pageAddr Address. + * @param sb String builder. 
+ */ + protected void printFields(long pageAddr, GridStringBuilder sb) { byte state = getPartitionState(pageAddr); - sb.a(",\nPagePartitionMeta[\n\tsize=").a(getSize(pageAddr)) + sb.a("\tsize=").a(getSize(pageAddr)) .a(",\n\tupdateCounter=").a(getUpdateCounter(pageAddr)) .a(",\n\tglobalRemoveId=").a(getGlobalRemoveId(pageAddr)) .a(",\n\tpartitionState=").a(state).a("(").a(GridDhtPartitionState.fromOrdinal(state)).a(")") - .a(",\n\tcountersPageId=").a(getCountersPageId(pageAddr)) - .a("\n]"); + .a(",\n\tcountersPageId=").a(getCountersPageId(pageAddr)).toString(); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PagePartitionMetaIOV2.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PagePartitionMetaIOV2.java index efdfecd699ff1..6c30775409724 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PagePartitionMetaIOV2.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PagePartitionMetaIOV2.java @@ -18,9 +18,7 @@ package org.apache.ignite.internal.processors.cache.persistence.tree.io; -import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.pagemem.PageUtils; -import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState; import org.apache.ignite.internal.util.GridStringBuilder; /** @@ -37,6 +35,9 @@ public class PagePartitionMetaIOV2 extends PagePartitionMetaIO { /** */ private static final int GAPS_LINK = PART_META_REUSE_LIST_ROOT_OFF + 8; + /** */ + public static final int END_OF_PARTITION_PAGE_META_V2 = GAPS_LINK + 8; + /** * @param ver Version. 
*/ @@ -102,10 +103,10 @@ public boolean setGapsLink(long pageAddr, long link) { } /** {@inheritDoc} */ - @Override protected void printPage(long pageAddr, int pageSize, GridStringBuilder sb) throws IgniteCheckedException { - byte state = getPartitionState(pageAddr); + @Override protected void printFields(long pageAddr, GridStringBuilder sb) { + super.printFields(pageAddr, sb); - sb.a("PagePartitionMeta[\n\ttreeRoot=").a(getReuseListRoot(pageAddr)); + sb.a("\ttreeRoot=").a(getReuseListRoot(pageAddr)); sb.a(",\n\tpendingTreeRoot=").a(getLastSuccessfulFullSnapshotId(pageAddr)); sb.a(",\n\tlastSuccessfulFullSnapshotId=").a(getLastSuccessfulFullSnapshotId(pageAddr)); sb.a(",\n\tlastSuccessfulSnapshotId=").a(getLastSuccessfulSnapshotId(pageAddr)); @@ -113,13 +114,7 @@ public boolean setGapsLink(long pageAddr, long link) { sb.a(",\n\tlastSuccessfulSnapshotTag=").a(getLastSuccessfulSnapshotTag(pageAddr)); sb.a(",\n\tlastAllocatedPageCount=").a(getLastAllocatedPageCount(pageAddr)); sb.a(",\n\tcandidatePageCount=").a(getCandidatePageCount(pageAddr)); - sb.a(",\n\tsize=").a(getSize(pageAddr)); - sb.a(",\n\tupdateCounter=").a(getUpdateCounter(pageAddr)); - sb.a(",\n\tglobalRemoveId=").a(getGlobalRemoveId(pageAddr)); - sb.a(",\n\tpartitionState=").a(state).a("(").a(GridDhtPartitionState.fromOrdinal(state)).a(")"); - sb.a(",\n\tcountersPageId=").a(getCountersPageId(pageAddr)); sb.a(",\n\tcntrUpdDataPageId=").a(getGapsLink(pageAddr)); - sb.a("\n]"); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PagePartitionMetaIOV3.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PagePartitionMetaIOV3.java new file mode 100644 index 0000000000000..1c81d2ec84d80 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PagePartitionMetaIOV3.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or 
more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.ignite.internal.processors.cache.persistence.tree.io; + +import org.apache.ignite.internal.pagemem.PageUtils; +import org.apache.ignite.internal.util.GridStringBuilder; + +/** + * IO for partition metadata pages. + */ +public class PagePartitionMetaIOV3 extends PagePartitionMetaIOV2 { + /** Last reencrypted page index offset. */ + private static final int ENCRYPT_PAGE_IDX_OFF = END_OF_PARTITION_PAGE_META_V2; + + /** Total pages to be reencrypted offset. */ + private static final int ENCRYPT_PAGE_MAX_OFF = ENCRYPT_PAGE_IDX_OFF + 4; + + /** + * @param ver Version. + */ + public PagePartitionMetaIOV3(int ver) { + super(ver); + } + + /** {@inheritDoc} */ + @Override public void initNewPage(long pageAddr, long pageId, int pageSize) { + super.initNewPage(pageAddr, pageId, pageSize); + + setEncryptedPageIndex(pageAddr, 0); + setEncryptedPageCount(pageAddr, 0); + } + + /** + * @param pageAddr Page address. + * @return Index of the last reencrypted page. + */ + public int getEncryptedPageIndex(long pageAddr) { + return PageUtils.getInt(pageAddr, ENCRYPT_PAGE_IDX_OFF); + } + + /** + * @param pageAddr Page address. + * @param pageIdx Index of the last reencrypted page. 
+ * + * @return {@code true} if value has changed as a result of this method's invocation. + */ + public boolean setEncryptedPageIndex(long pageAddr, int pageIdx) { + if (getEncryptedPageIndex(pageAddr) == pageIdx) + return false; + + PageUtils.putInt(pageAddr, ENCRYPT_PAGE_IDX_OFF, pageIdx); + + return true; + } + + /** + * @param pageAddr Page address. + * @return Total pages to be reencrypted. + */ + public int getEncryptedPageCount(long pageAddr) { + return PageUtils.getInt(pageAddr, ENCRYPT_PAGE_MAX_OFF); + } + + /** + * @param pageAddr Page address. + * @param pagesCnt Total pages to be reencrypted. + * + * @return {@code true} if value has changed as a result of this method's invocation. + */ + public boolean setEncryptedPageCount(long pageAddr, int pagesCnt) { + if (getEncryptedPageCount(pageAddr) == pagesCnt) + return false; + + PageUtils.putInt(pageAddr, ENCRYPT_PAGE_MAX_OFF, pagesCnt); + + return true; + } + + /** {@inheritDoc} */ + @Override protected void printFields(long pageAddr, GridStringBuilder sb) { + super.printFields(pageAddr, sb); + + sb.a(",\n\tencryptedPageIndex=").a(getEncryptedPageIndex(pageAddr)); + sb.a(",\n\tencryptedPageCount=").a(getEncryptedPageCount(pageAddr)); + } + + /** + * Upgrade page to PagePartitionMetaIOV3. + * + * @param pageAddr Page address. 
+ */ + @Override public void upgradePage(long pageAddr) { + assert PageIO.getType(pageAddr) == getType(); + + int ver = PageIO.getVersion(pageAddr); + + assert ver < getVersion(); + + if (ver < 2) + super.upgradePage(pageAddr); + + PageIO.setVersion(pageAddr, getVersion()); + + setEncryptedPageIndex(pageAddr, 0); + setEncryptedPageCount(pageAddr, 0); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java index b79d637c115f6..70da8e9a95a3d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java @@ -1062,6 +1062,8 @@ private boolean hasIndex(long absIdx) { // Bump up the oldest archive segment index. 
if (segmentAware.lastTruncatedArchiveIdx() < desc.idx) segmentAware.lastTruncatedArchiveIdx(desc.idx); + + cctx.kernalContext().encryption().onWalSegmentRemoved(desc.idx); } } @@ -1087,6 +1089,11 @@ private boolean segmentReservedOrLocked(long absIdx) { segmentAware.keepUncompressedIdxFrom(ptr.index()); } + /** {@inheritDoc} */ + @Override public long currentSegment() { + return segmentAware.curAbsWalIdx(); + } + /** {@inheritDoc} */ @Override public int walArchiveSegments() { long lastTruncated = segmentAware.lastTruncatedArchiveIdx(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV1Serializer.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV1Serializer.java index 46fbd7780ba41..2da912824a1a3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV1Serializer.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV1Serializer.java @@ -20,7 +20,6 @@ import java.io.DataInput; import java.io.EOFException; import java.io.IOException; -import java.io.Serializable; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; @@ -32,6 +31,8 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.internal.managers.encryption.GridEncryptionManager; +import org.apache.ignite.internal.managers.encryption.GroupKey; +import org.apache.ignite.internal.managers.encryption.GroupKeyEncrypted; import org.apache.ignite.internal.pagemem.FullPageId; import org.apache.ignite.internal.pagemem.wal.record.CacheState; import org.apache.ignite.internal.pagemem.wal.record.CheckpointRecord; @@ -39,10 +40,11 @@ import org.apache.ignite.internal.pagemem.wal.record.DataRecord; import org.apache.ignite.internal.pagemem.wal.record.EncryptedRecord; 
import org.apache.ignite.internal.pagemem.wal.record.LazyDataEntry; -import org.apache.ignite.internal.pagemem.wal.record.MasterKeyChangeRecord; +import org.apache.ignite.internal.pagemem.wal.record.MasterKeyChangeRecordV2; import org.apache.ignite.internal.pagemem.wal.record.MemoryRecoveryRecord; import org.apache.ignite.internal.pagemem.wal.record.MetastoreDataRecord; import org.apache.ignite.internal.pagemem.wal.record.PageSnapshot; +import org.apache.ignite.internal.pagemem.wal.record.ReencryptionStartRecord; import org.apache.ignite.internal.pagemem.wal.record.TxRecord; import org.apache.ignite.internal.pagemem.wal.record.WALRecord; import org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType; @@ -68,12 +70,14 @@ import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRootInlineFlagsCreatedVersionRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRootInlineRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRootRecord; +import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateIndexDataRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateLastAllocatedIndex; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateLastSuccessfulFullSnapshotId; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateLastSuccessfulSnapshotId; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateNextSnapshotId; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdatePartitionDataRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdatePartitionDataRecordV2; +import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdatePartitionDataRecordV3; import org.apache.ignite.internal.pagemem.wal.record.delta.NewRootInitRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.PageListMetaResetCountRecord; import 
org.apache.ignite.internal.pagemem.wal.record.delta.PagesListAddPageRecord; @@ -117,8 +121,10 @@ import org.jetbrains.annotations.Nullable; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.DATA_RECORD; -import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.ENCRYPTED_DATA_RECORD; +import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.ENCRYPTED_DATA_RECORD_V2; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.ENCRYPTED_RECORD; +import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.ENCRYPTED_RECORD_V2; +import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.MASTER_KEY_CHANGE_RECORD_V2; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.READ; import static org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordV1Serializer.REC_TYPE_SIZE; import static org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordV1Serializer.putRecordType; @@ -190,7 +196,7 @@ public RecordDataV1Serializer(GridCacheSharedContext cctx) { int clSz = plainSize(record); if (needEncryption(record)) - return encSpi.encryptedSize(clSz) + 4 /* groupId */ + 4 /* data size */ + REC_TYPE_SIZE; + return encSpi.encryptedSize(clSz) + 4 /* groupId */ + 4 /* data size */ + 1 /* key ID */ + REC_TYPE_SIZE; return clSz; } @@ -198,7 +204,7 @@ public RecordDataV1Serializer(GridCacheSharedContext cctx) { /** {@inheritDoc} */ @Override public WALRecord readRecord(RecordType type, ByteBufferBackedDataInput in, int size) throws IOException, IgniteCheckedException { - if (type == ENCRYPTED_RECORD) { + if (type == ENCRYPTED_RECORD || type == ENCRYPTED_RECORD_V2) { if (encSpi == null) { T2 knownData = skipEncryptedRecord(in, true); @@ -206,7 +212,8 @@ public RecordDataV1Serializer(GridCacheSharedContext cctx) { return new EncryptedRecord(knownData.get1(), knownData.get2()); } - T3 
clData = readEncryptedData(in, true); + T3 clData = + readEncryptedData(in, true, type == ENCRYPTED_RECORD_V2); //This happen during startup. On first WAL iteration we restore only metastore. //So, no encryption keys available. See GridCacheDatabaseSharedManager#readMetastore @@ -270,30 +277,36 @@ private boolean needEncryption(int grpId) { * * @param in Input stream. * @param readType If {@code true} plain record type will be read from {@code in}. + * @param readKeyId If {@code true} encryption key identifier will be read from {@code in}. * @return Plain data stream, group id, plain record type, * @throws IOException If failed. * @throws IgniteCheckedException If failed. */ - private T3 readEncryptedData(ByteBufferBackedDataInput in, - boolean readType) - throws IOException, IgniteCheckedException { + private T3 readEncryptedData( + ByteBufferBackedDataInput in, + boolean readType, + boolean readKeyId + ) throws IOException, IgniteCheckedException { int grpId = in.readInt(); int encRecSz = in.readInt(); + RecordType plainRecType = null; if (readType) plainRecType = RecordV1Serializer.readRecordType(in); + int keyId = readKeyId ? 
in.readUnsignedByte() : GridEncryptionManager.INITIAL_KEY_ID; + byte[] encData = new byte[encRecSz]; in.readFully(encData); - Serializable key = encMgr.groupKey(grpId); + GroupKey grpKey = encMgr.groupKey(grpId, keyId); - if (key == null) + if (grpKey == null) return new T3<>(null, grpId, plainRecType); - byte[] clData = encSpi.decrypt(encData, key); + byte[] clData = encSpi.decrypt(encData, grpKey.key()); return new T3<>(new ByteBufferBackedDataInputImpl().buffer(ByteBuffer.wrap(clData)), grpId, plainRecType); } @@ -339,11 +352,11 @@ private void writeEncryptedData(int grpId, @Nullable RecordType plainRecType, By if (plainRecType != null) putRecordType(dst, plainRecType); - Serializable key = encMgr.groupKey(grpId); + GroupKey grpKey = encMgr.groupKey(grpId); - assert key != null; + dst.put(grpKey.id()); - encSpi.encrypt(clData, key, dst); + encSpi.encrypt(clData, grpKey.key(), dst); } /** @@ -372,6 +385,9 @@ assert record instanceof PageSnapshot; case META_PAGE_INIT: return /*cache ID*/4 + /*page ID*/8 + /*ioType*/2 + /*ioVer*/2 + /*tree root*/8 + /*reuse root*/8; + case INDEX_META_PAGE_DELTA_RECORD: + return /*cache ID*/4 + /*page ID*/8 + /*encrypt page index*/ 4 + /*encrypt pages count*/4; + case PARTITION_META_PAGE_UPDATE_COUNTERS: return /*cache ID*/4 + /*page ID*/8 + /*upd cntr*/8 + /*rmv id*/8 + /*part size*/4 + /*counters page id*/8 + /*state*/ 1 + /*allocatedIdxCandidate*/ 4; @@ -380,6 +396,10 @@ assert record instanceof PageSnapshot; return /*cache ID*/4 + /*page ID*/8 + /*upd cntr*/8 + /*rmv id*/8 + /*part size*/4 + /*counters page id*/8 + /*state*/ 1 + /*allocatedIdxCandidate*/ 4 + /*link*/ 8; + case PARTITION_META_PAGE_DELTA_RECORD_V3: + return /*cache ID*/4 + /*page ID*/8 + /*upd cntr*/8 + /*rmv id*/8 + /*part size*/4 + /*counters page id*/8 + /*state*/ 1 + + /*allocatedIdxCandidate*/ 4 + /*link*/ 8 + /*encrypt page index*/ 4 + /*encrypt pages count*/4; + case MEMORY_RECOVERY: return 8; @@ -536,10 +556,12 @@ assert record instanceof PageSnapshot; 
case TX_RECORD: return txRecordSerializer.size((TxRecord)record); - case MASTER_KEY_CHANGE_RECORD: - MasterKeyChangeRecord rec = (MasterKeyChangeRecord)record; + case MASTER_KEY_CHANGE_RECORD_V2: + return ((MasterKeyChangeRecordV2)record).dataSize(); + + case REENCRYPTION_START_RECORD: + return ((ReencryptionStartRecord)record).dataSize(); - return rec.dataSize(); default: throw new UnsupportedOperationException("Type: " + record.type()); } @@ -609,6 +631,11 @@ WALRecord readPlainRecord(RecordType type, ByteBufferBackedDataInput in, break; + case INDEX_META_PAGE_DELTA_RECORD: + res = new MetaPageUpdateIndexDataRecord(in); + + break; + case PARTITION_META_PAGE_UPDATE_COUNTERS: res = new MetaPageUpdatePartitionDataRecord(in); @@ -619,6 +646,11 @@ WALRecord readPlainRecord(RecordType type, ByteBufferBackedDataInput in, break; + case PARTITION_META_PAGE_DELTA_RECORD_V3: + res = new MetaPageUpdatePartitionDataRecordV3(in); + + break; + case MEMORY_RECOVERY: long ts = in.readLong(); @@ -647,12 +679,13 @@ WALRecord readPlainRecord(RecordType type, ByteBufferBackedDataInput in, break; case ENCRYPTED_DATA_RECORD: + case ENCRYPTED_DATA_RECORD_V2: entryCnt = in.readInt(); entries = new ArrayList<>(entryCnt); for (int i = 0; i < entryCnt; i++) - entries.add(readEncryptedDataEntry(in)); + entries.add(readEncryptedDataEntry(in, type == ENCRYPTED_DATA_RECORD_V2)); res = new DataRecord(entries, 0L); @@ -1184,6 +1217,7 @@ WALRecord readPlainRecord(RecordType type, ByteBufferBackedDataInput in, break; case MASTER_KEY_CHANGE_RECORD: + case MASTER_KEY_CHANGE_RECORD_V2: int keyNameLen = in.readInt(); byte[] keyNameBytes = new byte[keyNameLen]; @@ -1194,21 +1228,39 @@ WALRecord readPlainRecord(RecordType type, ByteBufferBackedDataInput in, int keysCnt = in.readInt(); - HashMap grpKeys = new HashMap<>(keysCnt); + List> grpKeys = new ArrayList<>(keysCnt); + + boolean readKeyId = type == MASTER_KEY_CHANGE_RECORD_V2; for (int i = 0; i < keysCnt; i++) { int grpId = in.readInt(); + int keyId 
= readKeyId ? in.readByte() & 0xff : 0; int grpKeySize = in.readInt(); - byte[] grpKey = new byte[grpKeySize]; in.readFully(grpKey); - grpKeys.put(grpId, grpKey); + grpKeys.add(new T2<>(grpId, new GroupKeyEncrypted(keyId, grpKey))); } - res = new MasterKeyChangeRecord(masterKeyName, grpKeys); + res = new MasterKeyChangeRecordV2(masterKeyName, grpKeys); + + break; + + case REENCRYPTION_START_RECORD: + int grpsCnt = in.readInt(); + + Map map = U.newHashMap(grpsCnt); + + for (int i = 0; i < grpsCnt; i++) { + int grpId = in.readInt(); + byte keyId = in.readByte(); + + map.put(grpId, keyId); + } + + res = new ReencryptionStartRecord(map); break; @@ -1265,8 +1317,14 @@ void writePlainRecord(WALRecord rec, ByteBuffer buf) throws IgniteCheckedExcepti break; + case INDEX_META_PAGE_DELTA_RECORD: + ((MetaPageUpdateIndexDataRecord)rec).toBytes(buf); + + break; + case PARTITION_META_PAGE_UPDATE_COUNTERS: case PARTITION_META_PAGE_UPDATE_COUNTERS_V2: + case PARTITION_META_PAGE_DELTA_RECORD_V3: ((MetaPageUpdatePartitionDataRecord)rec).toBytes(buf); break; @@ -1795,23 +1853,40 @@ void writePlainRecord(WALRecord rec, ByteBuffer buf) throws IgniteCheckedExcepti case SWITCH_SEGMENT_RECORD: break; - case MASTER_KEY_CHANGE_RECORD: - MasterKeyChangeRecord mkChangeRec = (MasterKeyChangeRecord)rec; + case MASTER_KEY_CHANGE_RECORD_V2: + MasterKeyChangeRecordV2 mkChangeRec = (MasterKeyChangeRecordV2)rec; byte[] keyIdBytes = mkChangeRec.getMasterKeyName().getBytes(); buf.putInt(keyIdBytes.length); buf.put(keyIdBytes); - Map grpKeys = mkChangeRec.getGrpKeys(); + List> grpKeys = mkChangeRec.getGrpKeys(); buf.putInt(grpKeys.size()); - for (Entry entry : grpKeys.entrySet()) { - buf.putInt(entry.getKey()); + for (T2 entry : grpKeys) { + GroupKeyEncrypted grpKey = entry.get2(); + + buf.putInt(entry.get1()); + buf.put((byte)grpKey.id()); + + buf.putInt(grpKey.key().length); + buf.put(grpKey.key()); + } + + break; + + case REENCRYPTION_START_RECORD: + ReencryptionStartRecord statusRecord = 
(ReencryptionStartRecord)rec; + + Map grps = statusRecord.groups(); + + buf.putInt(grps.size()); - buf.putInt(entry.getValue().length); - buf.put(entry.getValue()); + for (Map.Entry e : grps.entrySet()) { + buf.putInt(e.getKey()); + buf.put(e.getValue()); } break; @@ -1927,11 +2002,12 @@ private static void putRow(ByteBuffer buf, byte[] rowBytes) { /** * @param in Input to read from. + * @param readKeyId If {@code true} encryption key identifier will be read from {@code in}. * @return Read entry. * @throws IOException If failed. * @throws IgniteCheckedException If failed. */ - DataEntry readEncryptedDataEntry(ByteBufferBackedDataInput in) throws IOException, IgniteCheckedException { + DataEntry readEncryptedDataEntry(ByteBufferBackedDataInput in, boolean readKeyId) throws IOException, IgniteCheckedException { boolean needDecryption = in.readByte() == ENCRYPTED; if (needDecryption) { @@ -1941,7 +2017,7 @@ DataEntry readEncryptedDataEntry(ByteBufferBackedDataInput in) throws IOExceptio return new EncryptedDataEntry(); } - T3 clData = readEncryptedData(in, false); + T3 clData = readEncryptedData(in, false, readKeyId); if (clData.get1() == null) return null; @@ -2035,12 +2111,12 @@ RecordType recordType(WALRecord rec) { return rec.type(); if (needEncryption(rec)) - return ENCRYPTED_RECORD; + return ENCRYPTED_RECORD_V2; if (rec.type() != DATA_RECORD) return rec.type(); - return isDataRecordEncrypted((DataRecord)rec) ? ENCRYPTED_DATA_RECORD : DATA_RECORD; + return isDataRecordEncrypted((DataRecord)rec) ? 
ENCRYPTED_DATA_RECORD_V2 : DATA_RECORD; } /** @@ -2129,7 +2205,7 @@ protected int dataSize(DataRecord dataRec) throws IgniteCheckedException { int clSz = entrySize(entry); if (!encryptionDisabled && needEncryption(cctx.cacheContext(entry.cacheId()).groupId())) - sz += encSpi.encryptedSize(clSz) + 1 /* encrypted flag */ + 4 /* groupId */ + 4 /* data size */; + sz += encSpi.encryptedSize(clSz) + 1 /*encrypted flag*/ + 4 /*groupId*/ + 4 /*data size*/ + 1 /*key ID*/; else { sz += clSz; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV2Serializer.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV2Serializer.java index 6e70ee60f0542..8622629497b19 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV2Serializer.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV2Serializer.java @@ -56,6 +56,8 @@ import org.apache.ignite.internal.processors.cache.persistence.wal.record.HeaderRecord; import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; +import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.ENCRYPTED_DATA_RECORD_V2; + /** * Record data V2 serializer. 
*/ @@ -180,13 +182,14 @@ public RecordDataV2Serializer(GridCacheSharedContext cctx) { return new MvccDataRecord(entries, timeStamp); case ENCRYPTED_DATA_RECORD: + case ENCRYPTED_DATA_RECORD_V2: entryCnt = in.readInt(); timeStamp = in.readLong(); entries = new ArrayList<>(entryCnt); for (int i = 0; i < entryCnt; i++) - entries.add(readEncryptedDataEntry(in)); + entries.add(readEncryptedDataEntry(in, type == ENCRYPTED_DATA_RECORD_V2)); return new DataRecord(entries, timeStamp); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/BasicRateLimiter.java b/modules/core/src/main/java/org/apache/ignite/internal/util/BasicRateLimiter.java new file mode 100644 index 0000000000000..8429ccbbcb346 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/util/BasicRateLimiter.java @@ -0,0 +1,153 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.util; + +import org.apache.ignite.internal.IgniteInterruptedCheckedException; +import org.apache.ignite.internal.util.typedef.internal.A; + +import static java.lang.Math.max; +import static java.util.concurrent.TimeUnit.MICROSECONDS; +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static java.util.concurrent.TimeUnit.SECONDS; + +/** + * The simplified version of Google Guava smooth rate limiter.

+ * + * The primary feature of a rate limiter is its "stable rate", the maximum rate that is should + * allow at normal conditions. This is enforced by "throttling" incoming requests as needed, i.e. + * compute, for an incoming request, the appropriate throttle time, and make the calling thread + * wait as much.

+ * + * The simplest way to maintain a rate of QPS is to keep the timestamp of the last granted + * request, and ensure that (1/QPS) seconds have elapsed since then. For example, for a rate of + * QPS=5 (5 tokens per second), if we ensure that a request isn't granted earlier than 200ms after + * the last one, then we achieve the intended rate. If a request comes and the last request was + * granted only 100ms ago, then we wait for another 100ms. At this rate, serving 15 fresh permits + * (i.e. for an acquire(15) request) naturally takes 3 seconds.

+ * + * It is important to realize that such a limiter has a very superficial memory of the past: + * it only remembers the last request. if the limiter was unused for a long period of + * time, then a request arrived and was immediately granted? This limiter would immediately + * forget about that past underutilization. + */ +public class BasicRateLimiter { + /** Start timestamp. */ + private final long startTime = System.nanoTime(); + + /** Synchronization mutex. */ + private final Object mux = new Object(); + + /** + * The interval between two unit requests, at our stable rate. E.g., a stable rate of 5 permits + * per second has a stable interval of 200ms. + */ + private double stableIntervalMicros; + + /** + * The time when the next request (no matter its size) will be granted. After granting a request, + * this is pushed further in the future. Large requests push this further than small requests. + */ + private long nextFreeTicketMicros; + + /** + * @param permitsPerSecond Estimated number of permits per second. + */ + public BasicRateLimiter(double permitsPerSecond) { + setRate(permitsPerSecond); + } + + /** + * Updates the stable rate. + * + * @param permitsPerSecond The new stable rate of this {@code RateLimiter}. + * @throws IllegalArgumentException If {@code permitsPerSecond} is negative or zero. + */ + public void setRate(double permitsPerSecond) { + A.ensure(permitsPerSecond > 0, "Requested permits (" + permitsPerSecond + ") must be positive"); + + synchronized (mux) { + resync(); + + stableIntervalMicros = SECONDS.toMicros(1L) / permitsPerSecond; + } + } + + /** + * @return The stable rate (as {@code permits per seconds}). + */ + public double getRate() { + synchronized (mux) { + return SECONDS.toMicros(1L) / stableIntervalMicros; + } + } + + /** + * Acquires the given number of permits from this {@code RateLimiter}, blocking until the request + * can be granted. Tells the amount of time slept, if any. 
+ * + * @param permits The number of permits to acquire. + * @throws IllegalArgumentException If the requested number of permits is negative or zero. + */ + public void acquire(int permits) throws IgniteInterruptedCheckedException { + long microsToWait = reserve(permits); + + try { + MICROSECONDS.sleep(microsToWait); + } + catch (InterruptedException e) { + Thread.currentThread().interrupt(); + + throw new IgniteInterruptedCheckedException(e); + } + } + + /** + * Reserves the given number of permits for future use. + * + * @param permits The number of permits. + * @return Time in microseconds to wait until the resource can be acquired, never negative. + */ + private long reserve(int permits) { + A.ensure(permits > 0, "Requested permits (" + permits + ") must be positive"); + + synchronized (mux) { + long nowMicros = resync(); + + long momentAvailable = nextFreeTicketMicros; + + nextFreeTicketMicros = momentAvailable + (long)(permits * stableIntervalMicros); + + return max(momentAvailable - nowMicros, 0); + } + } + + /** + * Updates {@code nextFreeTicketMicros} based on the current time. + * + * @return Time passed (since start) in microseconds. 
+ */ + private long resync() { + long passed = MICROSECONDS.convert(System.nanoTime() - startTime, NANOSECONDS); + + // if nextFreeTicket is in the past, resync to now + if (passed > nextFreeTicketMicros) + nextFreeTicketMicros = passed; + + return passed; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/distributed/DistributedProcess.java b/modules/core/src/main/java/org/apache/ignite/internal/util/distributed/DistributedProcess.java index fe0fee5a99b11..65394537302b6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/util/distributed/DistributedProcess.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/util/distributed/DistributedProcess.java @@ -430,6 +430,16 @@ public enum DistributedProcessType { * * @see IgniteSnapshotManager */ - END_SNAPSHOT + END_SNAPSHOT, + + /** + * Cache group encryption key change prepare phase. + */ + CACHE_GROUP_KEY_CHANGE_PREPARE, + + /** + * Cache group encryption key change finish phase. + */ + CACHE_GROUP_KEY_CHANGE_FINISH } } diff --git a/modules/core/src/main/java/org/apache/ignite/mxbean/EncryptionMXBean.java b/modules/core/src/main/java/org/apache/ignite/mxbean/EncryptionMXBean.java index 9db104971ae55..5aaca05e47c86 100644 --- a/modules/core/src/main/java/org/apache/ignite/mxbean/EncryptionMXBean.java +++ b/modules/core/src/main/java/org/apache/ignite/mxbean/EncryptionMXBean.java @@ -17,6 +17,7 @@ package org.apache.ignite.mxbean; +import java.util.Collection; import org.apache.ignite.IgniteEncryption; /** @@ -43,4 +44,15 @@ public interface EncryptionMXBean { public void changeMasterKey( @MXBeanParameter(name = "masterKeyName", description = "Master key name.") String masterKeyName ); + + /** + * Starts cache group encryption key change process. + * + * @param cacheOrGrpName Cache or group name. 
+ * @see IgniteEncryption#changeCacheGroupKey(Collection) + */ + @MXBeanDescription("Change cache group key.") + public void changeCacheGroupKey( + @MXBeanParameter(name = "cacheOrGroupName", description = "Cache or group name.") String cacheOrGrpName + ); } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/AbstractEncryptionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/AbstractEncryptionTest.java index 68b4888141d3d..8c66afe05d54c 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/encryption/AbstractEncryptionTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/AbstractEncryptionTest.java @@ -20,21 +20,40 @@ import java.io.File; import java.io.FileOutputStream; import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; import java.security.KeyStore; import java.util.HashSet; +import java.util.List; import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.IntStream; import javax.crypto.KeyGenerator; import javax.crypto.SecretKey; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteDataStreamer; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.IgniteInterruptedCheckedException; +import org.apache.ignite.internal.managers.encryption.GridEncryptionManager; +import org.apache.ignite.internal.managers.encryption.GroupKey; +import org.apache.ignite.internal.managers.encryption.ReencryptStateUtils; +import 
org.apache.ignite.internal.pagemem.PageIdAllocator; +import org.apache.ignite.internal.pagemem.PageIdUtils; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; import org.apache.ignite.internal.processors.cache.IgniteInternalCache; +import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStore; +import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; import org.apache.ignite.internal.util.IgniteUtils; +import org.apache.ignite.internal.util.future.GridCompoundFuture; import org.apache.ignite.internal.util.typedef.G; import org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.internal.CU; @@ -47,6 +66,7 @@ import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; import static org.apache.ignite.configuration.WALMode.FSYNC; +import static org.apache.ignite.internal.pagemem.PageIdAllocator.INDEX_PARTITION; import static org.apache.ignite.spi.encryption.keystore.KeystoreEncryptionSpi.CIPHER_ALGO; import static org.apache.ignite.spi.encryption.keystore.KeystoreEncryptionSpi.DEFAULT_MASTER_KEY_NAME; @@ -115,6 +135,18 @@ protected String keystorePath() { return KEYSTORE_PATH; } + /** + * @param name Cache name. + * @param grp Cache group name. 
+ */ + protected CacheConfiguration cacheConfiguration(String name, String grp) { + CacheConfiguration cfg = new CacheConfiguration<>(name); + + return cfg.setWriteSynchronizationMode(FULL_SYNC) + .setGroupName(grp) + .setEncryptionEnabled(true); + } + /** */ void checkEncryptedCaches(IgniteEx grid0, IgniteEx grid1) { Set cacheNames = new HashSet<>(grid0.cacheNames()); @@ -139,13 +171,21 @@ void checkEncryptedCaches(IgniteEx grid0, IgniteEx grid1) { assertTrue(encrypted1.configuration().isEncryptionEnabled()); - KeystoreEncryptionKey encKey0 = (KeystoreEncryptionKey)grid0.context().encryption().groupKey(grpId); + GroupKey grpKey0 = grid0.context().encryption().groupKey(grpId); + + assertNotNull(grpKey0); + + KeystoreEncryptionKey encKey0 = (KeystoreEncryptionKey)grpKey0.key(); assertNotNull(encKey0); assertNotNull(encKey0.key()); if (!grid1.configuration().isClientMode()) { - KeystoreEncryptionKey encKey1 = (KeystoreEncryptionKey)grid1.context().encryption().groupKey(grpId); + GroupKey grpKey1 = grid1.context().encryption().groupKey(grpId); + + assertNotNull(grpKey1); + + KeystoreEncryptionKey encKey1 = (KeystoreEncryptionKey)grpKey1.key(); assertNotNull(encKey1); assertNotNull(encKey1.key()); @@ -161,12 +201,21 @@ void checkEncryptedCaches(IgniteEx grid0, IgniteEx grid1) { /** */ protected void checkData(IgniteEx grid0) { - IgniteCache cache = grid0.cache(cacheName()); + IgniteCache cache = grid0.cache(cacheName()); assertNotNull(cache); - for (long i = 0; i < 100; i++) - assertEquals("" + i, cache.get(i)); + int size = cache.size(); + + assertTrue("Cache cannot be empty", size > 0); + + for (long i = 0; i < size; i++) + assertEquals(generateValue(i), cache.get(i)); + } + + /** */ + protected Object generateValue(long id) { + return String.valueOf(id); } /** */ @@ -178,22 +227,17 @@ protected void createEncryptedCache(IgniteEx grid0, @Nullable IgniteEx grid1, St /** */ protected void createEncryptedCache(IgniteEx grid0, @Nullable IgniteEx grid1, String cacheName, 
String cacheGroup, boolean putData) throws IgniteInterruptedCheckedException { - CacheConfiguration ccfg = new CacheConfiguration(cacheName) - .setWriteSynchronizationMode(FULL_SYNC) - .setGroupName(cacheGroup) - .setEncryptionEnabled(true); - - IgniteCache cache = grid0.createCache(ccfg); + IgniteCache cache = grid0.createCache(cacheConfiguration(cacheName, cacheGroup)); if (grid1 != null) GridTestUtils.waitForCondition(() -> grid1.cachex(cacheName()) != null, 2_000L); if (putData) { for (long i = 0; i < 100; i++) - cache.put(i, "" + i); + cache.put(i, generateValue(i)); for (long i = 0; i < 100; i++) - assertEquals("" + i, cache.get(i)); + assertEquals(generateValue(i), cache.get(i)); } } @@ -271,4 +315,206 @@ protected boolean checkMasterKeyName(String name) { return true; } + + /** + * Load data into cache "{@link #cacheName()}" using node "{@link #GRID_0}". + * + * @param cnt Count of entries. + */ + protected void loadData(int cnt) { + loadData(cacheName(), cnt); + } + + /** + * Load data into cache using node "{@link #GRID_0}". + * + * @param cnt Count of entries. + * @param cacheName Cache name. + */ + protected void loadData(String cacheName, int cnt) { + info("Loading " + cnt + " entries into " + cacheName); + + int start = grid(GRID_0).cache(cacheName).size(); + + try (IgniteDataStreamer streamer = grid(GRID_0).dataStreamer(cacheName)) { + for (long i = start; i < (cnt + start); i++) + streamer.addData(i, generateValue(i)); + } + + info("Load data finished"); + } + + /** + * Ensures that all pages of page store have expected encryption key identifier. + * + * @param grpId Cache group ID. + * @param expKeyId Encryption key ID. + * @param timeout Timeout to wait for encryption to complete. + * @throws Exception If failed. 
+ */ + protected void checkGroupKey(int grpId, int expKeyId, long timeout) throws Exception { + awaitEncryption(G.allGrids(), grpId, timeout); + + for (Ignite g : G.allGrids()) { + IgniteEx grid = (IgniteEx)g; + + if (grid.context().clientNode()) + continue; + + info("Validating encryption key [node=" + g.cluster().localNode().id() + ", grp=" + grpId + "]"); + + CacheGroupContext grp = grid.context().cache().cacheGroup(grpId); + + if (grp == null || !grp.affinityNode()) { + info("Context doesn't exits on " + grid.localNode().id()); + + continue; + } + + GridEncryptionManager encryption = grid.context().encryption(); + + assertEquals(grid.localNode().id().toString(), (byte)expKeyId, encryption.groupKey(grpId).id()); + + IgniteInternalFuture fut = encryption.reencryptionFuture(grpId); + + // The future will be completed after the checkpoint, forcecheckpoint does nothing + // if the checkpoint has already been scheduled. + GridTestUtils.waitForCondition(() -> { + if (fut.isDone()) + return true; + + try { + forceCheckpoint(g); + } + catch (IgniteCheckedException e) { + throw new RuntimeException(e); + } + + return fut.isDone(); + }, timeout); + + assertTrue(fut.isDone()); + + List parts = IntStream.range(0, grp.shared().affinity().affinity(grpId).partitions()) + .boxed().collect(Collectors.toList()); + + parts.add(INDEX_PARTITION); + + int realPageSize = grp.dataRegion().pageMemory().realPageSize(grpId); + int encryptionBlockSize = grp.shared().kernalContext().config().getEncryptionSpi().blockSize(); + + for (int p : parts) { + FilePageStore pageStore = + (FilePageStore)((FilePageStoreManager)grp.shared().pageStore()).getStore(grpId, p); + + if (!pageStore.exists()) + continue; + + long state = grid.context().encryption().getEncryptionState(grpId, p); + + String msg = String.format("p=%d, off=%d, total=%d", + p, ReencryptStateUtils.pageIndex(state), ReencryptStateUtils.pageCount(state)); + + assertEquals(msg, 0, ReencryptStateUtils.pageCount(state)); + 
assertEquals(msg, 0, ReencryptStateUtils.pageIndex(state)); + + long startPageId = PageIdUtils.pageId(p, PageIdAllocator.FLAG_DATA, 0); + + int pagesCnt = pageStore.pages(); + int pageSize = pageStore.getPageSize(); + + ByteBuffer pageBuf = ByteBuffer.allocate(pageSize); + + Path path = new File(pageStore.getFileAbsolutePath()).toPath(); + + try (FileChannel ch = FileChannel.open(path, StandardOpenOption.READ)) { + for (int n = 0; n < pagesCnt; n++) { + long pageId = startPageId + n; + long pageOff = pageStore.pageOffset(pageId); + + pageBuf.position(0); + + ch.position(pageOff); + ch.read(pageBuf); + + pageBuf.position(realPageSize + encryptionBlockSize); + + int pageCrc = pageBuf.getInt(); + int pageKeyId = pageBuf.get() & 0xff; + + // If this page is empty we can skip it. + if (pageCrc == 0 && pageKeyId == 0) { + pageBuf.position(0); + + boolean emptyPage = false; + + while (pageBuf.hasRemaining() && !emptyPage) + emptyPage = pageBuf.getLong() == 0; + + if (emptyPage) + continue; + } + + msg = String.format("File=%s, page=%d", pageStore.getFileAbsolutePath(), n); + assertEquals(msg, expKeyId, pageKeyId); + } + } + } + } + } + + /** + * @param grids Grids. + * @param grpId Cache group ID. + * @param timeout Timeout to wait for encryption to complete. + * @throws IgniteCheckedException If failed. + */ + protected void awaitEncryption(List grids, int grpId, long timeout) throws IgniteCheckedException { + GridCompoundFuture fut = new GridCompoundFuture<>(); + + for (Ignite node : grids) { + IgniteEx grid = (IgniteEx)node; + + if (grid.context().clientNode()) + continue; + + IgniteInternalFuture fut0 = GridTestUtils.runAsync(() -> { + boolean success = + GridTestUtils.waitForCondition(() -> !isReencryptionInProgress(grid, grpId), timeout); + + assertTrue(success); + + return null; + }); + + fut.add(fut0); + } + + fut.markInitialized(); + + fut.get(timeout); + } + + /** + * @param node Node. + * @param grpId Cache group ID. 
+ * @return {@code True} If reencryption of the specified group is not yet complete. + */ + protected boolean isReencryptionInProgress(IgniteEx node, int grpId) { + CacheGroupContext grp = node.context().cache().cacheGroup(grpId); + + if (grp == null || !grp.affinityNode()) + return false; + + for (int p = 0; p < grp.affinity().partitions(); p++) { + long state = node.context().encryption().getEncryptionState(grpId, p); + + if (ReencryptStateUtils.pageIndex(state) != ReencryptStateUtils.pageCount(state)) + return true; + } + + long state = node.context().encryption().getEncryptionState(grpId, INDEX_PARTITION); + + return ReencryptStateUtils.pageIndex(state) != ReencryptStateUtils.pageCount(state); + } } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/CacheGroupKeyChangeTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/CacheGroupKeyChangeTest.java new file mode 100644 index 0000000000000..810e05d109ace --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/CacheGroupKeyChangeTest.java @@ -0,0 +1,1063 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.encryption; + +import java.io.File; +import java.io.Serializable; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Random; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteDataStreamer; +import org.apache.ignite.IgniteException; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.cluster.ClusterState; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.configuration.WALMode; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.TestRecordingCommunicationSpi; +import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage; +import org.apache.ignite.internal.managers.encryption.GridEncryptionManager; +import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; +import org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer; +import org.apache.ignite.internal.util.distributed.DistributedProcess.DistributedProcessType; +import org.apache.ignite.internal.util.distributed.InitMessage; +import org.apache.ignite.internal.util.distributed.SingleNodeMessage; +import org.apache.ignite.internal.util.future.GridFinishedFuture; +import org.apache.ignite.internal.util.typedef.G; +import org.apache.ignite.internal.util.typedef.T2; +import org.apache.ignite.internal.util.typedef.internal.CU; +import 
org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteFuture; +import org.apache.ignite.spi.IgniteSpiException; +import org.apache.ignite.spi.discovery.tcp.TestTcpDiscoverySpi; +import org.apache.ignite.testframework.GridTestUtils.DiscoveryHook; +import org.junit.Test; + +import static org.apache.ignite.configuration.WALMode.LOG_ONLY; +import static org.apache.ignite.configuration.WALMode.NONE; +import static org.apache.ignite.internal.managers.encryption.GridEncryptionManager.INITIAL_KEY_ID; +import static org.apache.ignite.spi.encryption.keystore.KeystoreEncryptionSpi.DEFAULT_MASTER_KEY_NAME; +import static org.apache.ignite.testframework.GridTestUtils.assertThrowsAnyCause; +import static org.apache.ignite.testframework.GridTestUtils.assertThrowsWithCause; +import static org.apache.ignite.testframework.GridTestUtils.runAsync; +import static org.apache.ignite.testframework.GridTestUtils.waitForCondition; + +/** + * Cache group key change distributed process tests. + */ +public class CacheGroupKeyChangeTest extends AbstractEncryptionTest { + /** Timeout. */ + private static final long MAX_AWAIT_MILLIS = 15_000; + + /** 1 megabyte in bytes. */ + private static final int MB = 1024 * 1024; + + /** */ + private static final String GRID_2 = "grid-2"; + + /** Discovery hook for distributed process. */ + private InitMessageDiscoveryHook discoveryHook; + + /** Count of cache backups. */ + private int backups; + + /** Number of WAL segments. */ + private int walSegments = 10; + + /** WAL mode. 
*/ + private WALMode walMode = LOG_ONLY; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String name) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(name); + + cfg.setConsistentId(name); + cfg.setCommunicationSpi(new TestRecordingCommunicationSpi()); + + if (discoveryHook != null) + ((TestTcpDiscoverySpi)cfg.getDiscoverySpi()).discoveryHook(discoveryHook); + + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration() + .setMaxSize(100 * MB) + .setPersistenceEnabled(true)) + .setPageSize(4 * 1024) + .setWalSegmentSize(MB) + .setWalSegments(walSegments) + .setMaxWalArchiveSize(2 * walSegments * MB) + .setCheckpointFrequency(30 * 1000L) + .setWalMode(walMode); + + cfg.setDataStorageConfiguration(memCfg); + + return cfg; + } + + /** {@inheritDoc} */ + @Override protected CacheConfiguration cacheConfiguration(String name, String grp) { + CacheConfiguration cfg = super.cacheConfiguration(name, grp); + + return cfg.setAffinity(new RendezvousAffinityFunction(false, 8)).setBackups(backups); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + + cleanPersistenceDir(); + } + + /** @throws Exception If failed. 
*/ + @Test + @SuppressWarnings("ThrowableNotThrown") + public void testRejectNodeJoinDuringRotation() throws Exception { + T2 grids = startTestGrids(true); + + createEncryptedCache(grids.get1(), grids.get2(), cacheName(), null); + + int grpId = CU.cacheId(cacheName()); + + assertEquals(0, grids.get1().context().encryption().groupKey(grpId).id()); + + TestRecordingCommunicationSpi commSpi = TestRecordingCommunicationSpi.spi(grids.get2()); + + commSpi.blockMessages((node, msg) -> msg instanceof SingleNodeMessage); + + IgniteFuture fut = grids.get1().encryption().changeCacheGroupKey(Collections.singleton(cacheName())); + + commSpi.waitForBlocked(); + + assertThrowsWithCause(() -> startGrid(3), IgniteCheckedException.class); + + commSpi.stopBlock(); + + fut.get(); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + + checkEncryptedCaches(grids.get1(), grids.get2()); + } + + /** @throws Exception If failed. */ + @Test + public void testNotAllBltNodesPresent() throws Exception { + startTestGrids(true); + + createEncryptedCache(grid(GRID_0), grid(GRID_1), cacheName(), null); + + stopGrid(GRID_1); + + grid(GRID_0).encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + startGrid(GRID_1); + + checkGroupKey(CU.cacheId(cacheName()), INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** @throws Exception If failed. */ + @Test + public void testNodeFailsBeforePrepare() throws Exception { + checkNodeFailsDuringRotation(false, true, true); + } + + /** @throws Exception If failed. */ + @Test + public void testNodeFailsBeforePerform() throws Exception { + checkNodeFailsDuringRotation(false, false, true); + } + + /** @throws Exception If failed. */ + @Test + public void testNodeFailsAfterPrepare() throws Exception { + checkNodeFailsDuringRotation(false, true, false); + } + + /** @throws Exception If failed. 
*/ + @Test + public void testCrdFailsAfterPrepare() throws Exception { + checkNodeFailsDuringRotation(true, true, false); + } + + /** @throws Exception If failed. */ + @Test + public void testNodeFailsAfterPerform() throws Exception { + checkNodeFailsDuringRotation(false, false, false); + } + + /** @throws Exception If failed. */ + @Test + public void testCrdFailsAfterPerform() throws Exception { + checkNodeFailsDuringRotation(true, false, false); + } + + /** + * @param stopCrd {@code True} to stop coordinator. + * @param prepare {@code True} to stop on the prepare phase. {@code False} to stop on the perform phase. + * @param discoBlock {@code True} to block discovery, {@code False} to block communication SPI. + */ + private void checkNodeFailsDuringRotation(boolean stopCrd, boolean prepare, boolean discoBlock) throws Exception { + cleanPersistenceDir(); + + DistributedProcessType type = prepare ? + DistributedProcessType.CACHE_GROUP_KEY_CHANGE_PREPARE : DistributedProcessType.CACHE_GROUP_KEY_CHANGE_FINISH; + + InitMessageDiscoveryHook locHook = new InitMessageDiscoveryHook(type); + + if (discoBlock && stopCrd) + discoveryHook = locHook; + + IgniteEx grid0 = startGrid(GRID_0); + + if (discoBlock && !stopCrd) + discoveryHook = locHook; + + IgniteEx grid1 = startGrid(GRID_1); + + grid0.cluster().state(ClusterState.ACTIVE); + + createEncryptedCache(grid0, grid1, cacheName(), null); + + int grpId = CU.cacheId(cacheName()); + + checkGroupKey(grpId, INITIAL_KEY_ID, MAX_AWAIT_MILLIS); + + TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(grid1); + + if (!discoBlock) { + AtomicBoolean preparePhase = new AtomicBoolean(true); + + spi.blockMessages((node, msg) -> { + if (msg instanceof SingleNodeMessage) { + boolean isPrepare = preparePhase.compareAndSet(true, false); + + return prepare || !isPrepare; + } + + return false; + }); + } + + String alive = stopCrd ? GRID_1 : GRID_0; + String stopped = stopCrd ? 
GRID_0 : GRID_1; + + IgniteFuture changeFut = grid(alive).encryption().changeCacheGroupKey(Collections.singleton(cacheName())); + + IgniteInternalFuture stopFut = new GridFinishedFuture<>(); + + if (discoBlock) { + locHook.waitForBlocked(MAX_AWAIT_MILLIS); + + stopGrid(stopped, true); + + locHook.stopBlock(); + } + else { + spi.waitForBlocked(); + + stopFut = runAsync(() -> stopGrid(stopped, true)); + } + + changeFut.get(MAX_AWAIT_MILLIS); + stopFut.get(MAX_AWAIT_MILLIS); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + + IgniteEx stoppedNode = startGrid(stopped); + + stoppedNode.resetLostPartitions(Collections.singleton(ENCRYPTED_CACHE)); + + awaitPartitionMapExchange(); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + + stoppedNode.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS); + + checkGroupKey(grpId, INITIAL_KEY_ID + 2, MAX_AWAIT_MILLIS); + } + + /** + * Ensures that we can rotate the key more than 255 times. + * + * @throws Exception If failed. + */ + @Test + public void testKeyIdentifierOverflow() throws Exception { + IgniteEx node = startTestGrids(true).get1(); + + createEncryptedCache(node, null, cacheName(), null, false); + + int grpId = CU.cacheId(cacheName()); + + byte keyId = INITIAL_KEY_ID; + + do { + node.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + // Validates reencryption of index partition. + checkGroupKey(grpId, ++keyId & 0xff, MAX_AWAIT_MILLIS); + } while (keyId != INITIAL_KEY_ID); + } + + /** + * @throws Exception If failed. 
+ */ + @Test + public void testMasterAndCacheGroupKeySimultaneousChange() throws Exception { + startTestGrids(true); + + IgniteEx node0 = grid(GRID_0); + IgniteEx node1 = grid(GRID_1); + + createEncryptedCache(node0, node1, cacheName(), null); + + int grpId = CU.cacheId(cacheName()); + + assertTrue(checkMasterKeyName(DEFAULT_MASTER_KEY_NAME)); + + Random rnd = ThreadLocalRandom.current(); + + for (byte keyId = 1; keyId < 50; keyId++) { + String currMkName = node0.context().config().getEncryptionSpi().getMasterKeyName(); + String newMkName = currMkName.equals(MASTER_KEY_NAME_2) ? MASTER_KEY_NAME_3 : MASTER_KEY_NAME_2; + + boolean changeGrpFirst = rnd.nextBoolean(); + + IgniteFuture grpKeyFut; + IgniteFuture masterKeyFut; + + if (changeGrpFirst) { + grpKeyFut = node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())); + masterKeyFut = node0.encryption().changeMasterKey(newMkName); + } + else { + masterKeyFut = node0.encryption().changeMasterKey(newMkName); + grpKeyFut = node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())); + } + + masterKeyFut.get(MAX_AWAIT_MILLIS); + assertTrue(checkMasterKeyName(newMkName)); + + try { + grpKeyFut.get(MAX_AWAIT_MILLIS); + checkGroupKey(grpId, keyId, MAX_AWAIT_MILLIS); + } catch (IgniteException e) { + assertTrue(e.getMessage().contains("Cache group key change was rejected. Master key has been changed.")); + + // Retry iteration. + keyId -= 1; + } + } + } + + /** + * @throws Exception If failed. 
+ */ + @Test + public void testCacheStartDuringRotation() throws Exception { + T2 grids = startTestGrids(true); + + createEncryptedCache(grids.get1(), grids.get2(), cacheName(), null); + + TestRecordingCommunicationSpi commSpi = TestRecordingCommunicationSpi.spi(grids.get2()); + + commSpi.blockMessages((node, msg) -> msg instanceof SingleNodeMessage); + + IgniteFuture fut = grids.get1().encryption().changeCacheGroupKey(Collections.singleton(cacheName())); + + commSpi.waitForBlocked(); + + IgniteCache cache = grids.get1().createCache(cacheConfiguration("cache1", null)); + + for (int i = 0; i < 100; i++) + cache.put(i, i); + + commSpi.stopBlock(); + + fut.get(); + + checkGroupKey(CU.cacheId(cacheName()), INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + + checkGroupKey(CU.cacheId("cache1"), INITIAL_KEY_ID, MAX_AWAIT_MILLIS); + } + + /** + * @throws Exception If failed. + */ + @Test + public void testCacheStartSameGroupDuringRotation() throws Exception { + T2 grids = startTestGrids(true); + + String grpName = "shared"; + + createEncryptedCache(grids.get1(), grids.get2(), cacheName(), grpName); + + TestRecordingCommunicationSpi commSpi = TestRecordingCommunicationSpi.spi(grids.get2()); + + commSpi.blockMessages((node, msg) -> msg instanceof SingleNodeMessage); + + IgniteFuture fut = grids.get1().encryption().changeCacheGroupKey(Collections.singleton(grpName)); + + commSpi.waitForBlocked(); + + IgniteCache cache = + grids.get1().createCache(cacheConfiguration("cache1", grpName)); + + commSpi.stopBlock(); + + for (int i = 0; i < 100; i++) + cache.put(i, i); + + fut.get(); + + checkGroupKey(CU.cacheId(grpName), INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** + * @throws Exception If failed. 
+ */ + @Test + public void testChangeKeyDuringRebalancing() throws Exception { + T2 grids = startTestGrids(true); + + IgniteEx node0 = grids.get1(); + IgniteEx node1 = grids.get2(); + + createEncryptedCache(node0, node1, cacheName(), null); + + loadData(500_000); + + IgniteEx node2 = startGrid(GRID_2); + + resetBaselineTopology(); + + int grpId = CU.cacheId(cacheName()); + + node2.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS); + + awaitPartitionMapExchange(); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** + * @throws Exception If failed. + */ + @Test + public void testNodeWithOlderKeyBecameCoordinator() throws Exception { + backups = 1; + + startTestGrids(true); + + IgniteEx node0 = grid(GRID_0); + IgniteEx node1 = grid(GRID_1); + + createEncryptedCache(node0, node1, cacheName(), null); + + int grpId = CU.cacheId(cacheName()); + + stopGrid(GRID_0); + + // Changing encryption key on one node. + node1.context().encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS); + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + + stopGrid(GRID_1); + + // The node with only the old key ID has become the coordinator. + node0 = startGrid(GRID_0); + assertTrue(Collections.singleton(INITIAL_KEY_ID).containsAll(node0.context().encryption().groupKeyIds(grpId))); + + node1 = startGrid(GRID_1); + node1.cluster().state(ClusterState.ACTIVE); + + // Wait until cache will be reencrypted with the old key. + checkGroupKey(grpId, INITIAL_KEY_ID, MAX_AWAIT_MILLIS); + + GridEncryptionManager encrMgr0 = node0.context().encryption(); + GridEncryptionManager encrMgr1 = node1.context().encryption(); + + // Changing the encryption key is not possible until the WAL segment, + // encrypted (probably) with the previous key, is deleted. 
+ assertThrowsAnyCause(log, + () -> encrMgr1.changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS), + IgniteException.class, + "Cache group key change was rejected. Cannot add new key identifier, it's already present."); + + long walIdx = node1.context().cache().context().wal().currentSegment(); + + // Simulate WAL segment deletion. + for (long n = 0; n <= walIdx; n++) + node1.context().encryption().onWalSegmentRemoved(walIdx); + + encrMgr1.changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS); + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + checkEncryptedCaches(node0, node1); + + walIdx = Math.max(node0.context().cache().context().wal().currentSegment(), + node1.context().cache().context().wal().currentSegment()); + + // Simulate WAL segment deletion. + for (long n = 0; n <= walIdx; n++) { + encrMgr0.onWalSegmentRemoved(walIdx); + encrMgr1.onWalSegmentRemoved(walIdx); + } + + // Make sure the previous key has been removed. + assertEquals(1, encrMgr0.groupKeyIds(grpId).size()); + assertEquals(encrMgr1.groupKeyIds(grpId), encrMgr0.groupKeyIds(grpId)); + } + + /** + * Ensures that a node cannot join the cluster if it cannot replace an existing encryption key. + *

+ * If the joining node has a different encryption key than the coordinator, but with the same identifier, it should + * not perform key rotation to a new key (received from coordinator) until the previous key is deleted. + * + * @throws Exception If failed. + */ + @Test + public void testNodeJoinRejectedIfKeyCannotBeReplaced() throws Exception { + backups = 2; + + T2 nodes = startTestGrids(true); + + startGrid(GRID_2); + + resetBaselineTopology(); + + createEncryptedCache(nodes.get1(), nodes.get2(), cacheName(), null); + + forceCheckpoint(); + + stopGrid(GRID_0); + stopGrid(GRID_1); + + grid(GRID_2).encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS); + + int grpId = CU.cacheId(cacheName()); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + + grid(GRID_2).encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS); + + checkGroupKey(grpId, INITIAL_KEY_ID + 2, MAX_AWAIT_MILLIS); + + stopGrid(GRID_2); + + startTestGrids(false); + + checkGroupKey(grpId, INITIAL_KEY_ID, MAX_AWAIT_MILLIS); + + grid(GRID_0).encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS); + + assertThrowsAnyCause(log, + () -> startGrid(GRID_2), + IgniteSpiException.class, + "Cache key differs! Node join is rejected."); + } + + /** + * @throws Exception If failed. 
+ */ + @Test + public void testKeyChangeWithNodeFilter() throws Exception { + startTestGrids(true); + + IgniteEx node0 = grid(GRID_0); + IgniteEx node1 = grid(GRID_1); + + Object nodeId0 = node0.localNode().consistentId(); + Object nodeId1 = node1.localNode().consistentId(); + + String cache1 = cacheName(); + String cache2 = "cache2"; + + node0.createCache(cacheConfiguration(cache1, null) + .setNodeFilter(node -> !node.consistentId().equals(nodeId0))); + + node0.createCache(cacheConfiguration(cache2, null) + .setNodeFilter(node -> !node.consistentId().equals(nodeId1))); + + loadData(10_000); + + forceCheckpoint(); + + int grpId1 = CU.cacheId(cache1); + int grpId2 = CU.cacheId(cache2); + + node0.encryption().changeCacheGroupKey(Arrays.asList(cache1, cache2)).get(); + + List keys0 = node0.context().encryption().groupKeyIds(grpId1); + List keys1 = node1.context().encryption().groupKeyIds(grpId1); + + assertEquals(2, keys0.size()); + assertEquals(2, keys1.size()); + + assertTrue(keys0.containsAll(keys1)); + + keys0 = node0.context().encryption().groupKeyIds(grpId2); + keys1 = node1.context().encryption().groupKeyIds(grpId2); + + assertEquals(2, keys0.size()); + assertEquals(2, keys1.size()); + + assertTrue(keys0.containsAll(keys1)); + + checkGroupKey(grpId1, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + checkGroupKey(grpId2, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + + stopAllGrids(); + + startTestGrids(false); + + node0 = grid(GRID_0); + node1 = grid(GRID_1); + + IgniteCache allNodesCache = node0.createCache("cacheX"); + + // Previous keys must be deleted when the corresponding WAL segment is deleted, so we adding data on all nodes. 
+ long endTime = U.currentTimeMillis() + 30_000; + int cntr = 0; + + do { + allNodesCache.put(cntr, String.valueOf(cntr)); + + if (node0.context().encryption().groupKeyIds(grpId1).size() == 1 && + node1.context().encryption().groupKeyIds(grpId1).size() == 1 && + node0.context().encryption().groupKeyIds(grpId2).size() == 1 && + node1.context().encryption().groupKeyIds(grpId2).size() == 1) + break; + + ++cntr; + } while (U.currentTimeMillis() < endTime); + + assertEquals(1, node0.context().encryption().groupKeyIds(grpId1).size()); + assertEquals(1, node0.context().encryption().groupKeyIds(grpId2).size()); + + assertEquals(node0.context().encryption().groupKeyIds(grpId1), node1.context().encryption().groupKeyIds(grpId1)); + assertEquals(node0.context().encryption().groupKeyIds(grpId2), node1.context().encryption().groupKeyIds(grpId2)); + + checkGroupKey(grpId1, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + checkGroupKey(grpId2, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + + checkEncryptedCaches(node0, node1); + } + + /** + * @throws Exception If failed. 
+ */ + @Test + public void testBasicChangeWithConstantLoad() throws Exception { + walSegments = 20; + + startTestGrids(true); + + IgniteEx node0 = grid(GRID_0); + IgniteEx node1 = grid(GRID_1); + + GridEncryptionManager encrMgr0 = node0.context().encryption(); + GridEncryptionManager encrMgr1 = node1.context().encryption(); + + createEncryptedCache(node0, node1, cacheName(), null); + + forceCheckpoint(); + + int grpId = CU.cacheId(cacheName()); + + IgniteInternalFuture loadFut = loadDataAsync(node0); + + try { + IgniteCache cache = node0.cache(cacheName()); + + boolean success = waitForCondition(() -> cache.size() > 2000, MAX_AWAIT_MILLIS); + assertTrue(success); + + node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS); + + awaitEncryption(G.allGrids(), grpId, MAX_AWAIT_MILLIS); + + waitForCondition(() -> + encrMgr0.groupKeyIds(grpId).size() == 1 && encrMgr1.groupKeyIds(grpId).size() == 1, MAX_AWAIT_MILLIS); + } finally { + loadFut.cancel(); + } + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + + assertEquals(node0.cluster().localNode().id().toString(), 1, encrMgr0.groupKeyIds(grpId).size()); + assertEquals(node1.cluster().localNode().id().toString(), 1, encrMgr1.groupKeyIds(grpId).size()); + } + + /** + * Ensures that unused key will be removed even if user cleaned wal archive folder manually. + * + * @throws Exception If failed. 
+ */ + @Test + public void testWalArchiveCleanup() throws Exception { + cleanPersistenceDir(); + + IgniteEx node = startGrid(GRID_0); + + node.cluster().state(ClusterState.ACTIVE); + + createEncryptedCache(node, null, cacheName(), null); + + node.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + IgniteWriteAheadLogManager walMgr = node.context().cache().context().wal(); + + long reservedIdx = walMgr.currentSegment(); + + boolean reserved = walMgr.reserve(new WALPointer(reservedIdx, 0, 0)); + assertTrue(reserved); + + IgniteInternalFuture loadFut = loadDataAsync(node); + + // Wait until the reserved segment is moved to the archive. + try { + boolean success = waitForCondition(() -> walMgr.lastArchivedSegment() >= reservedIdx, MAX_AWAIT_MILLIS); + assertTrue(success); + } finally { + loadFut.cancel(); + } + + forceCheckpoint(); + + int grpId = CU.cacheId(cacheName()); + + assertEquals(2, node.context().encryption().groupKeyIds(grpId).size()); + + stopAllGrids(); + + // Cleanup WAL arcive folder. + File dbDir = U.resolveWorkDirectory(U.defaultWorkDirectory(), "db", false); + + boolean rmvd = U.delete(new File(dbDir, "wal/archive")); + + assertTrue(rmvd); + + node = startGrid(GRID_0); + + node.cluster().state(ClusterState.ACTIVE); + + loadFut = loadDataAsync(node); + + // Make sure that unused encryption key has been deleted. + try { + GridEncryptionManager encryptMgr = node.context().encryption(); + + boolean success = waitForCondition(() -> encryptMgr.groupKeyIds(grpId).size() == 1, MAX_AWAIT_MILLIS); + assertTrue(success); + } finally { + loadFut.cancel(); + } + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** + * @param grid Grid. + * @return Future for this operation. 
+ */ + private IgniteInternalFuture loadDataAsync(Ignite grid) { + return runAsync(() -> { + long cntr = grid.cache(cacheName()).size(); + + try (IgniteDataStreamer streamer = grid.dataStreamer(cacheName())) { + while (!Thread.currentThread().isInterrupted()) { + streamer.addData(cntr, String.valueOf(cntr)); + + ++cntr; + } + } + }); + } + + /** + * @throws Exception If failed. + */ + @Test + public void testCacheStartOnClientDuringRotation() throws Exception { + T2 nodes = startTestGrids(true); + + IgniteEx node0 = nodes.get1(); + IgniteEx node1 = nodes.get2(); + + IgniteEx client = startClientGrid(getConfiguration("client")); + + node0.cluster().state(ClusterState.ACTIVE); + + String grpName = "shared"; + + createEncryptedCache(client, null, cacheName(), grpName); + + awaitPartitionMapExchange(); + + TestRecordingCommunicationSpi commSpi = TestRecordingCommunicationSpi.spi(node1); + + commSpi.blockMessages((node, message) -> message instanceof SingleNodeMessage); + + IgniteFuture changeKeyFut = node0.encryption().changeCacheGroupKey(Collections.singleton(grpName)); + + commSpi.waitForBlocked(); + + String cacheName = "userCache"; + + IgniteInternalFuture cacheStartFut = runAsync(() -> { + client.getOrCreateCache(cacheConfiguration(cacheName, grpName)); + }); + + commSpi.stopBlock(); + + changeKeyFut.get(MAX_AWAIT_MILLIS); + cacheStartFut.get(MAX_AWAIT_MILLIS); + + IgniteCache cache = client.cache(cacheName); + + for (int i = 0; i < 200; i++) + cache.put(i, String.valueOf(i)); + + checkEncryptedCaches(node0, client); + + checkGroupKey(CU.cacheId(grpName), INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + + checkEncryptedCaches(node0, node1); + } + + /** + * @throws Exception If failed. 
+ */ + @Test + public void testClientJoinDuringRotation() throws Exception { + T2 nodes = startTestGrids(true); + + IgniteEx node0 = nodes.get1(); + IgniteEx node1 = nodes.get2(); + + node0.cluster().state(ClusterState.ACTIVE); + + createEncryptedCache(node0, node1, cacheName(), null); + + awaitPartitionMapExchange(); + + TestRecordingCommunicationSpi commSpi = TestRecordingCommunicationSpi.spi(node1); + + commSpi.blockMessages((node, message) -> message instanceof SingleNodeMessage); + + IgniteFuture changeKeyFut = node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())); + + commSpi.waitForBlocked(); + + IgniteEx client = startClientGrid(getConfiguration("client")); + + assertTrue(!changeKeyFut.isDone()); + + commSpi.stopBlock(); + + changeKeyFut.get(MAX_AWAIT_MILLIS); + + checkEncryptedCaches(node0, client); + + checkGroupKey(CU.cacheId(cacheName()), INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** + * Ensures that node can join after rotation of encryption key. + * + * @throws Exception If failed. 
+ */ + @Test + public void testNodeJoinAfterRotation() throws Exception { + backups = 1; + + T2 nodes = startTestGrids(true); + + createEncryptedCache(nodes.get1(), nodes.get2(), cacheName(), null); + + forceCheckpoint(); + + stopGrid(GRID_1); + resetBaselineTopology(); + + nodes.get1().encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + startGrid(GRID_1); + resetBaselineTopology(); + awaitPartitionMapExchange(); + + int grpId = CU.cacheId(cacheName()); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + checkEncryptedCaches(grid(GRID_0), grid(GRID_1)); + + GridEncryptionManager encrMgr0 = grid(GRID_0).context().encryption(); + GridEncryptionManager encrMgr1 = grid(GRID_1).context().encryption(); + + long maxWalIdx = Math.max(nodes.get1().context().cache().context().wal().currentSegment(), + nodes.get2().context().cache().context().wal().currentSegment()); + + for (long idx = 0; idx <= maxWalIdx; idx++) { + encrMgr0.onWalSegmentRemoved(maxWalIdx); + encrMgr1.onWalSegmentRemoved(maxWalIdx); + } + + assertEquals(1, encrMgr1.groupKeyIds(grpId).size()); + assertEquals(encrMgr0.groupKeyIds(grpId), encrMgr1.groupKeyIds(grpId)); + + startGrid(GRID_2); + + resetBaselineTopology(); + awaitPartitionMapExchange(); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + checkEncryptedCaches(grid(GRID_2), nodes.get1()); + + assertEquals(encrMgr0.groupKeyIds(grpId), grid(GRID_2).context().encryption().groupKeyIds(grpId)); + } + + /** + * @throws Exception If failed. + */ + @Test + public void testWrongCacheGroupSpecified() throws Exception { + T2 grids = startTestGrids(true); + + IgniteEx node0 = grids.get1(); + IgniteEx node1 = grids.get2(); + + assertThrowsAnyCause(log, + () -> node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS), + IgniteException.class, + "Cache group key change was rejected. 
Cache or group \"" + cacheName() + "\" doesn't exists"); + + node0.createCache(new CacheConfiguration<>(cacheName()).setNodeFilter(node -> node.equals(node0.localNode()))); + + assertThrowsAnyCause(log, + () -> node1.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS), + IgniteException.class, + "Cache group key change was rejected. Cache or group \"" + cacheName() + "\" is not encrypted."); + + node0.destroyCache(cacheName()); + + awaitPartitionMapExchange(); + + String grpName = "cacheGroup1"; + + createEncryptedCache(node0, node1, cacheName(), grpName); + + assertThrowsAnyCause(log, + () -> node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS), + IgniteException.class, + "Cache group key change was rejected. Cache or group \"" + cacheName() + "\" is a part of group \"" + + grpName + "\". Provide group name instead of cache name for shared groups."); + } + + /** @throws Exception If failed. */ + @Test + public void testChangeCacheGroupKeyWithoutWAL() throws Exception { + walMode = NONE; + T2 grids = startTestGrids(true); + + createEncryptedCache(grids.get1(), grids.get2(), cacheName(), null); + + IgniteEx node0 = grids.get1(); + + node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + int grpId = CU.cacheId(cacheName()); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + + assertEquals(1, node0.context().encryption().groupKeyIds(grpId).size()); + assertEquals(1, grids.get2().context().encryption().groupKeyIds(grpId).size()); + } + + /** + * Custom discovery hook to block distributed process. + */ + private static class InitMessageDiscoveryHook extends DiscoveryHook { + /** + * Latch to sync execution. + */ + private final CountDownLatch unlockLatch = new CountDownLatch(1); + + /** + * Latch to sync execution. + */ + private final CountDownLatch blockedLatch = new CountDownLatch(1); + + /** + * Distributed process type. 
+ */ + private final DistributedProcessType type; + + /** + * @param type Distributed process type. + */ + private InitMessageDiscoveryHook(DistributedProcessType type) { + this.type = type; + } + + /** {@inheritDoc} */ + @Override public void beforeDiscovery(DiscoveryCustomMessage customMsg) { + if (!(customMsg instanceof InitMessage)) + return; + + InitMessage msg = (InitMessage)customMsg; + + if (msg.type() != type.ordinal()) + return; + + try { + blockedLatch.countDown(); + + unlockLatch.await(MAX_AWAIT_MILLIS, TimeUnit.MILLISECONDS); + } + catch (InterruptedException ignore) { + Thread.currentThread().interrupt(); + } + } + + /** + * @param timeout Timeout in milliseconds. + * @throws InterruptedException If interrupted. + */ + public void waitForBlocked(long timeout) throws InterruptedException { + blockedLatch.await(timeout, TimeUnit.MILLISECONDS); + } + + /** */ + public void stopBlock() { + unlockLatch.countDown(); + } + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/CacheGroupReencryptionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/CacheGroupReencryptionTest.java new file mode 100644 index 0000000000000..1f7cf36aaedab --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/CacheGroupReencryptionTest.java @@ -0,0 +1,867 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.encryption; + +import java.io.File; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.file.OpenOption; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.atomic.AtomicBoolean; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.cache.query.annotations.QuerySqlField; +import org.apache.ignite.cluster.ClusterState; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.EncryptionConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.events.EventType; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.IgniteFutureCancelledCheckedException; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.processors.cache.CacheGroupMetricsImpl; +import org.apache.ignite.internal.processors.cache.persistence.file.FileIO; +import org.apache.ignite.internal.processors.cache.persistence.file.FileIODecorator; +import 
org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory; +import org.apache.ignite.internal.processors.cache.persistence.file.RandomAccessFileIOFactory; +import org.apache.ignite.internal.processors.metric.MetricRegistry; +import org.apache.ignite.internal.util.typedef.G; +import org.apache.ignite.internal.util.typedef.T2; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.spi.metric.BooleanMetric; +import org.apache.ignite.spi.metric.LongMetric; +import org.apache.ignite.testframework.GridTestUtils; +import org.junit.Test; + +import static org.apache.ignite.configuration.EncryptionConfiguration.DFLT_REENCRYPTION_RATE_MBPS; +import static org.apache.ignite.configuration.WALMode.LOG_ONLY; +import static org.apache.ignite.internal.managers.encryption.GridEncryptionManager.INITIAL_KEY_ID; +import static org.apache.ignite.internal.processors.metric.impl.MetricUtils.metricName; +import static org.apache.ignite.testframework.GridTestUtils.assertThrowsAnyCause; + +/** + * Cache re-encryption tests. + */ +public class CacheGroupReencryptionTest extends AbstractEncryptionTest { + /** */ + private static final String GRID_2 = "grid-2"; + + /** */ + private static final String GRID_3 = "grid-3"; + + /** Timeout. */ + private static final long MAX_AWAIT_MILLIS = 15_000; + + /** File IO fail flag. */ + private final AtomicBoolean failFileIO = new AtomicBoolean(); + + /** Count of cache backups. */ + private int backups; + + /** Re-encryption rate limit. */ + private double pageScanRate = DFLT_REENCRYPTION_RATE_MBPS; + + /** The number of pages that is scanned during re-encryption under checkpoint lock. 
*/ + private int pageScanBatchSize = EncryptionConfiguration.DFLT_REENCRYPTION_BATCH_SIZE; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String name) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(name); + + cfg.setConsistentId(name); + + cfg.setIncludeEventTypes(EventType.EVT_CACHE_REBALANCE_STOPPED); + + EncryptionConfiguration encCfg = new EncryptionConfiguration() + .setReencryptionBatchSize(pageScanBatchSize) + .setReencryptionRateLimit(pageScanRate); + + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration() + .setMaxSize(1024 * 1024 * 1024L) + .setPersistenceEnabled(true)) + .setPageSize(4 * 1024) + .setWalSegmentSize(10 * 1024 * 1024) + .setWalSegments(4) + .setMaxWalArchiveSize(100 * 1024 * 1024L) + .setCheckpointFrequency(30 * 1000L) + .setWalMode(LOG_ONLY) + .setFileIOFactory(new FailingFileIOFactory(new RandomAccessFileIOFactory(), failFileIO)) + .setEncryptionConfiguration(encCfg); + + cfg.setDataStorageConfiguration(memCfg); + + return cfg; + } + + /** {@inheritDoc} */ + @Override protected CacheConfiguration cacheConfiguration(String name, String grp) { + CacheConfiguration cfg = super.cacheConfiguration(name, grp); + + cfg.setIndexedTypes(Long.class, IndexedObject.class); + + return cfg.setAffinity(new RendezvousAffinityFunction(false, 16)).setBackups(backups); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + + cleanPersistenceDir(); + } + + /** {@inheritDoc} */ + @Override protected Object generateValue(long id) { + return new IndexedObject(id, "string-" + id); + } + + /** + * Check physical recovery after checkpoint failure during re-encryption. + * + * @throws Exception If failed. 
+ */ + @Test + public void testPhysicalRecovery() throws Exception { + T2 nodes = startTestGrids(true); + + createEncryptedCache(nodes.get1(), nodes.get2(), cacheName(), null); + + IgniteInternalFuture fut = GridTestUtils.runAsync(() -> loadData(50_000)); + + forceCheckpoint(); + + enableCheckpoints(nodes.get1(), false); + enableCheckpoints(nodes.get2(), false); + + int grpId = CU.cacheId(cacheName()); + + failFileIO.set(true); + + nodes.get1().encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + awaitEncryption(G.allGrids(), grpId, MAX_AWAIT_MILLIS); + + fut.get(); + + assertThrowsAnyCause(log, () -> { + enableCheckpoints(grid(GRID_0), true); + enableCheckpoints(grid(GRID_1), true); + + forceCheckpoint(); + + return null; + }, IgniteCheckedException.class, null); + + stopAllGrids(true); + + failFileIO.set(false); + + nodes = startTestGrids(false); + + checkEncryptedCaches(nodes.get1(), nodes.get2()); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** @throws Exception If failed. 
*/ + @Test + public void testPhysicalRecoveryWithUpdates() throws Exception { + pageScanRate = 1.5; + + T2 nodes = startTestGrids(true); + + createEncryptedCache(nodes.get1(), nodes.get2(), cacheName(), null); + + loadData(50_000); + + IgniteInternalFuture addFut = GridTestUtils.runAsync(() -> loadData(100_000)); + + IgniteInternalFuture updateFut = GridTestUtils.runAsync(() -> { + IgniteCache cache = grid(GRID_0).cache(cacheName()); + + while (!Thread.currentThread().isInterrupted()) { + for (long i = 50_000; i > 20_000; i--) { + String val = cache.get(i); + + cache.put(i, val); + } + } + }); + + forceCheckpoint(); + + int grpId = CU.cacheId(cacheName()); + + nodes.get1().encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + forceCheckpoint(); + + failFileIO.set(true); + + awaitEncryption(G.allGrids(), grpId, MAX_AWAIT_MILLIS); + + addFut.get(); + updateFut.cancel(); + + assertThrowsAnyCause(log, () -> { + forceCheckpoint(); + + return null; + }, IgniteCheckedException.class, null); + + stopAllGrids(true); + + failFileIO.set(false); + + nodes = startTestGrids(false); + + checkEncryptedCaches(nodes.get1(), nodes.get2()); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** + * Ensures that re-encryption continues after a restart. + * + * @throws Exception If failed. 
+ */ + @Test + public void testLogicalRecovery() throws Exception { + T2 nodes = startTestGrids(true); + + IgniteEx node0 = nodes.get1(); + IgniteEx node1 = nodes.get2(); + + createEncryptedCache(node0, node1, cacheName(), null, true); + + loadData(100_000); + + forceCheckpoint(); + + enableCheckpoints(G.allGrids(), false); + + int grpId = CU.cacheId(cacheName()); + + node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + awaitEncryption(G.allGrids(), grpId, MAX_AWAIT_MILLIS); + + assertEquals(1, node0.context().encryption().groupKey(grpId).id()); + assertEquals(1, node1.context().encryption().groupKey(grpId).id()); + + stopAllGrids(); + + info(">>> Start grids (iteration 1)"); + + startTestGrids(false); + + enableCheckpoints(G.allGrids(), false); + + stopAllGrids(); + + info(">>> Start grids (iteration 2)"); + + startTestGrids(false); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** @throws Exception If failed. */ + @Test + public void testCacheStopDuringReencryption() throws Exception { + pageScanRate = 1; + + T2 nodes = startTestGrids(true); + + IgniteEx node0 = nodes.get1(); + IgniteEx node1 = nodes.get2(); + + createEncryptedCache(node0, node1, cacheName(), null); + + loadData(100_000); + + IgniteCache cache = node0.cache(cacheName()); + + node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + int grpId = CU.cacheId(cacheName()); + + IgniteInternalFuture fut0 = node0.context().encryption().reencryptionFuture(grpId); + + assertFalse(fut0.isDone()); + + assertTrue(isReencryptionInProgress(node0, grpId)); + + cache.destroy(); + + assertThrowsAnyCause(log, () -> { + fut0.get(); + + return null; + }, IgniteFutureCancelledCheckedException.class, null); + + awaitPartitionMapExchange(); + + assertNull(node0.context().encryption().groupKeyIds(grpId)); + assertNull(node1.context().encryption().groupKeyIds(grpId)); + } + + /** @throws Exception If failed. 
*/ + @Test + public void testPartitionEvictionDuringReencryption() throws Exception { + backups = 1; + pageScanRate = 1; + + CountDownLatch rebalanceFinished = new CountDownLatch(1); + + T2 nodes = startTestGrids(true); + + IgniteEx node0 = nodes.get1(); + IgniteEx node1 = nodes.get2(); + + createEncryptedCache(node0, node1, cacheName(), null); + + loadData(100_000); + + IgniteEx node2 = startGrid(GRID_2); + + node2.events().localListen(evt -> { + rebalanceFinished.countDown(); + + return true; + }, EventType.EVT_CACHE_REBALANCE_STOPPED); + + resetBaselineTopology(); + + rebalanceFinished.await(); + + stopGrid(GRID_2); + + resetBaselineTopology(); + + int grpId = CU.cacheId(cacheName()); + + node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + stopAllGrids(); + + pageScanRate = DFLT_REENCRYPTION_RATE_MBPS; + + startTestGrids(false); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** + * Test that partition files are reused correctly. + * + * @throws Exception If failed. + */ + @Test + public void testPartitionFileDestroy() throws Exception { + backups = 1; + pageScanRate = 1; + pageScanBatchSize = 10; + + T2 nodes = startTestGrids(true); + + createEncryptedCache(nodes.get1(), nodes.get2(), cacheName(), null); + + loadData(50_000); + + forceCheckpoint(); + + nodes.get1().encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + startGrid(GRID_2); + + // Trigger partitions eviction. + resetBaselineTopology(); + + awaitPartitionMapExchange(true, true, null); + + forceCheckpoint(); + + assertTrue(isReencryptionInProgress(Collections.singleton(cacheName()))); + + checkGroupKey(CU.cacheId(cacheName()), INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** + * Test that partition files are reused correctly. + * + * @throws Exception If failed. 
+ */ + @Test + public void testPartitionFileDestroyAndRecreate() throws Exception { + backups = 1; + pageScanRate = 1; + + T2 nodes = startTestGrids(true); + + createEncryptedCache(nodes.get1(), nodes.get2(), cacheName(), null); + + loadData(50_000); + + grid(GRID_0).encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + long walSegment = nodes.get1().context().cache().context().wal().currentSegment(); + + for (long n = 0; n <= walSegment; n++) + nodes.get1().context().encryption().onWalSegmentRemoved(n); + + walSegment = nodes.get2().context().cache().context().wal().currentSegment(); + + for (long n = 0; n <= walSegment; n++) + nodes.get2().context().encryption().onWalSegmentRemoved(n); + + // Force checkpoint to prevent logical recovery after key rotation. + forceCheckpoint(); + + startGrid(GRID_2); + + // Trigger partitions eviction. + resetBaselineTopology(); + + awaitPartitionMapExchange(true, true, null); + + // Trigger partitions re-create. + stopGrid(GRID_2); + + resetBaselineTopology(); + + awaitPartitionMapExchange(true, true, null); + + stopAllGrids(); + + nodes = startTestGrids(false); + + checkEncryptedCaches(nodes.get1(), nodes.get2()); + + checkGroupKey(CU.cacheId(cacheName()), INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** + * @throws Exception If failed. 
+ */ + @Test + public void testNotBltNodeJoin() throws Exception { + backups = 1; + pageScanRate = 1; + pageScanBatchSize = 10; + + T2 nodes = startTestGrids(true); + + createEncryptedCache(nodes.get1(), nodes.get2(), cacheName(), null); + + loadData(50_000); + + forceCheckpoint(); + + long startIdx1 = nodes.get1().context().cache().context().wal().currentSegment(); + long startIdx2 = nodes.get2().context().cache().context().wal().currentSegment(); + + nodes.get1().encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + long endIdx1 = nodes.get1().context().cache().context().wal().currentSegment(); + long endIdx2 = nodes.get2().context().cache().context().wal().currentSegment(); + + stopGrid(GRID_1); + + resetBaselineTopology(); + + int grpId = CU.cacheId(cacheName()); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + + startGrid(GRID_1); + + resetBaselineTopology(); + + awaitPartitionMapExchange(); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + + assertEquals(2, grid(GRID_0).context().encryption().groupKeyIds(grpId).size()); + assertEquals(2, grid(GRID_1).context().encryption().groupKeyIds(grpId).size()); + + // Simulate that wal was removed. + for (long segment = startIdx1; segment <= endIdx1; segment++) + grid(GRID_0).context().encryption().onWalSegmentRemoved(segment); + + assertEquals(1, grid(GRID_0).context().encryption().groupKeyIds(grpId).size()); + + for (long segment = startIdx2; segment <= endIdx2; segment++) + grid(GRID_1).context().encryption().onWalSegmentRemoved(segment); + + assertEquals(1, grid(GRID_1).context().encryption().groupKeyIds(grpId).size()); + } + + /** + * @throws Exception If failed. 
+ */ + @Test + public void testReencryptionStartsAfterNodeRestart() throws Exception { + pageScanRate = 0.000000001; + + T2 nodes = startTestGrids(true); + + IgniteEx node0 = nodes.get1(); + IgniteEx node1 = nodes.get2(); + + createEncryptedCache(node0, node1, cacheName(), null); + + forceCheckpoint(); + + int grpId = CU.cacheId(cacheName()); + + node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + forceCheckpoint(); + + stopAllGrids(); + + nodes = startTestGrids(false); + + node0 = nodes.get1(); + node1 = nodes.get2(); + + assertTrue(isReencryptionInProgress(node0, grpId)); + assertTrue(isReencryptionInProgress(node1, grpId)); + + stopAllGrids(); + + pageScanRate = DFLT_REENCRYPTION_RATE_MBPS; + + startTestGrids(false); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** + * @throws Exception If failed. + */ + @Test + public void testReencryptionOnUnstableTopology() throws Exception { + backups = 1; + pageScanRate = 2; + + T2 nodes = startTestGrids(true); + + IgniteEx node0 = nodes.get1(); + IgniteEx node1 = nodes.get2(); + + startGrid(GRID_2); + startGrid(GRID_3); + + resetBaselineTopology(); + + createEncryptedCache(node0, node1, cacheName(), null); + + String cache2 = "encrypted-2"; + + createEncryptedCache(node0, node1, cache2, null); + + loadData(cacheName(), 100_000); + loadData(cache2, 100_000); + + List cacheGroups = Arrays.asList(cacheName(), cache2); + + node0.encryption().changeCacheGroupKey(cacheGroups).get(); + + while (isReencryptionInProgress(cacheGroups)) { + int rndNode = ThreadLocalRandom.current().nextInt(3); + + String gridName = "grid-" + rndNode; + + stopGrid(gridName); + + startGrid(gridName); + } + + stopAllGrids(); + + startGrid(GRID_0); + startGrid(GRID_1); + startGrid(GRID_2); + startGrid(GRID_3); + + grid(GRID_0).cluster().state(ClusterState.ACTIVE); + + awaitPartitionMapExchange(); + + checkGroupKey(CU.cacheId(cacheName()), INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + 
checkGroupKey(CU.cacheId(cache2), INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** + * @throws Exception If failed. + */ + @Test + public void testChangeBaseline() throws Exception { + backups = 1; + pageScanRate = 2; + + T2 nodes = startTestGrids(true); + + IgniteEx node0 = nodes.get1(); + IgniteEx node1 = nodes.get2(); + + createEncryptedCache(node0, node1, cacheName(), null); + + loadData(100_000); + + node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + assertTrue(isReencryptionInProgress(Collections.singleton(cacheName()))); + + startGrid(GRID_2); + + resetBaselineTopology(); + + startGrid(GRID_3); + + resetBaselineTopology(); + + awaitPartitionMapExchange(); + + checkGroupKey(CU.cacheId(cacheName()), INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + + node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + stopGrid(GRID_2); + + resetBaselineTopology(); + + awaitPartitionMapExchange(); + + checkGroupKey(CU.cacheId(cacheName()), INITIAL_KEY_ID + 2, MAX_AWAIT_MILLIS); + + node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + startGrid(GRID_2); + + resetBaselineTopology(); + + awaitPartitionMapExchange(); + + checkGroupKey(CU.cacheId(cacheName()), INITIAL_KEY_ID + 3, MAX_AWAIT_MILLIS); + } + + /** @throws Exception If failed. 
*/ + @Test + public void testKeyCleanup() throws Exception { + T2 nodes = startTestGrids(true); + + IgniteEx node0 = nodes.get1(); + IgniteEx node1 = nodes.get2(); + + createEncryptedCache(node0, node1, cacheName(), null); + + forceCheckpoint(); + + enableCheckpoints(G.allGrids(), false); + + int grpId = CU.cacheId(cacheName()); + + long startIdx = node1.context().cache().context().wal().currentSegment(); + + node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + long endIdx = node1.context().cache().context().wal().currentSegment(); + + awaitEncryption(G.allGrids(), grpId, MAX_AWAIT_MILLIS); + + // Simulate that wal was removed. + for (long segment = startIdx; segment <= endIdx; segment++) + node1.context().encryption().onWalSegmentRemoved(segment); + + stopGrid(GRID_1); + + node1 = startGrid(GRID_1); + + enableCheckpoints(G.allGrids(), true); + + node1.cluster().state(ClusterState.ACTIVE); + + node1.resetLostPartitions(Collections.singleton(ENCRYPTED_CACHE)); + + checkEncryptedCaches(node0, node1); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); + } + + /** @throws Exception If failed. */ + @Test + public void testReencryptionMetrics() throws Exception { + pageScanRate = 0.000000001; + + T2 nodes = startTestGrids(true); + + IgniteEx node0 = nodes.get1(); + IgniteEx node1 = nodes.get2(); + + createEncryptedCache(node0, node1, cacheName(), null); + + node0.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(); + + validateMetrics(node0, false); + validateMetrics(node1, false); + + pageScanRate = DFLT_REENCRYPTION_RATE_MBPS; + + stopAllGrids(); + + nodes = startTestGrids(false); + + node0 = nodes.get1(); + node1 = nodes.get2(); + + awaitEncryption(G.allGrids(), CU.cacheId(cacheName()), MAX_AWAIT_MILLIS); + + forceCheckpoint(); + + validateMetrics(node0, true); + validateMetrics(node1, true); + } + + /** + * @param node Grid. + * @param finished Expected reencryption status. 
+ */ + private void validateMetrics(IgniteEx node, boolean finished) { + MetricRegistry registry = + node.context().metric().registry(metricName(CacheGroupMetricsImpl.CACHE_GROUP_METRICS_PREFIX, cacheName())); + + LongMetric pagesLeft = registry.findMetric("ReencryptionPagesLeft"); + + if (finished) + assertEquals(0, pagesLeft.value()); + else + assertTrue(pagesLeft.value() > 0); + + BooleanMetric reencryptionFinished = registry.findMetric("ReencryptionFinished"); + + assertEquals(finished, reencryptionFinished.value()); + } + + /** + * @param cacheGroups Cache group names. + * @return {@code True} If reencryption of the specified groups is not yet complete. + */ + private boolean isReencryptionInProgress(Iterable cacheGroups) { + for (Ignite node : G.allGrids()) { + for (String groupName : cacheGroups) { + if (isReencryptionInProgress((IgniteEx)node, CU.cacheId(groupName))) + return true; + } + } + + return false; + } + + /** */ + private static final class FailingFileIOFactory implements FileIOFactory { + /** */ + private final FileIOFactory delegateFactory; + + /** */ + private final AtomicBoolean failFlag; + + /** + * @param factory Delegate factory. + */ + FailingFileIOFactory(FileIOFactory factory, AtomicBoolean failFlag) { + delegateFactory = factory; + + this.failFlag = failFlag; + } + + /** {@inheritDoc}*/ + @Override public FileIO create(File file, OpenOption... 
modes) throws IOException { + FileIO delegate = delegateFactory.create(file, modes); + + return new FailingFileIO(delegate); + } + + /** */ + final class FailingFileIO extends FileIODecorator { + /** + * @param delegate File I/O delegate + */ + public FailingFileIO(FileIO delegate) { + super(delegate); + } + + /** {@inheritDoc} */ + @Override public int writeFully(ByteBuffer srcBuf, long position) throws IOException { + if (failFlag.get()) + throw new IOException("Test exception."); + + return delegate.writeFully(srcBuf, position); + } + } + } + + /** */ + private static class IndexedObject { + /** Id. */ + @QuerySqlField(index = true) + private final long id; + + /** Name. */ + @QuerySqlField(index = true) + private final String name; + + /** + * @param id Id. + */ + public IndexedObject(long id, String name) { + this.id = id; + this.name = name; + } + + /** {@inheritDoc} */ + @Override public boolean equals(Object o) { + if (this == o) + return true; + + if (o == null || getClass() != o.getClass()) + return false; + + IndexedObject obj = (IndexedObject)o; + + return id == obj.id && Objects.equals(name, obj.name); + } + + /** {@inheritDoc} */ + @Override public int hashCode() { + return Objects.hash(name, id); + } + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheBigEntryTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheBigEntryTest.java index 92202025d45f3..613f376a65c14 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheBigEntryTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheBigEntryTest.java @@ -22,6 +22,7 @@ import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.IgniteInterruptedCheckedException; +import org.apache.ignite.internal.managers.encryption.GroupKey; import 
org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.spi.encryption.keystore.KeystoreEncryptionKey; @@ -59,7 +60,7 @@ public void testCreateEncryptedCacheWithBigEntry() throws Exception { int grpId = CU.cacheGroupId(cacheName(), null); KeystoreEncryptionKey keyBeforeRestart = - (KeystoreEncryptionKey)grids.get1().context().encryption().groupKey(grpId); + (KeystoreEncryptionKey)grids.get1().context().encryption().groupKey(grpId).key(); stopAllGrids(); @@ -67,7 +68,11 @@ public void testCreateEncryptedCacheWithBigEntry() throws Exception { checkEncryptedCaches(grids.get1(), grids.get2()); - KeystoreEncryptionKey keyAfterRestart = (KeystoreEncryptionKey)grids.get1().context().encryption().groupKey(grpId); + GroupKey grpKeyAfterRestart = grids.get1().context().encryption().groupKey(grpId); + + assertNotNull(grpKeyAfterRestart); + + KeystoreEncryptionKey keyAfterRestart = (KeystoreEncryptionKey)grpKeyAfterRestart.key(); assertNotNull(keyAfterRestart); assertNotNull(keyAfterRestart.key()); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheCreateTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheCreateTest.java index aaf880abf7f91..7f5f3ae195469 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheCreateTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheCreateTest.java @@ -28,6 +28,7 @@ import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.managers.encryption.GroupKey; import org.apache.ignite.internal.processors.cache.IgniteInternalCache; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.U; @@ -89,8 +90,11 @@ public void 
testCreateEncryptedCache() throws Exception { assertNotNull(enc); - KeystoreEncryptionKey key = - (KeystoreEncryptionKey)grid.context().encryption().groupKey(CU.cacheGroupId(ENCRYPTED_CACHE, null)); + GroupKey grpKey = grid.context().encryption().groupKey(CU.cacheGroupId(ENCRYPTED_CACHE, null)); + + assertNotNull(grpKey); + + KeystoreEncryptionKey key = (KeystoreEncryptionKey)grpKey.key(); assertNotNull(key); assertNotNull(key.key()); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheDestroyTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheDestroyTest.java index 4fe9f591e2cc3..1f5f0cf39723a 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheDestroyTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheDestroyTest.java @@ -19,13 +19,13 @@ import java.util.Collection; import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.managers.encryption.GroupKey; import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetaStorage; import org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.internal.CU; -import org.apache.ignite.spi.encryption.keystore.KeystoreEncryptionKey; import org.junit.Test; -import static org.apache.ignite.internal.managers.encryption.GridEncryptionManager.ENCRYPTION_KEY_PREFIX; +import static org.apache.ignite.internal.managers.encryption.GridEncryptionManager.ENCRYPTION_KEYS_PREFIX; /** */ @@ -114,17 +114,17 @@ private void checkCacheDestroyed(IgniteEx grid, String encCacheName, String grpN int grpId = CU.cacheGroupId(encCacheName, grpName); - KeystoreEncryptionKey encKey = (KeystoreEncryptionKey)grid.context().encryption().groupKey(grpId); + GroupKey encKey = grid.context().encryption().groupKey(grpId); MetaStorage metaStore = grid.context().cache().context().database().metaStorage(); if (keyShouldBeEmpty) { 
assertNull(encKey); - assertNull(metaStore.readRaw(ENCRYPTION_KEY_PREFIX + grpId)); + assertNull(metaStore.readRaw(ENCRYPTION_KEYS_PREFIX + grpId)); } else { assertNotNull(encKey); - assertNotNull(metaStore.readRaw(ENCRYPTION_KEY_PREFIX + grpId)); + assertNotNull(metaStore.readRaw(ENCRYPTION_KEYS_PREFIX + grpId)); } } } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheGroupCreateTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheGroupCreateTest.java index 12fd8be41b6c5..4caf3033be4a7 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheGroupCreateTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheGroupCreateTest.java @@ -21,6 +21,7 @@ import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.managers.encryption.GridEncryptionManager; +import org.apache.ignite.internal.managers.encryption.GroupKey; import org.apache.ignite.internal.processors.cache.IgniteInternalCache; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.spi.encryption.keystore.KeystoreEncryptionKey; @@ -71,7 +72,11 @@ public void testCreateEncryptedCacheGroup() throws Exception { GridEncryptionManager encMgr = encrypted2.context().kernalContext().encryption(); - KeystoreEncryptionKey key2 = (KeystoreEncryptionKey)encMgr.groupKey(CU.cacheGroupId(ENCRYPTED_CACHE, ENCRYPTED_GROUP)); + GroupKey grpKey2 = encMgr.groupKey(CU.cacheGroupId(ENCRYPTED_CACHE, ENCRYPTED_GROUP)); + + assertNotNull(grpKey2); + + KeystoreEncryptionKey key2 = (KeystoreEncryptionKey)grpKey2.key(); assertNotNull(key2); assertNotNull(key2.key()); @@ -108,8 +113,11 @@ private KeystoreEncryptionKey createEncryptedCache(String cacheName, String grpN assertNotNull(enc); - KeystoreEncryptionKey key = - 
(KeystoreEncryptionKey)grid.context().encryption().groupKey(CU.cacheGroupId(cacheName, grpName)); + GroupKey grpKey = grid.context().encryption().groupKey(CU.cacheGroupId(cacheName, grpName)); + + assertNotNull(grpKey); + + KeystoreEncryptionKey key = (KeystoreEncryptionKey)grpKey.key(); assertNotNull(key); assertNotNull(key.key()); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheRestartTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheRestartTest.java index 9107ddf39947c..dd2a50a5c2cae 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheRestartTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheRestartTest.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.encryption; import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.managers.encryption.GroupKey; import org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.spi.encryption.keystore.KeystoreEncryptionKey; @@ -48,7 +49,8 @@ public void testCreateEncryptedCache() throws Exception { int grpId = CU.cacheGroupId(cacheName(), null); - KeystoreEncryptionKey keyBeforeRestart = (KeystoreEncryptionKey)grids.get1().context().encryption().groupKey(grpId); + KeystoreEncryptionKey keyBeforeRestart = + (KeystoreEncryptionKey)grids.get1().context().encryption().groupKey(grpId).key(); stopAllGrids(); @@ -56,7 +58,11 @@ public void testCreateEncryptedCache() throws Exception { checkEncryptedCaches(grids.get1(), grids.get2()); - KeystoreEncryptionKey keyAfterRestart = (KeystoreEncryptionKey)grids.get1().context().encryption().groupKey(grpId); + GroupKey grpKeyAfterRestart = grids.get1().context().encryption().groupKey(grpId); + + assertNotNull(grpKeyAfterRestart); + + KeystoreEncryptionKey keyAfterRestart = (KeystoreEncryptionKey)grpKeyAfterRestart.key(); 
assertNotNull(keyAfterRestart); assertNotNull(keyAfterRestart.key()); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptionMXBeanTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptionMXBeanTest.java index ab5bf521ae987..5844b5e481c6f 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptionMXBeanTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptionMXBeanTest.java @@ -20,10 +20,12 @@ import org.apache.ignite.IgniteException; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.managers.encryption.EncryptionMXBeanImpl; +import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.mxbean.EncryptionMXBean; import org.junit.Test; import static org.apache.ignite.cluster.ClusterState.ACTIVE_READ_ONLY; +import static org.apache.ignite.internal.managers.encryption.GridEncryptionManager.INITIAL_KEY_ID; import static org.apache.ignite.spi.encryption.keystore.KeystoreEncryptionSpi.DEFAULT_MASTER_KEY_NAME; import static org.apache.ignite.testframework.GridTestUtils.assertThrowsWithCause; @@ -110,6 +112,28 @@ public void testMasterKeyChangeOnInactiveAndReadonlyCluster() throws Exception { assertEquals(MASTER_KEY_NAME_2, grid0.encryption().getMasterKeyName()); } + /** @throws Exception If failed. 
*/ + @Test + public void testCacheGroupKeyChange() throws Exception { + IgniteEx ignite = startGrid(GRID_0); + + ignite.cluster().active(true); + + createEncryptedCache(ignite, null, cacheName(), null); + + EncryptionMXBean mBean = getMBean(GRID_0); + + int grpId = CU.cacheId(cacheName()); + + assertEquals(INITIAL_KEY_ID, ignite.context().encryption().groupKey(grpId).id()); + + mBean.changeCacheGroupKey(cacheName()); + + assertEquals(INITIAL_KEY_ID + 1, ignite.context().encryption().groupKey(grpId).id()); + + checkGroupKey(grpId, INITIAL_KEY_ID + 1, getTestTimeout()); + } + /** {@inheritDoc} */ @Override protected void beforeTestsStarted() throws Exception { cleanPersistenceDir(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/MasterKeyChangeTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/MasterKeyChangeTest.java index 81d489f6da527..91dcd052b05d7 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/encryption/MasterKeyChangeTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/MasterKeyChangeTest.java @@ -42,7 +42,7 @@ import static org.apache.ignite.IgniteSystemProperties.IGNITE_MASTER_KEY_NAME_TO_CHANGE_BEFORE_STARTUP; import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; -import static org.apache.ignite.internal.managers.encryption.GridEncryptionManager.ENCRYPTION_KEY_PREFIX; +import static org.apache.ignite.internal.managers.encryption.GridEncryptionManager.ENCRYPTION_KEYS_PREFIX; import static org.apache.ignite.internal.managers.encryption.GridEncryptionManager.MASTER_KEY_NAME_PREFIX; import static org.apache.ignite.spi.encryption.keystore.KeystoreEncryptionSpi.DEFAULT_MASTER_KEY_NAME; import static org.apache.ignite.testframework.GridTestUtils.assertThrowsWithCause; @@ -59,6 +59,7 @@ public class MasterKeyChangeTest extends AbstractEncryptionTest { IgniteConfiguration cfg = super.getConfiguration(name); cfg.setCommunicationSpi(new 
TestRecordingCommunicationSpi()); + cfg.setConsistentId(name); return cfg; } @@ -315,14 +316,14 @@ public void testRecoveryFromWalWithCacheOperations() throws Exception { DynamicCacheDescriptor desc = grid0.context().cache().cacheDescriptor(cacheName()); - Serializable oldKey = metaStorage.read(ENCRYPTION_KEY_PREFIX + desc.groupId()); + Serializable oldKey = metaStorage.read(ENCRYPTION_KEYS_PREFIX + desc.groupId()); assertNotNull(oldKey); dbMgr.checkpointReadLock(); // 6. Simulate group key write error to MetaStore for one node to check recovery from WAL. - metaStorage.write(ENCRYPTION_KEY_PREFIX + desc.groupId(), new byte[0]); + metaStorage.write(ENCRYPTION_KEYS_PREFIX + desc.groupId(), new byte[0]); dbMgr.checkpointReadUnlock(); @@ -468,7 +469,7 @@ public void testMultiByteMasterKeyNameWalRecovery() throws Exception { } /** {@inheritDoc} */ - @Override protected void afterTest() throws Exception { + @Override protected void beforeTest() throws Exception { stopAllGrids(); cleanPersistenceDir(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpWALManager.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpWALManager.java index b40d6c56d0179..3dedd8feae1c8 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpWALManager.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpWALManager.java @@ -164,6 +164,11 @@ public class NoOpWALManager implements IgniteWriteAheadLogManager { // No-op. 
} + /** {@inheritDoc} */ + @Override public long currentSegment() { + return 0; + } + /** {@inheritDoc} */ @Override public int walArchiveSegments() { return 0; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/util/BasicRateLimiterTest.java b/modules/core/src/test/java/org/apache/ignite/internal/util/BasicRateLimiterTest.java new file mode 100644 index 0000000000000..c136cb8f9db5b --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/util/BasicRateLimiterTest.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.util; + +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.IgniteInterruptedCheckedException; +import org.apache.ignite.testframework.GridTestUtils; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +/** + * Rate limiter tests. + */ +public class BasicRateLimiterTest { + /** + * Check change speed at runtime. 
+ */ + @Test + public void checkSpeedLimitChange() throws IgniteInterruptedCheckedException { + BasicRateLimiter limiter = new BasicRateLimiter(2); + + checkRate(limiter, 10); + + limiter.setRate(3); + + checkRate(limiter, 15); + + limiter.setRate(0.5); + + checkRate(limiter, 5); + } + + /** + * Check the average rate of the limiter. + * + * @param limiter Rate limiter. + * @param totalOps Number of operations. + */ + private void checkRate(BasicRateLimiter limiter, int totalOps) throws IgniteInterruptedCheckedException { + double permitsPerSec = limiter.getRate(); + long startTime = System.currentTimeMillis(); + + for (int i = 0; i < totalOps; i++) + limiter.acquire(1); + + long timeSpent = System.currentTimeMillis() - startTime; + + // Rate limiter aims for an average rate of permits per second. + assertEquals(1, Math.round((double)timeSpent / 1000 / totalOps * permitsPerSec)); + } + + /** + * Check rate limit with multiple threads. + */ + @Test + public void checkLimitMultithreaded() throws Exception { + int permitsPerSec = 1_000; + int totalOps = 10_000; + + BasicRateLimiter limiter = new BasicRateLimiter(permitsPerSec); + + int threads = Runtime.getRuntime().availableProcessors(); + + CyclicBarrier ready = new CyclicBarrier(threads + 1); + + AtomicInteger cntr = new AtomicInteger(); + + IgniteInternalFuture fut = GridTestUtils.runMultiThreadedAsync(() -> { + ready.await(); + + do { + limiter.acquire(1); + } + while (!Thread.currentThread().isInterrupted() && cntr.incrementAndGet() < totalOps); + + return null; + }, threads, "worker"); + + ready.await(); + + long startTime = System.currentTimeMillis(); + + fut.get(); + + long timeSpent = System.currentTimeMillis() - startTime; + + // Rate limiter aims for an average rate of permits per second. 
+ assertEquals(1, Math.round((double)timeSpent / 1000 / totalOps * permitsPerSec)); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/junits/common/GridCommonAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/testframework/junits/common/GridCommonAbstractTest.java index 7e557a1ec7281..8a35caa51e70e 100755 --- a/modules/core/src/test/java/org/apache/ignite/testframework/junits/common/GridCommonAbstractTest.java +++ b/modules/core/src/test/java/org/apache/ignite/testframework/junits/common/GridCommonAbstractTest.java @@ -107,6 +107,7 @@ import org.apache.ignite.internal.processors.cache.transactions.IgniteTxManager; import org.apache.ignite.internal.processors.cache.verify.IdleVerifyResultV2; import org.apache.ignite.internal.processors.service.IgniteServiceProcessor; +import org.apache.ignite.internal.util.future.GridCompoundFuture; import org.apache.ignite.internal.util.lang.GridAbsPredicate; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.G; @@ -2559,6 +2560,8 @@ private boolean validateMetricsMethod(Method m) { * @throws IgniteCheckedException If failed. 
*/ protected void enableCheckpoints(Collection nodes, boolean enable) throws IgniteCheckedException { + GridCompoundFuture fut = new GridCompoundFuture<>(); + for (Ignite node : nodes) { assert !node.cluster().localNode().isClient(); @@ -2569,8 +2572,12 @@ protected void enableCheckpoints(Collection nodes, boolean enable) throw GridCacheDatabaseSharedManager dbMgr0 = (GridCacheDatabaseSharedManager) dbMgr; - dbMgr0.enableCheckpoints(enable).get(); + fut.add(dbMgr0.enableCheckpoints(enable)); } + + fut.markInitialized(); + + fut.get(); } /** diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/wal/record/RecordUtils.java b/modules/core/src/test/java/org/apache/ignite/testframework/wal/record/RecordUtils.java index f5da43d5a65aa..27aa08a7a5973 100644 --- a/modules/core/src/test/java/org/apache/ignite/testframework/wal/record/RecordUtils.java +++ b/modules/core/src/test/java/org/apache/ignite/testframework/wal/record/RecordUtils.java @@ -26,12 +26,13 @@ import org.apache.ignite.internal.pagemem.wal.record.CheckpointRecord; import org.apache.ignite.internal.pagemem.wal.record.DataRecord; import org.apache.ignite.internal.pagemem.wal.record.ExchangeRecord; -import org.apache.ignite.internal.pagemem.wal.record.MasterKeyChangeRecord; +import org.apache.ignite.internal.pagemem.wal.record.MasterKeyChangeRecordV2; import org.apache.ignite.internal.pagemem.wal.record.MemoryRecoveryRecord; import org.apache.ignite.internal.pagemem.wal.record.MetastoreDataRecord; import org.apache.ignite.internal.pagemem.wal.record.MvccDataRecord; import org.apache.ignite.internal.pagemem.wal.record.MvccTxRecord; import org.apache.ignite.internal.pagemem.wal.record.PageSnapshot; +import org.apache.ignite.internal.pagemem.wal.record.ReencryptionStartRecord; import org.apache.ignite.internal.pagemem.wal.record.RollbackRecord; import org.apache.ignite.internal.pagemem.wal.record.SnapshotRecord; import org.apache.ignite.internal.pagemem.wal.record.SwitchSegmentRecord; @@ -55,12 
+56,14 @@ import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRootInlineRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRootRecord; +import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateIndexDataRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateLastAllocatedIndex; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateLastSuccessfulFullSnapshotId; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateLastSuccessfulSnapshotId; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateNextSnapshotId; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdatePartitionDataRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdatePartitionDataRecordV2; +import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdatePartitionDataRecordV3; import org.apache.ignite.internal.pagemem.wal.record.delta.NewRootInitRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.PageListMetaResetCountRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.PagesListAddPageRecord; @@ -110,11 +113,15 @@ import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.DATA_PAGE_UPDATE_RECORD; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.DATA_RECORD; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.ENCRYPTED_DATA_RECORD; +import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.ENCRYPTED_DATA_RECORD_V2; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.ENCRYPTED_RECORD; +import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.ENCRYPTED_RECORD_V2; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.EXCHANGE; 
import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.HEADER_RECORD; +import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.INDEX_META_PAGE_DELTA_RECORD; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.INIT_NEW_PAGE_RECORD; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.MASTER_KEY_CHANGE_RECORD; +import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.MASTER_KEY_CHANGE_RECORD_V2; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.MEMORY_RECOVERY; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.METASTORE_DATA_RECORD; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.META_PAGE_INIT; @@ -136,9 +143,11 @@ import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.PAGE_LIST_META_RESET_COUNT_RECORD; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.PAGE_RECORD; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.PARTITION_DESTROY; +import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.PARTITION_META_PAGE_DELTA_RECORD_V3; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.PARTITION_META_PAGE_UPDATE_COUNTERS; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.PARTITION_META_PAGE_UPDATE_COUNTERS_V2; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.PART_META_UPDATE_STATE; +import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.REENCRYPTION_START_RECORD; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.RESERVED; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.ROLLBACK_TX_RECORD; import static 
org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.ROTATED_ID_PART_RECORD; @@ -189,6 +198,7 @@ public class RecordUtils { put(PAGES_LIST_ADD_PAGE, RecordUtils::buildPagesListAddPageRecord); put(PAGES_LIST_REMOVE_PAGE, RecordUtils::buildPagesListRemovePageRecord); put(META_PAGE_INIT, RecordUtils::buildMetaPageInitRecord); + put(INDEX_META_PAGE_DELTA_RECORD, RecordUtils::buildMetaPageIndexDeltaRecord); put(PARTITION_META_PAGE_UPDATE_COUNTERS, RecordUtils::buildMetaPageUpdatePartitionDataRecord); put(MEMORY_RECOVERY, RecordUtils::buildMemoryRecoveryRecord); put(TRACKING_PAGE_DELTA, RecordUtils::buildTrackingPageDeltaRecord); @@ -209,13 +219,18 @@ public class RecordUtils { put(RESERVED, RecordUtils::buildReservedRecord); put(ROLLBACK_TX_RECORD, RecordUtils::buildRollbackRecord); put(PARTITION_META_PAGE_UPDATE_COUNTERS_V2, RecordUtils::buildMetaPageUpdatePartitionDataRecordV2); + put(PARTITION_META_PAGE_DELTA_RECORD_V3, RecordUtils::buildMetaPageUpdatePartitionDataRecordV3); put(MASTER_KEY_CHANGE_RECORD, RecordUtils::buildMasterKeyChangeRecord); + put(MASTER_KEY_CHANGE_RECORD_V2, RecordUtils::buildMasterKeyChangeRecordV2); + put(REENCRYPTION_START_RECORD, RecordUtils::buildEncryptionStatusRecord); put(ROTATED_ID_PART_RECORD, RecordUtils::buildRotatedIdPartRecord); put(MVCC_DATA_PAGE_MARK_UPDATED_RECORD, RecordUtils::buildDataPageMvccMarkUpdatedRecord); put(MVCC_DATA_PAGE_TX_STATE_HINT_UPDATED_RECORD, RecordUtils::buildDataPageMvccUpdateTxStateHintRecord); put(MVCC_DATA_PAGE_NEW_TX_STATE_HINT_UPDATED_RECORD, RecordUtils::buildDataPageMvccUpdateNewTxStateHintRecord); put(ENCRYPTED_RECORD, RecordUtils::buildEncryptedRecord); put(ENCRYPTED_DATA_RECORD, RecordUtils::buildEncryptedDataRecord); + put(ENCRYPTED_RECORD_V2, RecordUtils::buildEncryptedRecordV2); + put(ENCRYPTED_DATA_RECORD_V2, RecordUtils::buildEncryptedDataRecordV2); put(MVCC_DATA_RECORD, RecordUtils::buildMvccDataRecord); put(MVCC_TX_RECORD, RecordUtils::buildMvccTxRecord); put(CONSISTENT_CUT, 
RecordUtils::buildConsistentCutRecord); @@ -409,6 +424,11 @@ public static MetaPageInitRecord buildMetaPageInitRecord() { return new MetaPageInitRecord(1, 1, 1, 1, 1, 1); } + /** **/ + public static MetaPageUpdateIndexDataRecord buildMetaPageIndexDeltaRecord() { + return new MetaPageUpdateIndexDataRecord(1, 1, 0, 0); + } + /** **/ public static MetaPageUpdatePartitionDataRecord buildMetaPageUpdatePartitionDataRecord() { return new MetaPageUpdatePartitionDataRecord(1, 1, 1, 1, 1, 1, (byte)1, 1); @@ -514,8 +534,23 @@ public static MetaPageUpdatePartitionDataRecordV2 buildMetaPageUpdatePartitionDa } /** **/ - public static MasterKeyChangeRecord buildMasterKeyChangeRecord() { - return new MasterKeyChangeRecord("", new HashMap<>()); + public static MetaPageUpdatePartitionDataRecordV3 buildMetaPageUpdatePartitionDataRecordV3() { + return new MetaPageUpdatePartitionDataRecordV3(1, 1, 1, 1, 1, 1, (byte)1, 1, 1, 0, 0); + } + + /** **/ + public static UnsupportedWalRecord buildMasterKeyChangeRecord() { + return new UnsupportedWalRecord(MASTER_KEY_CHANGE_RECORD); + } + + /** **/ + public static MasterKeyChangeRecordV2 buildMasterKeyChangeRecordV2() { + return new MasterKeyChangeRecordV2("", Collections.emptyList()); + } + + /** **/ + public static ReencryptionStartRecord buildEncryptionStatusRecord() { + return new ReencryptionStartRecord(Collections.emptyMap()); } /** **/ @@ -548,6 +583,16 @@ public static UnsupportedWalRecord buildEncryptedDataRecord() { return new UnsupportedWalRecord(ENCRYPTED_DATA_RECORD); } + /** **/ + public static UnsupportedWalRecord buildEncryptedRecordV2() { + return new UnsupportedWalRecord(ENCRYPTED_RECORD_V2); + } + + /** **/ + public static UnsupportedWalRecord buildEncryptedDataRecordV2() { + return new UnsupportedWalRecord(ENCRYPTED_DATA_RECORD_V2); + } + /** **/ public static MvccDataRecord buildMvccDataRecord() { return new MvccDataRecord(Collections.emptyList(), 1); diff --git 
a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicWithPersistenceTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicWithPersistenceTestSuite.java index 210b1c7e85092..6addefe5f9891 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicWithPersistenceTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicWithPersistenceTestSuite.java @@ -24,6 +24,8 @@ import org.apache.ignite.internal.ClusterBaselineNodesMetricsSelfTest; import org.apache.ignite.internal.GridNodeMetricsLogPdsSelfTest; import org.apache.ignite.internal.cluster.IgniteClusterIdTagTest; +import org.apache.ignite.internal.encryption.CacheGroupKeyChangeTest; +import org.apache.ignite.internal.encryption.CacheGroupReencryptionTest; import org.apache.ignite.internal.encryption.EncryptedCacheBigEntryTest; import org.apache.ignite.internal.encryption.EncryptedCacheCreateTest; import org.apache.ignite.internal.encryption.EncryptedCacheDestroyTest; @@ -74,6 +76,9 @@ MasterKeyChangeTest.class, MasterKeyChangeConsistencyCheckTest.class, + CacheGroupKeyChangeTest.class, + CacheGroupReencryptionTest.class, + EncryptionMXBeanTest.class, IgniteSnapshotManagerSelfTest.class, diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteUtilSelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteUtilSelfTestSuite.java index e45ac074be172..1e110b1fba6ff 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteUtilSelfTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteUtilSelfTestSuite.java @@ -20,6 +20,7 @@ import org.apache.ignite.internal.IgniteVersionUtilsSelfTest; import org.apache.ignite.internal.pagemem.impl.PageIdUtilsSelfTest; import org.apache.ignite.internal.processors.cache.GridCacheUtilsSelfTest; +import org.apache.ignite.internal.util.BasicRateLimiterTest; import 
org.apache.ignite.internal.util.DistributedProcessCoordinatorLeftTest; import org.apache.ignite.internal.util.GridArraysSelfTest; import org.apache.ignite.internal.util.GridConcurrentMultiPairQueueTest; @@ -137,7 +138,9 @@ GridCountDownCallbackTest.class, - DistributedProcessCoordinatorLeftTest.class + DistributedProcessCoordinatorLeftTest.class, + + BasicRateLimiterTest.class }) public class IgniteUtilSelfTestSuite { } diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingTestSuite.java index c4b8d0f42015f..1074af312ed60 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingTestSuite.java +++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingTestSuite.java @@ -17,6 +17,7 @@ package org.apache.ignite.testsuites; +import org.apache.ignite.internal.encryption.CacheGroupReencryptionTest; import org.apache.ignite.internal.processors.cache.IgnitePdsSingleNodeWithIndexingAndGroupPutGetPersistenceSelfTest; import org.apache.ignite.internal.processors.cache.IgnitePdsSingleNodeWithIndexingPutGetPersistenceTest; import org.apache.ignite.internal.processors.cache.index.ClientReconnectWithSqlTableConfiguredTest; @@ -54,7 +55,8 @@ RebuildIndexTest.class, IgniteClusterSnapshotWithIndexesTest.class, ClientReconnectWithSqlTableConfiguredTest.class, - MultipleParallelCacheDeleteDeadlockTest.class + MultipleParallelCacheDeleteDeadlockTest.class, + CacheGroupReencryptionTest.class }) public class IgnitePdsWithIndexingTestSuite { } diff --git a/modules/spring/src/test/java/org/apache/ignite/encryption/SpringEncryptedCacheRestartTest.java b/modules/spring/src/test/java/org/apache/ignite/encryption/SpringEncryptedCacheRestartTest.java index bad97a504032a..8866ab64fe440 100644 --- a/modules/spring/src/test/java/org/apache/ignite/encryption/SpringEncryptedCacheRestartTest.java +++ 
b/modules/spring/src/test/java/org/apache/ignite/encryption/SpringEncryptedCacheRestartTest.java @@ -24,6 +24,7 @@ import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.IgnitionEx; import org.apache.ignite.internal.encryption.EncryptedCacheRestartTest; +import org.apache.ignite.internal.managers.encryption.GroupKey; import org.apache.ignite.internal.processors.cache.IgniteInternalCache; import org.apache.ignite.internal.util.IgniteUtils; import org.apache.ignite.internal.util.typedef.T2; @@ -81,13 +82,21 @@ public void testEncryptionKeysEqualsOnThirdNodeJoin() throws Exception { int grpId = CU.cacheGroupId(enc.name(), enc.configuration().getGroupName()); - KeystoreEncryptionKey key0 = (KeystoreEncryptionKey)g.get1().context().encryption().groupKey(grpId); - KeystoreEncryptionKey key1 = (KeystoreEncryptionKey)g.get2().context().encryption().groupKey(grpId); - KeystoreEncryptionKey key2 = (KeystoreEncryptionKey)g2.context().encryption().groupKey(grpId); + GroupKey grpKey0 = g.get1().context().encryption().groupKey(grpId); + GroupKey grpKey1 = g.get2().context().encryption().groupKey(grpId); + GroupKey grpKey2 = g2.context().encryption().groupKey(grpId); - assertNotNull(cacheName, key0); - assertNotNull(cacheName, key1); - assertNotNull(cacheName, key2); + assertNotNull(cacheName, grpKey0); + assertNotNull(cacheName, grpKey1); + assertNotNull(cacheName, grpKey2); + + KeystoreEncryptionKey key0 = (KeystoreEncryptionKey)grpKey0.key(); + KeystoreEncryptionKey key1 = (KeystoreEncryptionKey)grpKey1.key(); + KeystoreEncryptionKey key2 = (KeystoreEncryptionKey)grpKey2.key(); + + assertNotNull(cacheName, key0.key()); + assertNotNull(cacheName, key1.key()); + assertNotNull(cacheName, key2.key()); assertNotNull(cacheName, key0.key()); assertNotNull(cacheName, key1.key()); @@ -121,15 +130,23 @@ public void testCreateEncryptedCacheGroup() throws Exception { assertNotNull(encrypted2); - KeystoreEncryptionKey key = 
(KeystoreEncryptionKey)g0.context().encryption().groupKey( + GroupKey grpKey = g0.context().encryption().groupKey( CU.cacheGroupId(encrypted.name(), encrypted.configuration().getGroupName())); + assertNotNull(grpKey); + + KeystoreEncryptionKey key = (KeystoreEncryptionKey)grpKey.key(); + assertNotNull(key); assertNotNull(key.key()); - KeystoreEncryptionKey key2 = (KeystoreEncryptionKey)g0.context().encryption().groupKey( + GroupKey grpKey2 = g0.context().encryption().groupKey( CU.cacheGroupId(encrypted2.name(), encrypted2.configuration().getGroupName())); + assertNotNull(grpKey2); + + KeystoreEncryptionKey key2 = (KeystoreEncryptionKey)grpKey2.key(); + assertNotNull(key2); assertNotNull(key2.key()); From ff0e4cb616f331d042aa463621554e73b3880fae Mon Sep 17 00:00:00 2001 From: Aleksey Plekhanov Date: Tue, 27 Oct 2020 22:44:28 +0300 Subject: [PATCH 004/110] Fix imports. --- .../query/ScanQueryConcurrentUpdatesAbstractTest.java | 11 +++++------ .../cache/query/ScanQueryConcurrentUpdatesTest.java | 5 ++--- .../query/ScanQueryConcurrentSqlUpdatesTest.java | 5 ++--- 3 files changed, 9 insertions(+), 12 deletions(-) diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentUpdatesAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentUpdatesAbstractTest.java index 502a628d3a4e9..87a51d85cfccb 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentUpdatesAbstractTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentUpdatesAbstractTest.java @@ -17,6 +17,11 @@ package org.apache.ignite.internal.processors.cache.query; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import javax.cache.Cache; +import javax.cache.expiry.Duration; import org.apache.ignite.IgniteCache; import 
org.apache.ignite.cache.CacheMode; import org.apache.ignite.cache.query.ScanQuery; @@ -25,12 +30,6 @@ import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.junit.Test; -import javax.cache.Cache; -import javax.cache.expiry.Duration; -import java.util.List; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - /** * A base for tests that check the behaviour of scan queries run on a data set that is modified concurrently. * Actual tests should implement a way of cache creation, modification and destruction. diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentUpdatesTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentUpdatesTest.java index 17dd603f26eaa..598e89e49938e 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentUpdatesTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentUpdatesTest.java @@ -17,13 +17,12 @@ package org.apache.ignite.internal.processors.cache.query; +import javax.cache.expiry.CreatedExpiryPolicy; +import javax.cache.expiry.Duration; import org.apache.ignite.IgniteCache; import org.apache.ignite.cache.CacheMode; import org.apache.ignite.configuration.CacheConfiguration; -import javax.cache.expiry.CreatedExpiryPolicy; -import javax.cache.expiry.Duration; - /** * {@link ScanQueryConcurrentUpdatesAbstractTest} with caches created, updates and destroyed using Java API. 
*/ diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentSqlUpdatesTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentSqlUpdatesTest.java index bb6753541fc6f..9459134a3808f 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentSqlUpdatesTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/query/ScanQueryConcurrentSqlUpdatesTest.java @@ -17,6 +17,8 @@ package org.apache.ignite.internal.processors.cache.query; +import javax.cache.expiry.CreatedExpiryPolicy; +import javax.cache.expiry.Duration; import org.apache.ignite.IgniteCache; import org.apache.ignite.cache.CacheMode; import org.apache.ignite.cache.QueryEntity; @@ -24,9 +26,6 @@ import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.internal.IgniteEx; -import javax.cache.expiry.CreatedExpiryPolicy; -import javax.cache.expiry.Duration; - /** * {@link ScanQueryConcurrentUpdatesAbstractTest} with caches created, updates and destroyed using SQL DDL queries. */ From 12ab7e0de6cb5a579f7350079aeb53ea8706b0f2 Mon Sep 17 00:00:00 2001 From: Aleksey Plekhanov Date: Wed, 28 Oct 2020 15:02:27 +0300 Subject: [PATCH 005/110] IGNITE-12451 Introduce deadlock detection for atomic cache putAll operations - Fixes #8268. 
Signed-off-by: Aleksey Plekhanov --- .../processors/cache/LockedEntriesInfo.java | 150 ++++++++++++++++++ .../dht/atomic/GridDhtAtomicCache.java | 39 +---- .../local/atomic/GridLocalAtomicCache.java | 38 ++--- ...tomicConcurrentUnorderedUpdateAllTest.java | 133 ++++++++++++++++ .../testsuites/IgniteCacheMvccTestSuite1.java | 2 + .../testsuites/IgniteCacheTestSuite.java | 2 + 6 files changed, 309 insertions(+), 55 deletions(-) create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/LockedEntriesInfo.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAtomicConcurrentUnorderedUpdateAllTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/LockedEntriesInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/LockedEntriesInfo.java new file mode 100644 index 0000000000000..039c6bd49cd22 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/LockedEntriesInfo.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * Helper class to acquire java level locks on unordered set of entries and avoid deadlocks. + */ +public class LockedEntriesInfo { + /** Deadlock detection timeout in milliseconds. */ + private static final long DEADLOCK_DETECTION_TIMEOUT = 500L; + + /** Locked entries info for each thread. */ + private final Map lockedEntriesPerThread = new ConcurrentHashMap<>(); + + /** + * Attempt to lock all provided entries avoiding deadlocks. + * + * @param entries Entries to lock. + * @return {@code True} if entries were successfully locked, {@code false} if possible deadlock detected or + * some entries are obsolete (lock attempt should be retried in this case). + */ + public boolean tryLockEntries(GridCacheEntryEx[] entries) { + long threadId = Thread.currentThread().getId(); + + LockedEntries lockedEntries = new LockedEntries(entries); + + lockedEntriesPerThread.put(threadId, lockedEntries); + + boolean wasInterrupted = false; + + try { + for (int i = 0; i < entries.length; i++) { + GridCacheEntryEx entry = entries[i]; + + if (entry == null) + continue; + + boolean retry = false; + + while (true) { + if (entry.tryLockEntry(DEADLOCK_DETECTION_TIMEOUT)) + break; // Successfully locked. + else { + wasInterrupted |= Thread.interrupted(); // Clear thread interruption flag. + + if (hasLockCollisions(entry, lockedEntries)) { + // Possible deadlock detected, unlock all locked entries and retry again. + retry = true; + + break; + } + // Possible deadlock not detected, just retry lock on current entry. + } + } + + if (!retry && entry.obsolete()) { + entry.unlockEntry(); + + retry = true; + } + + if (retry) { + lockedEntries.lockedIdx = -1; + + // Unlock all previously locked. 
+ for (int j = 0; j < i; j++) { + if (entries[j] != null) + entries[j].unlockEntry(); + } + + return false; + } + + lockedEntries.lockedIdx = i; + } + + return true; + } + finally { + if (wasInterrupted) + Thread.currentThread().interrupt(); + + // Already acuired all locks or released all locks here, deadlock is not possible by this thread anymore, + // can safely delete locks information. + lockedEntriesPerThread.remove(threadId); + } + } + + /** + * @param entry Entry. + * @param curLockedEntries Current locked entries info. + * @return {@code True} if another thread holds lock for this entry and started to lock entries earlier. + */ + private boolean hasLockCollisions(GridCacheEntryEx entry, LockedEntries curLockedEntries) { + for (Map.Entry other : lockedEntriesPerThread.entrySet()) { + LockedEntries otherLockedEntries = other.getValue(); + + if (otherLockedEntries == curLockedEntries || otherLockedEntries.ts > curLockedEntries.ts) + // Skip current thread and threads started to lock after the current thread. + continue; + + GridCacheEntryEx[] otherThreadLocks = otherLockedEntries.entries; + + int otherThreadLockedIdx = otherLockedEntries.lockedIdx; + + // Visibility guarantees provided by volatile lockedIdx field. + for (int i = 0; i <= otherThreadLockedIdx; i++) { + if (otherThreadLocks[i] == entry) + return true; + } + } + + return false; + } + + /** Per-thread locked entries info. */ + private static class LockedEntries { + /** Timestamp of lock. */ + private final long ts = System.nanoTime(); + + /** Entries to lock. */ + private final GridCacheEntryEx[] entries; + + /** Current locked entry index. 
*/ + private volatile int lockedIdx = -1; + + /** */ + private LockedEntries(GridCacheEntryEx[] entries) { + this.entries = entries; + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java index 5249aa98836f5..fec3967d92d03 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java @@ -65,6 +65,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheUpdateAtomicResult; import org.apache.ignite.internal.processors.cache.IgniteCacheExpiryPolicy; import org.apache.ignite.internal.processors.cache.KeyCacheObject; +import org.apache.ignite.internal.processors.cache.LockedEntriesInfo; import org.apache.ignite.internal.processors.cache.binary.CacheObjectBinaryProcessorImpl; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheAdapter; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry; @@ -168,6 +169,9 @@ public class GridDhtAtomicCache extends GridDhtCacheAdapter { } }; + /** Locked entries info for each thread. */ + private final LockedEntriesInfo lockedEntriesInfo = new LockedEntriesInfo(); + /** Update reply closure. 
*/ @GridToStringExclude private UpdateReplyClosure updateReplyClos; @@ -3111,44 +3115,17 @@ private List lockEntries(GridNearAtomicAbstractUpdateRequest } } else { - List locked = new ArrayList<>(req.size()); + GridDhtCacheEntry[] locked = new GridDhtCacheEntry[req.size()]; while (true) { for (int i = 0; i < req.size(); i++) { GridDhtCacheEntry entry = entryExx(req.key(i), topVer); - locked.add(entry); - } - - boolean retry = false; - - for (int i = 0; i < locked.size(); i++) { - GridCacheMapEntry entry = locked.get(i); - - if (entry == null) - continue; - - entry.lockEntry(); - - if (entry.obsolete()) { - // Unlock all locked. - for (int j = 0; j <= i; j++) { - if (locked.get(j) != null) - locked.get(j).unlockEntry(); - } - - // Clear entries. - locked.clear(); - - // Retry. - retry = true; - - break; - } + locked[i] = entry; } - if (!retry) - return locked; + if (lockedEntriesInfo.tryLockEntries(locked)) + return Arrays.asList(locked); } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java index 6b2128de70815..e430a7183d5d9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java @@ -54,6 +54,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheReturn; import org.apache.ignite.internal.processors.cache.IgniteCacheExpiryPolicy; import org.apache.ignite.internal.processors.cache.KeyCacheObject; +import org.apache.ignite.internal.processors.cache.LockedEntriesInfo; import org.apache.ignite.internal.processors.cache.local.GridLocalCache; import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.transactions.IgniteTxLocalEx; 
@@ -90,6 +91,9 @@ public class GridLocalAtomicCache extends GridLocalCache { /** */ private GridCachePreloader preldr; + /** Locked entries info for each thread. */ + private final LockedEntriesInfo lockedEntriesInfo = new LockedEntriesInfo(); + /** * Empty constructor required by {@link Externalizable}. */ @@ -1476,11 +1480,13 @@ else if (op == UPDATE) { * @return Collection of locked entries. */ private List lockEntries(Collection keys) { - List locked = new ArrayList<>(keys.size()); + GridCacheEntryEx[] locked = new GridCacheEntryEx[keys.size()]; boolean nullKeys = false; while (true) { + int i = 0; + for (K key : keys) { if (key == null) { nullKeys = true; @@ -1490,40 +1496,24 @@ private List lockEntries(Collection keys) { GridCacheEntryEx entry = entryEx(ctx.toCacheKeyObject(key)); - locked.add(entry); + locked[i++] = entry; } if (nullKeys) break; - for (int i = 0; i < locked.size(); i++) { - GridCacheEntryEx entry = locked.get(i); - - entry.lockEntry(); - - if (entry.obsolete()) { - // Unlock all locked. - for (int j = 0; j <= i; j++) - locked.get(j).unlockEntry(); - - // Clear entries. - locked.clear(); - - // Retry. 
- break; - } - } - - if (!locked.isEmpty()) - return locked; + if (lockedEntriesInfo.tryLockEntries(locked)) + return Arrays.asList(locked); } assert nullKeys; AffinityTopologyVersion topVer = ctx.affinity().affinityTopologyVersion(); - for (GridCacheEntryEx entry : locked) - entry.touch(); + for (GridCacheEntryEx entry : locked) { + if (entry != null) + entry.touch(); + } throw new NullPointerException("Null key."); } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAtomicConcurrentUnorderedUpdateAllTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAtomicConcurrentUnorderedUpdateAllTest.java new file mode 100644 index 0000000000000..6863c94930ee1 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAtomicConcurrentUnorderedUpdateAllTest.java @@ -0,0 +1,133 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache; + +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.atomic.AtomicInteger; +import javax.cache.configuration.Factory; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.store.CacheStore; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC; + +/** Test concurrent putAll/removeAll operations with unordered set of keys on atomic caches. */ +@RunWith(Parameterized.class) +public class IgniteCacheAtomicConcurrentUnorderedUpdateAllTest extends GridCommonAbstractTest { + /** */ + private static final int NODES_CNT = 3; + + /** */ + private static final int THREADS_CNT = 20; + + /** */ + private static final String CACHE_NAME = "test-cache"; + + /** */ + private static final int CACHE_SIZE = 1_000; + + /** Parameters. */ + @Parameterized.Parameters(name = "cacheMode={0}, writeThrough={1}") + public static Iterable data() { + return Arrays.asList( + new Object[] {CacheMode.PARTITIONED, Boolean.FALSE}, + new Object[] {CacheMode.PARTITIONED, Boolean.TRUE}, + new Object[] {CacheMode.REPLICATED, Boolean.FALSE}, + new Object[] {CacheMode.REPLICATED, Boolean.TRUE}, + new Object[] {CacheMode.LOCAL, Boolean.FALSE}, + new Object[] {CacheMode.LOCAL, Boolean.TRUE} + ); + } + + /** Cache mode. */ + @Parameterized.Parameter() + public CacheMode cacheMode; + + /** Write through. 
*/ + @Parameterized.Parameter(1) + public Boolean writeThrough; + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + super.afterTest(); + + stopAllGrids(); + } + + /** + * @throws Exception If failed. + */ + @Test + public void testConcurrentUpdateAll() throws Exception { + Ignite ignite = startGridsMultiThreaded(NODES_CNT); + + Factory> cacheStoreFactory = writeThrough ? + new MapCacheStoreStrategy.MapStoreFactory() : null; + + IgniteCache cache = ignite.createCache(new CacheConfiguration<>(CACHE_NAME) + .setWriteThrough(writeThrough).setCacheStoreFactory(cacheStoreFactory) + .setCacheMode(cacheMode).setAtomicityMode(ATOMIC).setBackups(1)); + + CyclicBarrier barrier = new CyclicBarrier(THREADS_CNT); + + AtomicInteger threadCnt = new AtomicInteger(); + + GridTestUtils.runMultiThreaded(() -> { + int threadIdx = threadCnt.incrementAndGet(); + + IgniteCache cache0 = grid(ThreadLocalRandom.current().nextInt(NODES_CNT)).cache(CACHE_NAME); + + Map map = new LinkedHashMap<>(); + + if (threadIdx % 2 == 0) { + for (int i = 0; i < CACHE_SIZE; i++) + map.put(i, i); + } else { + for (int i = CACHE_SIZE - 1; i >= 0; i--) + map.put(i, i); + } + + for (int i = 0; i < 20; i++) { + try { + barrier.await(); + } catch (Exception e) { + fail(e.getMessage()); + } + + cache0.putAll(map); + + cache0.removeAll(map.keySet()); + + log.info("Thread " + threadIdx + " iteration " + i + " finished"); + } + }, THREADS_CNT, "update-all-runner"); + + assertEquals(0, cache.size()); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheMvccTestSuite1.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheMvccTestSuite1.java index 52ca6487577e2..e0589f73cb2bd 100755 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheMvccTestSuite1.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheMvccTestSuite1.java @@ -63,6 +63,7 @@ import 
org.apache.ignite.internal.processors.cache.GridCacheStopSelfTest; import org.apache.ignite.internal.processors.cache.GridCacheTcpClientDiscoveryMultiThreadedTest; import org.apache.ignite.internal.processors.cache.GridDataStorageConfigurationConsistencySelfTest; +import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicConcurrentUnorderedUpdateAllTest; import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicInvokeTest; import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicLocalInvokeTest; import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicLocalWithStoreInvokeTest; @@ -151,6 +152,7 @@ public static List> suite() { ignoredTests.add(IgniteCacheAtomicWithStoreInvokeTest.class); ignoredTests.add(IgniteCacheAtomicLocalInvokeTest.class); ignoredTests.add(IgniteCacheAtomicLocalWithStoreInvokeTest.class); + ignoredTests.add(IgniteCacheAtomicConcurrentUnorderedUpdateAllTest.class); ignoredTests.add(GridCachePartitionedLocalStoreSelfTest.class); ignoredTests.add(GridCacheReplicatedLocalStoreSelfTest.class); ignoredTests.add(CacheStoreReadFromBackupTest.class); diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java index 5876beef2ce69..f89759a2d0eec 100755 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java @@ -102,6 +102,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheTxPartitionedLocalStoreSelfTest; import org.apache.ignite.internal.processors.cache.GridCacheTxUsersAffinityMapperSelfTest; import org.apache.ignite.internal.processors.cache.GridDataStorageConfigurationConsistencySelfTest; +import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicConcurrentUnorderedUpdateAllTest; import 
org.apache.ignite.internal.processors.cache.IgniteCacheAtomicInvokeTest; import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicLocalInvokeTest; import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicLocalWithStoreInvokeTest; @@ -215,6 +216,7 @@ public static List> suite(Collection ignoredTests) { GridTestUtils.addTestIfNeeded(suite, IgniteCacheAtomicWithStoreInvokeTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, IgniteCacheAtomicLocalInvokeTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, IgniteCacheAtomicLocalWithStoreInvokeTest.class, ignoredTests); + GridTestUtils.addTestIfNeeded(suite, IgniteCacheAtomicConcurrentUnorderedUpdateAllTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, IgniteCacheTxInvokeTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, CacheEntryProcessorNonSerializableTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, CacheEntryProcessorExternalizableFailedTest.class, ignoredTests); From faf4f467e964d478b3d99b94d43d32430a7e88f0 Mon Sep 17 00:00:00 2001 From: Denis Magda Date: Wed, 28 Oct 2020 12:01:10 -0700 Subject: [PATCH 006/110] ignite docs: fixed broken lings to the SQLLine page --- docs/_docs/monitoring-metrics/system-views.adoc | 2 +- docs/_docs/quick-start/sql.adoc | 2 +- docs/_docs/sql-reference/operational-commands.adoc | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/_docs/monitoring-metrics/system-views.adoc b/docs/_docs/monitoring-metrics/system-views.adoc index 92b713ff1ff11..1d400c6681800 100644 --- a/docs/_docs/monitoring-metrics/system-views.adoc +++ b/docs/_docs/monitoring-metrics/system-views.adoc @@ -32,7 +32,7 @@ See the link:SQL/schemas[Understanding Schemas] page for the information on how == Querying System Views -To query the system views using the link:sqlline[SQLLine] tool, connect to the SYS schema as follows: +To query the system views using the link:tools/sqlline[SQLLine] tool, connect to the SYS 
schema as follows: [source, shell] ---- diff --git a/docs/_docs/quick-start/sql.adoc b/docs/_docs/quick-start/sql.adoc index 7d1c3dfe82934..c1d1eed2e13f5 100644 --- a/docs/_docs/quick-start/sql.adoc +++ b/docs/_docs/quick-start/sql.adoc @@ -126,4 +126,4 @@ Easy! From here, you may want to: * Read more about using Ignite and link:SQL/sql-introduction[SQL] -* Read more about using link:sqlline[sqlline] +* Read more about using link:tools/sqlline[sqlline] diff --git a/docs/_docs/sql-reference/operational-commands.adoc b/docs/_docs/sql-reference/operational-commands.adoc index f5dea2254ce6d..be7223f6bb51f 100644 --- a/docs/_docs/sql-reference/operational-commands.adoc +++ b/docs/_docs/sql-reference/operational-commands.adoc @@ -115,7 +115,7 @@ While streaming mode allows you to load data much faster than other data loading 2. Due to streaming mode's asynchronous nature, you cannot know update counts for every statement executed; all JDBC/ODBC commands returning update counts will return 0. === Example -As an example, you can use the sample world.sql file that is shipped with the latest Ignite distribution. It can be found in the `{IGNITE_HOME}/examples/sql/` directory. You can use the `run` command from link:sqlline[SQLLine, window=_blank], as shown below: +As an example, you can use the sample world.sql file that is shipped with the latest Ignite distribution. It can be found in the `{IGNITE_HOME}/examples/sql/` directory. You can use the `run` command from tools/sqlline[SQLLine, window=_blank], as shown below: [source,shell] ---- From 0c72bd21e751497d137c74c7e132b6aff314ed9e Mon Sep 17 00:00:00 2001 From: Aleksey Plekhanov Date: Thu, 29 Oct 2020 16:13:08 +0300 Subject: [PATCH 007/110] IGNITE-13008 Java thin client: Add compatibility tests - Fixes #8250. 
Signed-off-by: Aleksey Plekhanov --- .../AbstractClientCompatibilityTest.java | 247 +++++++++++ .../clients/JavaThinCompatibilityTest.java | 415 ++++++++++++++++++ .../clients/JdbcThinCompatibilityTest.java | 82 ++++ .../{jdbc => clients}/package-info.java | 4 +- .../jdbc/JdbcThinCompatibilityTest.java | 197 --------- .../IgniteCompatibilityAbstractTest.java | 5 +- .../IgniteCompatibilityBasicTestSuite.java | 6 +- 7 files changed, 753 insertions(+), 203 deletions(-) create mode 100644 modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/AbstractClientCompatibilityTest.java create mode 100644 modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/JavaThinCompatibilityTest.java create mode 100644 modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/JdbcThinCompatibilityTest.java rename modules/compatibility/src/test/java/org/apache/ignite/compatibility/{jdbc => clients}/package-info.java (87%) delete mode 100644 modules/compatibility/src/test/java/org/apache/ignite/compatibility/jdbc/JdbcThinCompatibilityTest.java diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/AbstractClientCompatibilityTest.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/AbstractClientCompatibilityTest.java new file mode 100644 index 0000000000000..f1ed37b818c16 --- /dev/null +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/AbstractClientCompatibilityTest.java @@ -0,0 +1,247 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.compatibility.clients; + +import java.util.Arrays; +import java.util.Collection; +import java.util.stream.Collectors; +import org.apache.ignite.Ignite; +import org.apache.ignite.compatibility.testframework.junits.Dependency; +import org.apache.ignite.compatibility.testframework.junits.IgniteCompatibilityAbstractTest; +import org.apache.ignite.compatibility.testframework.junits.IgniteCompatibilityNodeRunner; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteVersionUtils; +import org.apache.ignite.internal.util.GridJavaProcess; +import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteInClosure; +import org.apache.ignite.lang.IgniteProductVersion; +import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.multijvm.IgniteProcessProxy; +import org.jetbrains.annotations.NotNull; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +/** + * Tests that current client version can connect to the server with specified version and + * specified client version can connect to the current server version. + */ +@RunWith(Parameterized.class) +public abstract class AbstractClientCompatibilityTest extends IgniteCompatibilityAbstractTest { + /** Version 2.5.0. 
*/ + protected static final IgniteProductVersion VER_2_5_0 = IgniteProductVersion.fromString("2.5.0"); + + /** Version 2.7.0. */ + protected static final IgniteProductVersion VER_2_7_0 = IgniteProductVersion.fromString("2.7.0"); + + /** Version 2.8.0. */ + protected static final IgniteProductVersion VER_2_8_0 = IgniteProductVersion.fromString("2.8.0"); + + /** Version 2.9.0. */ + protected static final IgniteProductVersion VER_2_9_0 = IgniteProductVersion.fromString("2.9.0"); + + /** Ignite versions to test. Note: Only released versions or current version should be included to this list. */ + protected static final String[] TESTED_IGNITE_VERSIONS = new String[] { + "2.4.0", + "2.5.0", + "2.6.0", + "2.7.0", + "2.7.5", + "2.7.6", + "2.8.0", + "2.8.1", + "2.9.0", + IgniteVersionUtils.VER_STR + }; + + /** Parameters. */ + @Parameterized.Parameters(name = "Version {0}") + public static Iterable versions() { + return Arrays.stream(TESTED_IGNITE_VERSIONS) + .map(v -> new Object[] {v}) + .collect(Collectors.toList()); + } + + /** Old Ignite version. */ + @Parameterized.Parameter + public String verFormatted; + + /** */ + protected IgniteProductVersion ver; + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + ver = IgniteProductVersion.fromString(verFormatted); + } + + /** {@inheritDoc} */ + @Override protected @NotNull Collection getDependencies(String igniteVer) { + Collection dependencies = super.getDependencies(igniteVer); + + dependencies.add(new Dependency("indexing", "ignite-indexing", false)); + + // Add corresponding H2 version. + if (ver.compareTo(VER_2_7_0) < 0) + dependencies.add(new Dependency("h2", "com.h2database", "h2", "1.4.195", false)); + + return dependencies; + } + + /** + * @throws Exception If failed. 
+ */ + @Test + public void testOldClientToCurrentServer() throws Exception { + try (Ignite ignite = startGrid(0)) { + initNode(ignite); + + if (verFormatted.equals(IgniteVersionUtils.VER_STR)) + testClient(verFormatted); + else { + String fileName = IgniteCompatibilityNodeRunner.storeToFile((IgniteInClosure)this::testClient); + + GridJavaProcess proc = GridJavaProcess.exec( + RemoteClientRunner.class.getName(), + IgniteVersionUtils.VER_STR + ' ' + fileName, + log, + log::info, + null, + null, + getProcessProxyJvmArgs(verFormatted), + null + ); + + try { + GridTestUtils.waitForCondition(() -> !proc.getProcess().isAlive(), 5_000L); + + assertEquals(0, proc.getProcess().exitValue()); + } + finally { + if (proc.getProcess().isAlive()) + proc.kill(); + } + } + } + } + + /** + * @throws Exception If failed. + */ + @Test + public void testCurrentClientToOldServer() throws Exception { + IgniteProcessProxy proxy = null; + + try { + if (verFormatted.equals(IgniteVersionUtils.VER_STR)) { + Ignite ignite = startGrid(0); + + initNode(ignite); + } + else { + Ignite ignite = startGrid(1, verFormatted, this::processRemoteConfiguration, this::initNode); + + proxy = IgniteProcessProxy.ignite(ignite.name()); + } + + testClient(verFormatted); + } + finally { + stopAllGrids(); + + if (proxy != null) { + Process proc = proxy.getProcess().getProcess(); + + // We should wait until process exits, or it can affect next tests. + assertTrue(GridTestUtils.waitForCondition(() -> !proc.isAlive(), 5_000L)); + } + } + } + + /** + * Method to initiate server node (node can be local or remote). + * + * @param ignite Ignite. + */ + protected void initNode(Ignite ignite) { + // No-op. + } + + /** + * Method to change remote server node configuration. + * + * @param cfg Ignite configuraion. 
+ */ + protected void processRemoteConfiguration(IgniteConfiguration cfg) { + cfg.setLocalHost("127.0.0.1"); + cfg.setDiscoverySpi(new TcpDiscoverySpi().setIpFinder(new TcpDiscoveryVmIpFinder(true))); + } + + /** + * Method to test client operations (client can be local or remote). + * + * @param clientVer Client version. + * @param serverVer Server version. + */ + protected abstract void testClient(IgniteProductVersion clientVer, IgniteProductVersion serverVer) + throws Exception; + + /** + * @param serverVer Server version. + */ + private void testClient(String serverVer) { + try { + IgniteProductVersion clientVer = IgniteVersionUtils.VER; + + X.println(">>> Started client test [clientVer=" + clientVer + ", serverVer=" + serverVer + ']'); + + testClient(clientVer, IgniteProductVersion.fromString(serverVer)); + } + catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * Runner class to test client operations from remote JVM process with old Ignite version + * as dependencies in class path. 
+ */ + public static class RemoteClientRunner { + /** */ + public static void main(String[] args) throws Exception { + X.println(GridJavaProcess.PID_MSG_PREFIX + U.jvmPid()); + X.println("Start client connection with Ignite version: " + IgniteVersionUtils.VER); + + if (args.length < 2) + throw new IllegalArgumentException("At least 2 arguments expected: [version] [path/to/closure/file]"); + + String ver = args[0]; + String fileName = args[1]; + + IgniteInClosure clo = IgniteCompatibilityNodeRunner.readClosureFromFileAndDelete(fileName); + + clo.apply(ver); + + X.println("Success"); + } + } +} diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/JavaThinCompatibilityTest.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/JavaThinCompatibilityTest.java new file mode 100644 index 0000000000000..0b870bf7e0dc9 --- /dev/null +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/JavaThinCompatibilityTest.java @@ -0,0 +1,415 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.compatibility.clients; + +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import javax.cache.Cache; +import javax.cache.expiry.CreatedExpiryPolicy; +import javax.cache.expiry.Duration; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteBinary; +import org.apache.ignite.IgniteException; +import org.apache.ignite.Ignition; +import org.apache.ignite.binary.BinaryObject; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.QueryEntity; +import org.apache.ignite.cache.query.ScanQuery; +import org.apache.ignite.client.ClientCache; +import org.apache.ignite.client.ClientCacheConfiguration; +import org.apache.ignite.client.ClientTransaction; +import org.apache.ignite.client.IgniteClient; +import org.apache.ignite.client.Person; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.compute.ComputeJob; +import org.apache.ignite.compute.ComputeJobResult; +import org.apache.ignite.compute.ComputeTaskAdapter; +import org.apache.ignite.configuration.ClientConfiguration; +import org.apache.ignite.configuration.ClientConnectorConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.configuration.ThinClientConfiguration; +import org.apache.ignite.internal.processors.platform.cache.expiry.PlatformExpiryPolicy; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.lang.IgniteProductVersion; +import org.apache.ignite.services.Service; +import org.apache.ignite.services.ServiceContext; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; +import org.junit.Assume; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +/** + * Tests java thin client compatibility. 
This test only checks that thin client can perform basic operations with + * different client and server versions. Whole API not checked, corner cases not checked. + */ +@RunWith(Parameterized.class) +public class JavaThinCompatibilityTest extends AbstractClientCompatibilityTest { + /** Thin client endpoint. */ + private static final String ADDR = "127.0.0.1:10800"; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + return super.getConfiguration(igniteInstanceName).setClientConnectorConfiguration( + new ClientConnectorConfiguration().setThinClientConfiguration( + new ThinClientConfiguration().setMaxActiveComputeTasksPerConnection(1) + ) + ); + } + + /** {@inheritDoc} */ + @Override protected void initNode(Ignite ignite) { + ignite.services().deployNodeSingleton("test_service", new EchoService()); + + super.initNode(ignite); + } + + /** {@inheritDoc} */ + @Override protected void processRemoteConfiguration(IgniteConfiguration cfg) { + super.processRemoteConfiguration(cfg); + + if (ver.compareTo(VER_2_9_0) >= 0) { + cfg.setClientConnectorConfiguration(new ClientConnectorConfiguration() + .setThinClientConfiguration(new ThinClientConfiguration() + .setMaxActiveComputeTasksPerConnection(1))); + } + } + + /** {@inheritDoc} */ + @Override public void testOldClientToCurrentServer() throws Exception { + Assume.assumeTrue("Java thin client exists only from 2.5.0 release", ver.compareTo(VER_2_5_0) >= 0); + + super.testOldClientToCurrentServer(); + } + + /** */ + private void testCacheConfiguration( + boolean checkFieldsPrecessionAndScale, + boolean checkExpiryPlc + ) throws Exception { + X.println(">>>> Testing cache configuration"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR))) { + String cacheName = "testCacheConfiguration"; + + ClientCacheConfiguration ccfg = new ClientCacheConfiguration(); + ccfg.setName(cacheName); + ccfg.setBackups(3); + 
ccfg.setGroupName("cache"); + ccfg.setCacheMode(CacheMode.PARTITIONED); + + QueryEntity qryEntity = new QueryEntity(int.class.getName(), "Entity") + .setTableName("ENTITY") + .setFields(new LinkedHashMap<>( + F.asMap("id", Integer.class.getName(), "rate", Double.class.getName()))); + + if (checkFieldsPrecessionAndScale) { + qryEntity.setFieldsPrecision(F.asMap("rate", 5)); + qryEntity.setFieldsScale(F.asMap("rate", 2)); + } + + ccfg.setQueryEntities(qryEntity); + + if (checkExpiryPlc) + ccfg.setExpiryPolicy(new PlatformExpiryPolicy(10, 20, 30)); + + client.createCache(ccfg); + + ClientCacheConfiguration ccfg1 = client.cache(cacheName).getConfiguration(); + + assertEquals(ccfg.getName(), ccfg1.getName()); + assertEquals(ccfg.getBackups(), ccfg1.getBackups()); + assertEquals(ccfg.getGroupName(), ccfg1.getGroupName()); + assertEquals(ccfg.getCacheMode(), ccfg1.getCacheMode()); + assertEquals(ccfg.getQueryEntities().length, ccfg1.getQueryEntities().length); + assertEquals(ccfg.getQueryEntities()[0].getTableName(), ccfg1.getQueryEntities()[0].getTableName()); + assertEquals(ccfg.getQueryEntities()[0].getFields(), ccfg1.getQueryEntities()[0].getFields()); + + if (checkFieldsPrecessionAndScale) { + assertEquals(ccfg.getQueryEntities()[0].getFieldsPrecision(), + ccfg1.getQueryEntities()[0].getFieldsPrecision()); + assertEquals(ccfg.getQueryEntities()[0].getFieldsScale(), ccfg1.getQueryEntities()[0].getFieldsScale()); + } + + if (checkExpiryPlc) { + assertEquals(ccfg.getExpiryPolicy().getExpiryForCreation(), + ccfg1.getExpiryPolicy().getExpiryForCreation()); + assertEquals(ccfg.getExpiryPolicy().getExpiryForAccess(), ccfg1.getExpiryPolicy().getExpiryForAccess()); + assertEquals(ccfg.getExpiryPolicy().getExpiryForUpdate(), ccfg1.getExpiryPolicy().getExpiryForUpdate()); + } + } + } + + /** */ + private void testCacheApi() throws Exception { + X.println(">>>> Testing cache API"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR))) 
{ + ClientCache cache = client.getOrCreateCache("testCacheApi"); + + cache.put(1, 1); + + assertEquals(1, cache.get(1)); + + Person person = new Person(2, "name"); + + cache.put(2, person); + + assertEquals(person, cache.get(2)); + } + } + + /** */ + private void testAuthentication() throws Exception { + X.println(">>>> Testing authentication"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR) + .setUserName("user").setUserPassword("password"))) { + assertNotNull(client); + } + + } + + /** */ + private void testTransactions() throws Exception { + X.println(">>>> Testing transactions"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR))) { + ClientCache cache = client.getOrCreateCache(new ClientCacheConfiguration() + .setName("testTransactions") + .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL) + ); + + try (ClientTransaction tx = client.transactions().txStart()) { + cache.put(1, 1); + cache.put(2, 2); + + tx.commit(); + } + + assertEquals(1, cache.get(1)); + assertEquals(2, cache.get(2)); + } + } + + /** */ + private void testBinary() throws Exception { + X.println(">>>> Testing binary"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR))) { + IgniteBinary binary = client.binary(); + + BinaryObject val = binary.builder("Person") + .setField("id", 1, int.class) + .setField("name", "Joe", String.class) + .build(); + + ClientCache cache = client.getOrCreateCache("testBinary").withKeepBinary(); + + cache.put(0, val); + + BinaryObject cachedVal = cache.get(0); + + assertEquals(val, cachedVal); + } + } + + /** */ + private void testQueries() throws Exception { + X.println(">>>> Testing queries"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR))) { + ClientCache cache = client.getOrCreateCache("testQueries"); + + cache.put(1, 1); + + List> res = cache.query(new 
ScanQuery<>()).getAll(); + + assertEquals(1, res.size()); + assertEquals(1, res.get(0).getKey()); + assertEquals(1, res.get(0).getValue()); + } + } + + /** */ + private void testExpiryPolicy() throws Exception { + X.println(">>>> Testing expiry policy"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR))) { + ClientCache cache = client.getOrCreateCache("testExpiryPolicy"); + cache = cache.withExpirePolicy(new CreatedExpiryPolicy(new Duration(TimeUnit.MILLISECONDS, 1))); + + cache.put(1, 1); + + doSleep(10); + + assertFalse(cache.containsKey(1)); + } + } + + /** */ + private void testUserAttributes() throws Exception { + X.println(">>>> Testing user attributes"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR) + .setUserAttributes(F.asMap("attr", "val")))) { + assertNotNull(client); + } + } + + /** */ + private void testClusterAPI() throws Exception { + X.println(">>>> Testing cluster API"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR))) { + assertTrue(client.cluster().state().active()); + } + } + + /** */ + private void testClusterGroups() throws Exception { + X.println(">>>> Testing cluster groups"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR))) { + assertEquals(1, client.cluster().forServers().nodes().size()); + } + } + + /** */ + private void testCompute() throws Exception { + X.println(">>>> Testing compute"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR))) { + assertEquals((Integer)1, client.compute().execute(EchoTask.class.getName(), 1)); + } + } + + /** */ + private void testServices() throws Exception { + X.println(">>>> Testing services"); + + try (IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(ADDR))) { + assertEquals(1, client.services().serviceProxy("test_service", 
EchoServiceInterface.class) + .echo(1)); + } + } + + /** {@inheritDoc} */ + @Override protected void testClient(IgniteProductVersion clientVer, IgniteProductVersion serverVer) throws Exception { + IgniteProductVersion minVer = clientVer.compareTo(serverVer) < 0 ? clientVer : serverVer; + + testCacheConfiguration( + minVer.compareTo(VER_2_7_0) >= 0, + minVer.compareTo(VER_2_8_0) >= 0 + ); + + testCacheApi(); + + testBinary(); + + testQueries(); + + if (minVer.compareTo(VER_2_5_0) >= 0) + testAuthentication(); + + if (minVer.compareTo(VER_2_8_0) >= 0) { + testTransactions(); + testExpiryPolicy(); + } + + if (clientVer.compareTo(VER_2_9_0) >= 0 && serverVer.compareTo(VER_2_8_0) >= 0) + testClusterAPI(); + + if (minVer.compareTo(VER_2_9_0) >= 0) { + testUserAttributes(); + testClusterGroups(); + testCompute(); + testServices(); + } + } + + /** */ + public static interface EchoServiceInterface { + /** */ + public int echo(int val); + } + + /** */ + public static class EchoService implements Service, EchoServiceInterface { + /** {@inheritDoc} */ + @Override public void cancel(ServiceContext ctx) { + // No-op. + } + + /** {@inheritDoc} */ + @Override public void init(ServiceContext ctx) { + // No-op. + } + + /** {@inheritDoc} */ + @Override public void execute(ServiceContext ctx) { + // No-op. + } + + /** {@inheritDoc} */ + @Override public int echo(int val) { + return val; + } + } + + /** */ + public static class EchoJob implements ComputeJob { + /** Value. */ + private final Integer val; + + /** + * @param val Value. + */ + public EchoJob(Integer val) { + this.val = val; + } + + /** {@inheritDoc} */ + @Override public void cancel() { + // No-op. 
+ } + + /** {@inheritDoc} */ + @Override public Object execute() throws IgniteException { + return val; + } + } + + /** */ + public static class EchoTask extends ComputeTaskAdapter { + /** {@inheritDoc} */ + @Override public @NotNull Map map(List subgrid, + @Nullable Integer arg) throws IgniteException { + return F.asMap(new EchoJob(arg), subgrid.get(0)); + } + + /** {@inheritDoc} */ + @Nullable @Override public Integer reduce(List results) throws IgniteException { + return results.get(0).getData(); + } + } +} diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/JdbcThinCompatibilityTest.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/JdbcThinCompatibilityTest.java new file mode 100644 index 0000000000000..857df9f8a91e7 --- /dev/null +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/JdbcThinCompatibilityTest.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.compatibility.clients; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.Statement; +import org.apache.ignite.Ignite; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.lang.IgniteProductVersion; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +/** + * Tests JDBC thin compatibility. + */ +@RunWith(Parameterized.class) +public class JdbcThinCompatibilityTest extends AbstractClientCompatibilityTest { + /** Table name. */ + private static final String TABLE_NAME = "test_table"; + + /** URL. */ + private static final String URL = "jdbc:ignite:thin://127.0.0.1"; + + /** Rows count. */ + private static final int ROWS_CNT = 10; + + /** Execute sql. */ + private static void executeSql(IgniteEx igniteEx, String sql) { + igniteEx.context().query().querySqlFields(new SqlFieldsQuery(sql), false).getAll(); + } + + /** {@inheritDoc} */ + @Override protected void initNode(Ignite ignite) { + IgniteEx igniteEx = (IgniteEx)ignite; + + executeSql(igniteEx, "CREATE TABLE " + TABLE_NAME + " (id int primary key, name varchar)"); + + for (int i = 0; i < ROWS_CNT; i++) + executeSql(igniteEx, "INSERT INTO " + TABLE_NAME + " (id, name) VALUES(" + i + ", 'name" + i + "')"); + } + + /** {@inheritDoc} */ + @Override protected void testClient(IgniteProductVersion clientVer, IgniteProductVersion serverVer) throws Exception { + try (Connection conn = DriverManager.getConnection(URL); Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("SELECT id, name FROM " + TABLE_NAME + " ORDER BY id"); + + assertNotNull(rs); + + int cnt = 0; + + while (rs.next()) { + int id = rs.getInt("id"); + String name = rs.getString("name"); + + assertEquals(cnt, id); + assertEquals("name" + cnt, name); + + cnt++; + } + + assertEquals(ROWS_CNT, cnt); + } + } +} diff --git 
a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/jdbc/package-info.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/package-info.java similarity index 87% rename from modules/compatibility/src/test/java/org/apache/ignite/compatibility/jdbc/package-info.java rename to modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/package-info.java index 50d961b28f5fd..08c36dcb06cc8 100644 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/jdbc/package-info.java +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/clients/package-info.java @@ -16,7 +16,7 @@ */ /** - * Contains compatibility tests related to JDBC. + * Contains compatibility tests related to different clients. */ -package org.apache.ignite.compatibility.jdbc; +package org.apache.ignite.compatibility.clients; diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/jdbc/JdbcThinCompatibilityTest.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/jdbc/JdbcThinCompatibilityTest.java deleted file mode 100644 index 50254e6e6ea21..0000000000000 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/jdbc/JdbcThinCompatibilityTest.java +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ignite.compatibility.jdbc; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.ResultSet; -import java.sql.Statement; -import java.util.Arrays; -import java.util.Collection; -import org.apache.ignite.Ignite; -import org.apache.ignite.cache.query.SqlFieldsQuery; -import org.apache.ignite.compatibility.testframework.junits.Dependency; -import org.apache.ignite.compatibility.testframework.junits.IgniteCompatibilityAbstractTest; -import org.apache.ignite.internal.IgniteEx; -import org.apache.ignite.internal.IgniteVersionUtils; -import org.apache.ignite.internal.util.GridJavaProcess; -import org.apache.ignite.internal.util.typedef.X; -import org.apache.ignite.internal.util.typedef.internal.U; -import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; -import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; -import org.apache.ignite.testframework.GridTestUtils; -import org.apache.ignite.testframework.junits.multijvm.IgniteProcessProxy; -import org.jetbrains.annotations.NotNull; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -/** - * Tests that current client version can connect to the server with specified version and - * specified client version can connect to the current server version. - */ -@RunWith(Parameterized.class) -public class JdbcThinCompatibilityTest extends IgniteCompatibilityAbstractTest { - /** Table name. */ - private static final String TABLE_NAME = "test_table"; - - /** URL. 
*/ - private static final String URL = "jdbc:ignite:thin://127.0.0.1"; - - /** Rows count. */ - private static final int ROWS_CNT = 10; - - /** Parameters. */ - @Parameterized.Parameters(name = "Version {0}") - public static Iterable versions() { - return Arrays.asList( - new String[] {"2.7.0"}, - new String[] {"2.7.5"}, - new String[] {"2.7.6"}, - new String[] {"2.8.0"}, - new String[] {"2.8.1"} - ); - } - - /** Old Ignite version. */ - @Parameterized.Parameter - public String ver; - - /** {@inheritDoc} */ - @Override protected @NotNull Collection getDependencies(String igniteVer) { - Collection dependencies = super.getDependencies(igniteVer); - - dependencies.add(new Dependency("indexing", "ignite-indexing", false)); - - return dependencies; - } - - /** - * @throws Exception If failed. - */ - @Test - public void testOldClientToCurrentServer() throws Exception { - try (Ignite ignite = startGrid(0)) { - initTable(ignite); - - GridJavaProcess proc = GridJavaProcess.exec( - JdbcThinQueryRunner.class.getName(), - null, - log, - log::info, - null, - null, - getProcessProxyJvmArgs(ver), - null - ); - - try { - GridTestUtils.waitForCondition(() -> !proc.getProcess().isAlive(), 5_000L); - - assertEquals(0, proc.getProcess().exitValue()); - } - finally { - if (proc.getProcess().isAlive()) - proc.kill(); - } - } - } - - /** - * @throws Exception If failed. - */ - @Test - public void testCurrentClientToOldServer() throws Exception { - IgniteProcessProxy proxy = null; - - try { - Ignite ignite = startGrid(1, ver, - cfg -> cfg - .setLocalHost("127.0.0.1") - .setDiscoverySpi(new TcpDiscoverySpi().setIpFinder(new TcpDiscoveryVmIpFinder(true))), - JdbcThinCompatibilityTest::initTable); - - proxy = IgniteProcessProxy.ignite(ignite.name()); - - testJdbcQuery(); - } - finally { - stopAllGrids(); - - if (proxy != null) { - Process proc = proxy.getProcess().getProcess(); - - // We should wait until process exits, or it can affect next tests. 
- GridTestUtils.waitForCondition(() -> !proc.isAlive(), 5_000L); - } - } - } - - /** Execute sql. */ - private static void executeSql(IgniteEx igniteEx, String sql) { - igniteEx.context().query().querySqlFields(new SqlFieldsQuery(sql), false).getAll(); - } - - /** */ - private static void initTable(Ignite ignite) { - IgniteEx igniteEx = (IgniteEx)ignite; - - executeSql(igniteEx, "CREATE TABLE " + TABLE_NAME + " (id int primary key, name varchar)"); - - for (int i = 0; i < ROWS_CNT; i++) - executeSql(igniteEx, "INSERT INTO " + TABLE_NAME + " (id, name) VALUES(" + i + ", 'name" + i + "')"); - } - - /** */ - private static void testJdbcQuery() throws Exception { - try (Connection conn = DriverManager.getConnection(URL); Statement stmt = conn.createStatement()) { - ResultSet rs = stmt.executeQuery("SELECT id, name FROM " + TABLE_NAME + " ORDER BY id"); - - assertNotNull(rs); - - int cnt = 0; - - while (rs.next()) { - int id = rs.getInt("id"); - String name = rs.getString("name"); - - assertEquals(cnt, id); - assertEquals("name" + cnt, name); - - cnt++; - } - - assertEquals(ROWS_CNT, cnt); - } - } - - /** - * Runner class to test query from remote JVM process with old Ignite version as dependencies in class path. 
- */ - public static class JdbcThinQueryRunner { - /** */ - public static void main(String[] args) throws Exception { - X.println(GridJavaProcess.PID_MSG_PREFIX + U.jvmPid()); - X.println("Start JDBC connection with Ignite version: " + IgniteVersionUtils.VER); - - testJdbcQuery(); - - X.println("Success"); - } - } -} diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/junits/IgniteCompatibilityAbstractTest.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/junits/IgniteCompatibilityAbstractTest.java index e3f693af1c561..71340d6793fda 100644 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/junits/IgniteCompatibilityAbstractTest.java +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/junits/IgniteCompatibilityAbstractTest.java @@ -45,6 +45,7 @@ /** * Super class for all compatibility tests. */ +@SuppressWarnings("TransientFieldInNonSerializableClass") public abstract class IgniteCompatibilityAbstractTest extends GridCommonAbstractTest { /** */ private static final ClassLoader CLASS_LOADER = IgniteCompatibilityAbstractTest.class.getClassLoader(); @@ -56,10 +57,10 @@ public abstract class IgniteCompatibilityAbstractTest extends GridCommonAbstract protected static final int NODE_JOIN_TIMEOUT = 30_000; /** Local JVM Ignite node. */ - protected Ignite locJvmInstance = null; + protected transient Ignite locJvmInstance = null; /** Remote JVM Ignite instance. 
*/ - protected Ignite rmJvmInstance = null; + protected transient Ignite rmJvmInstance = null; /** {@inheritDoc} */ @Override protected boolean isMultiJvm() { diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testsuites/IgniteCompatibilityBasicTestSuite.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testsuites/IgniteCompatibilityBasicTestSuite.java index 4da7401aba74b..45821689cb998 100644 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testsuites/IgniteCompatibilityBasicTestSuite.java +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testsuites/IgniteCompatibilityBasicTestSuite.java @@ -18,7 +18,8 @@ package org.apache.ignite.compatibility.testsuites; import org.apache.ignite.compatibility.cache.LocalCacheTest; -import org.apache.ignite.compatibility.jdbc.JdbcThinCompatibilityTest; +import org.apache.ignite.compatibility.clients.JavaThinCompatibilityTest; +import org.apache.ignite.compatibility.clients.JdbcThinCompatibilityTest; import org.apache.ignite.compatibility.persistence.FoldersReuseCompatibilityTest; import org.apache.ignite.compatibility.persistence.MetaStorageCompatibilityTest; import org.apache.ignite.compatibility.persistence.MigratingToWalV2SerializerWithCompactionTest; @@ -38,7 +39,8 @@ MetaStorageCompatibilityTest.class, LocalCacheTest.class, MoveBinaryMetadataCompatibility.class, - JdbcThinCompatibilityTest.class + JdbcThinCompatibilityTest.class, + JavaThinCompatibilityTest.class }) public class IgniteCompatibilityBasicTestSuite { } From 0abf6fafe77ec447f2bfea1d7583a2b85b2ab192 Mon Sep 17 00:00:00 2001 From: ktkalenko Date: Fri, 30 Oct 2020 09:51:55 +0300 Subject: [PATCH 008/110] IGNITE-13613 API to get full WAL size and implementation to track WAL segments rollover and compression processes - Fixes #8388. 
Signed-off-by: Sergey Chugunov --- .../dto/IgniteDataTransferObject.java | 6 + .../wal/IgniteWriteAheadLogManager.java | 15 + .../cache/GridCacheSharedContext.java | 2 +- .../cache/persistence/wal/FileDescriptor.java | 26 +- .../wal/FileWriteAheadLogManager.java | 332 +++++++++++------- .../db/wal/IgniteLocalWalSizeTest.java | 229 ++++++++++++ .../persistence/pagemem/NoOpWALManager.java | 10 + .../testsuites/IgnitePdsTestSuite2.java | 3 + 8 files changed, 489 insertions(+), 134 deletions(-) create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteLocalWalSizeTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/dto/IgniteDataTransferObject.java b/modules/core/src/main/java/org/apache/ignite/internal/dto/IgniteDataTransferObject.java index 4d735d0b2514d..279586d0a0257 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/dto/IgniteDataTransferObject.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/dto/IgniteDataTransferObject.java @@ -59,6 +59,12 @@ public abstract class IgniteDataTransferObject implements Externalizable { /** Version 7. */ protected static final byte V7 = 7; + /** Version 8. */ + protected static final byte V8 = 8; + + /** Version 9. */ + protected static final byte V9 = 9; + /** * @param col Source collection. * @param Collection type. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java index cc183bfda3f88..cb4fc306cdb49 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java @@ -214,4 +214,19 @@ public WALIterator replay( * @param grpId Group id. 
*/ public boolean disabled(int grpId); + + /** + * Getting local WAL segment size. + * + * @param idx Absolute segment index. + * @return Segment size, {@code 0} if size is unknown. + */ + long segmentSize(long idx); + + /** + * Get last written pointer. + * + * @return Last written pointer. + */ + WALPointer lastWritePointer(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSharedContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSharedContext.java index c1981c664e189..f40d4d7b41c9a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSharedContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSharedContext.java @@ -760,7 +760,7 @@ public IgniteSnapshotManager snapshotMgr() { /** * @return Write ahead log manager. */ - public IgniteWriteAheadLogManager wal() { + @Nullable public IgniteWriteAheadLogManager wal() { return walMgr; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileDescriptor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileDescriptor.java index f2653765cb1e5..2f088d19f6979 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileDescriptor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileDescriptor.java @@ -25,14 +25,12 @@ import org.apache.ignite.internal.processors.cache.persistence.file.UnzipFileIO; import org.apache.ignite.internal.processors.cache.persistence.wal.io.SegmentIO; import org.apache.ignite.internal.util.typedef.internal.SB; -import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; /** * WAL file descriptor. 
*/ public class FileDescriptor implements Comparable, AbstractWalRecordsIterator.AbstractFileDescriptor { - /** file extension of WAL segment. */ private static final String WAL_SEGMENT_FILE_EXT = ".wal"; @@ -50,15 +48,17 @@ public class FileDescriptor implements Comparable, AbstractWalRe * * @param file WAL segment file. */ - public FileDescriptor(@NotNull File file) { + public FileDescriptor(File file) { this(file, null); } /** + * Creates file descriptor. + * * @param file WAL segment file. * @param idx Absolute WAL segment file index. For null value index is restored from file name. */ - public FileDescriptor(@NotNull File file, @Nullable Long idx) { + public FileDescriptor(File file, @Nullable Long idx) { this.file = file; String fileName = file.getName(); @@ -69,13 +69,15 @@ public FileDescriptor(@NotNull File file, @Nullable Long idx) { } /** - * @param segment Segment index. + * Getting segment file name. + * + * @param idx Segment index. * @return Segment file name. */ - public static String fileName(long segment) { + public static String fileName(long idx) { SB b = new SB(); - String segmentStr = Long.toString(segment); + String segmentStr = Long.toString(idx); for (int i = segmentStr.length(); i < WAL_SEGMENT_FILE_NAME_LENGTH; i++) b.a('0'); @@ -86,7 +88,7 @@ public static String fileName(long segment) { } /** {@inheritDoc} */ - @Override public int compareTo(@NotNull FileDescriptor o) { + @Override public int compareTo(FileDescriptor o) { return Long.compare(idx, o.idx); } @@ -109,14 +111,18 @@ public static String fileName(long segment) { } /** - * @return Absolute WAL segment file index + * Return absolute WAL segment file index. + * + * @return Absolute WAL segment file index. */ public long getIdx() { return idx; } /** - * @return absolute pathname string of this file descriptor pathname. + * Return absolute pathname string of this file descriptor pathname. + * + * @return Absolute pathname string of this file descriptor pathname. 
*/ public String getAbsolutePath() { return file.getAbsolutePath(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java index 70da8e9a95a3d..a92168b9525d6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java @@ -44,6 +44,7 @@ import java.util.Objects; import java.util.Set; import java.util.TreeMap; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.PriorityBlockingQueue; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicLongArray; @@ -86,7 +87,6 @@ import org.apache.ignite.internal.processors.cache.persistence.StorageException; import org.apache.ignite.internal.processors.cache.persistence.file.FileIO; import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory; -import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; import org.apache.ignite.internal.processors.cache.persistence.file.RandomAccessFileIOFactory; import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFolderSettings; import org.apache.ignite.internal.processors.cache.persistence.wal.aware.SegmentAware; @@ -106,7 +106,6 @@ import org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordSerializerFactory; import org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordSerializerFactoryImpl; import org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordV1Serializer; -import org.apache.ignite.internal.processors.compress.CompressionProcessor; import org.apache.ignite.internal.processors.failure.FailureProcessor; import 
org.apache.ignite.internal.processors.timeout.GridTimeoutObject; import org.apache.ignite.internal.processors.timeout.GridTimeoutProcessor; @@ -127,7 +126,6 @@ import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.thread.IgniteThread; -import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import static java.nio.file.StandardOpenOption.CREATE; @@ -144,11 +142,14 @@ import static org.apache.ignite.failure.FailureType.CRITICAL_ERROR; import static org.apache.ignite.failure.FailureType.SYSTEM_WORKER_TERMINATION; import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.TMP_SUFFIX; +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.ZIP_SUFFIX; import static org.apache.ignite.internal.processors.cache.persistence.wal.FileDescriptor.fileName; import static org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordSerializerFactory.LATEST_SERIALIZER_VERSION; import static org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordV1Serializer.HEADER_RECORD_SIZE; import static org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordV1Serializer.readPosition; import static org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordV1Serializer.readSegmentHeader; +import static org.apache.ignite.internal.processors.compress.CompressionProcessor.checkCompressionLevelBounds; +import static org.apache.ignite.internal.processors.compress.CompressionProcessor.getDefaultCompressionLevel; /** * File WAL manager. @@ -262,7 +263,7 @@ public class FileWriteAheadLogManager extends GridCacheSharedManagerAdapter impl /** */ private final boolean alwaysWriteFullPages; - /** WAL segment size in bytes. . This is maximum value, actual segments may be shorter. */ + /** WAL segment size in bytes. 
This is maximum value, actual segments may be shorter. */ private final long maxWalSegmentSize; /** @@ -295,10 +296,10 @@ public class FileWriteAheadLogManager extends GridCacheSharedManagerAdapter impl /** Persistence metrics tracker. */ private DataStorageMetricsImpl metrics; - /** */ + /** WAL work directory (including consistent ID as subfolder). */ private File walWorkDir; - /** WAL archive directory (including consistent ID as subfolder) */ + /** WAL archive directory (including consistent ID as subfolder). */ private File walArchiveDir; /** Serializer of latest version, used to read header record and for write records */ @@ -317,7 +318,7 @@ public class FileWriteAheadLogManager extends GridCacheSharedManagerAdapter impl /** Holder of actual information of latest manipulation on WAL segments. */ private volatile SegmentAware segmentAware; - /** Updater for {@link #currHnd}, used for verify there are no concurrent update for current log segment handle */ + /** Updater for {@link #currHnd}, used for verify there are no concurrent update for current log segment handle. */ private static final AtomicReferenceFieldUpdater CURR_HND_UPD = AtomicReferenceFieldUpdater.newUpdater(FileWriteAheadLogManager.class, FileWriteHandle.class, "currHnd"); @@ -328,10 +329,10 @@ public class FileWriteAheadLogManager extends GridCacheSharedManagerAdapter impl @Nullable private FileArchiver archiver; /** Compressor. */ - private FileCompressor compressor; + @Nullable private FileCompressor compressor; /** Decompressor. */ - private FileDecompressor decompressor; + @Nullable private FileDecompressor decompressor; /** Current log segment handle. */ private volatile FileWriteHandle currHnd; @@ -384,7 +385,7 @@ public class FileWriteAheadLogManager extends GridCacheSharedManagerAdapter impl private final FileHandleManagerFactory fileHandleManagerFactory; /** Switch segment record offset. 
*/ - private final AtomicLongArray switchSegmentRecordOffset; + @Nullable private final AtomicLongArray switchSegmentRecordOffset; /** Page snapshot records compression algorithm. */ private DiskPageCompression pageCompression; @@ -392,10 +393,17 @@ public class FileWriteAheadLogManager extends GridCacheSharedManagerAdapter impl /** Page snapshot records compression level. */ private int pageCompressionLevel; + /** + * Local segment sizes: absolute segment index -> size in bytes. + * For segments from {@link #walWorkDir} and {@link #walArchiveDir}. + * If there is a raw and compressed segment, compressed size is getting. + */ + private final Map segmentSize = new ConcurrentHashMap<>(); + /** * @param ctx Kernal context. */ - public FileWriteAheadLogManager(@NotNull final GridKernalContext ctx) { + public FileWriteAheadLogManager(final GridKernalContext ctx) { igCfg = ctx.config(); DataStorageConfiguration dsCfg = igCfg.getDataStorageConfiguration(); @@ -467,8 +475,9 @@ public void setFileIOFactory(FileIOFactory ioFactory) { checkOrPrepareFiles(); - if (metrics != null) + if (metrics != null) { metrics.setWalSizeProvider(new CO() { + /** {@inheritDoc} */ @Override public Long apply() { long size = 0; @@ -481,6 +490,7 @@ public void setFileIOFactory(FileIOFactory ioFactory) { return size; } }); + } segmentAware = new SegmentAware(dsCfg.getWalSegments(), dsCfg.isWalCompactionEnabled()); @@ -520,8 +530,8 @@ public void setFileIOFactory(FileIOFactory ioFactory) { cctx.kernalContext().compress().checkPageCompressionSupported(); pageCompressionLevel = dsCfg.getWalPageCompressionLevel() != null ? 
- CompressionProcessor.checkCompressionLevelBounds(dsCfg.getWalPageCompressionLevel(), pageCompression) : - CompressionProcessor.getDefaultCompressionLevel(pageCompression); + checkCompressionLevelBounds(dsCfg.getWalPageCompressionLevel(), pageCompression) : + getDefaultCompressionLevel(pageCompression); } } } @@ -585,10 +595,10 @@ public Collection getAndReserveWalFiles(WALPointer low, WALPointer high) t List res = new ArrayList<>(); for (long i = low.index(); i < high.index(); i++) { - String segmentName = FileDescriptor.fileName(i); + String segmentName = fileName(i); File file = new File(walArchiveDir, segmentName); - File fileZip = new File(walArchiveDir, segmentName + FilePageStoreManager.ZIP_SUFFIX); + File fileZip = new File(walArchiveDir, segmentName + ZIP_SUFFIX); if (file.exists()) res.add(file); @@ -640,7 +650,7 @@ private void checkWalConfiguration() throws IgniteCheckedException { fileHandleManager.onDeactivate(); } catch (Exception e) { - U.error(log, "Failed to gracefully close WAL segment: " + this.currHnd, e); + U.error(log, "Failed to gracefully close WAL segment: " + currHnd, e); } segmentAware.interrupt(); @@ -691,13 +701,12 @@ private void checkWalConfiguration() throws IgniteCheckedException { /** {@inheritDoc} */ @Override public void resumeLogging(WALPointer filePtr) throws IgniteCheckedException { - if (log.isDebugEnabled()) + if (log.isDebugEnabled()) { log.debug("File write ahead log manager resuming logging [nodeId=" + cctx.localNodeId() + " topVer=" + cctx.discovery().topologyVersionEx() + " ]"); + } - /* - walDisableContext is started after FileWriteAheadLogManager, so we obtain actual walDisableContext ref here. - */ + // walDisableContext is started after FileWriteAheadLogManager, so we obtain actual walDisableContext ref here. 
synchronized (this) { walDisableContext = cctx.walState().walDisableContext(); } @@ -711,17 +720,18 @@ private void checkWalConfiguration() throws IgniteCheckedException { fileHandleManager.resumeLogging(); - currHnd = restoreWriteHandle(filePtr); + updateCurrentHandle(restoreWriteHandle(filePtr), null); // For new handle write serializer version to it. if (filePtr == null) currHnd.writeHeader(); if (currHnd.serializerVersion() != serializer.version()) { - if (log.isInfoEnabled()) + if (log.isInfoEnabled()) { log.info("Record serializer version change detected, will start logging with a new WAL record " + "serializer to a new WAL segment [curFile=" + currHnd + ", newVer=" + serializer.version() + ", oldVer=" + currHnd.serializerVersion() + ']'); + } rollOver(currHnd, null); } @@ -1010,9 +1020,9 @@ private FileWriteHandle closeBufAndRollover( * @return {@code true} if has this index. */ private boolean hasIndex(long absIdx) { - String segmentName = FileDescriptor.fileName(absIdx); + String segmentName = fileName(absIdx); - String zipSegmentName = FileDescriptor.fileName(absIdx) + FilePageStoreManager.ZIP_SUFFIX; + String zipSegmentName = segmentName + ZIP_SUFFIX; boolean inArchive = new File(walArchiveDir, segmentName).exists() || new File(walArchiveDir, zipSegmentName).exists(); @@ -1053,12 +1063,16 @@ private boolean hasIndex(long absIdx) { // We need to leave at least one archived segment to correctly determine the archive index. if (desc.idx < high.index() && desc.idx < lastArchived) { - if (!desc.file.delete()) + if (!desc.file.delete()) { U.warn(log, "Failed to remove obsolete WAL segment (make sure the process has enough rights): " + desc.file.getAbsolutePath()); - else + } + else { deleted++; + segmentSize.remove(desc.idx()); + } + // Bump up the oldest archive segment index. 
if (segmentAware.lastTruncatedArchiveIdx() < desc.idx) segmentAware.lastTruncatedArchiveIdx(desc.idx); @@ -1174,11 +1188,11 @@ private long lastArchivedIndex() { * @param file File to read. * @param ioFactory IO factory. */ - private FileDescriptor readFileDescriptor(File file, FileIOFactory ioFactory) { + @Nullable private FileDescriptor readFileDescriptor(File file, FileIOFactory ioFactory) { FileDescriptor ds = new FileDescriptor(file); try (SegmentIO fileIO = ds.toIO(ioFactory)) { - // File may be empty when LOG_ONLY mode is enabled and mmap is disabled + // File may be empty when LOG_ONLY mode is enabled and mmap is disabled. if (fileIO.size() == 0) return null; @@ -1283,9 +1297,7 @@ private FileWriteHandle rollOver(FileWriteHandle cur, @Nullable WALRecord rec) t if (next.getSegmentId() - lashCheckpointFileIdx() >= maxSegCountWithoutCheckpoint) cctx.database().forceCheckpoint("too big size of WAL without checkpoint"); - boolean swapped = CURR_HND_UPD.compareAndSet(this, hnd, next); - - assert swapped : "Concurrent updates on rollover are not allowed"; + assert updateCurrentHandle(next, hnd) : "Concurrent updates on rollover are not allowed"; if (walAutoArchiveAfterInactivity > 0) lastRecordLoggedMs.set(0); @@ -1313,14 +1325,14 @@ private long lashCheckpointFileIdx() { * @return Initialized file write handle. * @throws StorageException If failed to initialize WAL write handle. */ - private FileWriteHandle restoreWriteHandle(WALPointer lastReadPtr) throws StorageException { + private FileWriteHandle restoreWriteHandle(@Nullable WALPointer lastReadPtr) throws StorageException { long absIdx = lastReadPtr == null ? 0 : lastReadPtr.index(); @Nullable FileArchiver archiver0 = archiver; long segNo = archiver0 == null ? absIdx : absIdx % dsCfg.getWalSegments(); - File curFile = new File(walWorkDir, FileDescriptor.fileName(segNo)); + File curFile = new File(walWorkDir, fileName(segNo)); int off = lastReadPtr == null ? 
0 : lastReadPtr.fileOffset(); int len = lastReadPtr == null ? 0 : lastReadPtr.length(); @@ -1348,9 +1360,10 @@ private FileWriteHandle restoreWriteHandle(WALPointer lastReadPtr) throws Storag RecordSerializer ser = new RecordSerializerFactoryImpl(cctx).createSerializer(serVer); - if (log.isInfoEnabled()) + if (log.isInfoEnabled()) { log.info("Resuming logging to WAL segment [file=" + curFile.getAbsolutePath() + ", offset=" + off + ", ver=" + serVer + ']'); + } FileWriteHandle hnd = fileHandleManager.initHandle(fileIO, off + len, ser); @@ -1359,6 +1372,24 @@ private FileWriteHandle restoreWriteHandle(WALPointer lastReadPtr) throws Storag else segmentAware.setLastArchivedAbsoluteIndex(absIdx - 1); + // Getting segment sizes. + F.asList(walArchiveDir.listFiles(WAL_SEGMENT_COMPACTED_OR_RAW_FILE_FILTER)).stream() + .map(FileDescriptor::new) + .forEach(fd -> { + if (fd.isCompressed()) + segmentSize.put(fd.idx(), fd.file().length()); + else + segmentSize.putIfAbsent(fd.idx(), fd.file().length()); + }); + + // If walArchiveDir != walWorkDir, then need to get size of all segments that were not in archive. + // For example, absIdx == 8, and there are 0-4 segments in archive, then we need to get sizes of 5-7 segments. + // Size of the 8th segment will be set in #resumeLogging. 
+ if (archiver0 != null) { + for (long i = absIdx - (absIdx % dsCfg.getWalSegments()); i < absIdx; i++) + segmentSize.putIfAbsent(i, maxWalSegmentSize); + } + return hnd; } catch (IgniteCheckedException | IOException e) { @@ -1467,25 +1498,24 @@ private void checkOrPrepareFiles() throws StorageException { if (!F.isEmpty(tmpFiles)) { for (File tmp : tmpFiles) { - boolean deleted = tmp.delete(); - - if (!deleted) + if (!tmp.delete()) { throw new StorageException("Failed to delete previously created temp file " + "(make sure Ignite process has enough rights): " + tmp.getAbsolutePath()); + } } } } File[] allFiles = walWorkDir.listFiles(WAL_SEGMENT_FILE_FILTER); - if (isArchiverEnabled()) - if (allFiles.length != 0 && allFiles.length > dsCfg.getWalSegments()) - throw new StorageException("Failed to initialize wal (work directory contains " + - "incorrect number of segments) [cur=" + allFiles.length + ", expected=" + dsCfg.getWalSegments() + ']'); + if (isArchiverEnabled() && !F.isEmpty(allFiles) && allFiles.length > dsCfg.getWalSegments()) { + throw new StorageException("Failed to initialize wal (work directory contains incorrect " + + "number of segments) [cur=" + allFiles.length + ", expected=" + dsCfg.getWalSegments() + ']'); + } // Allocate the first segment synchronously. All other segments will be allocated by archiver in background. 
- if (allFiles.length == 0) { - File first = new File(walWorkDir, FileDescriptor.fileName(0)); + if (F.isEmpty(allFiles)) { + File first = new File(walWorkDir, fileName(0)); createFile(first); } @@ -1575,7 +1605,7 @@ private File pollNextFile(long curIdx) throws StorageException, IgniteInterrupte if (archiver0 == null) { segmentAware.setLastArchivedAbsoluteIndex(curIdx); - return new File(walWorkDir, FileDescriptor.fileName(curIdx + 1)); + return new File(walWorkDir, fileName(curIdx + 1)); } long absNextIdxStartTime = System.nanoTime(); @@ -1598,7 +1628,7 @@ private File pollNextFile(long curIdx) throws StorageException, IgniteInterrupte long segmentIdx = absNextIdx % dsCfg.getWalSegments(); - return new File(walWorkDir, FileDescriptor.fileName(segmentIdx)); + return new File(walWorkDir, fileName(segmentIdx)); } /** @@ -1638,7 +1668,7 @@ private FileDescriptor[] walArchiveFiles() { /** * @return Sorted WAL files descriptors. */ - public static FileDescriptor[] scan(File[] allFiles) { + public static FileDescriptor[] scan(@Nullable File[] allFiles) { if (allFiles == null) return EMPTY_DESCRIPTORS; @@ -1701,7 +1731,10 @@ private class FileArchiver extends GridWorker { private int formatted; /** + * Constructor. * + * @param segmentAware Segment aware. + * @param log Logger. */ private FileArchiver(SegmentAware segmentAware, IgniteLogger log) throws IgniteCheckedException { super(cctx.igniteInstanceName(), "wal-file-archiver%" + cctx.igniteInstanceName(), log, @@ -1711,6 +1744,8 @@ private FileArchiver(SegmentAware segmentAware, IgniteLogger log) throws IgniteC } /** + * Initialization. + * * @param segmentAware Segment aware. * @throws IgniteCheckedException If initialization failed. 
*/ @@ -1737,13 +1772,13 @@ private IgniteBiTuple scanMinMaxArchiveIndices() throws IgniteChecke for (File file : walArchiveDir.listFiles(WAL_SEGMENT_COMPACTED_OR_RAW_FILE_FILTER)) { try { - long idx = Long.parseLong(file.getName().substring(0, 16)); + long idx = new FileDescriptor(file).idx(); FileDescriptor desc = readFileDescriptor(file, ioFactory); if (desc != null) { if (desc.idx() == idx) - archiveIndices.put(desc.idx(), desc); + archiveIndices.put(idx, desc); } else log.warning("Skip file, failed read file header " + file); @@ -1762,7 +1797,7 @@ private IgniteBiTuple scanMinMaxArchiveIndices() throws IgniteChecke // Try to find min and max if we have skipped range semgnets in archive. Find firs gap. for (Long idx : archiveIndices.descendingKeySet()) { - if (!archiveIndices.keySet().contains(idx - 1)) + if (!archiveIndices.containsKey(idx - 1)) return F.t(idx, max); } @@ -1964,41 +1999,41 @@ public void releaseWorkSegment(long absIdx) { } /** - * Moves WAL segment from work folder to archive folder. Temp file is used to do movement + * Moves WAL segment from work folder to archive folder. Temp file is used to do movement. * * @param absIdx Absolute index to archive. + * @throws StorageException If failed. 
*/ public SegmentArchiveResult archiveSegment(long absIdx) throws StorageException { long segIdx = absIdx % dsCfg.getWalSegments(); - File origFile = new File(walWorkDir, FileDescriptor.fileName(segIdx)); + File origFile = new File(walWorkDir, fileName(segIdx)); - String name = FileDescriptor.fileName(absIdx); + String name = fileName(absIdx); File dstTmpFile = new File(walArchiveDir, name + TMP_SUFFIX); File dstFile = new File(walArchiveDir, name); - if (log.isInfoEnabled()) + if (log.isInfoEnabled()) { log.info("Starting to copy WAL segment [absIdx=" + absIdx + ", segIdx=" + segIdx + ", origFile=" + origFile.getAbsolutePath() + ", dstFile=" + dstFile.getAbsolutePath() + ']'); + } try { Files.deleteIfExists(dstTmpFile.toPath()); boolean copied = false; - if (switchSegmentRecordOffset != null) { - long offs = switchSegmentRecordOffset.get((int)segIdx); + long offs = switchSegmentRecordOffset.get((int)segIdx); - if (offs > 0) { - switchSegmentRecordOffset.set((int)segIdx, 0); + if (offs > 0) { + switchSegmentRecordOffset.set((int)segIdx, 0); - if (offs < origFile.length()) { - GridFileUtils.copy(ioFactory, origFile, ioFactory, dstTmpFile, offs); + if (offs < origFile.length()) { + GridFileUtils.copy(ioFactory, origFile, ioFactory, dstTmpFile, offs); - copied = true; - } + copied = true; } } @@ -2012,6 +2047,8 @@ public SegmentArchiveResult archiveSegment(long absIdx) throws StorageException f0.force(); } } + + segmentSize.put(absIdx, dstFile.length()); } catch (IOException e) { throw new StorageException("Failed to archive WAL segment [" + @@ -2019,9 +2056,10 @@ public SegmentArchiveResult archiveSegment(long absIdx) throws StorageException ", dstFile=" + dstTmpFile.getAbsolutePath() + ']', e); } - if (log.isInfoEnabled()) + if (log.isInfoEnabled()) { log.info("Copied file [src=" + origFile.getAbsolutePath() + ", dst=" + dstFile.getAbsolutePath() + ']'); + } return new SegmentArchiveResult(absIdx, origFile, dstFile); } @@ -2078,7 +2116,11 @@ private class 
FileCompressor extends FileCompressorWorker { /** Workers queue. */ private final List workers = new ArrayList<>(); - /** */ + /** + * Constructor. + * + * @param log Logger. + */ FileCompressor(IgniteLogger log) { super(0, log); @@ -2215,12 +2257,13 @@ private void body0() { deleteObsoleteRawSegments(); - File tmpZip = new File(walArchiveDir, FileDescriptor.fileName(segIdx) - + FilePageStoreManager.ZIP_SUFFIX + TMP_SUFFIX); + String segmentFileName = fileName(segIdx); - File zip = new File(walArchiveDir, FileDescriptor.fileName(segIdx) + FilePageStoreManager.ZIP_SUFFIX); + File tmpZip = new File(walArchiveDir, segmentFileName + ZIP_SUFFIX + TMP_SUFFIX); - File raw = new File(walArchiveDir, FileDescriptor.fileName(segIdx)); + File zip = new File(walArchiveDir, segmentFileName + ZIP_SUFFIX); + + File raw = new File(walArchiveDir, segmentFileName); if (!Files.exists(raw.toPath())) throw new IgniteCheckedException("WAL archive segment is missing: " + raw); @@ -2235,13 +2278,8 @@ private void body0() { segmentAware.onSegmentCompressed(segIdx); - if (evt.isRecordable(EVT_WAL_SEGMENT_COMPACTED) && !cctx.kernalContext().recoveryMode()) { - evt.record(new WalSegmentCompactedEvent( - cctx.localNode(), - segIdx, - zip.getAbsoluteFile()) - ); - } + if (evt.isRecordable(EVT_WAL_SEGMENT_COMPACTED) && !cctx.kernalContext().recoveryMode()) + evt.record(new WalSegmentCompactedEvent(cctx.localNode(), segIdx, zip.getAbsoluteFile())); } catch (IgniteInterruptedCheckedException ignore) { Thread.currentThread().interrupt(); @@ -2250,7 +2288,7 @@ private void body0() { lastCompressionError = e; U.error(log, "Compression of WAL segment [idx=" + segIdx + - "] was skipped due to unexpected error", lastCompressionError); + "] was skipped due to unexpected error", lastCompressionError); segmentAware.onSegmentCompressed(segIdx); } @@ -2262,26 +2300,30 @@ private void body0() { } /** - * @param nextSegment Next segment absolute idx. - * @param raw Raw file. - * @param zip Zip file. 
+ * Segment compression. + * + * @param idx Segment absolute index. + * @param raw Raw segment file. + * @param zip Zip file to writing. + * @throws IOException If failed. + * @throws IgniteCheckedException If failed. */ - private void compressSegmentToFile(long nextSegment, File raw, File zip) - throws IOException, IgniteCheckedException { - int segmentSerializerVer; + private void compressSegmentToFile(long idx, File raw, File zip) throws IOException, IgniteCheckedException { + int serializerVer; try (FileIO fileIO = ioFactory.create(raw)) { - segmentSerializerVer = readSegmentHeader(new SegmentIO(nextSegment, fileIO), segmentFileInputFactory).getSerializerVersion(); + serializerVer = readSegmentHeader(new SegmentIO(idx, fileIO), segmentFileInputFactory) + .getSerializerVersion(); } try (ZipOutputStream zos = new ZipOutputStream(new BufferedOutputStream(new FileOutputStream(zip)))) { zos.setLevel(dsCfg.getWalCompactionLevel()); - zos.putNextEntry(new ZipEntry(nextSegment + ".wal")); + zos.putNextEntry(new ZipEntry(idx + ".wal")); ByteBuffer buf = ByteBuffer.allocate(HEADER_RECORD_SIZE); buf.order(ByteOrder.nativeOrder()); - zos.write(prepareSerializerVersionBuffer(nextSegment, segmentSerializerVer, true, buf).array()); + zos.write(prepareSerializerVersionBuffer(idx, serializerVer, true, buf).array()); final CIX1 appendToZipC = new CIX1() { @Override public void applyx(WALRecord record) throws IgniteCheckedException { @@ -2297,32 +2339,36 @@ private void compressSegmentToFile(long nextSegment, File raw, File zip) }; try (SingleSegmentLogicalRecordsIterator iter = new SingleSegmentLogicalRecordsIterator( - log, cctx, ioFactory, BUF_SIZE, nextSegment, walArchiveDir, appendToZipC)) { + log, cctx, ioFactory, BUF_SIZE, idx, walArchiveDir, appendToZipC)) { while (iter.hasNextX()) iter.nextX(); } - RecordSerializer ser = new RecordSerializerFactoryImpl(cctx).createSerializer(segmentSerializerVer); + RecordSerializer ser = new 
RecordSerializerFactoryImpl(cctx).createSerializer(serializerVer); - ByteBuffer heapBuf = prepareSwitchSegmentRecordBuffer(nextSegment, ser); + ByteBuffer heapBuf = prepareSwitchSegmentRecordBuffer(idx, ser); zos.write(heapBuf.array()); } + + segmentSize.put(idx, zip.length()); } /** - * @param nextSegment Segment index. + * @param idx Segment index. * @param ser Record Serializer. */ - @NotNull private ByteBuffer prepareSwitchSegmentRecordBuffer(long nextSegment, RecordSerializer ser) - throws IgniteCheckedException { + private ByteBuffer prepareSwitchSegmentRecordBuffer( + long idx, + RecordSerializer ser + ) throws IgniteCheckedException { SwitchSegmentRecord switchRecord = new SwitchSegmentRecord(); int switchRecordSize = ser.size(switchRecord); switchRecord.size(switchRecordSize); - switchRecord.position(new WALPointer(nextSegment, 0, switchRecordSize)); + switchRecord.position(new WALPointer(idx, 0, switchRecordSize)); ByteBuffer heapBuf = ByteBuffer.allocate(switchRecordSize); @@ -2353,9 +2399,11 @@ private void deleteObsoleteRawSegments() { return; if (desc.idx < segmentAware.keepUncompressedIdxFrom() && duplicateIndices.contains(desc.idx)) { - if (desc.file.exists() && !desc.file.delete()) - U.warn(log, "Failed to remove obsolete WAL segment (make sure the process has enough rights): " + - desc.file.getAbsolutePath() + ", exists: " + desc.file.exists()); + if (desc.file.exists() && !desc.file.delete()) { + U.warn(log, "Failed to remove obsolete WAL segment " + + "(make sure the process has enough rights): " + desc.file.getAbsolutePath() + + ", exists: " + desc.file.exists()); + } } } } @@ -2403,11 +2451,11 @@ private class FileDecompressor extends GridWorker { if (isCancelled()) break; - File zip = new File(walArchiveDir, FileDescriptor.fileName(segmentToDecompress) - + FilePageStoreManager.ZIP_SUFFIX); - File unzipTmp = new File(walArchiveDir, FileDescriptor.fileName(segmentToDecompress) - + TMP_SUFFIX); - File unzip = new File(walArchiveDir, 
FileDescriptor.fileName(segmentToDecompress)); + String segmentFileName = fileName(segmentToDecompress); + + File zip = new File(walArchiveDir, segmentFileName + ZIP_SUFFIX); + File unzipTmp = new File(walArchiveDir, segmentFileName + TMP_SUFFIX); + File unzip = new File(walArchiveDir, segmentFileName); try (ZipInputStream zis = new ZipInputStream(new BufferedInputStream(new FileInputStream(zip))); FileIO io = ioFactory.create(unzipTmp)) { @@ -2475,7 +2523,7 @@ synchronized IgniteInternalFuture decompressFile(long idx) { if (decompressionFutures.containsKey(idx)) return decompressionFutures.get(idx); - File f = new File(walArchiveDir, FileDescriptor.fileName(idx)); + File f = new File(walArchiveDir, fileName(idx)); if (f.exists()) return new GridFinishedFuture<>(); @@ -2518,33 +2566,36 @@ void restart() { * @param startWith Start with. * @param create Flag create file. * @param p Predicate Exit condition. + * @param completionCb Callback after verification segment. * @throws StorageException if validation or create file fail. 
*/ private void checkFiles( int startWith, boolean create, @Nullable IgnitePredicate p, - @Nullable IgniteInClosure completionCallback + @Nullable IgniteInClosure completionCb ) throws StorageException { for (int i = startWith; i < dsCfg.getWalSegments() && (p == null || p.apply(i)); i++) { - File checkFile = new File(walWorkDir, FileDescriptor.fileName(i)); + File checkFile = new File(walWorkDir, fileName(i)); if (checkFile.exists()) { - if (checkFile.isDirectory()) + if (checkFile.isDirectory()) { throw new StorageException("Failed to initialize WAL log segment (a directory with " + "the same name already exists): " + checkFile.getAbsolutePath()); - else if (checkFile.length() != dsCfg.getWalSegmentSize() && mode == WALMode.FSYNC) + } + else if (checkFile.length() != dsCfg.getWalSegmentSize() && mode == WALMode.FSYNC) { throw new StorageException("Failed to initialize WAL log segment " + "(WAL segment size change is not supported in 'DEFAULT' WAL mode) " + "[filePath=" + checkFile.getAbsolutePath() + ", fileSize=" + checkFile.length() + ", configSize=" + dsCfg.getWalSegmentSize() + ']'); + } } else if (create) createFile(checkFile); - if (completionCallback != null) - completionCallback.apply(i); + if (completionCb != null) + completionCb.apply(i); } } @@ -2555,7 +2606,7 @@ else if (create) * @param ver Version. * @param compacted Compacted flag. */ - @NotNull public static ByteBuffer prepareSerializerVersionBuffer(long idx, int ver, boolean compacted, ByteBuffer buf) { + public static ByteBuffer prepareSerializerVersionBuffer(long idx, int ver, boolean compacted, ByteBuffer buf) { // Write record type. 
buf.put((byte) (WALRecord.RecordType.HEADER_RECORD.ordinal() + 1)); @@ -2712,7 +2763,7 @@ private RecordsIterator( @Nullable WALPointer start, @Nullable WALPointer end, DataStorageConfiguration dsCfg, - @NotNull RecordSerializerFactory serializerFactory, + RecordSerializerFactory serializerFactory, FileIOFactory ioFactory, @Nullable FileArchiver archiver, FileDecompressor decompressor, @@ -2742,15 +2793,14 @@ private RecordsIterator( /** {@inheritDoc} */ @Override protected ReadFileHandle initReadHandle( - @NotNull AbstractFileDescriptor desc, + AbstractFileDescriptor desc, @Nullable WALPointer start ) throws IgniteCheckedException, FileNotFoundException { AbstractFileDescriptor currDesc = desc; if (!desc.file().exists()) { FileDescriptor zipFile = new FileDescriptor( - new File(walArchiveDir, FileDescriptor.fileName(desc.idx()) - + FilePageStoreManager.ZIP_SUFFIX)); + new File(walArchiveDir, fileName(desc.idx()) + ZIP_SUFFIX)); if (!zipFile.file.exists()) { throw new FileNotFoundException("Both compressed and raw segment files are missing in archive " + @@ -2902,10 +2952,7 @@ private static List listFileNames(File dir) { } /** {@inheritDoc} */ - @Override protected IgniteCheckedException handleRecordException( - @NotNull Exception e, - @Nullable WALPointer ptr) { - + @Override protected IgniteCheckedException handleRecordException(Exception e, @Nullable WALPointer ptr) { if (e instanceof IgniteCheckedException) if (X.hasCause(e, IgniteDataIntegrityViolationException.class)) // This means that there is no explicit last sengment, so we iterate unil the very end. 
@@ -2971,12 +3018,10 @@ private boolean isArchiverEnabled() { private boolean canIgnoreCrcError( long workIdx, long walSegmentIdx, - @NotNull Exception e, - @Nullable WALPointer ptr) { - FileDescriptor fd = new FileDescriptor( - new File(walWorkDir, FileDescriptor.fileName(workIdx)), - walSegmentIdx - ); + Exception e, + @Nullable WALPointer ptr + ) { + FileDescriptor fd = new FileDescriptor(new File(walWorkDir, fileName(workIdx)), walSegmentIdx); try { if (!fd.file().exists()) @@ -3023,7 +3068,7 @@ private void doFlush() { * @param walFilesDir directory to scan * @return found WAL file descriptors */ - public static FileDescriptor[] loadFileDescriptors(@NotNull final File walFilesDir) throws IgniteCheckedException { + public static FileDescriptor[] loadFileDescriptors(final File walFilesDir) throws IgniteCheckedException { final File[] files = walFilesDir.listFiles(WAL_SEGMENT_COMPACTED_OR_RAW_FILE_FILTER); if (files == null) { @@ -3032,4 +3077,45 @@ public static FileDescriptor[] loadFileDescriptors(@NotNull final File walFilesD } return scan(files); } + + /** {@inheritDoc} */ + @Override public long segmentSize(long idx) { + return segmentSize.getOrDefault(idx, 0L); + } + + /** {@inheritDoc} */ + @Override public WALPointer lastWritePointer() { + return currHnd.position(); + } + + /** + * Concurrent {@link #currHnd} update. + * + * @param n New handle. + * @param c Current handle, if not {@code null} CAS will be used. + * @return {@code True} if updated. + */ + private boolean updateCurrentHandle(FileWriteHandle n, @Nullable FileWriteHandle c) { + boolean res = true; + + if (c == null) + currHnd = n; + else + res = CURR_HND_UPD.compareAndSet(this, c, n); + + segmentSize.put(n.getSegmentId(), maxWalSegmentSize); + + return res; + } + + /** + * Check that file name matches segment name. + * + * @param name File name. + * @return {@code True} if file name matches segment name. 
+ */ + public static boolean isSegmentFileName(@Nullable String name) { + return name != null && (WAL_NAME_PATTERN.matcher(name).matches() || + WAL_SEGMENT_FILE_COMPACTED_PATTERN.matcher(name).matches()); + } } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteLocalWalSizeTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteLocalWalSizeTest.java new file mode 100644 index 0000000000000..2854a2a614d66 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteLocalWalSizeTest.java @@ -0,0 +1,229 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.db.wal; + +import java.io.File; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Consumer; +import java.util.stream.IntStream; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cluster.ClusterState; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.persistence.wal.FileDescriptor; +import org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager; +import org.apache.ignite.internal.processors.cache.persistence.wal.filehandle.FileWriteHandle; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.jetbrains.annotations.Nullable; +import org.junit.Test; + +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.ZIP_SUFFIX; +import static org.apache.ignite.internal.processors.cache.persistence.wal.FileDescriptor.fileName; +import static org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager.WAL_SEGMENT_COMPACTED_OR_RAW_FILE_FILTER; +import static org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager.isSegmentFileName; +import static org.apache.ignite.testframework.GridTestUtils.getFieldValue; + +/** + * Class for testing local size of WAL. 
+ */ +public class IgniteLocalWalSizeTest extends GridCommonAbstractTest { + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + stopAllGrids(); + cleanPersistenceDir(); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + super.afterTest(); + + stopAllGrids(); + cleanPersistenceDir(); + } + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { + return super.getConfiguration(gridName) + .setCacheConfiguration(new CacheConfiguration<>(DEFAULT_CACHE_NAME)) + .setDataStorageConfiguration( + new DataStorageConfiguration() + .setWalSegments(5) + .setWalSegmentSize((int)U.MB) + .setDefaultDataRegionConfiguration(new DataRegionConfiguration().setPersistenceEnabled(true)) + ); + } + + /** + * Checking correctness of working with local segment sizes for case: archiving only. + * + * @throws Exception If failed. + */ + @Test + public void testLocalSegmentSizesArchiveOnly() throws Exception { + checkLocalSegmentSizesForOneNode(null); + } + + /** + * Checking correctness of working with local segment sizes for case: archiving and compression. + * + * @throws Exception If failed. + */ + @Test + public void testLocalSegmentSizesArchiveAndCompression() throws Exception { + checkLocalSegmentSizesForOneNode(cfg -> cfg.getDataStorageConfiguration().setWalCompactionEnabled(true)); + } + + /** + * Checking correctness of working with local segment sizes for case: without archiving. + * + * @throws Exception If failed. + */ + @Test + public void testLocalSegmentSizesWithoutArchive() throws Exception { + checkLocalSegmentSizesForOneNode(cfg -> { + DataStorageConfiguration dsCfg = cfg.getDataStorageConfiguration(); + dsCfg.setWalArchivePath(dsCfg.getWalPath()); + }); + } + + /** + * Checking correctness of working with local segment sizes for case: without archiving and with compression. + * + * @throws Exception If failed. 
+ */ + @Test + public void testLocalSegmentSizesWithoutArchiveWithCompression() throws Exception { + checkLocalSegmentSizesForOneNode(cfg -> { + DataStorageConfiguration dsCfg = cfg.getDataStorageConfiguration(); + dsCfg.setWalArchivePath(dsCfg.getWalPath()).setWalCompactionEnabled(true); + }); + } + + /** + * Checking whether segment file name is checked correctly. + * + * @throws Exception If failed. + */ + @Test + public void testSegmentFileName() throws Exception { + Arrays.asList(null, "", "1", "wal", fileName(0) + "1", fileName(1).replace(".wal", ".wa")) + .forEach(s -> assertFalse(s, isSegmentFileName(s))); + + IntStream.range(0, 10) + .mapToObj(FileDescriptor::fileName) + .forEach(fn -> assertTrue(fn, isSegmentFileName(fn) && isSegmentFileName(fn + ZIP_SUFFIX))); + } + + /** + * Checks whether local segment sizes are working correctly for a single node after loading and restarting. + * + * @param cfgUpdater Configuration updater. + * @throws Exception If failed. + */ + private void checkLocalSegmentSizesForOneNode( + @Nullable Consumer cfgUpdater + ) throws Exception { + IgniteConfiguration cfg = getConfiguration(getTestIgniteInstanceName(0)); + + if (cfgUpdater != null) + cfgUpdater.accept(cfg); + + IgniteEx n = startGrid(cfg); + n.cluster().state(ClusterState.ACTIVE); + + awaitPartitionMapExchange(); + + IgniteCache c = n.getOrCreateCache(DEFAULT_CACHE_NAME); + IntStream.range(0, 10_000).forEach(i -> c.put(i, i)); + + forceCheckpoint(); + checkLocalSegmentSizes(n); + + stopGrid(cfg.getIgniteInstanceName()); + awaitPartitionMapExchange(); + + cfg = getConfiguration(cfg.getIgniteInstanceName()); + + if (cfgUpdater != null) + cfgUpdater.accept(cfg); + + // To avoid a race between compressor and getting the segment sizes. 
+ if (cfg.getDataStorageConfiguration().isWalCompactionEnabled()) + cfg.getDataStorageConfiguration().setWalCompactionEnabled(false); + + n = startGrid(cfg); + awaitPartitionMapExchange(); + + checkLocalSegmentSizes(n); + } + + /** + * Check that local segment sizes in the memory and actual match. + * + * @param n Node. + */ + private void checkLocalSegmentSizes(IgniteEx n) { + FileWriteAheadLogManager wal = (FileWriteAheadLogManager)n.context().cache().context().wal(); + + File walWorkDir = getFieldValue(wal, "walWorkDir"); + File walArchiveDir = getFieldValue(wal, "walArchiveDir"); + + Map expSegmentSize = new HashMap<>(); + + F.asList(walArchiveDir.listFiles(WAL_SEGMENT_COMPACTED_OR_RAW_FILE_FILTER)) + .stream() + .map(FileDescriptor::new) + .forEach(fd -> { + if (fd.isCompressed()) + expSegmentSize.put(fd.idx(), fd.file().length()); + else + expSegmentSize.putIfAbsent(fd.idx(), fd.file().length()); + }); + + FileWriteHandle currHnd = getFieldValue(wal, "currHnd"); + + if (!walArchiveDir.equals(walWorkDir)) { + long absIdx = currHnd.getSegmentId(); + int segments = n.configuration().getDataStorageConfiguration().getWalSegments(); + + for (long i = absIdx - (absIdx % segments); i <= absIdx; i++) + expSegmentSize.putIfAbsent(i, new File(walWorkDir, fileName(i % segments)).length()); + } + + assertEquals(currHnd.getSegmentId() + 1, expSegmentSize.size()); + + Map segmentSize = getFieldValue(wal, "segmentSize"); + assertEquals(expSegmentSize.size(), segmentSize.size()); + + expSegmentSize.forEach((idx, size) -> { + assertEquals(idx.toString(), size, segmentSize.get(idx)); + assertEquals(idx.toString(), size.longValue(), wal.segmentSize(idx)); + }); + + assertEquals(0, wal.segmentSize(currHnd.getSegmentId() + 1)); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpWALManager.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpWALManager.java index 
3dedd8feae1c8..2e78ad03c4b4a 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpWALManager.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpWALManager.java @@ -188,4 +188,14 @@ public class NoOpWALManager implements IgniteWriteAheadLogManager { @Override public long maxArchivedSegmentToDelete() { return -1; } + + /** {@inheritDoc} */ + @Override public long segmentSize(long idx) { + return -1; + } + + /** {@inheritDoc} */ + @Override public WALPointer lastWritePointer() { + return null; + } } diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java index 8ff45e44efa43..11b9ba5a38010 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java @@ -56,6 +56,7 @@ import org.apache.ignite.internal.processors.cache.persistence.db.checkpoint.IgniteCheckpointDirtyPagesForLowLoadTest; import org.apache.ignite.internal.processors.cache.persistence.db.filename.IgniteUidAsConsistentIdMigrationTest; import org.apache.ignite.internal.processors.cache.persistence.db.wal.FsyncWalRolloverDoesNotBlockTest; +import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteLocalWalSizeTest; import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteNodeStoppedDuringDisableWALTest; import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWALTailIsReachedDuringIterationOverArchiveTest; import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalFlushBackgroundSelfTest; @@ -231,5 +232,7 @@ public static void addRealPageStoreTests(List> suite, Collection GridTestUtils.addTestIfNeeded(suite, IgniteWalRebalanceLoggingTest.class, ignoredTests); 
GridTestUtils.addTestIfNeeded(suite, HistoricalRebalanceHeuristicsTest.class, ignoredTests); + + GridTestUtils.addTestIfNeeded(suite, IgniteLocalWalSizeTest.class, ignoredTests); } } From d66e4405a57726be10ba80a3b24221f285ba1618 Mon Sep 17 00:00:00 2001 From: Vladislav Pyatkov Date: Mon, 2 Nov 2020 15:33:29 +0300 Subject: [PATCH 009/110] IGNITE-13593 Fixed IgniteClientCacheStartFailoverTest.testRebalanceStateConcurrentStart in MVCC mode. Fixes #8366 Signed-off-by: Slava Koptilin --- .../IgniteClientCacheStartFailoverTest.java | 27 +++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheStartFailoverTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheStartFailoverTest.java index 41b0c11264355..e475cbc5ec5b4 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheStartFailoverTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheStartFailoverTest.java @@ -28,6 +28,7 @@ import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import javax.cache.CacheException; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.cache.CacheAtomicityMode; @@ -44,12 +45,14 @@ import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology; import org.apache.ignite.internal.util.lang.GridAbsPredicate; +import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiPredicate; import org.apache.ignite.lang.IgnitePredicate; import 
org.apache.ignite.plugin.extensions.communication.Message; import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.apache.ignite.transactions.TransactionSerializationException; import org.junit.Ignore; import org.junit.Test; @@ -365,9 +368,29 @@ public void testRebalanceStateConcurrentStart() throws Exception { Map map0 = cache.getAll(keys); - assertEquals(KEYS, map0.size()); + assertEquals("[cache=" + cacheName + + ", expected=" + KEYS + + ", actual=" + map0.size() + ']', KEYS, map0.size()); - cache.put(rnd.nextInt(KEYS), i); + int key = rnd.nextInt(KEYS); + + try { + cache.put(key, i); + } + catch (CacheException e) { + log.error("It couldn't put a value [cache=" + cacheName + + ", key=" + key + + ", val=" + i + ']', e); + + CacheConfiguration ccfg = cache.getConfiguration(CacheConfiguration.class); + + TransactionSerializationException txEx = X.cause(e, TransactionSerializationException.class); + + if (txEx == null || + ccfg.getAtomicityMode() != TRANSACTIONAL_SNAPSHOT || + !txEx.getMessage().contains("Cannot serialize transaction due to write conflict (transaction is marked for rollback)")) + fail("Assert violated because exception was thrown [e=" + e.getMessage() + ']'); + } } } From 6d9785706e4a7ca0edeccc32dc6fdf34f9143956 Mon Sep 17 00:00:00 2001 From: Ivan Daschinskiy Date: Mon, 2 Nov 2020 17:55:13 +0300 Subject: [PATCH 010/110] IGNITE-13577 Graceful node shutdown for Zookeeper Discovery SPI - Fixes #8371. 
Signed-off-by: Sergey Chugunov --- .../discovery/zk/ZookeeperDiscoverySpi.java | 2 +- ...tCallabck.java => ZkAbstractCallback.java} | 6 +- .../internal/ZkAbstractChildrenCallback.java | 2 +- .../zk/internal/ZkAbstractWatcher.java | 2 +- .../zk/internal/ZkDiscoveryEventData.java | 4 +- ...ava => ZkDiscoveryNodeLeaveEventData.java} | 45 ++- .../discovery/zk/internal/ZkIgnitePaths.java | 57 +++- .../spi/discovery/zk/internal/ZkRunnable.java | 2 +- .../zk/internal/ZookeeperClient.java | 26 ++ .../zk/internal/ZookeeperDiscoveryImpl.java | 124 +++++--- .../ZookeeperDiscoveryStatistics.java | 32 ++- .../zk/ZookeeperDiscoverySpiTestSuite1.java | 2 + ...coveryConcurrentStartAndStartStopTest.java | 6 +- .../internal/ZookeeperDiscoveryMiscTest.java | 2 + ...scoveryRandomStopOrFailConcurrentTest.java | 264 ++++++++++++++++++ ...ySegmentationAndConnectionRestoreTest.java | 4 +- .../ZookeeperDiscoverySpiTestBase.java | 90 ++++++ .../ZookeeperDiscoverySpiTestHelper.java | 6 +- ...scoveryTopologyChangeAndReconnectTest.java | 90 ------ .../zookeeper/ZkTestClientCnxnSocketNIO.java | 3 +- 20 files changed, 595 insertions(+), 174 deletions(-) rename modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/{ZkAbstractCallabck.java => ZkAbstractCallback.java} (92%) rename modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/{ZkDiscoveryNodeFailEventData.java => ZkDiscoveryNodeLeaveEventData.java} (53%) create mode 100644 modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryRandomStopOrFailConcurrentTest.java diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpi.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpi.java index 5cdfa581c1817..3de8df6435b35 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpi.java +++ 
b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpi.java @@ -595,7 +595,7 @@ public ZookeeperDiscoverySpiMBeanImpl(IgniteSpiAdapter spiAdapter) { /** {@inheritDoc} */ @Override public long getNodesLeft() { - return 0; + return stats.leftNodesCnt(); } /** {@inheritDoc} */ diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractCallabck.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractCallback.java similarity index 92% rename from modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractCallabck.java rename to modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractCallback.java index b80a9ddbf129a..427a81c27eac4 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractCallabck.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractCallback.java @@ -22,12 +22,12 @@ /** * */ -abstract class ZkAbstractCallabck { +abstract class ZkAbstractCallback { /** */ final ZkRuntimeState rtState; /** */ - private final ZookeeperDiscoveryImpl impl; + final ZookeeperDiscoveryImpl impl; /** */ private final GridSpinBusyLock busyLock; @@ -36,7 +36,7 @@ abstract class ZkAbstractCallabck { * @param rtState Runtime state. * @param impl Discovery impl. 
*/ - ZkAbstractCallabck(ZkRuntimeState rtState, ZookeeperDiscoveryImpl impl) { + ZkAbstractCallback(ZkRuntimeState rtState, ZookeeperDiscoveryImpl impl) { this.rtState = rtState; this.impl = impl; diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractChildrenCallback.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractChildrenCallback.java index 2292e35056041..dc680f329df45 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractChildrenCallback.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractChildrenCallback.java @@ -24,7 +24,7 @@ /** * */ -abstract class ZkAbstractChildrenCallback extends ZkAbstractCallabck implements AsyncCallback.Children2Callback { +abstract class ZkAbstractChildrenCallback extends ZkAbstractCallback implements AsyncCallback.Children2Callback { /** * @param rtState Runtime state. * @param impl Discovery impl. diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractWatcher.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractWatcher.java index 9098d0520a52a..37e65e5b90457 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractWatcher.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkAbstractWatcher.java @@ -23,7 +23,7 @@ /** * */ -abstract class ZkAbstractWatcher extends ZkAbstractCallabck implements Watcher { +abstract class ZkAbstractWatcher extends ZkAbstractCallback implements Watcher { /** * @param rtState Runtime state. * @param impl Discovery impl. 
diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDiscoveryEventData.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDiscoveryEventData.java index d667a17f6643c..2bc49e5252522 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDiscoveryEventData.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDiscoveryEventData.java @@ -30,7 +30,7 @@ abstract class ZkDiscoveryEventData implements Serializable { static final byte ZK_EVT_NODE_JOIN = 1; /** */ - static final byte ZK_EVT_NODE_FAILED = 2; + static final byte ZK_EVT_NODE_LEFT = 2; /** */ static final byte ZK_EVT_CUSTOM_EVT = 3; @@ -59,7 +59,7 @@ abstract class ZkDiscoveryEventData implements Serializable { * @param topVer Topology version. */ ZkDiscoveryEventData(long evtId, byte evtType, long topVer) { - assert evtType == ZK_EVT_NODE_JOIN || evtType == ZK_EVT_NODE_FAILED || evtType == ZK_EVT_CUSTOM_EVT : evtType; + assert evtType == ZK_EVT_NODE_JOIN || evtType == ZK_EVT_NODE_LEFT || evtType == ZK_EVT_CUSTOM_EVT : evtType; this.evtId = evtId; this.evtType = evtType; diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDiscoveryNodeFailEventData.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDiscoveryNodeLeaveEventData.java similarity index 53% rename from modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDiscoveryNodeFailEventData.java rename to modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDiscoveryNodeLeaveEventData.java index c76158ff090a7..77d1157aa4ba8 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDiscoveryNodeFailEventData.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDiscoveryNodeLeaveEventData.java @@ -20,36 +20,59 @@ /** * */ -class 
ZkDiscoveryNodeFailEventData extends ZkDiscoveryEventData { +class ZkDiscoveryNodeLeaveEventData extends ZkDiscoveryEventData { /** */ private static final long serialVersionUID = 0L; /** */ - private long failedNodeInternalId; + private final long leftNodeInternalId; + + /** */ + private final boolean failed; /** * @param evtId Event ID. * @param topVer Topology version. - * @param failedNodeInternalId Failed node ID. + * @param leftNodeInternalId Failed node ID. */ - ZkDiscoveryNodeFailEventData(long evtId, long topVer, long failedNodeInternalId) { - super(evtId, ZK_EVT_NODE_FAILED, topVer); + ZkDiscoveryNodeLeaveEventData(long evtId, long topVer, long leftNodeInternalId) { + this(evtId, topVer, leftNodeInternalId, false); + } - this.failedNodeInternalId = failedNodeInternalId; + /** + * @param evtId Event ID. + * @param topVer Topology version. + * @param leftNodeInternalId Left node ID. + */ + ZkDiscoveryNodeLeaveEventData(long evtId, long topVer, long leftNodeInternalId, boolean failed) { + super(evtId, ZK_EVT_NODE_LEFT, topVer); + + this.leftNodeInternalId = leftNodeInternalId; + + this.failed = failed; + } + + /** + * @return Left node ID. + */ + long leftNodeInternalId() { + return leftNodeInternalId; } /** - * @return Failed node ID. + * + * @return {@code true} if failed. 
*/ - long failedNodeInternalId() { - return failedNodeInternalId; + boolean failed() { + return failed; } /** {@inheritDoc} */ @Override public String toString() { - return "ZkDiscoveryNodeFailEventData [" + + return "ZkDiscoveryNodeLeaveEventData [" + "evtId=" + eventId() + ", topVer=" + topologyVersion() + - ", nodeId=" + failedNodeInternalId + ']'; + ", nodeId=" + leftNodeInternalId + + ", failed=" + failed + ']'; } } diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkIgnitePaths.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkIgnitePaths.java index 4e542549adef0..02e9d36a94657 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkIgnitePaths.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkIgnitePaths.java @@ -44,6 +44,9 @@ public class ZkIgnitePaths { /** Directory to store acknowledge messages for custom events. */ private static final String CUSTOM_EVTS_ACKS_DIR = "ca"; + /** Directory to store node's stopped flags. */ + private static final String STOPPED_NODES_FLAGS_DIR = "sf"; + /** Directory to store EPHEMERAL znodes for alive cluster nodes. */ static final String ALIVE_NODES_DIR = "n"; @@ -71,6 +74,9 @@ public class ZkIgnitePaths { /** */ final String customEvtsAcksDir; + /** */ + final String stoppedNodesFlagsDir; + /** * @param zkRootPath Base Zookeeper directory for all Ignite nodes. */ @@ -83,6 +89,7 @@ public class ZkIgnitePaths { customEvtsDir = zkPath(CUSTOM_EVTS_DIR); customEvtsPartsDir = zkPath(CUSTOM_EVTS_PARTS_DIR); customEvtsAcksDir = zkPath(CUSTOM_EVTS_ACKS_DIR); + stoppedNodesFlagsDir = zkPath(STOPPED_NODES_FLAGS_DIR); } /** @@ -90,7 +97,7 @@ public class ZkIgnitePaths { * @return Full path. */ private String zkPath(String path) { - return clusterDir + "/" + path; + return join(clusterDir, path); } /** @@ -99,7 +106,7 @@ private String zkPath(String path) { * @return Path. 
*/ String joiningNodeDataPath(UUID nodeId, UUID prefixId) { - return joinDataDir + '/' + prefixId + ":" + nodeId.toString(); + return join(joinDataDir, prefixId + ":" + nodeId.toString()); } /** @@ -109,7 +116,7 @@ String joiningNodeDataPath(UUID nodeId, UUID prefixId) { static long aliveInternalId(String path) { int idx = path.lastIndexOf('|'); - return Integer.parseInt(path.substring(idx + 1)); + return Long.parseLong(path.substring(idx + 1)); } /** @@ -123,7 +130,7 @@ String aliveNodePathForCreate(String prefix, ZookeeperClusterNode node) { if (node.isClient()) flags |= CLIENT_NODE_FLAG_MASK; - return aliveNodesDir + "/" + prefix + ":" + node.id() + ":" + encodeFlags(flags) + "|"; + return join(aliveNodesDir, prefix + ":" + node.id() + ":" + encodeFlags(flags) + "|"); } /** @@ -155,6 +162,26 @@ static UUID aliveNodeId(String path) { return UUID.fromString(idStr); } + /** + * @param node Leaving node. + * @return Stopped node path. + */ + String nodeStoppedFlag(ZookeeperClusterNode node) { + String path = node.id().toString() + '|' + node.internalId(); + + return join(stoppedNodesFlagsDir, path); + } + + /** + * @param path Leaving flag path. + * @return Stopped node internal id. + */ + static long stoppedFlagNodeInternalId(String path) { + int idx = path.lastIndexOf('|'); + + return Long.parseLong(path.substring(idx + 1)); + } + /** * @param path Event zk path. * @return Event sequence number. @@ -212,7 +239,7 @@ static int customEventPartsCount(String path) { * @return Path. */ String createCustomEventPath(String prefix, UUID nodeId, int partCnt) { - return customEvtsDir + "/" + prefix + ":" + nodeId + ":" + String.format("%04d", partCnt) + '|'; + return join(customEvtsDir, prefix + ":" + nodeId + ":" + String.format("%04d", partCnt) + '|'); } /** @@ -221,7 +248,7 @@ String createCustomEventPath(String prefix, UUID nodeId, int partCnt) { * @return Path. 
*/ String customEventPartsBasePath(String prefix, UUID nodeId) { - return customEvtsPartsDir + "/" + prefix + ":" + nodeId + ":"; + return join(customEvtsPartsDir, prefix + ":" + nodeId + ":"); } /** @@ -239,7 +266,7 @@ String customEventPartPath(String prefix, UUID nodeId, int part) { * @return Event zk path. */ String joinEventDataPathForJoined(long evtId) { - return evtsPath + "/fj-" + evtId; + return join(evtsPath,"fj-" + evtId); } /** @@ -247,7 +274,7 @@ String joinEventDataPathForJoined(long evtId) { * @return Event zk path. */ String joinEventSecuritySubjectPath(long topVer) { - return evtsPath + "/s-" + topVer; + return join(evtsPath, "s-" + topVer); } /** @@ -257,7 +284,7 @@ String joinEventSecuritySubjectPath(long topVer) { String ackEventDataPath(long origEvtId) { assert origEvtId != 0; - return customEvtsAcksDir + "/" + String.valueOf(origEvtId); + return join(customEvtsAcksDir, String.valueOf(origEvtId)); } /** @@ -265,7 +292,7 @@ String ackEventDataPath(long origEvtId) { * @return Future path. */ String distributedFutureBasePath(UUID id) { - return evtsPath + "/f-" + id; + return join(evtsPath, "f-" + id); } /** @@ -273,7 +300,7 @@ String distributedFutureBasePath(UUID id) { * @return Future path. */ String distributedFutureResultPath(UUID id) { - return evtsPath + "/fr-" + id; + return join(evtsPath, "fr-" + id); } /** @@ -305,6 +332,14 @@ private static byte aliveFlags(String path) { return (byte)(Integer.parseInt(flagsStr, 16) - 128); } + /** + * @param paths Paths to join. + * @return Paths joined with separator. + */ + public static String join(String... paths) { + return String.join(PATH_SEPARATOR, paths); + } + /** * Validate the provided znode path string. 
* diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkRunnable.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkRunnable.java index 965bdc0f45851..1be63e02c3e0e 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkRunnable.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkRunnable.java @@ -20,7 +20,7 @@ /** * Zk Runnable. */ -public abstract class ZkRunnable extends ZkAbstractCallabck implements Runnable { +public abstract class ZkRunnable extends ZkAbstractCallback implements Runnable { /** * @param rtState Runtime state. * @param impl Discovery impl. diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperClient.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperClient.java index 7e1bb9af29a9c..e98bc01199a91 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperClient.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperClient.java @@ -441,6 +441,32 @@ String createIfNeeded(String path, byte[] data, CreateMode createMode) } } + /** + * @param path Path. + * @param data Data. + * @param createMode Create mode. + * @return Created path. + * @throws KeeperException In case of zookeeper error. + * @throws InterruptedException If interrupted. + */ + String createIfNeededNoRetry(String path, byte[] data, CreateMode createMode) + throws KeeperException, InterruptedException { + assert !createMode.isSequential() : createMode; + + if (data == null) + data = EMPTY_BYTES; + + try { + return zk.create(path, data, ZK_ACL, createMode); + } + catch (KeeperException.NodeExistsException e) { + if (log.isDebugEnabled()) + log.debug("Node already exists: " + path); + + return path; + } + } + /** * @param checkPrefix Unique prefix to check in case of retry. 
* @param parentPath Parent node path. diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryImpl.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryImpl.java index e9196f2663c1b..d9d56aef1aa69 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryImpl.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryImpl.java @@ -101,6 +101,7 @@ import static org.apache.ignite.events.EventType.EVT_CLIENT_NODE_RECONNECTED; import static org.apache.ignite.events.EventType.EVT_NODE_FAILED; import static org.apache.ignite.events.EventType.EVT_NODE_JOINED; +import static org.apache.ignite.events.EventType.EVT_NODE_LEFT; import static org.apache.ignite.events.EventType.EVT_NODE_SEGMENTED; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_IGNITE_INSTANCE_NAME; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_SECURITY_CREDENTIALS; @@ -854,7 +855,8 @@ private void initZkNodes() throws InterruptedException { zkPaths.customEvtsDir, zkPaths.customEvtsPartsDir, zkPaths.customEvtsAcksDir, - zkPaths.aliveNodesDir}; + zkPaths.aliveNodesDir, + zkPaths.stoppedNodesFlagsDir}; List dirs = new ArrayList<>(); @@ -1009,7 +1011,7 @@ private void startJoin(ZkRuntimeState rtState, @Nullable ZkRuntimeState prevStat final int OVERHEAD = 5; // TODO ZK: https://issues.apache.org/jira/browse/IGNITE-8193 - String joinDataPath = zkPaths.joinDataDir + "/" + prefix + ":" + locNode.id(); + String joinDataPath = ZkIgnitePaths.join(zkPaths.joinDataDir, prefix + ":" + locNode.id()); if (zkClient.needSplitNodeData(joinDataPath, joinDataBytes, OVERHEAD)) { List parts = zkClient.splitNodeData(joinDataPath, joinDataBytes, OVERHEAD); @@ -1379,7 +1381,7 @@ private void checkIsCoordinator(final List aliveNodes) throws Exception PreviousNodeWatcher watcher = new 
ServerPreviousNodeWatcher(rtState); - rtState.zkClient.existsAsync(zkPaths.aliveNodesDir + "/" + prevE.getValue(), watcher, watcher); + rtState.zkClient.existsAsync(ZkIgnitePaths.join(zkPaths.aliveNodesDir, prevE.getValue()), watcher, watcher); } } @@ -1478,7 +1480,7 @@ private void checkClientsStatus(final List aliveNodes) throws Exception PreviousNodeWatcher watcher = new ClientPreviousNodeWatcher(rtState); - rtState.zkClient.existsAsync(zkPaths.aliveNodesDir + "/" + watchPath, watcher, watcher); + rtState.zkClient.existsAsync(ZkIgnitePaths.join(zkPaths.aliveNodesDir, watchPath), watcher, watcher); } } @@ -1512,6 +1514,16 @@ private void generateNoServersEvent(ZkDiscoveryEventsData evtsData, Stat evtsSta * @throws Exception If failed. */ private void previousCoordinatorCleanup(ZkDiscoveryEventsData lastEvts) throws Exception { + for (String stoppedFlagPath : rtState.zkClient.getChildren(zkPaths.stoppedNodesFlagsDir)) { + long leftIntId = ZkIgnitePaths.stoppedFlagNodeInternalId(stoppedFlagPath); + + if (!rtState.top.nodesByInternalId.containsKey(leftIntId)) { + rtState.zkClient.deleteIfExistsAsync( + ZkIgnitePaths.join(zkPaths.stoppedNodesFlagsDir, stoppedFlagPath) + ); + } + } + for (ZkDiscoveryEventData evtData : lastEvts.evts.values()) { if (evtData instanceof ZkDiscoveryCustomEventData) { ZkDiscoveryCustomEventData evtData0 = (ZkDiscoveryCustomEventData)evtData; @@ -1620,7 +1632,7 @@ private void onBecomeCoordinator(List aliveNodes) throws Exception { private void watchAliveNodeData(String alivePath) { assert rtState.locNodeZkPath != null; - String path = zkPaths.aliveNodesDir + "/" + alivePath; + String path = ZkIgnitePaths.join(zkPaths.aliveNodesDir, alivePath); if (!path.equals(rtState.locNodeZkPath)) rtState.zkClient.getDataAsync(path, rtState.aliveNodeDataWatcher, rtState.aliveNodeDataWatcher); @@ -1642,6 +1654,11 @@ private void generateTopologyEvents(List aliveNodes) throws Exception { rtState.updateAlives = false; } + Set stoppedNodes = new 
HashSet<>(); + + for (String stoppedFlagPath : rtState.zkClient.getChildren(zkPaths.stoppedNodesFlagsDir)) + stoppedNodes.add(ZkIgnitePaths.stoppedFlagNodeInternalId(stoppedFlagPath)); + TreeMap alives = new TreeMap<>(); for (String child : aliveNodes) { @@ -1670,7 +1687,7 @@ private void generateTopologyEvents(List aliveNodes) throws Exception { failedNodes.add(failedNode); - generateNodeFail(curTop, failedNode); + generateNodeLeave(curTop, failedNode, !stoppedNodes.contains(failedNode.internalId())); newEvts++; @@ -2031,15 +2048,11 @@ private void processJoinError(String aliveNodePath, String joinDataPath = zkPaths.joiningNodeDataPath(nodeId, prefixId); client.setData(joinDataPath, marshalZip(joinErr), -1); - - client.deleteIfExists(zkPaths.aliveNodesDir + "/" + aliveNodePath, -1); } - else { - if (log.isInfoEnabled()) + else if (log.isInfoEnabled()) log.info("Ignore join data, node was failed by previous coordinator: " + aliveNodePath); - client.deleteIfExists(zkPaths.aliveNodesDir + "/" + aliveNodePath, -1); - } + client.deleteIfExists(ZkIgnitePaths.join(zkPaths.aliveNodesDir, aliveNodePath), -1); } /** @@ -2180,25 +2193,35 @@ else if (log.isDebugEnabled()) { /** * @param curTop Current topology. - * @param failedNode Failed node. + * @param leftNode Failed node. + * @param failed Whether node failed or stopped. 
*/ - private void generateNodeFail(TreeMap curTop, ZookeeperClusterNode failedNode) { - Object rmvd = curTop.remove(failedNode.order()); + private void generateNodeLeave( + TreeMap curTop, + ZookeeperClusterNode leftNode, + boolean failed + ) { + Object rmvd = curTop.remove(leftNode.order()); assert rmvd != null; rtState.evtsData.topVer++; rtState.evtsData.evtIdGen++; - ZkDiscoveryNodeFailEventData evtData = new ZkDiscoveryNodeFailEventData( + ZkDiscoveryNodeLeaveEventData evtData = new ZkDiscoveryNodeLeaveEventData( rtState.evtsData.evtIdGen, rtState.evtsData.topVer, - failedNode.internalId()); + leftNode.internalId(), + failed + ); rtState.evtsData.addEvent(curTop.values(), evtData); - if (log.isInfoEnabled()) - log.info("Generated NODE_FAILED event [evt=" + evtData + ']'); + if (log.isInfoEnabled()) { + String evtName = failed ? "NODE_FAILED" : "NODE_LEFT"; + + log.info("Generated " + evtName + " event [evt=" + evtData + ']'); + } } /** @@ -2389,12 +2412,14 @@ private void cleanupPreviousClusterData(long startInternalOrder) throws Exceptio batch.addAll(client.getChildrenPaths(zkPaths.customEvtsAcksDir)); + batch.addAll(client.getChildrenPaths(zkPaths.stoppedNodesFlagsDir)); + client.deleteAll(batch, -1); if (startInternalOrder > 0) { for (String alive : client.getChildren(zkPaths.aliveNodesDir)) { if (ZkIgnitePaths.aliveInternalId(alive) < startInternalOrder) - client.deleteIfExists(zkPaths.aliveNodesDir + "/" + alive, -1); + client.deleteIfExists(ZkIgnitePaths.join(zkPaths.aliveNodesDir, alive), -1); } } @@ -2423,7 +2448,7 @@ private byte[] readCustomEventData(ZookeeperClient zkClient, String evtPath, UUI return readMultipleParts(zkClient, partsBasePath, partCnt); } else - return zkClient.getData(zkPaths.customEvtsDir + "/" + evtPath); + return zkClient.getData(ZkIgnitePaths.join(zkPaths.customEvtsDir, evtPath)); } /** @@ -2594,7 +2619,7 @@ private void deleteAliveNode(long internalId) throws Exception { for (String child : 
rtState.zkClient.getChildren(zkPaths.aliveNodesDir)) { if (ZkIgnitePaths.aliveInternalId(child) == internalId) { // Need use sync delete to do not process again join of this node again. - rtState.zkClient.deleteIfExists(zkPaths.aliveNodesDir + "/" + child, -1); + rtState.zkClient.deleteIfExists(ZkIgnitePaths.join(zkPaths.aliveNodesDir, child), -1); return; } @@ -2623,7 +2648,7 @@ private void deleteCustomEventDataAsync(ZookeeperClient zkClient, String evtPath } } - zkClient.deleteIfExistsAsync(zkPaths.customEvtsDir + "/" + evtPath); + zkClient.deleteIfExistsAsync(ZkIgnitePaths.join(zkPaths.customEvtsDir, evtPath)); } /** @@ -2690,13 +2715,13 @@ private void processNewEvents(final ZkDiscoveryEventsData evtsData) throws Excep break; } - case ZkDiscoveryEventData.ZK_EVT_NODE_FAILED: { + case ZkDiscoveryEventData.ZK_EVT_NODE_LEFT: { if (!rtState.joined) break; evtProcessed = true; - notifyNodeFail((ZkDiscoveryNodeFailEventData)evtData); + notifyNodeLeave((ZkDiscoveryNodeLeaveEventData)evtData); break; } @@ -3204,7 +3229,7 @@ private void deleteAliveNodes(@Nullable GridLongList internalIds) throws Excepti String alive = alives.get(i); if (internalIds.contains(ZkIgnitePaths.aliveInternalId(alive))) - rtState.zkClient.deleteIfExistsAsync(zkPaths.aliveNodesDir + "/" + alive); + rtState.zkClient.deleteIfExistsAsync(ZkIgnitePaths.join(zkPaths.aliveNodesDir, alive)); } } @@ -3532,8 +3557,8 @@ private void notifyNodeJoin(ZkJoinedNodeEvtData joinedEvtData, ZkJoiningNodeData /** * @param evtData Event data. */ - private void notifyNodeFail(final ZkDiscoveryNodeFailEventData evtData) { - notifyNodeFail(evtData.failedNodeInternalId(), evtData.topologyVersion()); + private void notifyNodeLeave(final ZkDiscoveryNodeLeaveEventData evtData) { + notifyNodeLeave(evtData.leftNodeInternalId(), evtData.topologyVersion(), evtData.failed()); } /** @@ -3541,11 +3566,23 @@ private void notifyNodeFail(final ZkDiscoveryNodeFailEventData evtData) { * @param topVer Topology version. 
*/ private void notifyNodeFail(long nodeInternalOrder, long topVer) { - final ZookeeperClusterNode failedNode = rtState.top.removeNode(nodeInternalOrder); + notifyNodeLeave(nodeInternalOrder, topVer, true); + } - assert failedNode != null && !failedNode.isLocal() : failedNode; + /** + * @param nodeInternalOrder Node order. + * @param topVer Topology version. + * @param failed {@code true} if node failed, {@code false} otherwise. + */ + private void notifyNodeLeave(long nodeInternalOrder, long topVer, boolean failed) { + final ZookeeperClusterNode leftNode = rtState.top.removeNode(nodeInternalOrder); - PingFuture pingFut = pingFuts.get(failedNode.order()); + assert leftNode != null && !leftNode.isLocal() : leftNode; + + if (!failed && rtState.crd) + rtState.zkClient.deleteIfExistsAsync(zkPaths.nodeStoppedFlag(leftNode)); + + PingFuture pingFut = pingFuts.get(leftNode.order()); if (pingFut != null) pingFut.onDone(false); @@ -3554,9 +3591,9 @@ private void notifyNodeFail(long nodeInternalOrder, long topVer) { lsnr.onDiscovery( new DiscoveryNotification( - EVT_NODE_FAILED, + failed ? EVT_NODE_FAILED : EVT_NODE_LEFT, topVer, - failedNode, + leftNode, topSnapshot, Collections.emptyMap(), null, @@ -3564,7 +3601,10 @@ private void notifyNodeFail(long nodeInternalOrder, long topVer) { ) ).get(); - stats.onNodeFailed(); + if (failed) + stats.onNodeFailed(); + else + stats.onNodeLeft(); } /** @@ -3680,11 +3720,11 @@ private void handleProcessedEvents(String ctx) throws Exception { break; } - case ZkDiscoveryEventData.ZK_EVT_NODE_FAILED: { + case ZkDiscoveryEventData.ZK_EVT_NODE_LEFT: { if (log.isDebugEnabled()) - log.debug("All nodes processed node fail [evtData=" + evtData + ']'); + log.debug("All nodes processed node left [evtData=" + evtData + ']'); - break; // Do not need addition cleanup. 
+ break; } } @@ -3899,7 +3939,7 @@ void runInWorkerThread(Runnable c) { * */ public void stop() { - stop0(new IgniteSpiException("Node stopped")); + stop0(null); } /** @@ -3913,6 +3953,14 @@ private void stop0(Throwable e) { if (rtState.zkClient != null && rtState.locNodeZkPath != null && rtState.zkClient.connected()) { try { + if (e == null && rtState.joined) { + rtState.zkClient.createIfNeededNoRetry( + zkPaths.nodeStoppedFlag(locNode), + null, + PERSISTENT + ); + } + rtState.zkClient.deleteIfExistsNoRetry(rtState.locNodeZkPath, -1); } catch (Exception err) { diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryStatistics.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryStatistics.java index cc95dd3fe9f0e..21b62c49d43cb 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryStatistics.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryStatistics.java @@ -16,6 +16,7 @@ */ package org.apache.ignite.spi.discovery.zk.internal; +import java.util.concurrent.atomic.LongAdder; import org.apache.ignite.internal.util.typedef.internal.S; /** @@ -23,42 +24,55 @@ */ public class ZookeeperDiscoveryStatistics { /** */ - private long joinedNodesCnt; + private final LongAdder joinedNodesCnt = new LongAdder(); /** */ - private long failedNodesCnt; + private final LongAdder failedNodesCnt = new LongAdder(); + + /** */ + private final LongAdder leftNodesCnt = new LongAdder(); /** Communication error count. 
*/ - private long commErrCnt; + private final LongAdder commErrCnt = new LongAdder(); /** */ public long joinedNodesCnt() { - return joinedNodesCnt; + return joinedNodesCnt.longValue(); } /** */ public long failedNodesCnt() { - return failedNodesCnt; + return failedNodesCnt.longValue(); + } + + /** */ + public long leftNodesCnt() { + return leftNodesCnt.longValue(); } /** */ public long commErrorCount() { - return commErrCnt; + return commErrCnt.longValue(); } /** */ public void onNodeJoined() { - joinedNodesCnt++; + joinedNodesCnt.increment(); } /** */ public void onNodeFailed() { - failedNodesCnt++; + failedNodesCnt.increment(); + } + + /** */ + public void onNodeLeft() { + leftNodesCnt.increment(); } /** */ public void onCommunicationError() { - commErrCnt++; + commErrCnt.increment(); } /** {@inheritDoc} */ diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite1.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite1.java index d5be881792556..03d6a43904a92 100644 --- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite1.java +++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite1.java @@ -24,6 +24,7 @@ import org.apache.ignite.spi.discovery.zk.internal.ZookeeperDiscoveryConcurrentStartAndStartStopTest; import org.apache.ignite.spi.discovery.zk.internal.ZookeeperDiscoveryCustomEventsTest; import org.apache.ignite.spi.discovery.zk.internal.ZookeeperDiscoveryMiscTest; +import org.apache.ignite.spi.discovery.zk.internal.ZookeeperDiscoveryRandomStopOrFailConcurrentTest; import org.apache.ignite.spi.discovery.zk.internal.ZookeeperDiscoverySegmentationAndConnectionRestoreTest; import org.apache.ignite.spi.discovery.zk.internal.ZookeeperDiscoverySpiSaslFailedAuthTest; import org.apache.ignite.spi.discovery.zk.internal.ZookeeperDiscoverySpiSaslSuccessfulAuthTest; @@ -43,6 
+44,7 @@ ZookeeperValidatePathsTest.class, ZookeeperDiscoverySegmentationAndConnectionRestoreTest.class, ZookeeperDiscoveryConcurrentStartAndStartStopTest.class, + ZookeeperDiscoveryRandomStopOrFailConcurrentTest.class, ZookeeperDiscoveryTopologyChangeAndReconnectTest.class, ZookeeperDiscoveryCommunicationFailureTest.class, ZookeeperDiscoveryClientDisconnectTest.class, diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryConcurrentStartAndStartStopTest.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryConcurrentStartAndStartStopTest.java index 1572af532bd25..cea59751eacec 100644 --- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryConcurrentStartAndStartStopTest.java +++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryConcurrentStartAndStartStopTest.java @@ -180,7 +180,9 @@ private void concurrentStartStop(final int initNodes) throws Exception { }, NODES, "stop-node"); for (int j = 0; j < NODES; j++) - expEvts[j] = ZookeeperDiscoverySpiTestHelper.failEvent(++topVer); + expEvts[j] = ZookeeperDiscoverySpiTestHelper.leftEvent(++topVer, false); + + helper.checkEvents(ignite(0), evts, expEvts); checkEventsConsistency(); } @@ -199,6 +201,8 @@ public void testClusterRestart() throws Exception { startGridsMultiThreaded(3, false); + checkZkNodesCleanup(); + waitForTopology(3); } diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryMiscTest.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryMiscTest.java index c644a4b1ff5a9..f271bad61d05f 100644 --- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryMiscTest.java +++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryMiscTest.java @@ 
-255,6 +255,8 @@ public void testMbeanGetCoordinator() throws Exception { stopGrid(0); + waitForTopology(2); + assertEquals(mbean.getCoordinator(), srv2.localNode().id()); assertEquals(mbean.getCoordinatorNodeFormatted(), String.valueOf(srv2.localNode())); } diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryRandomStopOrFailConcurrentTest.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryRandomStopOrFailConcurrentTest.java new file mode 100644 index 0000000000000..0f9935b2ec6a8 --- /dev/null +++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryRandomStopOrFailConcurrentTest.java @@ -0,0 +1,264 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.spi.discovery.zk.internal; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.Random; +import java.util.UUID; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.apache.ignite.Ignite; +import org.apache.ignite.cluster.ClusterState; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.util.typedef.G; +import org.apache.ignite.spi.discovery.DiscoverySpiMBean; +import org.apache.ignite.spi.discovery.zk.ZookeeperDiscoverySpi; +import org.apache.ignite.spi.discovery.zk.ZookeeperDiscoverySpiMBean; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.zookeeper.ZkTestClientCnxnSocketNIO; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +/** + * + */ +@RunWith(Parameterized.class) +public class ZookeeperDiscoveryRandomStopOrFailConcurrentTest extends ZookeeperDiscoverySpiTestBase { + /** */ + private static final int NUM_CLIENTS = 10; + + /** */ + private static final int NUM_SERVERS = 10; + + /** */ + private static final int ZK_SESSION_TIMEOUT = 5_000; + + /** */ + @Parameterized.Parameters(name = "stop mode = {0}, with crd = {1}") + public static Collection parameters() { + List params = new ArrayList<>(); + + for (StopMode stopMode: StopMode.values()) { + params.add(new Object[] {stopMode, true}); + params.add(new Object[] {stopMode, false}); + } + + return params; + } + + /** */ + @Parameterized.Parameter(0) + public StopMode stopMode; + + /** */ + @Parameterized.Parameter(1) + public boolean killCrd; + + /** */ + private final AtomicLong nodesLeft = new AtomicLong(0); + + /** */ + private final AtomicLong nodesFailed = new AtomicLong(0); + + 
/** */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + cfg.setClusterStateOnStart(ClusterState.INACTIVE); + + return cfg; + } + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + sesTimeout = ZK_SESSION_TIMEOUT; + + testSockNio = true; + + clientReconnectDisabled = true; + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + for (Ignite g: G.allGrids()) { + ZkTestClientCnxnSocketNIO cnxn = ZkTestClientCnxnSocketNIO.forNode(g); + + if (cnxn != null) + cnxn.allowConnect(); + } + + super.afterTest(); + } + + /** {@inheritDoc} */ + @Override protected void waitForTopology(int expSize) throws Exception { + assertTrue(GridTestUtils.waitForCondition(() -> grid(0).cluster().nodes().size() == expSize, 30_000)); + } + + /** + * @throws Exception If failed. + */ + @Test + public void testStopOrFailConcurrently() throws Exception { + IgniteEx client = startServersAndClients(NUM_SERVERS, NUM_CLIENTS); + + int crd = getCoordinatorIndex(); + + List srvToStop = IntStream.range(1, NUM_SERVERS + 1) + .filter(j -> j != crd) + .boxed() + .collect(Collectors.collectingAndThen(Collectors.toList(), list -> { + Collections.shuffle(list); + + return list.subList(0, NUM_SERVERS / 2); + })); + + if (killCrd) + srvToStop.set(0, crd); + + List cliToStop = IntStream.range(NUM_SERVERS + 1, NUM_CLIENTS + NUM_SERVERS) + .boxed() + .collect(Collectors.collectingAndThen(Collectors.toList(), list -> { + Collections.shuffle(list); + + return list.subList(0, NUM_CLIENTS / 2); + })); + + srvToStop.addAll(cliToStop); + + stopOrKillMultithreaded(srvToStop); + + waitForTopology(NUM_CLIENTS + NUM_SERVERS - srvToStop.size()); + + checkStopFlagsDeleted(10_000); + + DiscoverySpiMBean mBean = getMbean(client); + + GridTestUtils.waitForCondition(() -> nodesLeft.get() == mBean.getNodesLeft(), 
10_000); + GridTestUtils.waitForCondition(() -> nodesFailed.get() == mBean.getNodesFailed(), 10_000); + } + + /** */ + private void checkStopFlagsDeleted(long timeout) throws Exception { + ZookeeperClient zkClient = new ZookeeperClient(getTestResources().getLogger(), + zkCluster.getConnectString(), + 30_000, + null); + + ZkIgnitePaths paths = new ZkIgnitePaths(ZookeeperDiscoverySpiTestHelper.IGNITE_ZK_ROOT); + + GridTestUtils.waitForCondition(() -> { + try { + return zkClient.getChildren(paths.stoppedNodesFlagsDir).isEmpty(); + } + catch (Exception e) { + if (e instanceof InterruptedException) + Thread.currentThread().interrupt(); + + throw new RuntimeException("Failed to wait for stopped nodes flags", e); + } + }, timeout); + } + + /** */ + private void stopOrKillMultithreaded(final List stopIndices) throws Exception { + log.info("Stopping or killing nodes by idx: " + stopIndices.toString()); + + final StopMode mode = stopMode; + + GridTestUtils.runMultiThreaded((idx) -> { + try { + Random rnd = ThreadLocalRandom.current(); + + int nodeIdx = stopIndices.get(idx); + + if (mode == StopMode.FAIL_ONLY || (mode == StopMode.RANDOM && rnd.nextBoolean())) { + ZkTestClientCnxnSocketNIO c0 = ZkTestClientCnxnSocketNIO.forNode(grid(nodeIdx)); + + c0.closeSocket(true); + + nodesFailed.incrementAndGet(); + } + else { + stopGrid(nodeIdx); + + nodesLeft.incrementAndGet(); + } + } + catch (Exception e) { + e.printStackTrace(); + + fail(e.getMessage()); + } + }, stopIndices.size(), "stop-node"); + } + + /** */ + private int getCoordinatorIndex() { + UUID crdId = getMbean(grid(0)).getCoordinator(); + + Optional crdIdx = grid(0).cluster().nodes().stream().filter(n -> n.id().equals(crdId)) + .map(n -> getTestIgniteInstanceIndex((String)n.consistentId())).findAny(); + + assertTrue(crdIdx.isPresent()); + + return crdIdx.get(); + } + + /** */ + private DiscoverySpiMBean getMbean(IgniteEx grid) { + ZookeeperDiscoverySpiMBean bean = getMxBean(grid.context().igniteInstanceName(), "SPIs", + 
ZookeeperDiscoverySpi.class, ZookeeperDiscoverySpiMBean.class); + + assertNotNull(bean); + + return bean; + } + + /** */ + private IgniteEx startServersAndClients(int numServers, int numClients) throws Exception { + startGridsMultiThreaded(1, numServers); + startClientGridsMultiThreaded(numServers + 1, numClients - 1); + + IgniteEx res = startClientGrid(0); + + waitForTopology(numClients + numServers); + + // Set initial value of counters from MBean. + nodesLeft.addAndGet(getMbean(res).getNodesLeft()); + nodesFailed.addAndGet(getMbean(res).getNodesFailed()); + + return res; + } + + enum StopMode { + STOP_ONLY, + FAIL_ONLY, + RANDOM + } +} diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySegmentationAndConnectionRestoreTest.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySegmentationAndConnectionRestoreTest.java index 49e39a881909b..d33932b53121d 100644 --- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySegmentationAndConnectionRestoreTest.java +++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySegmentationAndConnectionRestoreTest.java @@ -391,7 +391,7 @@ private void connectionRestore_NonCoordinator(boolean failWhenDisconnected) thro closeZkClient(spi); - helper.checkEvents(node0, evts, ZookeeperDiscoverySpiTestHelper.failEvent(4)); + helper.checkEvents(node0, evts, ZookeeperDiscoverySpiTestHelper.leftEvent(4, true)); } c1.allowConnect(); @@ -399,7 +399,7 @@ private void connectionRestore_NonCoordinator(boolean failWhenDisconnected) thro helper.checkEvents(ignite(1), evts, ZookeeperDiscoverySpiTestHelper.joinEvent(3)); if (failWhenDisconnected) { - helper.checkEvents(ignite(1), evts, ZookeeperDiscoverySpiTestHelper.failEvent(4)); + helper.checkEvents(ignite(1), evts, ZookeeperDiscoverySpiTestHelper.leftEvent(4, true)); 
IgnitionEx.stop(getTestIgniteInstanceName(2), true, ShutdownPolicy.IMMEDIATE, true); } diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiTestBase.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiTestBase.java index d23aa97dba9c8..bed11bd696835 100644 --- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiTestBase.java +++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiTestBase.java @@ -80,7 +80,10 @@ import org.apache.ignite.spi.discovery.zk.ZookeeperDiscoverySpiTestUtil; import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.ZKUtil; import org.apache.zookeeper.ZkTestClientCnxnSocketNIO; +import org.apache.zookeeper.ZooKeeper; import org.jetbrains.annotations.Nullable; import static java.util.concurrent.TimeUnit.SECONDS; @@ -566,6 +569,93 @@ void stopZkCluster() { } } + /** + * @throws Exception If failed. + */ + protected void checkZkNodesCleanup() throws Exception { + final ZookeeperClient zkClient = new ZookeeperClient(getTestResources().getLogger(), + zkCluster.getConnectString(), + 30_000, + null); + + final String basePath = ZookeeperDiscoverySpiTestHelper.IGNITE_ZK_ROOT + "/"; + + final String aliveDir = basePath + ZkIgnitePaths.ALIVE_NODES_DIR + "/"; + + try { + List znodes = listSubTree(zkClient.zk(), ZookeeperDiscoverySpiTestHelper.IGNITE_ZK_ROOT); + + boolean foundAlive = false; + + for (String znode : znodes) { + if (znode.startsWith(aliveDir)) { + foundAlive = true; + + break; + } + } + + assertTrue(foundAlive); // Sanity check to make sure we check correct directory. 
+ + assertTrue("Failed to wait for unused znodes cleanup", GridTestUtils.waitForCondition(new GridAbsPredicate() { + @Override public boolean apply() { + try { + List znodes = listSubTree(zkClient.zk(), ZookeeperDiscoverySpiTestHelper.IGNITE_ZK_ROOT); + + for (String znode : znodes) { + if (znode.startsWith(aliveDir) || znode.length() < basePath.length()) + continue; + + znode = znode.substring(basePath.length()); + + if (!znode.contains("/")) // Ignore roots. + continue; + + // TODO ZK: https://issues.apache.org/jira/browse/IGNITE-8193 + if (znode.startsWith("jd/")) + continue; + + log.info("Found unexpected znode: " + znode); + + return false; + } + + return true; + } + catch (Exception e) { + error("Unexpected error: " + e, e); + + fail("Unexpected error: " + e); + } + + return false; + } + }, 10_000)); + } + finally { + zkClient.close(); + } + } + + /** + * @param zk ZooKeeper client. + * @param root Root path. + * @return All children znodes for given path. + * @throws Exception If failed/ + */ + private List listSubTree(ZooKeeper zk, String root) throws Exception { + for (int i = 0; i < 30; i++) { + try { + return ZKUtil.listSubTreeBFS(zk, root); + } + catch (KeeperException.NoNodeException e) { + info("NoNodeException when get znodes, will retry: " + e); + } + } + + throw new Exception("Failed to get znodes: " + root); + } + /** */ private CacheConfiguration getCacheConfiguration() { CacheConfiguration ccfg = new CacheConfiguration(DEFAULT_CACHE_NAME); diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiTestHelper.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiTestHelper.java index 32e3855b0ca0a..be5f2e6bfdda0 100644 --- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiTestHelper.java +++ 
b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiTestHelper.java @@ -135,8 +135,10 @@ static DiscoveryEvent joinEvent(long topVer) { * @param topVer Topology version. * @return Expected event instance. */ - static DiscoveryEvent failEvent(long topVer) { - DiscoveryEvent expEvt = new DiscoveryEvent(null, null, EventType.EVT_NODE_FAILED, null); + static DiscoveryEvent leftEvent(long topVer, boolean fail) { + int eventType = fail ? EventType.EVT_NODE_FAILED : EventType.EVT_NODE_LEFT; + + DiscoveryEvent expEvt = new DiscoveryEvent(null, null, eventType, null); expEvt.topologySnapshot(topVer, null); diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryTopologyChangeAndReconnectTest.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryTopologyChangeAndReconnectTest.java index ba17a2fdca42b..f38baa7fc0355 100644 --- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryTopologyChangeAndReconnectTest.java +++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryTopologyChangeAndReconnectTest.java @@ -41,7 +41,6 @@ import org.apache.ignite.internal.processors.cache.GridCacheAbstractFullApiSelfTest; import org.apache.ignite.internal.processors.query.DummyQueryIndexing; import org.apache.ignite.internal.processors.query.GridQueryProcessor; -import org.apache.ignite.internal.util.lang.GridAbsPredicate; import org.apache.ignite.internal.util.typedef.G; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteInClosure; @@ -49,8 +48,6 @@ import org.apache.ignite.spi.IgniteSpiException; import org.apache.ignite.spi.discovery.zk.ZookeeperDiscoverySpi; import org.apache.ignite.testframework.GridTestUtils; -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.ZKUtil; import 
org.apache.zookeeper.ZkTestClientCnxnSocketNIO; import org.apache.zookeeper.ZooKeeper; import org.junit.Ignore; @@ -237,74 +234,6 @@ public void testRandomTopologyChanges() throws Exception { randomTopologyChanges(false, false); } - /** - * @throws Exception If failed. - */ - private void checkZkNodesCleanup() throws Exception { - final ZookeeperClient zkClient = new ZookeeperClient(getTestResources().getLogger(), - zkCluster.getConnectString(), - 30_000, - null); - - final String basePath = ZookeeperDiscoverySpiTestHelper.IGNITE_ZK_ROOT + "/"; - - final String aliveDir = basePath + ZkIgnitePaths.ALIVE_NODES_DIR + "/"; - - try { - List znodes = listSubTree(zkClient.zk(), ZookeeperDiscoverySpiTestHelper.IGNITE_ZK_ROOT); - - boolean foundAlive = false; - - for (String znode : znodes) { - if (znode.startsWith(aliveDir)) { - foundAlive = true; - - break; - } - } - - assertTrue(foundAlive); // Sanity check to make sure we check correct directory. - - assertTrue("Failed to wait for unused znodes cleanup", GridTestUtils.waitForCondition(new GridAbsPredicate() { - @Override public boolean apply() { - try { - List znodes = listSubTree(zkClient.zk(), ZookeeperDiscoverySpiTestHelper.IGNITE_ZK_ROOT); - - for (String znode : znodes) { - if (znode.startsWith(aliveDir) || znode.length() < basePath.length()) - continue; - - znode = znode.substring(basePath.length()); - - if (!znode.contains("/")) // Ignore roots. - continue; - - // TODO ZK: https://issues.apache.org/jira/browse/IGNITE-8193 - if (znode.startsWith("jd/")) - continue; - - log.info("Found unexpected znode: " + znode); - - return false; - } - - return true; - } - catch (Exception e) { - error("Unexpected error: " + e, e); - - fail("Unexpected error: " + e); - } - - return false; - } - }, 10_000)); - } - finally { - zkClient.close(); - } - } - /** * @throws Exception If failed. 
*/ @@ -766,25 +695,6 @@ else if (evt.type() == EVT_CLIENT_NODE_RECONNECTED) { client.events().stopLocalListen(p); } - /** - * @param zk ZooKeeper client. - * @param root Root path. - * @return All children znodes for given path. - * @throws Exception If failed/ - */ - private List listSubTree(ZooKeeper zk, String root) throws Exception { - for (int i = 0; i < 30; i++) { - try { - return ZKUtil.listSubTreeBFS(zk, root); - } - catch (KeeperException.NoNodeException e) { - info("NoNodeException when get znodes, will retry: " + e); - } - } - - throw new Exception("Failed to get znodes: " + root); - } - /** * @param cacheName Cache name. * @return Configuration. diff --git a/modules/zookeeper/src/test/java/org/apache/zookeeper/ZkTestClientCnxnSocketNIO.java b/modules/zookeeper/src/test/java/org/apache/zookeeper/ZkTestClientCnxnSocketNIO.java index 2b741a1926014..47fe0acb31fca 100644 --- a/modules/zookeeper/src/test/java/org/apache/zookeeper/ZkTestClientCnxnSocketNIO.java +++ b/modules/zookeeper/src/test/java/org/apache/zookeeper/ZkTestClientCnxnSocketNIO.java @@ -114,7 +114,8 @@ public ZkTestClientCnxnSocketNIO(ZKClientConfig clientCfg) throws IOException { * */ public void allowConnect() { - assert blockConnectLatch != null && blockConnectLatch.getCount() == 1 : blockConnectLatch; + if (blockConnectLatch == null || blockConnectLatch.getCount() == 0) + return; log.info("ZkTestClientCnxnSocketNIO allowConnect [node=" + nodeName + ']'); From 557830a63b4ce7a7a9a50101c0424ca2a1a58ee7 Mon Sep 17 00:00:00 2001 From: sergeyuttsel Date: Mon, 2 Nov 2020 22:14:49 +0300 Subject: [PATCH 011/110] IGNITE-13217 Fixed partition loss detection on client nodes. 
Fixes #7994 Signed-off-by: Slava Koptilin --- .../cache/CacheAffinitySharedManager.java | 24 +-- .../dht/ClientCacheDhtTopologyFuture.java | 95 --------- .../GridDhtPartitionsExchangeFuture.java | 13 ++ .../topology/GridClientPartitionTopology.java | 3 +- .../IgniteClientCacheStartFailoverTest.java | 4 - .../CacheDetectLostPartitionsTest.java | 180 ++++++++++++++++++ .../testsuites/IgniteCacheTestSuite2.java | 2 + ...vccAbstractSqlCoordinatorFailoverTest.java | 3 - 8 files changed, 209 insertions(+), 115 deletions(-) delete mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/ClientCacheDhtTopologyFuture.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheDetectLostPartitionsTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java index 05fa72ef0c35a..66aa98d1cadfc 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java @@ -46,7 +46,6 @@ import org.apache.ignite.events.DiscoveryEvent; import org.apache.ignite.events.Event; import org.apache.ignite.internal.IgniteInternalFuture; -import org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException; import org.apache.ignite.internal.events.DiscoveryCustomEvent; import org.apache.ignite.internal.managers.discovery.DiscoCache; import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage; @@ -54,7 +53,6 @@ import org.apache.ignite.internal.processors.affinity.AffinityAssignment; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.affinity.GridAffinityAssignmentCache; -import 
org.apache.ignite.internal.processors.cache.distributed.dht.ClientCacheDhtTopologyFuture; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtAffinityAssignmentResponse; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtAssignmentFetchFuture; import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.CacheGroupAffinityMessage; @@ -581,30 +579,32 @@ else if (!crd && !fetchFuts.containsKey(grp.groupId())) { fetchFut); GridDhtPartitionFullMap partMap; - ClientCacheDhtTopologyFuture topFut; if (res != null) { partMap = res.partitionMap(); assert partMap != null : res; - - topFut = new ClientCacheDhtTopologyFuture(topVer); } - else { + else partMap = new GridDhtPartitionFullMap(cctx.localNodeId(), cctx.localNode().order(), 1); - topFut = new ClientCacheDhtTopologyFuture(topVer, - new ClusterTopologyServerNotFoundException("All server nodes left grid.")); - } + GridDhtPartitionsExchangeFuture exchFut = context().exchange().lastFinishedFuture(); - grp.topology().updateTopologyVersion(topFut, + grp.topology().updateTopologyVersion(exchFut, discoCache, -1, false); - grp.topology().update(topVer, partMap, null, Collections.emptySet(), null, null, null, null); + GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(grp.groupId()); + + Set lostParts = clientTop == null ? 
null : clientTop.lostPartitions(); + + grp.topology().update(topVer, partMap, null, Collections.emptySet(), null, null, null, lostParts); + + if (clientTop == null) + grp.topology().detectLostPartitions(topVer, exchFut); - topFut.validate(grp, discoCache.allNodes()); + exchFut.validate(grp); } catch (IgniteCheckedException e) { cctx.cache().closeCaches(startedCaches, false); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/ClientCacheDhtTopologyFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/ClientCacheDhtTopologyFuture.java deleted file mode 100644 index 8fae639a1fa27..0000000000000 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/ClientCacheDhtTopologyFuture.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.ignite.internal.processors.cache.distributed.dht; - -import java.util.Collection; -import org.apache.ignite.IgniteCheckedException; -import org.apache.ignite.cluster.ClusterNode; -import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; -import org.apache.ignite.internal.processors.cache.CacheGroupContext; -import org.apache.ignite.internal.util.typedef.internal.U; - -/** - * Topology future created for client cache start. - */ -public class ClientCacheDhtTopologyFuture extends GridDhtTopologyFutureAdapter { - /** */ - final AffinityTopologyVersion topVer; - - /** - * @param topVer Topology version. - */ - public ClientCacheDhtTopologyFuture(AffinityTopologyVersion topVer) { - assert topVer != null; - - this.topVer = topVer; - - onDone(topVer); - } - - /** - * @param topVer Topology version. - * @param e Error. - */ - public ClientCacheDhtTopologyFuture(AffinityTopologyVersion topVer, IgniteCheckedException e) { - assert e != null; - assert topVer != null; - - this.topVer = topVer; - - onDone(e); - } - - /** - * @param grp Cache group. - * @param topNodes Topology nodes. 
- */ - public void validate(CacheGroupContext grp, Collection topNodes) { - grpValidRes = U.newHashMap(1); - - CacheGroupValidation valRes = validateCacheGroup(grp, topNodes); - - if (!valRes.isValid() || valRes.hasLostPartitions()) - grpValidRes.put(grp.groupId(), valRes); - } - - /** {@inheritDoc} */ - @Override public AffinityTopologyVersion initialVersion() { - return topVer; - } - - /** {@inheritDoc} */ - @Override public boolean exchangeDone() { - throw new UnsupportedOperationException(); - } - - /** {@inheritDoc} */ - @Override public AffinityTopologyVersion topologyVersion() { - return topVer; - } - - /** {@inheritDoc} */ - @Override public boolean changedAffinity() { - return true; - } - - /** {@inheritDoc} */ - @Override public String toString() { - return "ClientCacheDhtTopologyFuture [topVer=" + topVer + ']'; - } -} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java index e9f4d05e720a1..4a9435c378c93 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java @@ -2672,6 +2672,19 @@ private String exchangeTimingsLogMessage(String header, List timings) { return false; } + /** + * @param grp Cache group. 
+ */ + public void validate(CacheGroupContext grp) { + if (grpValidRes == null) + grpValidRes = new ConcurrentHashMap<>(); + + CacheGroupValidation valRes = validateCacheGroup(grp, events().lastEvent().topologyNodes()); + + if (!valRes.isValid() || valRes.hasLostPartitions()) + grpValidRes.put(grp.groupId(), valRes); + } + /** * Updates the {@link GridMetricManager#PME_OPS_BLOCKED_DURATION_HISTOGRAM} and {@link * GridMetricManager#PME_DURATION_HISTOGRAM} metrics if needed. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridClientPartitionTopology.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridClientPartitionTopology.java index ad0a52b2a9d8c..4df827338b327 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridClientPartitionTopology.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridClientPartitionTopology.java @@ -849,7 +849,8 @@ private boolean shouldOverridePartitionMap(GridDhtPartitionMap currentMap, GridD consistencyCheck(); - this.lostParts = lostParts == null ? null : new TreeSet<>(lostParts); + if (exchangeVer != null) + this.lostParts = lostParts == null ? 
null : new TreeSet<>(lostParts); if (log.isDebugEnabled()) log.debug("Partition map after full update: " + fullMapString()); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheStartFailoverTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheStartFailoverTest.java index e475cbc5ec5b4..2626459644027 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheStartFailoverTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheStartFailoverTest.java @@ -131,8 +131,6 @@ private void clientStartCoordinatorFails(CacheAtomicityMode atomicityMode) throw } }, "start-cache"); - U.sleep(1000); - assertFalse(fut.isDone()); stopGrid(0); @@ -201,8 +199,6 @@ private void clientStartLastServerFails(CacheAtomicityMode atomicityMode) throws } }, "start-cache"); - U.sleep(1000); - assertFalse(fut.isDone()); stopGrid(1); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheDetectLostPartitionsTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheDetectLostPartitionsTest.java new file mode 100644 index 0000000000000..d7143d00dad55 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheDetectLostPartitionsTest.java @@ -0,0 +1,180 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.distributed; + +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteException; +import org.apache.ignite.cache.PartitionLossPolicy; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.lang.IgnitePredicate; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; + +/** */ +public class CacheDetectLostPartitionsTest extends GridCommonAbstractTest { + /** */ + private static final String TEST_CACHE_NAME = "testcache"; + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + + super.afterTest(); + } + + /** + * Test detect lost partitions on a client node when the cache init after partitions was lost. 
+ * @throws Exception + */ + @Test + public void testDetectLostPartitionsOnClient() throws Exception { + IgniteEx ig = startGrids(2); + + awaitPartitionMapExchange(); + + IgniteCache cache1 = ig.createCache(getCacheConfig(TEST_CACHE_NAME + 1)); + + IgniteCache cache2 = ig.createCache(getCacheConfig(TEST_CACHE_NAME + 2)); + + for (int i = 0; i < 1000; i++) { + cache1.put(i, i); + + cache2.put(i, i); + } + + IgniteEx client = startClientGrid(2); + + stopGrid(1); + + cache1 = client.cache(TEST_CACHE_NAME + 1); + checkCache(cache1); + + cache2 = client.cache(TEST_CACHE_NAME + 2); + checkCache(cache2); + + cache1.close(); + cache2.close(); + + checkCache(client.cache(TEST_CACHE_NAME + 1)); + checkCache(client.cache(TEST_CACHE_NAME + 2)); + } + + /** + * Test detect lost partitions on a client node when the cache was closed before partitions was lost. + * @throws Exception + */ + @Test + public void testDetectLostPartitionsOnClientWithClosedCache() throws Exception { + IgniteEx ig = startGrids(2); + + awaitPartitionMapExchange(); + + IgniteCache cacheSrv = ig.createCache(getCacheConfig(TEST_CACHE_NAME)); + + for (int i = 0; i < 1000; i++) + cacheSrv.put(i, i); + + IgniteEx client = startClientGrid(2); + + IgniteCache cacheCl = client.cache(TEST_CACHE_NAME); + + cacheCl.close(); + + stopGrid(1); + + cacheCl = client.cache(TEST_CACHE_NAME); + + checkCache(cacheCl); + } + + /** + * Test detect lost partitions on a server node which doesn't have partitions when the cache was closed + * before partitions was lost. 
+ * @throws Exception + */ + @Test + public void testDetectLostPartitionsOnServerWithClosedCache() throws Exception { + startGrids(3); + + awaitPartitionMapExchange(); + + IgniteCache cacheSrv1 = grid(1).createCache( + getCacheConfig(TEST_CACHE_NAME) + .setNodeFilter(new NodeConsistentIdFilter(grid(2).localNode().consistentId())) + ); + + for (int i = 0; i < 1000; i++) + cacheSrv1.put(i, i); + + IgniteEx ig2 = grid(2); + + IgniteCache cacheSrv2 = ig2.cache(TEST_CACHE_NAME); + + cacheSrv2.close(); + + stopGrid(1); + + cacheSrv2 = ig2.cache(TEST_CACHE_NAME); + + checkCache(cacheSrv2); + } + + /** */ + private CacheConfiguration getCacheConfig(String cacheName) { + return new CacheConfiguration<>(cacheName) + .setPartitionLossPolicy(PartitionLossPolicy.READ_WRITE_SAFE); + } + + /** */ + private void checkCache(IgniteCache cache) { + assertFalse(cache.lostPartitions().isEmpty()); + + GridTestUtils.assertThrows(null, () -> { + for (int i = 0; i < 1000; i++) + cache.get(i); + }, + IgniteException.class, "partition data has been lost"); + + GridTestUtils.assertThrows(null, () -> { + for (int i = 0; i < 1000; i++) + cache.put(i, i); + }, + IgniteException.class, "partition data has been lost"); + } + + /** Filter by consistent id. */ + private static class NodeConsistentIdFilter implements IgnitePredicate { + /** */ + private final Object consistentId; + + /** + * @param consistentId Consistent id where cache should be started. 
+ */ + NodeConsistentIdFilter(Object consistentId) { + this.consistentId = consistentId; + } + + /** {@inheritDoc} */ + @Override public boolean apply(ClusterNode node) { + return !node.consistentId().equals(consistentId); + } + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java index 4935d49054a62..63516f8af6a79 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java @@ -58,6 +58,7 @@ import org.apache.ignite.internal.processors.cache.MemoryPolicyConfigValidationTest; import org.apache.ignite.internal.processors.cache.NoPresentCacheInterceptorOnClientTest; import org.apache.ignite.internal.processors.cache.NonAffinityCoordinatorDynamicStartStopTest; +import org.apache.ignite.internal.processors.cache.distributed.CacheDetectLostPartitionsTest; import org.apache.ignite.internal.processors.cache.distributed.CacheLoadingConcurrentGridStartSelfTest; import org.apache.ignite.internal.processors.cache.distributed.CacheLoadingConcurrentGridStartSelfTestAllowOverwrite; import org.apache.ignite.internal.processors.cache.distributed.CacheLockReleaseNodeLeaveTest; @@ -381,6 +382,7 @@ public static List> suite(Collection ignoredTests) { GridTestUtils.addTestIfNeeded(suite, CachePartitionPartialCountersMapSelfTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, IgniteReflectionFactorySelfTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, NoPresentCacheInterceptorOnClientTest.class, ignoredTests); + GridTestUtils.addTestIfNeeded(suite, CacheDetectLostPartitionsTest.class, ignoredTests); return suite; } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccAbstractSqlCoordinatorFailoverTest.java 
b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccAbstractSqlCoordinatorFailoverTest.java index 87f2c6ae7a9bf..fb6241760f51e 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccAbstractSqlCoordinatorFailoverTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccAbstractSqlCoordinatorFailoverTest.java @@ -31,7 +31,6 @@ import org.apache.ignite.internal.IgniteNodeAttributes; import org.apache.ignite.internal.TestRecordingCommunicationSpi; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtAffinityAssignmentResponse; -import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.transactions.Transaction; @@ -283,8 +282,6 @@ public void testStartLastServerFails() throws Exception { } }, "start-cache"); - U.sleep(1000); - assertFalse(fut.isDone()); stopGrid(1); From 6eb8f7407499fc76a4284d981d8aea5c5de45833 Mon Sep 17 00:00:00 2001 From: ktkalenko Date: Tue, 3 Nov 2020 09:45:30 +0300 Subject: [PATCH 012/110] IGNITE-13650 updateCurrentHandle method call is moved from under assert clause - Fixes #8412. 
Signed-off-by: Sergey Chugunov --- .../cache/persistence/wal/FileWriteAheadLogManager.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java index a92168b9525d6..ff64f5b034156 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java @@ -1297,7 +1297,9 @@ private FileWriteHandle rollOver(FileWriteHandle cur, @Nullable WALRecord rec) t if (next.getSegmentId() - lashCheckpointFileIdx() >= maxSegCountWithoutCheckpoint) cctx.database().forceCheckpoint("too big size of WAL without checkpoint"); - assert updateCurrentHandle(next, hnd) : "Concurrent updates on rollover are not allowed"; + boolean updated = updateCurrentHandle(next, hnd); + + assert updated : "Concurrent updates on rollover are not allowed"; if (walAutoArchiveAfterInactivity > 0) lastRecordLoggedMs.set(0); From 49199695ee4533e312ef8f24e2563ff513da6121 Mon Sep 17 00:00:00 2001 From: Pavel Tupitsyn Date: Tue, 3 Nov 2020 10:30:54 +0300 Subject: [PATCH 013/110] IGNITE-13615 Update .NET thin client feature set documentation --- docs/_docs/thin-client-comparison.csv | 9 +++++---- .../thin-clients/getting-started-with-thin-clients.adoc | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/docs/_docs/thin-client-comparison.csv b/docs/_docs/thin-client-comparison.csv index ee2fe80ab1c5c..232518383cda9 100644 --- a/docs/_docs/thin-client-comparison.csv +++ b/docs/_docs/thin-client-comparison.csv @@ -7,9 +7,10 @@ Async Operations,No,{yes},No,{yes},{yes},{yes} SSL/TLS,{yes},{yes},{yes},{yes},{yes},{yes} Authentication,{yes},{yes},{yes},{yes},{yes},{yes} 
Partition Awareness,{yes},{yes},{yes},{yes},{yes},No -Failover,{yes},No,{yes},{yes},{yes},{yes} -Transactions,{yes},No,No,No,No,No +Failover,{yes},{yes},{yes},{yes},{yes},{yes} +Transactions,{yes},{yes},No,No,No,No Cluster API,{yes},{yes},No,No,No,No -Cluster discovery,No,{yes},No,No,No,No Compute,{yes},{yes},No,No,No,No -Service invocation,{yes},No,No,No,No,No \ No newline at end of file +Service invocation,{yes},{yes},No,No,No,No +Server Discovery,No,{yes},No,No,No,No +Server Discovery in Kubernetes,{yes},No,No,No,No,No \ No newline at end of file diff --git a/docs/_docs/thin-clients/getting-started-with-thin-clients.adoc b/docs/_docs/thin-clients/getting-started-with-thin-clients.adoc index 7860bf49491de..5e0c37ccd2660 100644 --- a/docs/_docs/thin-clients/getting-started-with-thin-clients.adoc +++ b/docs/_docs/thin-clients/getting-started-with-thin-clients.adoc @@ -46,7 +46,7 @@ include::thin-client-comparison.csv[] === Client Connection Failover -All thin clients (except for the .NET thin client) support a connection failover mechanism, whereby the client automatically switches to an available node in case of the current node or connection failure. +All thin clients support a connection failover mechanism, whereby the client automatically switches to an available node in case of the current node or connection failure. For this mechanism to work, you need to provide a list of node addresses you want to use for failover purposes in the client configuration. Refer to the specific client documentation for more details. From baf8b7d3add68347158a989be1b566744293ee5e Mon Sep 17 00:00:00 2001 From: Aleksey Plekhanov Date: Tue, 3 Nov 2020 13:16:07 +0300 Subject: [PATCH 014/110] IGNITE-13657 Fix flaky TxOptimisticDeadlockDetectionCrossCacheTest - Fixes #8419. 
Signed-off-by: Aleksey Plekhanov --- .../TxOptimisticDeadlockDetectionCrossCacheTest.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxOptimisticDeadlockDetectionCrossCacheTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxOptimisticDeadlockDetectionCrossCacheTest.java index 734b4609ab398..e0ce155540c5e 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxOptimisticDeadlockDetectionCrossCacheTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxOptimisticDeadlockDetectionCrossCacheTest.java @@ -78,6 +78,8 @@ public class TxOptimisticDeadlockDetectionCrossCacheTest extends GridCommonAbstr cfg.setCacheConfiguration(ccfg0, ccfg1); + cfg.setIncludeEventTypes(EventType.EVT_CACHE_OBJECT_LOCKED); + return cfg; } From 0baa6b700ef926b14ea71353db47a826a002e207 Mon Sep 17 00:00:00 2001 From: Kirill Gusakov Date: Tue, 3 Nov 2020 16:32:08 +0300 Subject: [PATCH 015/110] IGNITE-10837 Enriched the output of control.sh --baseline command with IPs of baseline and coordinator nodes. 
Fixes #8351 Signed-off-by: Slava Koptilin --- .../internal/commandline/AbstractCommand.java | 35 +++++++ .../internal/commandline/ActivateCommand.java | 2 +- .../internal/commandline/BaselineCommand.java | 42 ++++++++- .../commandline/ClusterChangeTagCommand.java | 2 +- .../ClusterStateChangeCommand.java | 2 +- .../ignite/internal/commandline/Command.java | 16 ++++ .../internal/commandline/CommandHandler.java | 2 +- .../commandline/DeactivateCommand.java | 2 +- .../commandline/ShutdownPolicyCommand.java | 2 +- .../internal/commandline/StateCommand.java | 2 +- .../TracingConfigurationCommand.java | 2 +- .../internal/commandline/TxCommands.java | 2 +- .../internal/commandline/WalCommands.java | 2 +- .../internal/commandline/WarmUpCommand.java | 2 +- .../commandline/cache/CacheCommands.java | 5 +- .../commandline/cache/CacheContention.java | 3 +- .../commandline/cache/CacheDistribution.java | 3 +- .../cache/CacheIndexesForceRebuild.java | 3 +- .../commandline/cache/CacheIndexesList.java | 3 +- .../cache/CacheIndexesRebuildStatus.java | 3 +- .../cache/CacheValidateIndexes.java | 3 +- .../commandline/cache/CacheViewer.java | 3 +- .../cache/CheckIndexInlineSizes.java | 3 +- .../cache/FindAndDeleteGarbage.java | 3 +- .../commandline/cache/IdleVerify.java | 3 +- .../cache/ResetLostPartitions.java | 3 +- .../diagnostic/DiagnosticCommand.java | 3 +- .../diagnostic/PageLocksCommand.java | 3 +- .../encryption/EncryptionCommand.java | 3 +- .../commandline/meta/MetadataCommand.java | 3 +- .../MetadataAbstractSubCommand.java | 3 +- .../meta/subcommands/MetadataHelpCommand.java | 4 +- .../commandline/metric/MetricCommand.java | 3 +- .../commandline/property/PropertyCommand.java | 3 +- .../PropertyAbstractSubCommand.java | 3 +- .../subcommands/PropertyHelpCommand.java | 4 +- .../commandline/query/KillCommand.java | 3 +- .../commandline/snapshot/SnapshotCommand.java | 3 +- .../systemview/SystemViewCommand.java | 3 +- .../ignite/util/GridCommandHandlerTest.java | 73 +++++++++++++-- 
.../apache/ignite/IgniteSystemProperties.java | 13 +++ .../visor/baseline/VisorBaselineNode.java | 91 ++++++++++++++++++- .../baseline/VisorBaselineTaskResult.java | 88 +++++++++++++++++- .../internal/visor/util/VisorTaskUtils.java | 4 +- .../junits/GridAbstractTest.java | 2 + ...mmandHandlerClusterByClassTest_help.output | 5 +- ...ndlerClusterByClassWithSSLTest_help.output | 5 +- 47 files changed, 417 insertions(+), 58 deletions(-) create mode 100644 modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/AbstractCommand.java diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/AbstractCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/AbstractCommand.java new file mode 100644 index 0000000000000..504961ed8e78d --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/AbstractCommand.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline; + +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClientConfiguration; + +/** + * Abstract class for control.sh commands, that support verbose mode. 
+ */ +public abstract class AbstractCommand implements Command { + /** Use verbose mode or not. */ + protected boolean verbose; + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger log, boolean verbose) throws Exception { + this.verbose = verbose; + return execute(clientCfg, log); + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ActivateCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ActivateCommand.java index d1fb112ebf634..d2d12ca88fcef 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ActivateCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ActivateCommand.java @@ -32,7 +32,7 @@ * @deprecated Use {@link ClusterStateChangeCommand} instead. */ @Deprecated -public class ActivateCommand implements Command { +public class ActivateCommand extends AbstractCommand { /** {@inheritDoc} */ @Override public void printUsage(Logger logger) { Command.usage(logger, "Activate cluster (deprecated. 
Use " + SET_STATE.toString() + " instead):", ACTIVATE); diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/BaselineCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/BaselineCommand.java index b377b711382d8..969542bd2879a 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/BaselineCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/BaselineCommand.java @@ -18,12 +18,17 @@ package org.apache.ignite.internal.commandline; import java.util.ArrayList; +import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.UUID; +import java.util.function.Function; import java.util.logging.Logger; +import java.util.stream.Collectors; +import java.util.stream.Stream; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; import org.apache.ignite.internal.client.GridClientNode; @@ -37,8 +42,10 @@ import org.apache.ignite.internal.visor.baseline.VisorBaselineTask; import org.apache.ignite.internal.visor.baseline.VisorBaselineTaskArg; import org.apache.ignite.internal.visor.baseline.VisorBaselineTaskResult; +import org.apache.ignite.internal.visor.util.VisorTaskUtils; import static java.lang.Boolean.TRUE; +import static java.util.Collections.singletonMap; import static org.apache.ignite.internal.commandline.CommandHandler.DELIM; import static org.apache.ignite.internal.commandline.CommandList.BASELINE; import static org.apache.ignite.internal.commandline.CommandLogger.DOUBLE_INDENT; @@ -50,7 +57,7 @@ /** * Commands associated with baseline functionality. */ -public class BaselineCommand implements Command { +public class BaselineCommand extends AbstractCommand { /** Arguments. 
*/ private BaselineArguments baselineArgs; @@ -58,7 +65,8 @@ public class BaselineCommand implements Command { @Override public void printUsage(Logger logger) { final String constistIds = "consistentId1[,consistentId2,....,consistentIdN]"; - Command.usage(logger, "Print cluster baseline topology:", BASELINE); + Command.usage(logger, "Print cluster baseline topology:", BASELINE, + singletonMap("verbose", "Show the full list of node ips."), optional("--verbose")); Command.usage(logger, "Add nodes into baseline topology:", BASELINE, BaselineSubcommands.ADD.text(), constistIds, optional(CMD_AUTO_CONFIRMATION)); Command.usage(logger, "Remove nodes from baseline topology:", BASELINE, BaselineSubcommands.REMOVE.text(), @@ -166,13 +174,38 @@ else if (res.getRemainingTimeToBaselineAdjust() < 0) Map srvs = res.getServers(); // if task runs on a node with VisorBaselineNode of old version (V1) we'll get order=null for all nodes. + Function extractFormattedAddrs = node -> { + Stream sortedByIpHosts = + Optional.ofNullable(node) + .map(addrs -> node.getAddrs()) + .orElse(Collections.emptyList()) + .stream() + .sorted(Comparator + .comparing(resolvedAddr -> new VisorTaskUtils.SortableAddress(resolvedAddr.address()))) + .map(resolvedAddr -> { + if (!resolvedAddr.hostname().equals(resolvedAddr.address())) + return resolvedAddr.hostname() + "/" + resolvedAddr.address(); + else + return resolvedAddr.address(); + }); + if (verbose) { + String hosts = String.join(",", sortedByIpHosts.collect(Collectors.toList())); + + if (!hosts.isEmpty()) + return ", Addresses=" + hosts; + else + return ""; + } else + return sortedByIpHosts.findFirst().map(ip -> ", Address=" + ip).orElse(""); + }; String crdStr = srvs.values().stream() // check for not null .filter(node -> node.getOrder() != null) .min(Comparator.comparing(VisorBaselineNode::getOrder)) // format - .map(crd -> " (Coordinator: ConsistentId=" + crd.getConsistentId() + ", Order=" + crd.getOrder() + ")") + .map(crd -> " (Coordinator: 
ConsistentId=" + crd.getConsistentId() + extractFormattedAddrs.apply(crd) + + ", Order=" + crd.getOrder() + ")") .orElse(""); logger.info("Current topology version: " + res.getTopologyVersion() + crdStr); @@ -190,7 +223,8 @@ else if (res.getRemainingTimeToBaselineAdjust() < 0) String order = srvNode != null ? ", Order=" + srvNode.getOrder() : ""; - logger.info(DOUBLE_INDENT + "ConsistentId=" + node.getConsistentId() + state + order); + logger.info(DOUBLE_INDENT + "ConsistentId=" + node.getConsistentId() + + extractFormattedAddrs.apply(srvNode) + state + order); } logger.info(DELIM); diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ClusterChangeTagCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ClusterChangeTagCommand.java index 58e5f05a6d3dd..f5b8c10218dac 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ClusterChangeTagCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ClusterChangeTagCommand.java @@ -35,7 +35,7 @@ /** * Command to access cluster ID and tag functionality. */ -public class ClusterChangeTagCommand implements Command { +public class ClusterChangeTagCommand extends AbstractCommand { /** */ private static final String ERR_NO_NEW_TAG_PROVIDED = "Please provide new tag."; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ClusterStateChangeCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ClusterStateChangeCommand.java index 33580881f1e5e..0e92cca2cb90f 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ClusterStateChangeCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ClusterStateChangeCommand.java @@ -35,7 +35,7 @@ /** * Command to change cluster state. 
*/ -public class ClusterStateChangeCommand implements Command { +public class ClusterStateChangeCommand extends AbstractCommand { /** Flag of forced cluster deactivation. */ static final String FORCE_COMMAND = "--force"; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/Command.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/Command.java index 12f85a0e07247..fe667b0033f56 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/Command.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/Command.java @@ -186,6 +186,22 @@ public static String extendToLen(String s, int targetLen) { */ public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception; + /** + * Actual command execution with verbose mode if needed. + * Implement it if your command supports verbose mode. + * + * @see Command#execute(GridClientConfiguration, Logger) + * + * @param clientCfg Thin client configuration if connection to cluster is necessary. + * @param logger Logger to use. + * @param verbose Use verbose mode or not + * @return Result of operation (mostly usable for tests). + * @throws Exception If error occur. + */ + default Object execute(GridClientConfiguration clientCfg, Logger logger, boolean verbose) throws Exception { + return execute(clientCfg, logger); + } + /** * Prepares confirmation for the command. 
* diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java index fbfc14a948324..a612e1c15102e 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java @@ -269,7 +269,7 @@ public int execute(List rawArgs) { logger.info("Arguments: " + String.join(" ", rawArgs)); logger.info(DELIM); - lastOperationRes = command.execute(clientCfg, logger); + lastOperationRes = command.execute(clientCfg, logger, args.verbose()); break; } diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/DeactivateCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/DeactivateCommand.java index 10d0fe56235d3..750001662ce60 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/DeactivateCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/DeactivateCommand.java @@ -33,7 +33,7 @@ * @deprecated Use {@link ClusterStateChangeCommand} instead. */ @Deprecated -public class DeactivateCommand implements Command { +public class DeactivateCommand extends AbstractCommand { /** Cluster name. 
*/ private String clusterName; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ShutdownPolicyCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ShutdownPolicyCommand.java index 3830bdb713ba7..4ef6833a00375 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ShutdownPolicyCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/ShutdownPolicyCommand.java @@ -34,7 +34,7 @@ /** * Command for change or display policy for shutdown. */ -public class ShutdownPolicyCommand implements Command { +public class ShutdownPolicyCommand extends AbstractCommand { /** Arguments. */ private ShutdownPolicyArgument shutdownPolicyArgument; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/StateCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/StateCommand.java index 7084f2fe4af33..f19637ba2a2c5 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/StateCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/StateCommand.java @@ -29,7 +29,7 @@ /** * Command to print cluster state. 
*/ -public class StateCommand implements Command { +public class StateCommand extends AbstractCommand { /** {@inheritDoc} */ @Override public void printUsage(Logger logger) { Command.usage(logger, "Print current cluster state:", STATE); diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/TracingConfigurationCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/TracingConfigurationCommand.java index d8a9673ea57eb..a9d16543f8d04 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/TracingConfigurationCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/TracingConfigurationCommand.java @@ -50,7 +50,7 @@ /** * Commands associated with tracing configuration functionality. */ -public class TracingConfigurationCommand implements Command { +public class TracingConfigurationCommand extends AbstractCommand { /** Arguments. */ private TracingConfigurationArguments args; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/TxCommands.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/TxCommands.java index fbe89f22ee1fd..e266f592f28c7 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/TxCommands.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/TxCommands.java @@ -61,7 +61,7 @@ /** * Transaction commands. 
*/ -public class TxCommands implements Command { +public class TxCommands extends AbstractCommand { /** Arguments */ private VisorTxTaskArg args; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/WalCommands.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/WalCommands.java index 2919470db4a1b..783318cebb3cb 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/WalCommands.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/WalCommands.java @@ -44,7 +44,7 @@ /** * Wal commands. */ -public class WalCommands implements Command> { +public class WalCommands extends AbstractCommand> { /** */ static final String WAL_PRINT = "print"; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/WarmUpCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/WarmUpCommand.java index 2219c5abf446a..3f03e2c4bdced 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/WarmUpCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/WarmUpCommand.java @@ -31,7 +31,7 @@ /** * Command for interacting with warm-up. 
*/ -public class WarmUpCommand implements Command { +public class WarmUpCommand extends AbstractCommand { /** {@inheritDoc} */ @Override public void printUsage(Logger logger) { Command.usage(logger, "Stop warm-up:", WARM_UP, WarmUpCommandArg.STOP.argName()); diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheCommands.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheCommands.java index 681c238eb55d7..778280da11ae8 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheCommands.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheCommands.java @@ -21,6 +21,7 @@ import java.util.Map; import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -42,7 +43,7 @@ /** * High-level "cache" command implementation. */ -public class CacheCommands implements Command { +public class CacheCommands extends AbstractCommand { /** Empty group name. 
*/ public static final String EMPTY_GROUP_NAME = "no_group"; @@ -75,7 +76,7 @@ public class CacheCommands implements Command { if (command == null) throw new IllegalStateException("Unknown command " + subcommand); - return command.execute(clientCfg, logger); + return command.execute(clientCfg, logger, verbose); } /** {@inheritDoc} */ diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheContention.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheContention.java index 6f7062057e090..35e658498b416 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheContention.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheContention.java @@ -21,6 +21,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -39,7 +40,7 @@ /** * Cache contention detection subcommand. 
*/ -public class CacheContention implements Command { +public class CacheContention extends AbstractCommand { /** {@inheritDoc} */ @Override public void printUsage(Logger logger) { String description = "Show the keys that are point of contention for multiple transactions."; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheDistribution.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheDistribution.java index 4b5f3b9813fa6..1def122d1ea34 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheDistribution.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheDistribution.java @@ -23,6 +23,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandHandler; @@ -46,7 +47,7 @@ /** * Would collect and print info about how data is spread between nodes and partitions. 
*/ -public class CacheDistribution implements Command { +public class CacheDistribution extends AbstractCommand { /** {@inheritDoc} */ @Override public void printUsage(Logger logger) { String CACHES = "cacheName1,...,cacheNameN"; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesForceRebuild.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesForceRebuild.java index ace4baebcd0d0..c1415ce11c616 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesForceRebuild.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesForceRebuild.java @@ -25,6 +25,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.TaskExecutor; @@ -50,7 +51,7 @@ /** * Cache subcommand that triggers indexes force rebuild. */ -public class CacheIndexesForceRebuild implements Command { +public class CacheIndexesForceRebuild extends AbstractCommand { /** Command parsed arguments. 
*/ private Arguments args; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesList.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesList.java index b31ea5a71e70a..11d317662441d 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesList.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesList.java @@ -27,6 +27,7 @@ import java.util.regex.PatternSyntaxException; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.TaskExecutor; @@ -47,7 +48,7 @@ /** * Cache subcommand that allows to show indexes. */ -public class CacheIndexesList implements Command { +public class CacheIndexesList extends AbstractCommand { /** Command parsed arguments. 
*/ private Arguments args; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesRebuildStatus.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesRebuildStatus.java index 0ec662e9f966f..4bf4115237ac7 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesRebuildStatus.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheIndexesRebuildStatus.java @@ -23,6 +23,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.TaskExecutor; @@ -41,7 +42,7 @@ /** * Cache subcommand that allows to show caches that have */ -public class CacheIndexesRebuildStatus implements Command { +public class CacheIndexesRebuildStatus extends AbstractCommand { /** Command parsed arguments. 
*/ private Arguments args; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheValidateIndexes.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheValidateIndexes.java index d467b82208de3..fb2ffae5d70e9 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheValidateIndexes.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheValidateIndexes.java @@ -26,6 +26,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -62,7 +63,7 @@ /** * Validate indexes command. */ -public class CacheValidateIndexes implements Command { +public class CacheValidateIndexes extends AbstractCommand { /** {@inheritDoc} */ @Override public void printUsage(Logger logger) { String CACHES = "cacheName1,...,cacheNameN"; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheViewer.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheViewer.java index 859a815df9bf0..95171c9c99944 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheViewer.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CacheViewer.java @@ -26,6 +26,7 @@ import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; import org.apache.ignite.internal.client.GridClientException; +import org.apache.ignite.internal.commandline.AbstractCommand; import 
org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.OutputFormat; @@ -68,7 +69,7 @@ /** * Command to show caches on cluster. */ -public class CacheViewer implements Command { +public class CacheViewer extends AbstractCommand { /** {@inheritDoc} */ @Override public void printUsage(Logger logger) { String description = "Show information about caches, groups or sequences that match a regular expression. " + diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CheckIndexInlineSizes.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CheckIndexInlineSizes.java index 7821d07a56b58..457f189afb054 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CheckIndexInlineSizes.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/CheckIndexInlineSizes.java @@ -30,6 +30,7 @@ import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; import org.apache.ignite.internal.client.GridClientNode; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.cache.check_indexes_inline_size.CheckIndexInlineSizesResult; import org.apache.ignite.internal.commandline.cache.check_indexes_inline_size.CheckIndexInlineSizesTask; @@ -44,7 +45,7 @@ /** * Command for check secondary indexes inline size on the different nodes. */ -public class CheckIndexInlineSizes implements Command { +public class CheckIndexInlineSizes extends AbstractCommand { /** Success message. 
*/ public static final String INDEXES_INLINE_SIZE_ARE_THE_SAME = "All secondary indexes have the same effective inline size on all cluster nodes."; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/FindAndDeleteGarbage.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/FindAndDeleteGarbage.java index 7fa625b7f019b..b2c97a3615fe5 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/FindAndDeleteGarbage.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/FindAndDeleteGarbage.java @@ -24,6 +24,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -44,7 +45,7 @@ /** * Command to find and delete garbage which could left after destroying caches in shared group. 
*/ -public class FindAndDeleteGarbage implements Command { +public class FindAndDeleteGarbage extends AbstractCommand { /** {@inheritDoc} */ @Override public void printUsage(Logger logger) { String GROUPS = "groupName1,...,groupNameN"; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/IdleVerify.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/IdleVerify.java index 7f4ecb05d2c0d..29b3447cb1a07 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/IdleVerify.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/IdleVerify.java @@ -32,6 +32,7 @@ import org.apache.ignite.internal.client.GridClientConfiguration; import org.apache.ignite.internal.client.GridClientException; import org.apache.ignite.internal.client.GridClientNode; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.argument.CommandArgUtils; @@ -65,7 +66,7 @@ /** * */ -public class IdleVerify implements Command { +public class IdleVerify extends AbstractCommand { /** {@inheritDoc} */ @Override public void printUsage(Logger logger) { String CACHES = "cacheName1,...,cacheNameN"; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/ResetLostPartitions.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/ResetLostPartitions.java index dc35436196b28..34fb57a81ba99 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/ResetLostPartitions.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/cache/ResetLostPartitions.java @@ -21,6 +21,7 @@ import java.util.logging.Logger; import 
org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.cache.reset_lost_partitions.CacheResetLostPartitionsTask; @@ -34,7 +35,7 @@ /** * Command for reseting lost partition state. */ -public class ResetLostPartitions implements Command> { +public class ResetLostPartitions extends AbstractCommand> { /** {@inheritDoc} */ @Override public void printUsage(Logger logger) { String CACHES = "cacheName1,...,cacheNameN"; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/diagnostic/DiagnosticCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/diagnostic/DiagnosticCommand.java index b4ca6e4606c3b..c0e59a32f2358 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/diagnostic/DiagnosticCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/diagnostic/DiagnosticCommand.java @@ -20,6 +20,7 @@ import java.util.Arrays; import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; @@ -34,7 +35,7 @@ /** * */ -public class DiagnosticCommand implements Command { +public class DiagnosticCommand extends AbstractCommand { /** */ private DiagnosticSubCommand subcommand; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/diagnostic/PageLocksCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/diagnostic/PageLocksCommand.java index 5d91227762939..18d3d5db6eddc 
100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/diagnostic/PageLocksCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/diagnostic/PageLocksCommand.java @@ -26,6 +26,7 @@ import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -52,7 +53,7 @@ /** * */ -public class PageLocksCommand implements Command { +public class PageLocksCommand extends AbstractCommand { /** */ private Arguments arguments; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommand.java index eae804979c2c7..5cbd723ddf73d 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommand.java @@ -20,6 +20,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -37,7 +38,7 @@ * * @see EncryptionSubcommand */ -public class EncryptionCommand implements Command { +public class EncryptionCommand extends AbstractCommand { /** Subcommand. 
*/ EncryptionSubcommand cmd; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/MetadataCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/MetadataCommand.java index 180d5ec9fb2be..7eeab1f5bf487 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/MetadataCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/MetadataCommand.java @@ -19,6 +19,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.meta.subcommands.MetadataRemoveCommand; @@ -37,7 +38,7 @@ /** * */ -public class MetadataCommand implements Command { +public class MetadataCommand extends AbstractCommand { /** * */ diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/subcommands/MetadataAbstractSubCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/subcommands/MetadataAbstractSubCommand.java index dded0fdb41243..8111a023861d8 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/subcommands/MetadataAbstractSubCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/subcommands/MetadataAbstractSubCommand.java @@ -26,6 +26,7 @@ import org.apache.ignite.internal.client.GridClientConfiguration; import org.apache.ignite.internal.client.GridClientDisconnectedException; import org.apache.ignite.internal.client.GridClientNode; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import 
org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -37,7 +38,7 @@ public abstract class MetadataAbstractSubCommand< MetadataArgsDto extends IgniteDataTransferObject, MetadataResultDto extends IgniteDataTransferObject> - implements Command { + extends AbstractCommand { /** Filesystem. */ protected static final FileSystem FS = FileSystems.getDefault(); diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/subcommands/MetadataHelpCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/subcommands/MetadataHelpCommand.java index 35db8f254468d..da93f4f5fbbce 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/subcommands/MetadataHelpCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/meta/subcommands/MetadataHelpCommand.java @@ -19,12 +19,12 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClientConfiguration; -import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.meta.MetadataCommand; import org.apache.ignite.internal.commandline.meta.MetadataSubCommandsList; /** */ -public class MetadataHelpCommand implements Command { +public class MetadataHelpCommand extends AbstractCommand { /** {@inheritDoc} */ @Override public void printUsage(Logger log) { throw new UnsupportedOperationException("printUsage"); diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/metric/MetricCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/metric/MetricCommand.java index a409111f2e406..861a8b9d196f4 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/metric/MetricCommand.java +++ 
b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/metric/MetricCommand.java @@ -26,6 +26,7 @@ import java.util.stream.Collectors; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -42,7 +43,7 @@ import static org.apache.ignite.internal.visor.systemview.VisorSystemViewTask.SimpleType.STRING; /** Represents command for metric values printing. */ -public class MetricCommand implements Command { +public class MetricCommand extends AbstractCommand { /** * Argument for the metric values obtainig task. * @see VisorMetricTask diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/PropertyCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/PropertyCommand.java index bee355f348d28..1f2c694e69b30 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/PropertyCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/PropertyCommand.java @@ -19,6 +19,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.processors.configuration.distributed.DistributedChangeableProperty; @@ -33,7 +34,7 @@ /** * Command to manage distributed properties (see {@link DistributedChangeableProperty}) */ -public class PropertyCommand implements Command { +public class PropertyCommand extends AbstractCommand { /** * */ 
diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/subcommands/PropertyAbstractSubCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/subcommands/PropertyAbstractSubCommand.java index 251ae21d54507..f852459b9b6d3 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/subcommands/PropertyAbstractSubCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/subcommands/PropertyAbstractSubCommand.java @@ -24,6 +24,7 @@ import org.apache.ignite.internal.client.GridClientConfiguration; import org.apache.ignite.internal.client.GridClientDisconnectedException; import org.apache.ignite.internal.client.GridClientNode; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -35,7 +36,7 @@ public abstract class PropertyAbstractSubCommand< MetadataArgsDto extends IgniteDataTransferObject, MetadataResultDto extends IgniteDataTransferObject> - implements Command { + extends AbstractCommand { /** */ private MetadataArgsDto args; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/subcommands/PropertyHelpCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/subcommands/PropertyHelpCommand.java index 61f3080d1b077..29de3ec0a24b3 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/subcommands/PropertyHelpCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/subcommands/PropertyHelpCommand.java @@ -19,12 +19,12 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClientConfiguration; -import 
org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.meta.MetadataCommand; import org.apache.ignite.internal.commandline.meta.MetadataSubCommandsList; /** */ -public class PropertyHelpCommand implements Command { +public class PropertyHelpCommand extends AbstractCommand { /** {@inheritDoc} */ @Override public void printUsage(Logger log) { throw new UnsupportedOperationException("printUsage"); diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/query/KillCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/query/KillCommand.java index 38b170d210a3f..af2dbd565751a 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/query/KillCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/query/KillCommand.java @@ -23,6 +23,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -69,7 +70,7 @@ * @see ComputeMXBean * @see TransactionsMXBean */ -public class KillCommand implements Command { +public class KillCommand extends AbstractCommand { /** Command argument. 
*/ private Object taskArgs; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/snapshot/SnapshotCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/snapshot/SnapshotCommand.java index 2c77e6997aae5..2f9597e5838f4 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/snapshot/SnapshotCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/snapshot/SnapshotCommand.java @@ -20,6 +20,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -41,7 +42,7 @@ * @see SnapshotMXBean * @see IgniteSnapshotManager */ -public class SnapshotCommand implements Command { +public class SnapshotCommand extends AbstractCommand { /** Command argument. 
*/ private Object taskArgs; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/systemview/SystemViewCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/systemview/SystemViewCommand.java index cef934477002f..14b544f8304a5 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/systemview/SystemViewCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/systemview/SystemViewCommand.java @@ -29,6 +29,7 @@ import java.util.stream.Collectors; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -49,7 +50,7 @@ import static org.apache.ignite.internal.visor.systemview.VisorSystemViewTask.SimpleType.STRING; /** Represents command for {@link SystemView} content printing. */ -public class SystemViewCommand implements Command { +public class SystemViewCommand extends AbstractCommand { /** Column separator. 
*/ public static final String COLUMN_SEPARATOR = " "; diff --git a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java index a2ddf0b218730..5970ae43b4386 100644 --- a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java +++ b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.io.RandomAccessFile; import java.io.Serializable; +import java.lang.reflect.Field; import java.nio.file.Files; import java.nio.file.Paths; import java.util.ArrayList; @@ -606,13 +607,49 @@ private void setState(Ignite ignite, ClusterState state, String strState, String */ @Test public void testBaselineCollect() throws Exception { - Ignite ignite = startGrids(1); + Ignite ignite = startGrid( + optimize(getConfiguration(getTestIgniteInstanceName(0))).setLocalHost("0.0.0.0")); + + Field addresses = ignite.cluster().node().getClass().getDeclaredField("addrs"); + addresses.setAccessible(true); + addresses.set(ignite.cluster().node(), Arrays.asList("127.0.0.1", "0:0:0:0:0:0:0:1", "10.19.112.175", "188.166.164.247")); + Field hostNames = ignite.cluster().node().getClass().getDeclaredField("hostNames"); + hostNames.setAccessible(true); + hostNames.set(ignite.cluster().node(), Arrays.asList("10.19.112.175.hostname")); assertFalse(ignite.cluster().active()); ignite.cluster().active(true); - assertEquals(EXIT_CODE_OK, execute("--baseline")); + injectTestSystemOut(); + + { // non verbose mode + assertEquals(EXIT_CODE_OK, execute("--baseline")); + + List nodesInfo = findBaselineNodesInfo(); + assertEquals(1, nodesInfo.size()); + assertContains(log, nodesInfo.get(0), "Address=188.166.164.247.hostname/188.166.164.247, "); + } + + { // verbose mode + assertEquals(EXIT_CODE_OK, execute("--verbose", "--baseline")); + + List nodesInfo = 
findBaselineNodesInfo(); + assertEquals(1, nodesInfo.size()); + assertContains(log, nodesInfo.get(0), "Addresses=188.166.164.247.hostname/188.166.164.247,10.19.112.175.hostname/10.19.112.175"); + } + + { // empty resolved addresses + addresses.set(ignite.cluster().node(), Collections.emptyList()); + hostNames.set(ignite.cluster().node(), Collections.emptyList()); + + assertEquals(EXIT_CODE_OK, execute("--verbose", "--baseline")); + + List nodesInfo = findBaselineNodesInfo(); + assertEquals(1, nodesInfo.size()); + assertContains(log, nodesInfo.get(0), "ConsistentId=" + + grid(0).cluster().localNode().consistentId() + ", State="); + } assertEquals(1, ignite.cluster().currentBaselineTopology().size()); } @@ -637,7 +674,7 @@ public void testBaselineCollectCrd() throws Exception { String crdStr = findCrdInfo(); assertEquals("(Coordinator: ConsistentId=" + - grid(0).cluster().localNode().consistentId() + ", Order=1)", crdStr); + grid(0).cluster().localNode().consistentId() + ", Address=127.0.0.1.hostname/127.0.0.1" + ", Order=1)", crdStr); stopGrid(0); @@ -646,7 +683,7 @@ public void testBaselineCollectCrd() throws Exception { crdStr = findCrdInfo(); assertEquals("(Coordinator: ConsistentId=" + - grid(1).cluster().localNode().consistentId() + ", Order=2)", crdStr); + grid(1).cluster().localNode().consistentId() + ", Address=127.0.0.1.hostname/127.0.0.1" + ", Order=2)", crdStr); startGrid(0); @@ -655,7 +692,7 @@ public void testBaselineCollectCrd() throws Exception { crdStr = findCrdInfo(); assertEquals("(Coordinator: ConsistentId=" + - grid(1).cluster().localNode().consistentId() + ", Order=2)", crdStr); + grid(1).cluster().localNode().consistentId() + ", Address=127.0.0.1.hostname/127.0.0.1" + ", Order=2)", crdStr); stopGrid(1); @@ -664,7 +701,7 @@ public void testBaselineCollectCrd() throws Exception { crdStr = findCrdInfo(); assertEquals("(Coordinator: ConsistentId=" + - grid(0).cluster().localNode().consistentId() + ", Order=4)", crdStr); + 
grid(0).cluster().localNode().consistentId() + ", Address=127.0.0.1.hostname/127.0.0.1" + ", Order=4)", crdStr); } /** @@ -682,6 +719,30 @@ private String findCrdInfo() { return crdStr.substring(0, crdStr.indexOf('\n')).trim(); } + /** + * @return utility information about baseline nodes + */ + private List findBaselineNodesInfo() { + String outStr = testOut.toString(); + + int i = outStr.indexOf("Baseline nodes:"); + + assertTrue("Baseline nodes information is not found", i != -1); + + int j = outStr.indexOf("\n", i) + 1; + + int beginOfNodeDesc = -1; + + List nodesInfo = new ArrayList<>(); + + while ((beginOfNodeDesc = outStr.indexOf("ConsistentId=", j) ) != -1) { + j = outStr.indexOf("\n", beginOfNodeDesc); + nodesInfo.add(outStr.substring(beginOfNodeDesc, j).trim()); + } + + return nodesInfo; + } + /** * @param ignites Ignites. * @return Local node consistent ID. diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java b/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java index 949e9c6f6ed7c..148e86dbe4b96 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java @@ -1935,6 +1935,19 @@ public final class IgniteSystemProperties { defaults = "" + DFLT_DUMP_TX_COLLISIONS_INTERVAL) public static final String IGNITE_DUMP_TX_COLLISIONS_INTERVAL = "IGNITE_DUMP_TX_COLLISIONS_INTERVAL"; + /** + * Set to true only during the junit tests. + * Signals that the cluster is running in a test environment. + * + * Can be used for changing behaviour of tightly coupled code pieces during the tests. + * Use it as a last resort only, prefer another toolchain like DI, mocks and etc. if possible + */ + @SystemProperty(value = "Set to true only during the junit tests. " + + "Can be used for changing behaviour of tightly coupled code pieces during the tests. 
" + + "Use it as a last resort only, prefer another toolchain like DI, mocks and etc. if possible", + type = Boolean.class) + public static final String IGNITE_TEST_ENV = "IGNITE_TEST_ENV"; + /** * Enforces singleton. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/baseline/VisorBaselineNode.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/baseline/VisorBaselineNode.java index 9f4b39f2db3f0..b4ee5d1a81cfb 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/baseline/VisorBaselineNode.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/baseline/VisorBaselineNode.java @@ -20,12 +20,17 @@ import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; +import java.net.InetAddress; +import java.util.Collection; +import java.util.Collections; import java.util.Map; import org.apache.ignite.cluster.BaselineNode; +import org.apache.ignite.internal.dto.IgniteDataTransferObject; import org.apache.ignite.internal.managers.discovery.IgniteClusterNode; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.internal.visor.VisorDataTransferObject; +import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; /** @@ -44,6 +49,12 @@ public class VisorBaselineNode extends VisorDataTransferObject { /** */ private @Nullable Long order; + /** + * Resolved list of (ip, hostname) pairs + * (if ip has no resolved host, hostname will be the string representation of ip). + */ + private @NotNull Collection addrs = Collections.emptyList(); + /** * Default constructor. */ @@ -55,19 +66,22 @@ public VisorBaselineNode() { * Create data transfer object for baseline node. * * @param node Baseline node. + * @param resolvedInetAddrs List of resolved ip, hostnames pairs. 
*/ - public VisorBaselineNode(BaselineNode node) { + public VisorBaselineNode(BaselineNode node, @NotNull Collection resolvedInetAddrs) { consistentId = String.valueOf(node.consistentId()); attrs = node.attributes(); //Baseline topology returns instances of DetachedClusternode - if (node instanceof IgniteClusterNode) + if (node instanceof IgniteClusterNode) { order = ((IgniteClusterNode)node).order(); + addrs = resolvedInetAddrs; + } } /** {@inheritDoc} */ @Override public byte getProtocolVersion() { - return V2; + return V3; } /** @@ -91,11 +105,20 @@ public Map getAttributes() { return order; } + /** + * + * @return Collection with resolved pairs ip->hostname + */ + public @NotNull Collection getAddrs() { + return addrs; + } + /** {@inheritDoc} */ @Override protected void writeExternalData(ObjectOutput out) throws IOException { U.writeString(out, consistentId); U.writeMap(out, attrs); out.writeObject(order); + U.writeCollection(out, addrs); } /** {@inheritDoc} */ @@ -105,10 +128,72 @@ public Map getAttributes() { if (protoVer >= V2) order = (Long)in.readObject(); + + if (protoVer >= V3) { + Collection inputAddrs = U.readCollection(in); + + if (inputAddrs != null) + addrs = inputAddrs; + } } /** {@inheritDoc} */ @Override public String toString() { return S.toString(VisorBaselineNode.class, this); } + + /** + * Simple data class for storing (hostname, address) pairs + */ + public static class ResolvedAddresses extends IgniteDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** */ + private String hostname; + + /** Textual representation of IP address. */ + private String addr; + + /** + * @param inetAddr Inet address. + */ + ResolvedAddresses(InetAddress inetAddr) { + this.hostname = inetAddr.getHostName(); + this.addr = inetAddr.getHostAddress(); + } + + /** + * Default constructor. 
+ */ + public ResolvedAddresses() { + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + U.writeString(out, hostname); + U.writeString(out, addr); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte protoVer, ObjectInput in) + throws IOException, ClassNotFoundException { + hostname = U.readString(in); + addr = U.readString(in); + } + + /** + * @return Hostname. + */ + public String hostname() { + return hostname; + } + + /** + * @return Textual representation of IP address. + */ + public String address() { + return addr; + } + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/baseline/VisorBaselineTaskResult.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/baseline/VisorBaselineTaskResult.java index 769100828217a..b283e6bec0b18 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/baseline/VisorBaselineTaskResult.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/baseline/VisorBaselineTaskResult.java @@ -20,10 +20,20 @@ import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; import java.util.Map; +import java.util.Set; import java.util.TreeMap; +import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.cluster.BaselineNode; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.internal.managers.discovery.IgniteClusterNode; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; @@ -75,7 +85,7 @@ private static Map toMap(Collection map = new TreeMap<>(); for (BaselineNode node : nodes) { - VisorBaselineNode dto = 
new VisorBaselineNode(node); + VisorBaselineNode dto = new VisorBaselineNode(node, Collections.emptyList()); map.put(dto.getConsistentId(), dto); } @@ -83,6 +93,80 @@ private static Map toMap(Collectionhostname pairs. + */ + private static Map toMapWithResolvedAddresses(Collection nodes) { + if (F.isEmpty(nodes)) + return null; + + Map map = new TreeMap<>(); + + for (BaselineNode node : nodes) { + Collection addrs = new ArrayList<>(); + + if (node instanceof IgniteClusterNode) { + for (InetAddress inetAddress: resolveInetAddresses((ClusterNode)node)) + addrs.add(new VisorBaselineNode.ResolvedAddresses(inetAddress)); + } + + VisorBaselineNode dto = new VisorBaselineNode(node, addrs); + + map.put(dto.getConsistentId(), dto); + } + + return map; + } + + /** + * @return Resolved inet addresses of node + */ + private static Collection resolveInetAddresses(ClusterNode node) { + Set res = new HashSet<>(node.addresses().size()); + + Iterator hostNamesIt = node.hostNames().iterator(); + + for (String addr : node.addresses()) { + String hostName = hostNamesIt.hasNext() ? 
hostNamesIt.next() : null; + + InetAddress inetAddr = null; + + if (!F.isEmpty(hostName)) { + try { + if (IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_TEST_ENV)) { + // 127.0.0.1.hostname will be resolved to 127.0.0.1 + if (hostName.endsWith(".hostname")) { + String ipStr = hostName.substring(0, hostName.length() - ".hostname".length()); + inetAddr = InetAddress.getByAddress(hostName, InetAddress.getByName(ipStr).getAddress()); + } + } + else + inetAddr = InetAddress.getByName(hostName); + } + catch (UnknownHostException ignored) { + } + } + + if (inetAddr == null || inetAddr.isLoopbackAddress()) { + try { + if (IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_TEST_ENV)) + // 127.0.0.1 will be reverse-resolved to 127.0.0.1.hostname + inetAddr = InetAddress.getByAddress(addr + ".hostname", InetAddress.getByName(addr).getAddress()); + else + inetAddr = InetAddress.getByName(addr); + } + catch (UnknownHostException ignored) { + } + } + + if (inetAddr != null) + res.add(inetAddr); + } + + return res; + } + /** * Constructor. * @@ -104,7 +188,7 @@ public VisorBaselineTaskResult( this.active = active; this.topVer = topVer; this.baseline = toMap(baseline); - this.servers = toMap(servers); + this.servers = toMapWithResolvedAddresses(servers); this.autoAdjustSettings = autoAdjustSettings; this.remainingTimeToBaselineAdjust = remainingTimeToBaselineAdjust; this.baselineAdjustInProgress = baselineAdjustInProgress; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/util/VisorTaskUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/util/VisorTaskUtils.java index 03f9ecd986d57..5fecd01189244 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/util/VisorTaskUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/util/VisorTaskUtils.java @@ -1107,7 +1107,7 @@ public static boolean joinTimedOut(String msg) { * IPv4, private IPv4, IPv4 local host, IPv6. 
* Lower addresses first. */ - private static class SortableAddress implements Comparable { + public static class SortableAddress implements Comparable { /** */ private int type; @@ -1122,7 +1122,7 @@ private static class SortableAddress implements Comparable { * * @param addr Address as string. */ - private SortableAddress(String addr) { + public SortableAddress(String addr) { this.addr = addr; if (addr.indexOf(':') > 0) diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/junits/GridAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/testframework/junits/GridAbstractTest.java index 3fa2aaf3772f8..d2f044b04f386 100755 --- a/modules/core/src/test/java/org/apache/ignite/testframework/junits/GridAbstractTest.java +++ b/modules/core/src/test/java/org/apache/ignite/testframework/junits/GridAbstractTest.java @@ -156,6 +156,7 @@ import static org.apache.ignite.IgniteSystemProperties.IGNITE_CLIENT_CACHE_CHANGE_MESSAGE_TIMEOUT; import static org.apache.ignite.IgniteSystemProperties.IGNITE_DISCO_FAILED_CLIENT_RECONNECT_DELAY; import static org.apache.ignite.IgniteSystemProperties.IGNITE_LOG_CLASSPATH_CONTENT_ON_STARTUP; +import static org.apache.ignite.IgniteSystemProperties.IGNITE_TEST_ENV; import static org.apache.ignite.IgniteSystemProperties.IGNITE_TO_STRING_INCLUDE_SENSITIVE; import static org.apache.ignite.IgniteSystemProperties.IGNITE_UPDATE_NOTIFIER; import static org.apache.ignite.IgniteSystemProperties.getBoolean; @@ -281,6 +282,7 @@ public String getName() { System.setProperty(IGNITE_DISCO_FAILED_CLIENT_RECONNECT_DELAY, "1"); System.setProperty(IGNITE_CLIENT_CACHE_CHANGE_MESSAGE_TIMEOUT, "1000"); System.setProperty(IGNITE_LOG_CLASSPATH_CONTENT_ON_STARTUP, "false"); + System.setProperty(IGNITE_TEST_ENV, "true"); S.setIncludeSensitiveSupplier(() -> getBoolean(IGNITE_TO_STRING_INCLUDE_SENSITIVE, true)); diff --git a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output 
b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output index e84d8fb87815b..2806cd60552cc 100644 --- a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output +++ b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output @@ -27,7 +27,10 @@ This utility can do the following commands: ACTIVE_READ_ONLY - Activate cluster. Cache updates are denied. Print cluster baseline topology: - control.(sh|bat) --baseline + control.(sh|bat) --baseline [--verbose] + + Parameters: + verbose - Show the full list of node ips. Add nodes into baseline topology: control.(sh|bat) --baseline add consistentId1[,consistentId2,....,consistentIdN] [--yes] diff --git a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output index e84d8fb87815b..2806cd60552cc 100644 --- a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output +++ b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output @@ -27,7 +27,10 @@ This utility can do the following commands: ACTIVE_READ_ONLY - Activate cluster. Cache updates are denied. Print cluster baseline topology: - control.(sh|bat) --baseline + control.(sh|bat) --baseline [--verbose] + + Parameters: + verbose - Show the full list of node ips. Add nodes into baseline topology: control.(sh|bat) --baseline add consistentId1[,consistentId2,....,consistentIdN] [--yes] From 78f1043330e8c92fc330c1e851e6ce8c4f17e739 Mon Sep 17 00:00:00 2001 From: Sergey Chugunov Date: Tue, 3 Nov 2020 17:15:24 +0300 Subject: [PATCH 016/110] IGNITE-13550 Persistence CLEAN command implementation - Fixes #8408. 
Signed-off-by: Sergey Chugunov --- .../internal/commandline/CommandList.java | 5 +- .../commandline/PersistenceCommand.java | 290 +++++++++++++ .../CleanAndBackupSubcommandArg.java | 45 ++ .../persistence/PersistenceArguments.java | 98 +++++ .../persistence/PersistenceSubcommands.java | 73 ++++ .../ignite/util/GridCommandHandlerTest.java | 366 ++++++++++++++++ .../maintenance/MaintenanceProcessor.java | 15 +- .../CheckCorruptedCacheStoresCleanAction.java | 70 +++ .../CorruptedPdsMaintenanceCallback.java | 4 +- .../PersistenceCleanAndBackupSettings.java | 71 ++++ .../PersistenceCleanAndBackupType.java | 41 ++ .../persistence/PersistenceOperation.java | 43 ++ .../visor/persistence/PersistenceTask.java | 401 ++++++++++++++++++ .../visor/persistence/PersistenceTaskArg.java | 82 ++++ .../persistence/PersistenceTaskResult.java | 123 ++++++ .../ignite/maintenance/MaintenanceTask.java | 4 +- .../resources/META-INF/classnames.properties | 22 +- ...mmandHandlerClusterByClassTest_help.output | 24 ++ ...ndlerClusterByClassWithSSLTest_help.output | 24 ++ 19 files changed, 1773 insertions(+), 28 deletions(-) create mode 100644 modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/PersistenceCommand.java create mode 100644 modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/persistence/CleanAndBackupSubcommandArg.java create mode 100644 modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/persistence/PersistenceArguments.java create mode 100644 modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/persistence/PersistenceSubcommands.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CheckCorruptedCacheStoresCleanAction.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceCleanAndBackupSettings.java create mode 100644 
modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceCleanAndBackupType.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceOperation.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTask.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTaskArg.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTaskResult.java diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandList.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandList.java index 2daaf86b9e8a1..e16acaa97a32e 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandList.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandList.java @@ -89,7 +89,10 @@ public enum CommandList { SYSTEM_VIEW("--system-view", new SystemViewCommand()), /** Command for printing metric values. */ - METRIC("--metric", new MetricCommand()); + METRIC("--metric", new MetricCommand()), + + /** */ + PERSISTENCE("--persistence", new PersistenceCommand()); /** Private values copy so there's no need in cloning it every time. */ private static final CommandList[] VALUES = CommandList.values(); diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/PersistenceCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/PersistenceCommand.java new file mode 100644 index 0000000000000..d41269a2a0fc7 --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/PersistenceCommand.java @@ -0,0 +1,290 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline; + +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.UUID; +import java.util.logging.Logger; + +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.client.GridClientNode; +import org.apache.ignite.internal.commandline.argument.CommandArgUtils; +import org.apache.ignite.internal.commandline.persistence.CleanAndBackupSubcommandArg; +import org.apache.ignite.internal.commandline.persistence.PersistenceArguments; +import org.apache.ignite.internal.commandline.persistence.PersistenceSubcommands; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.visor.persistence.PersistenceCleanAndBackupSettings; +import org.apache.ignite.internal.visor.persistence.PersistenceCleanAndBackupType; +import org.apache.ignite.internal.visor.persistence.PersistenceTask; +import org.apache.ignite.internal.visor.persistence.PersistenceTaskArg; +import org.apache.ignite.internal.visor.persistence.PersistenceTaskResult; +import org.apache.ignite.lang.IgniteBiTuple; + +import static org.apache.ignite.internal.commandline.Command.usage; +import static 
org.apache.ignite.internal.commandline.CommandList.PERSISTENCE; +import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; +import static org.apache.ignite.internal.commandline.persistence.CleanAndBackupSubcommandArg.ALL; +import static org.apache.ignite.internal.commandline.persistence.CleanAndBackupSubcommandArg.CACHES; +import static org.apache.ignite.internal.commandline.persistence.CleanAndBackupSubcommandArg.CORRUPTED; +import static org.apache.ignite.internal.commandline.persistence.PersistenceSubcommands.BACKUP; +import static org.apache.ignite.internal.commandline.persistence.PersistenceSubcommands.CLEAN; +import static org.apache.ignite.internal.commandline.persistence.PersistenceSubcommands.INFO; +import static org.apache.ignite.internal.commandline.persistence.PersistenceSubcommands.of; + +/** */ +public class PersistenceCommand implements Command { + /** */ + private PersistenceArguments cleaningArgs; + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + Optional firstNodeOpt = client.compute().nodes().stream().findFirst(); + + if (firstNodeOpt.isPresent()) { + UUID uuid = firstNodeOpt.get().nodeId(); + + PersistenceTaskResult res = executeTaskByNameOnNode(client, + PersistenceTask.class.getName(), + convertArguments(cleaningArgs), + uuid, + clientCfg + ); + + printResult(res, logger); + } + else + logger.warning("No nodes found in topology, command won't be executed."); + } + catch (Throwable t) { + logger.severe("Failed to execute persistence command='" + cleaningArgs.subcommand().text() + "'"); + logger.severe(CommandLogger.errorMessage(t)); + + throw t; + } + + return null; + } + + /** + * Prints result of command execution: information about caches or result of clean/backup command. 
+     *
+     * @param res {@link PersistenceTaskResult} object with results of command execution.
+     * @param logger {@link Logger} to print output to.
+     */
+    private void printResult(PersistenceTaskResult res, Logger logger) {
+        if (!res.inMaintenanceMode()) {
+            logger.warning("Persistence command can be sent only to node in Maintenance Mode.");
+
+            return;
+        }
+        //info command
+        else if (res.cachesInfo() != null) {
+            logger.info("Persistent caches found on node:");
+
+            //sort results so corrupted caches occur in the list at the top
+            res.cachesInfo().entrySet().stream().sorted((ci0, ci1) -> {
+                IgniteBiTuple t0 = ci0.getValue();
+                IgniteBiTuple t1 = ci1.getValue();
+
+                boolean corrupted0 = !t0.get1() || !t0.get2(); // corrupted when WAL is disabled globally or locally
+                boolean corrupted1 = !t1.get1() || !t1.get2();
+
+                if (corrupted0 && corrupted1)
+                    return 0;
+                else if (!corrupted0 && !corrupted1)
+                    return 0;
+                else if (corrupted0 && !corrupted1)
+                    return -1;
+                else
+                    return 1;
+            }).forEach(
+                e -> {
+                    IgniteBiTuple t = e.getValue();
+
+                    String status;
+
+                    if (!t.get1())
+                        status = "corrupted - WAL disabled globally.";
+                    else if (!t.get2()) // get2 here: local WAL flag; was duplicated get1, leaving this branch unreachable
+                        status = "corrupted - WAL disabled locally.";
+                    else
+                        status = "no corruption.";
+
+                    logger.info(INDENT + "cache name: " + e.getKey() + ". Status: " + status);
+                }
+            );
+        }
+        //clean command
+        else if (cleaningArgs != null && cleaningArgs.subcommand() == CLEAN) {
+            logger.info("Maintenance task is " + (!res.maintenanceTaskCompleted() ?
"not " : "") + "fixed."); + + List cleanedCaches = res.handledCaches(); + + if (cleanedCaches != null && !cleanedCaches.isEmpty()) { + String cacheDirNames = String.join(", ", cleanedCaches); + + logger.info("Cache directories were cleaned: [" + cacheDirNames + ']'); + } + + List failedToHandleCaches = res.failedCaches(); + + if (failedToHandleCaches != null && !failedToHandleCaches.isEmpty()) { + String failedToHandleCachesStr = String.join(", ", failedToHandleCaches); + + logger.info("Failed to clean following directories: [" + failedToHandleCachesStr + ']'); + } + } + // backup command + else { + List backupCompletedCaches = res.handledCaches(); + + if (backupCompletedCaches != null && !backupCompletedCaches.isEmpty()) { + String cacheDirNames = String.join(", ", backupCompletedCaches); + + logger.info("Cache data files was backed up to the following directories in node's work directory: [" + + cacheDirNames + ']'); + } + + List backupFailedCaches = res.failedCaches(); + + if (backupFailedCaches != null && !backupFailedCaches.isEmpty()) { + String backupFailedCachesStr = String.join(", ", backupFailedCaches); + + logger.info("Failed to backup the following directories in node's work directory: [" + + backupFailedCachesStr + ']'); + } + } + } + + /** {@inheritDoc} */ + @Override public PersistenceArguments arg() { + return cleaningArgs; + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger logger) { + final String cacheNames = "cache1,cache2,cache3"; + + usage(logger, "Print information about potentially corrupted caches on local node:", + PERSISTENCE); + usage(logger, "The same information is printed when info subcommand is passed:", PERSISTENCE, + INFO.text()); + + usage(logger, "Clean directories of caches with corrupted data files:", PERSISTENCE, CLEAN.text(), + CORRUPTED.argName()); + usage(logger, "Clean directories of all caches:", PERSISTENCE, CLEAN.text(), + ALL.argName()); + usage(logger, "Clean directories of only given caches:", 
PERSISTENCE, CLEAN.text(), + CACHES.argName(), cacheNames); + + usage(logger, "Backup data files of corrupted caches only:", PERSISTENCE, BACKUP.text(), + CORRUPTED.argName()); + usage(logger, "Backup data files of all caches:", PERSISTENCE, BACKUP.text(), ALL.argName()); + usage(logger, "Backup data files of only given caches:", PERSISTENCE, BACKUP.text(), + CACHES.argName(), cacheNames); + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + if (!argIter.hasNextSubArg()) { + cleaningArgs = new PersistenceArguments.Builder(INFO).build(); + + return; + } + + PersistenceSubcommands cmd = of(argIter.nextArg("Expected persistence maintenance action")); + + if (cmd == null) + throw new IllegalArgumentException("Expected correct persistence maintenance action"); + + PersistenceArguments.Builder bldr = new PersistenceArguments.Builder(cmd); + + switch (cmd) { + case BACKUP: + case CLEAN: + CleanAndBackupSubcommandArg cleanAndBackupSubcommandArg = CommandArgUtils.of( + argIter.nextArg("Expected one of subcommand arguments"), CleanAndBackupSubcommandArg.class + ); + + if (cleanAndBackupSubcommandArg == null) + throw new IllegalArgumentException("Expected one of subcommand arguments"); + + bldr.withCleanAndBackupSubcommandArg(cleanAndBackupSubcommandArg); + + if (cleanAndBackupSubcommandArg == ALL || cleanAndBackupSubcommandArg == CORRUPTED) + break; + + if (cleanAndBackupSubcommandArg == CACHES) { + Set caches = argIter.nextStringSet("list of cache names"); + + if (F.isEmpty(caches)) + throw new IllegalArgumentException("Empty list of cache names"); + + bldr.withCacheNames(new ArrayList<>(caches)); + } + + break; + } + + cleaningArgs = bldr.build(); + } + + /** {@inheritDoc} */ + @Override public String name() { + return PERSISTENCE.toCommandName(); + } + + /** */ + private PersistenceTaskArg convertArguments(PersistenceArguments args) { + PersistenceCleanAndBackupSettings cleanSettings = convertCleanAndBackupSettings(args); + + 
PersistenceTaskArg taskArgs = new PersistenceTaskArg(args.subcommand().operation(), cleanSettings); + + return taskArgs; + } + + /** */ + private PersistenceCleanAndBackupSettings convertCleanAndBackupSettings(PersistenceArguments args) { + if (args.subcommand() == INFO) + return null; + + PersistenceCleanAndBackupType type; + + switch (args.cleanArg()) { + case ALL: + type = PersistenceCleanAndBackupType.ALL; + + break; + case CORRUPTED: + type = PersistenceCleanAndBackupType.CORRUPTED; + + break; + + default: + type = PersistenceCleanAndBackupType.CACHES; + } + + return new PersistenceCleanAndBackupSettings(type, args.cachesList()); + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/persistence/CleanAndBackupSubcommandArg.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/persistence/CleanAndBackupSubcommandArg.java new file mode 100644 index 0000000000000..08a0336172d8b --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/persistence/CleanAndBackupSubcommandArg.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.commandline.persistence; + +import org.apache.ignite.internal.commandline.argument.CommandArg; + +/** + * {@link PersistenceSubcommands#CLEAN} subcommand arguments. + */ +public enum CleanAndBackupSubcommandArg implements CommandArg { + /** Clean all caches data files. */ + ALL("all"), + /** Clean corrupted caches data files. */ + CORRUPTED("corrupted"), + /** Clean only specified caches data files. */ + CACHES("caches"); + + /** */ + private final String name; + + /** */ + CleanAndBackupSubcommandArg(String name) { + this.name = name; + } + + /** {@inheritDoc} */ + @Override public String argName() { + return name; + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/persistence/PersistenceArguments.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/persistence/PersistenceArguments.java new file mode 100644 index 0000000000000..8971680223c05 --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/persistence/PersistenceArguments.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.commandline.persistence; + +import java.util.List; + +/** + * Arguments of "persistence cleaning" command. + */ +public class PersistenceArguments { + /** */ + private PersistenceSubcommands cmd; + + /** */ + private CleanAndBackupSubcommandArg cleanArg; + + /** */ + private List cachesList; + + /** + * @param cmd + */ + public PersistenceArguments(PersistenceSubcommands cmd, CleanAndBackupSubcommandArg cleanArg, List cachesList) { + this.cmd = cmd; + this.cleanArg = cleanArg; + this.cachesList = cachesList; + } + + /** */ + public PersistenceSubcommands subcommand() { + return cmd; + } + + /** */ + public CleanAndBackupSubcommandArg cleanArg() { + return cleanArg; + } + + /** */ + public List cachesList() { + return cachesList; + } + + /** Builder of {@link PersistenceArguments}. */ + public static class Builder { + /** */ + private PersistenceSubcommands subCmd; + + /** */ + private CleanAndBackupSubcommandArg cleanSubCmdArg; + + /** */ + private List cacheNames; + + /** + * @param subCmd Subcommand. 
+ */ + public Builder(PersistenceSubcommands subCmd) { + this.subCmd = subCmd; + } + + /** */ + public Builder withCleanAndBackupSubcommandArg(CleanAndBackupSubcommandArg cleanSubCmdArg) { + this.cleanSubCmdArg = cleanSubCmdArg; + + return this; + } + + public Builder withCacheNames(List cacheNames) { + this.cacheNames = cacheNames; + + return this; + } + + public PersistenceArguments build() { + return new PersistenceArguments( + subCmd, + cleanSubCmdArg, + cacheNames + ); + } + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/persistence/PersistenceSubcommands.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/persistence/PersistenceSubcommands.java new file mode 100644 index 0000000000000..d674316abd51d --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/persistence/PersistenceSubcommands.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.commandline.persistence; + +import org.apache.ignite.internal.visor.persistence.PersistenceOperation; +import org.jetbrains.annotations.Nullable; + +/** + * + */ +public enum PersistenceSubcommands { + /** Collects information about corrupted caches and cache groups and their file system paths. */ + INFO("info", PersistenceOperation.INFO), + + /** Cleans partition files of corrupted caches and cache groups. */ + CLEAN("clean", PersistenceOperation.CLEAN), + + /** */ + BACKUP("backup", PersistenceOperation.BACKUP); + + /** Subcommand name. */ + private final String name; + + /** Operation this subcommand triggers. */ + private final PersistenceOperation operation; + + /** + * @param name String representation of subcommand. + * @param operation Operation this command triggers. + */ + PersistenceSubcommands(String name, PersistenceOperation operation) { + this.name = name; + this.operation = operation; + } + + /** + * @param strRep String representation of subcommand. + * @return Subcommand for its string representation. 
+ */ + public static @Nullable PersistenceSubcommands of(String strRep) { + for (PersistenceSubcommands cmd : values()) { + if (cmd.text().equals(strRep)) + return cmd; + } + + return null; + } + + /** */ + public String text() { + return name; + } + + /** */ + public PersistenceOperation operation() { + return operation; + } +} diff --git a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java index 5970ae43b4386..5557b5e5eb165 100644 --- a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java +++ b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java @@ -23,6 +23,7 @@ import java.io.Serializable; import java.lang.reflect.Field; import java.nio.file.Files; +import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; @@ -33,6 +34,7 @@ import java.util.Map; import java.util.Set; import java.util.TreeMap; +import java.util.TreeSet; import java.util.UUID; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ThreadLocalRandom; @@ -41,6 +43,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.LongAdder; +import java.util.function.Function; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; @@ -65,6 +68,7 @@ import org.apache.ignite.internal.GridJobExecuteResponse; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.IgniteNodeAttributes; import org.apache.ignite.internal.TestRecordingCommunicationSpi; import org.apache.ignite.internal.client.GridClientFactory; import org.apache.ignite.internal.client.impl.GridClientImpl; @@ -84,6 +88,7 @@ import 
org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockResponse; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxFinishRequest; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal; +import org.apache.ignite.internal.processors.cache.persistence.CheckpointState; import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; import org.apache.ignite.internal.processors.cache.persistence.db.IgniteCacheGroupsWithRestartsTest; import org.apache.ignite.internal.processors.cache.persistence.diagnostic.pagelocktracker.dumpprocessors.ToFileDumpProcessor; @@ -114,6 +119,7 @@ import org.apache.ignite.transactions.TransactionRollbackException; import org.apache.ignite.transactions.TransactionTimeoutException; import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; import org.junit.Test; import static java.io.File.separatorChar; @@ -141,6 +147,7 @@ import static org.apache.ignite.internal.processors.cache.verify.IdleVerifyUtility.GRID_NOT_IDLE_MSG; import static org.apache.ignite.internal.processors.diagnostic.DiagnosticProcessor.DEFAULT_TARGET_FOLDER; import static org.apache.ignite.testframework.GridTestUtils.assertContains; +import static org.apache.ignite.testframework.GridTestUtils.assertThrows; import static org.apache.ignite.testframework.GridTestUtils.runAsync; import static org.apache.ignite.testframework.GridTestUtils.waitForCondition; import static org.apache.ignite.transactions.TransactionConcurrency.OPTIMISTIC; @@ -248,6 +255,365 @@ public void testClientsLeakage() throws Exception { assertTrue("Still opened clients: " + new ArrayList<>(clnts.values()), clntsBefore.equals(clntsAfter2)); } + private CacheConfiguration cacheConfiguration(String cacheName) { + CacheConfiguration ccfg = new CacheConfiguration(cacheName) + .setAtomicityMode(TRANSACTIONAL) + .setAffinity(new RendezvousAffinityFunction(false, 32)) + .setBackups(1); 
+ + return ccfg; + } + + /** + * Starts cluster of two nodes and prepares situation of corrupted PDS on node2 + * so it enters maintenance mode on restart. + * + * @param cachesToStart Configurations of caches that should be started in cluster. + * @param cacheToCorrupt Function determining should cache with given name be corrupted or not. + */ + private File startGridAndPutNodeToMaintenance(CacheConfiguration[] cachesToStart, + @Nullable Function cacheToCorrupt) throws Exception { + assert cachesToStart != null && cachesToStart.length > 0; + + IgniteEx ig0 = startGrid(0); + IgniteEx ig1 = startGrid(1); + + String ig1Folder = ig1.context().pdsFolderResolver().resolveFolders().folderName(); + File dbDir = U.resolveWorkDirectory(ig1.configuration().getWorkDirectory(), "db", false); + + File ig1LfsDir = new File(dbDir, ig1Folder); + + ig0.cluster().baselineAutoAdjustEnabled(false); + ig0.cluster().state(ACTIVE); + + IgniteCache dfltCache = ig0.getOrCreateCache(cachesToStart[0]); + + if (cachesToStart.length > 1) { + for (int i = 1; i < cachesToStart.length; i++) + ig0.getOrCreateCache(cachesToStart[i]); + } + + for (int k = 0; k < 1000; k++) + dfltCache.put(k, k); + + GridCacheDatabaseSharedManager dbMrg0 = (GridCacheDatabaseSharedManager) ig0.context().cache().context().database(); + GridCacheDatabaseSharedManager dbMrg1 = (GridCacheDatabaseSharedManager) ig1.context().cache().context().database(); + + dbMrg0.forceCheckpoint("cp").futureFor(CheckpointState.FINISHED).get(); + dbMrg1.forceCheckpoint("cp").futureFor(CheckpointState.FINISHED).get(); + + Arrays.stream(cachesToStart) + .map(ccfg -> ccfg.getName()) + .filter(name -> cacheToCorrupt.apply(name)) + .forEach(name -> ig0.cluster().disableWal(name)); + + for (int k = 1000; k < 2000; k++) + dfltCache.put(k, k); + + stopGrid(1); + + File[] cpMarkers = new File(ig1LfsDir, "cp").listFiles(); + + for (File cpMark : cpMarkers) { + if (cpMark.getName().contains("-END")) + cpMark.delete(); + } + + assertThrows(log, () -> 
startGrid(1), Exception.class, null); + + return ig1LfsDir; + } + + /** + * Test verifies persistence clean command with explicit list of caches to be cleaned. + * + * @throws Exception If failed. + */ + @Test + public void testPersistenceCleanSpecifiedCachesCommand() throws Exception { + String cacheName0 = DEFAULT_CACHE_NAME + "0"; + String cacheName1 = DEFAULT_CACHE_NAME + "1"; + String cacheName2 = DEFAULT_CACHE_NAME + "2"; + String cacheName3 = DEFAULT_CACHE_NAME + "3"; + + String nonExistingCacheName = DEFAULT_CACHE_NAME + "4"; + + File mntcNodeWorkDir = startGridAndPutNodeToMaintenance( + new CacheConfiguration[]{ + cacheConfiguration(cacheName0), + cacheConfiguration(cacheName1), + cacheConfiguration(cacheName2), + cacheConfiguration(cacheName3) + }, + s -> !s.equals(cacheName3)); + + IgniteEx ig1 = startGrid(1); + + String port = ig1.localNode().attribute(IgniteNodeAttributes.ATTR_REST_TCP_PORT).toString(); + + assertEquals(EXIT_CODE_INVALID_ARGUMENTS, execute("--persistence", "clean", "caches", + nonExistingCacheName, + "--host", "localhost", "--port", port)); + + assertEquals(EXIT_CODE_OK, execute("--persistence", "clean", "caches", + cacheName0 + "," + cacheName1, + "--host", "localhost", "--port", port)); + + boolean cleanedEmpty = Arrays.stream(mntcNodeWorkDir.listFiles()) + .filter(f -> f.getName().contains(cacheName0) || f.getName().contains(cacheName1)) + .map(f -> f.listFiles().length == 1) + .reduce(true, (t, u) -> t && u); + + assertTrue(cleanedEmpty); + + boolean nonCleanedNonEmpty = Arrays.stream(mntcNodeWorkDir.listFiles()) + .filter(f -> f.getName().contains(cacheName2) || f.getName().contains(cacheName3)) + .map(f -> f.listFiles().length > 1) + .reduce(true, (t, u) -> t && u); + + assertTrue(nonCleanedNonEmpty); + + stopGrid(1); + + ig1 = startGrid(1); + + assertTrue(ig1.context().maintenanceRegistry().isMaintenanceMode()); + + assertEquals(EXIT_CODE_OK, execute("--persistence", "clean", "caches", + cacheName2, + "--host", "localhost", 
"--port", port)); + + stopGrid(1); + + ig1 = startGrid(1); + + assertFalse(ig1.context().maintenanceRegistry().isMaintenanceMode()); + } + + /** + * Test verifies persistence clean command cleaning only corrupted caches and not touching others. + * + * @throws Exception If failed. + */ + @Test + public void testPersistenceCleanCorruptedCachesCommand() throws Exception { + String cacheName0 = DEFAULT_CACHE_NAME + "0"; + String cacheName1 = DEFAULT_CACHE_NAME + "1"; + String cacheName2 = DEFAULT_CACHE_NAME + "2"; + String cacheName3 = DEFAULT_CACHE_NAME + "3"; + + File mntcNodeWorkDir = startGridAndPutNodeToMaintenance( + new CacheConfiguration[]{ + cacheConfiguration(cacheName0), + cacheConfiguration(cacheName1), + cacheConfiguration(cacheName2), + cacheConfiguration(cacheName3) + }, + s -> !s.equals(cacheName3)); + + IgniteEx ig1 = startGrid(1); + + String port = ig1.localNode().attribute(IgniteNodeAttributes.ATTR_REST_TCP_PORT).toString(); + + assertEquals(EXIT_CODE_OK, execute("--persistence", "clean", "corrupted", + "--host", "localhost", "--port", port)); + + boolean cleanedEmpty = Arrays.stream(mntcNodeWorkDir.listFiles()) + .filter(f -> + f.getName().contains(cacheName0) + || f.getName().contains(cacheName1) + || f.getName().contains(cacheName2) + ) + .map(f -> f.listFiles().length == 1) + .reduce(true, (t, u) -> t && u); + + assertTrue(cleanedEmpty); + + stopGrid(1); + + ig1 = startGrid(1); + + assertFalse(ig1.context().maintenanceRegistry().isMaintenanceMode()); + } + + /** + * Test verifies persistence clean all command that cleans all cache directories. 
+ * + * @throws Exception + */ + @Test + public void testPersistenceCleanAllCachesCommand() throws Exception { + String cacheName0 = DEFAULT_CACHE_NAME + "0"; + String cacheName1 = DEFAULT_CACHE_NAME + "1"; + + File mntcNodeWorkDir = startGridAndPutNodeToMaintenance( + new CacheConfiguration[]{ + cacheConfiguration(cacheName0), + cacheConfiguration(cacheName1) + }, + s -> s.equals(cacheName0)); + + IgniteEx ig1 = startGrid(1); + + String port = ig1.localNode().attribute(IgniteNodeAttributes.ATTR_REST_TCP_PORT).toString(); + + assertEquals(EXIT_CODE_OK, execute("--persistence", "clean", "all", + "--host", "localhost", "--port", port)); + + boolean allEmpty = Arrays.stream(mntcNodeWorkDir.listFiles()) + .filter(File::isDirectory) + .filter(f -> f.getName().startsWith("cache-")) + .map(f -> f.listFiles().length == 1) + .reduce(true, (t, u) -> t && u); + + assertTrue(allEmpty); + + stopGrid(1); + + ig1 = startGrid(1); + + assertFalse(ig1.context().maintenanceRegistry().isMaintenanceMode()); + } + + /** + * Test verifies that persistence backup command to backup all caches backs up all cache directories. + * + * @throws Exception If failed. 
+ */ + @Test + public void testPersistenceBackupAllCachesCommand() throws Exception { + String cacheName0 = DEFAULT_CACHE_NAME + "0"; + String cacheName1 = DEFAULT_CACHE_NAME + "1"; + + File mntcNodeWorkDir = startGridAndPutNodeToMaintenance( + new CacheConfiguration[]{ + cacheConfiguration(cacheName0), + cacheConfiguration(cacheName1) + }, + s -> s.equals(cacheName0)); + + IgniteEx ig1 = startGrid(1); + + String port = ig1.localNode().attribute(IgniteNodeAttributes.ATTR_REST_TCP_PORT).toString(); + + assertEquals(EXIT_CODE_OK, execute("--persistence", "backup", "all", + "--host", "localhost", "--port", port)); + + Set backedUpCacheDirs = Arrays.stream(mntcNodeWorkDir.listFiles()) + .filter(File::isDirectory) + .filter(f -> f.getName().startsWith("backup_")) + .map(f -> f.getName().substring("backup_".length())) + .collect(Collectors.toCollection(TreeSet::new)); + + Set allCacheDirs = Arrays.stream(mntcNodeWorkDir.listFiles()) + .filter(File::isDirectory) + .filter(f -> f.getName().startsWith("cache-")) + .map(File::getName) + .collect(Collectors.toCollection(TreeSet::new)); + + assertEqualsCollections(backedUpCacheDirs, allCacheDirs); + + checkCacheAndBackupDirsContent(mntcNodeWorkDir); + } + + /** + * Test verifies that persistence backup command copies all corrupted caches content to backup directory + * but does not touch other directories. + * + * @throws Exception If failed. 
+ */ + @Test + public void testPersistenceBackupCorruptedCachesCommand() throws Exception { + String cacheName0 = DEFAULT_CACHE_NAME + "0"; + String cacheName1 = DEFAULT_CACHE_NAME + "1"; + + File mntcNodeWorkDir = startGridAndPutNodeToMaintenance( + new CacheConfiguration[]{ + cacheConfiguration(cacheName0), + cacheConfiguration(cacheName1) + }, + s -> s.equals(cacheName0)); + + IgniteEx ig1 = startGrid(1); + + String port = ig1.localNode().attribute(IgniteNodeAttributes.ATTR_REST_TCP_PORT).toString(); + + assertEquals(EXIT_CODE_OK, execute("--persistence", "backup", "corrupted", + "--host", "localhost", "--port", port)); + + long backedUpCachesCnt = Arrays.stream(mntcNodeWorkDir.listFiles()) + .filter(File::isDirectory) + .filter(f -> f.getName().startsWith("backup_")) + .filter(f -> f.getName().contains(cacheName0)) + .count(); + + assertEquals(1, backedUpCachesCnt); + + checkCacheAndBackupDirsContent(mntcNodeWorkDir); + } + + /** + * Test verifies that persistence backup command with specified caches copied only content of that caches and + * doesn't touch other directories. + * + * @throws Exception If failed. 
+ */ + @Test + public void testPersistenceBackupSpecifiedCachesCommand() throws Exception { + String cacheName0 = DEFAULT_CACHE_NAME + "0"; + String cacheName1 = DEFAULT_CACHE_NAME + "1"; + String cacheName2 = DEFAULT_CACHE_NAME + "2"; + + String nonExistingCacheName = "nonExistingCache"; + + File mntcNodeWorkDir = startGridAndPutNodeToMaintenance( + new CacheConfiguration[]{ + cacheConfiguration(cacheName0), + cacheConfiguration(cacheName1), + cacheConfiguration(cacheName2) + }, + s -> s.equals(cacheName0) || s.equals(cacheName2)); + + IgniteEx ig1 = startGrid(1); + + String port = ig1.localNode().attribute(IgniteNodeAttributes.ATTR_REST_TCP_PORT).toString(); + + assertEquals(EXIT_CODE_INVALID_ARGUMENTS, execute("--persistence", "backup", "caches", + nonExistingCacheName, + "--host", "localhost", "--port", port)); + + assertEquals(EXIT_CODE_OK, execute("--persistence", "backup", "caches", + cacheName0 + "," + cacheName2, + "--host", "localhost", "--port", port)); + + long backedUpCachesCnt = Arrays.stream(mntcNodeWorkDir.listFiles()) + .filter(File::isDirectory) + .filter(f -> f.getName().startsWith("backup_")) + .count(); + + assertEquals(2, backedUpCachesCnt); + + checkCacheAndBackupDirsContent(mntcNodeWorkDir); + } + + /** */ + private void checkCacheAndBackupDirsContent(File mntcNodeWorkDir) { + List backupDirs = Arrays.stream(mntcNodeWorkDir.listFiles()) + .filter(File::isDirectory) + .filter(f -> f.getName().startsWith("backup_")) + .collect(Collectors.toList()); + + Path mntcNodeWorkDirPath = mntcNodeWorkDir.toPath(); + + for (File bDir : backupDirs) { + File origCacheDir = mntcNodeWorkDirPath.resolve(bDir.getName().substring("backup_".length())).toFile(); + + assertTrue(origCacheDir.isDirectory()); + + assertEquals(origCacheDir.listFiles().length, bDir.listFiles().length); + } + } + /** * Test enabling/disabling read-only mode works via control.sh * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceProcessor.java 
b/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceProcessor.java index 8f85ceb64dc2a..6bc3e8ecaae94 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceProcessor.java @@ -162,12 +162,21 @@ public MaintenanceProcessor(GridKernalContext ctx) { ); } - if (!workflowCallbacks.isEmpty()) + if (!workflowCallbacks.isEmpty()) { + if (log.isInfoEnabled()) { + String mntcTasksNames = String.join(", ", workflowCallbacks.keySet()); + + log.info("Node requires maintenance, non-empty set of maintenance tasks is found: [" + + mntcTasksNames + ']'); + } + proceedWithMaintenance(); - else { - if (log.isInfoEnabled()) + } + else if (isMaintenanceMode()) { + if (log.isInfoEnabled()) { log.info("All maintenance tasks are fixed, no need to enter maintenance mode. " + "Restart the node to get it back to normal operations."); + } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CheckCorruptedCacheStoresCleanAction.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CheckCorruptedCacheStoresCleanAction.java new file mode 100644 index 0000000000000..2073b9bc3ebeb --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CheckCorruptedCacheStoresCleanAction.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence; + +import java.io.File; + +import org.apache.ignite.maintenance.MaintenanceAction; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.CACHE_DATA_FILENAME; + +/** */ +public class CheckCorruptedCacheStoresCleanAction implements MaintenanceAction { + /** */ + public static final String ACTION_NAME = "check_cache_files_cleaned"; + + /** */ + private final File rootStoreDir; + + /** */ + private final String[] cacheStoreDirs; + + /** */ + public CheckCorruptedCacheStoresCleanAction(File rootStoreDir, String[] cacheStoreDirs) { + this.rootStoreDir = rootStoreDir; + this.cacheStoreDirs = cacheStoreDirs; + } + + /** {@inheritDoc} */ + @Override public Boolean execute() { + for (String cacheStoreDirName : cacheStoreDirs) { + File cacheStoreDir = new File(rootStoreDir, cacheStoreDirName); + + if (cacheStoreDir.exists() && cacheStoreDir.isDirectory()) { + for (File f : cacheStoreDir.listFiles()) { + if (!f.getName().equals(CACHE_DATA_FILENAME)) + return Boolean.FALSE; + } + } + } + + return Boolean.TRUE; + } + + /** {@inheritDoc} */ + @Override public @NotNull String name() { + return ACTION_NAME; + } + + /** {@inheritDoc} */ + @Override public @Nullable String description() { + return "Checks if all corrupted data files are cleaned from cache store directories"; + } +} diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CorruptedPdsMaintenanceCallback.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CorruptedPdsMaintenanceCallback.java index 52a8f6f45f15a..0173bca7facf7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CorruptedPdsMaintenanceCallback.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CorruptedPdsMaintenanceCallback.java @@ -69,7 +69,9 @@ public CorruptedPdsMaintenanceCallback(@NotNull File workDir, /** {@inheritDoc} */ @Override public List allActions() { - return Arrays.asList(new CleanCacheStoresMaintenanceAction(workDir, cacheStoreDirs.toArray(new String[0]))); + return Arrays.asList( + new CleanCacheStoresMaintenanceAction(workDir, cacheStoreDirs.toArray(new String[0])), + new CheckCorruptedCacheStoresCleanAction(workDir, cacheStoreDirs.toArray(new String[0]))); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceCleanAndBackupSettings.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceCleanAndBackupSettings.java new file mode 100644 index 0000000000000..a5bf3278c45aa --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceCleanAndBackupSettings.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.persistence; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.List; + +import org.apache.ignite.internal.dto.IgniteDataTransferObject; +import org.apache.ignite.internal.util.typedef.internal.U; + +/** */ +public class PersistenceCleanAndBackupSettings extends IgniteDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** */ + private PersistenceCleanAndBackupType cleanAndBackupType; + + /** */ + private List cacheNames; + + /** */ + public PersistenceCleanAndBackupSettings() { + // No-op. 
+ } + + /** */ + public PersistenceCleanAndBackupSettings(PersistenceCleanAndBackupType cleanAndBackupType, List cacheNames) { + this.cleanAndBackupType = cleanAndBackupType; + this.cacheNames = cacheNames; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + U.writeEnum(out, cleanAndBackupType); + U.writeCollection(out, cacheNames); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte protoVer, ObjectInput in) throws IOException, ClassNotFoundException { + cleanAndBackupType = PersistenceCleanAndBackupType.fromOrdinal(in.readByte()); + cacheNames = U.readList(in); + } + + /** */ + public PersistenceCleanAndBackupType cleanAndBackupType() { + return cleanAndBackupType; + } + + /** */ + public List cacheNames() { + return cacheNames; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceCleanAndBackupType.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceCleanAndBackupType.java new file mode 100644 index 0000000000000..21988516bd829 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceCleanAndBackupType.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.persistence; + +import org.jetbrains.annotations.Nullable; + +/** */ +public enum PersistenceCleanAndBackupType { + /** */ + ALL, + /** */ + CORRUPTED, + /** */ + CACHES; + + /** */ + private static final PersistenceCleanAndBackupType[] VALS = values(); + + /** + * @param ordinal Index of enum value. + * @return Value of {@link PersistenceCleanAndBackupType} enum. + */ + @Nullable public static PersistenceCleanAndBackupType fromOrdinal(int ordinal) { + return ordinal >= 0 && ordinal < VALS.length ? VALS[ordinal] : null; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceOperation.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceOperation.java new file mode 100644 index 0000000000000..2481af85f3095 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceOperation.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.visor.persistence; + +import org.jetbrains.annotations.Nullable; + +/** Persistence cleaning operations. */ +public enum PersistenceOperation { + /** */ + INFO, + + /** */ + CLEAN, + + /** */ + BACKUP; + + /** */ + private static final PersistenceOperation[] VALS = values(); + + /** + * @param ordinal Index of enum value. + * @return Value of {@link PersistenceOperation} enum. + */ + @Nullable public static PersistenceOperation fromOrdinal(int ordinal) { + return ordinal >= 0 && ordinal < VALS.length ? VALS[ordinal] : null; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTask.java new file mode 100644 index 0000000000000..1ac23f4e2c0d7 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTask.java @@ -0,0 +1,401 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.visor.persistence; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.StandardCopyOption; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; + +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager; +import org.apache.ignite.internal.processors.cache.CacheGroupDescriptor; +import org.apache.ignite.internal.processors.cache.DynamicCacheDescriptor; +import org.apache.ignite.internal.processors.cache.GridCacheProcessor; +import org.apache.ignite.internal.processors.cache.persistence.CheckCorruptedCacheStoresCleanAction; +import org.apache.ignite.internal.processors.cache.persistence.CleanCacheStoresMaintenanceAction; +import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; +import org.apache.ignite.internal.processors.task.GridInternal; +import org.apache.ignite.internal.processors.task.GridVisorManagementTask; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.visor.VisorJob; +import org.apache.ignite.internal.visor.VisorOneNodeTask; +import org.apache.ignite.lang.IgniteBiTuple; +import org.apache.ignite.maintenance.MaintenanceAction; +import org.apache.ignite.maintenance.MaintenanceRegistry; +import org.apache.ignite.maintenance.MaintenanceTask; +import org.jetbrains.annotations.Nullable; + +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.CORRUPTED_DATA_FILES_MNTC_TASK_NAME; +import static 
org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.cacheDirName; + +/** */ +@GridInternal +@GridVisorManagementTask +public class PersistenceTask extends VisorOneNodeTask { + /** */ + private static final long serialVersionUID = 0L; + + /** */ + private static final String BACKUP_FOLDER_PREFIX = "backup_"; + + @Override protected VisorJob job(PersistenceTaskArg arg) { + return new PersistenceJob(arg, debug); + } + + /** */ + private static class PersistenceJob extends VisorJob { + /** */ + private static final long serialVersionUID = 0L; + + /** + * Create job with specified argument. + * + * @param arg Job argument. + * @param debug Flag indicating whether debug information should be printed into node log. + */ + protected PersistenceJob(@Nullable PersistenceTaskArg arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override protected PersistenceTaskResult run(@Nullable PersistenceTaskArg arg) throws IgniteException { + if (!ignite.context().maintenanceRegistry().isMaintenanceMode()) + return new PersistenceTaskResult(false); + + switch (arg.operation()) { + case CLEAN: + return clean(arg); + + case BACKUP: + return backup(arg); + + default: + return info(); + } + } + + /** */ + private PersistenceTaskResult backup(PersistenceTaskArg arg) { + PersistenceCleanAndBackupSettings backupSettings = arg.cleanAndBackupSettings(); + + MaintenanceRegistry mntcReg = ignite.context().maintenanceRegistry(); + MaintenanceTask task = mntcReg.activeMaintenanceTask(CORRUPTED_DATA_FILES_MNTC_TASK_NAME); + + File workDir = ((FilePageStoreManager) ignite.context().cache().context().pageStore()).workDir(); + + switch (backupSettings.cleanAndBackupType()) { + case ALL: + return backupAll(workDir); + + case CORRUPTED: + return backupCaches(workDir, corruptedCacheDirectories(task)); + + default: + return backupCaches(workDir, cacheDirectoriesFromCacheNames(backupSettings.cacheNames())); + } + } + + /** */ + private 
PersistenceTaskResult backupAll(File workDir) { + GridCacheProcessor cacheProc = ignite.context().cache(); + + List allCacheDirs = cacheProc.cacheDescriptors() + .values() + .stream() + .map(desc -> cacheDirName(desc.cacheConfiguration())) + .distinct() + .collect(Collectors.toList()); + + return backupCaches(workDir, allCacheDirs); + } + + /** */ + private PersistenceTaskResult backupCaches(File workDir, List cacheDirs) { + PersistenceTaskResult res = new PersistenceTaskResult(true); + + List backupCompletedCaches = new ArrayList<>(); + List backupFailedCaches = new ArrayList<>(); + + for (String dir : cacheDirs) { + String backupDirName = BACKUP_FOLDER_PREFIX + dir; + + File backupDir = new File(workDir, backupDirName); + + if (!backupDir.exists()) { + try { + U.ensureDirectory(backupDir, backupDirName, null); + + copyCacheFiles(workDir.toPath().resolve(dir).toFile(), backupDir); + + backupCompletedCaches.add(backupDirName); + } catch (IgniteCheckedException | IOException e) { + backupFailedCaches.add(dir); + } + } + } + + res.handledCaches(backupCompletedCaches); + res.failedCaches(backupFailedCaches); + + return res; + } + + /** */ + private void copyCacheFiles(File sourceDir, File backupDir) throws IOException { + for (File f : sourceDir.listFiles()) + Files.copy(f.toPath(), backupDir.toPath().resolve(f.getName()), StandardCopyOption.REPLACE_EXISTING); + } + + /** */ + private PersistenceTaskResult clean(PersistenceTaskArg arg) { + PersistenceTaskResult res = new PersistenceTaskResult(); + + PersistenceCleanAndBackupSettings cleanSettings = arg.cleanAndBackupSettings(); + + GridCacheProcessor cacheProc = ignite.context().cache(); + MaintenanceRegistry mntcReg = ignite.context().maintenanceRegistry(); + + switch (cleanSettings.cleanAndBackupType()) { + case ALL: + return cleanAll(cacheProc, mntcReg); + + case CORRUPTED: + return cleanCorrupted(mntcReg); + + case CACHES: + return cleanCaches(cacheProc, mntcReg, cleanSettings.cacheNames()); + } + + return res; + 
} + + /** */ + private PersistenceTaskResult cleanCaches( + GridCacheProcessor cacheProc, + MaintenanceRegistry mntcReg, + List cacheNames + ) { + PersistenceTaskResult res = new PersistenceTaskResult(true); + + List cleanedCaches = new ArrayList<>(); + List failedToCleanCaches = new ArrayList<>(); + + DataStorageConfiguration dsCfg = ignite.context().config().getDataStorageConfiguration(); + IgnitePageStoreManager pageStore = cacheProc.context().pageStore(); + + AtomicReference missedCache = new AtomicReference<>(); + + Boolean allExist = cacheNames + .stream() + .map(name -> { + if (cacheProc.cacheDescriptor(name) != null) + return true; + else { + missedCache.set(name); + + return false; + } + }) + .reduce(true, (t, u) -> t && u); + + if (!allExist) + throw new IllegalArgumentException("Cache with name " + missedCache.get() + + " not found, no caches will be cleaned."); + + for (String name : cacheNames) { + DynamicCacheDescriptor cacheDescr = cacheProc.cacheDescriptor(name); + + if (CU.isPersistentCache(cacheDescr.cacheConfiguration(), dsCfg)) { + try { + pageStore.cleanupPersistentSpace(cacheDescr.cacheConfiguration()); + + cleanedCaches.add(cacheDirName(cacheDescr.cacheConfiguration())); + } + catch (IgniteCheckedException e) { + failedToCleanCaches.add(name); + } + } + } + + res.handledCaches(cleanedCaches); + + if (!failedToCleanCaches.isEmpty()) + res.failedCaches(failedToCleanCaches); + + List actions = mntcReg.actionsForMaintenanceTask(CORRUPTED_DATA_FILES_MNTC_TASK_NAME); + + Optional checkActionOpt = actions.stream().filter(a -> a.name().equals(CheckCorruptedCacheStoresCleanAction.ACTION_NAME)) + .findFirst(); + + if (checkActionOpt.isPresent()) { + MaintenanceAction action = checkActionOpt.get(); + + Boolean mntcTaskCompleted = action.execute(); + + res.maintenanceTaskCompleted(mntcTaskCompleted); + + if (mntcTaskCompleted) + mntcReg.unregisterMaintenanceTask(CORRUPTED_DATA_FILES_MNTC_TASK_NAME); + } + + return res; + } + + /** */ + private 
PersistenceTaskResult cleanAll(GridCacheProcessor cacheProc, MaintenanceRegistry mntcReg) { + PersistenceTaskResult res = new PersistenceTaskResult(true); + + List allCacheDirs = cacheProc.cacheDescriptors() + .values() + .stream() + .map(desc -> cacheDirName(desc.cacheConfiguration())) + .collect(Collectors.toList()); + + try { + cacheProc.cleanupCachesDirectories(); + } catch (IgniteCheckedException e) { + throw U.convertException(e); + } + + mntcReg.unregisterMaintenanceTask(CORRUPTED_DATA_FILES_MNTC_TASK_NAME); + + res.maintenanceTaskCompleted(true); + res.handledCaches(allCacheDirs); + + return res; + } + + /** */ + private PersistenceTaskResult cleanCorrupted(MaintenanceRegistry mntcReg) { + PersistenceTaskResult res = new PersistenceTaskResult(true); + + List actions = mntcReg + .actionsForMaintenanceTask(CORRUPTED_DATA_FILES_MNTC_TASK_NAME); + + Optional cleanCorruptedActionOpt = actions + .stream() + .filter(a -> a.name().equals(CleanCacheStoresMaintenanceAction.ACTION_NAME)) + .findFirst(); + + if (cleanCorruptedActionOpt.isPresent()) { + cleanCorruptedActionOpt.get().execute(); + + MaintenanceTask corruptedTask = mntcReg.activeMaintenanceTask(CORRUPTED_DATA_FILES_MNTC_TASK_NAME); + + mntcReg.unregisterMaintenanceTask(CORRUPTED_DATA_FILES_MNTC_TASK_NAME); + + res.handledCaches( + corruptedCacheDirectories(corruptedTask) + ); + + res.maintenanceTaskCompleted(true); + } + + return res; + } + + /** */ + private PersistenceTaskResult info() { + PersistenceTaskResult res = new PersistenceTaskResult(true); + + GridCacheProcessor cacheProc = ignite.context().cache(); + DataStorageConfiguration dsCfg = ignite.context().config().getDataStorageConfiguration(); + + List corruptedCacheNames = corruptedCacheDirectories(ignite.context().maintenanceRegistry() + .activeMaintenanceTask(CORRUPTED_DATA_FILES_MNTC_TASK_NAME)); + + Map> cachesInfo = new HashMap<>(); + + for (DynamicCacheDescriptor desc : cacheProc.cacheDescriptors().values()) { + if 
(!CU.isPersistentCache(desc.cacheConfiguration(), dsCfg)) + continue; + + CacheGroupDescriptor grpDesc = desc.groupDescriptor(); + + if (grpDesc != null) { + boolean globalWalEnabled = grpDesc.walEnabled(); + boolean localWalEnabled = true; + + if (globalWalEnabled && corruptedCacheNames.contains(desc.cacheName())) + localWalEnabled = false; + + cachesInfo.put(desc.cacheName(), new IgniteBiTuple<>(globalWalEnabled, localWalEnabled)); + } + } + + res.cachesInfo(cachesInfo); + + return res; + } + + /** */ + private List corruptedCacheDirectories(MaintenanceTask task) { + String params = task.parameters(); + + String[] namesArr = params.split(File.separator); + + return Arrays.asList(namesArr); + } + + /** */ + private List cacheDirectoriesFromCacheNames(List cacheNames) { + GridCacheProcessor cacheProc = ignite.context().cache(); + + DataStorageConfiguration dsCfg = ignite.configuration().getDataStorageConfiguration(); + + AtomicReference missedCache = new AtomicReference<>(); + + Boolean allExist = cacheNames.stream() + .map(s -> { + if (cacheProc.cacheDescriptor(s) != null) + return true; + else { + missedCache.set(s); + + return false; + } + }) + .reduce(true, (u, v) -> u && v); + + if (!allExist) + throw new IllegalArgumentException("Cache with name " + missedCache.get() + + " not found, no caches will be backed up."); + + return cacheNames.stream() + .filter(s -> cacheProc.cacheDescriptor(s) != null) + .filter(s -> + CU.isPersistentCache(cacheProc.cacheDescriptor(s).cacheConfiguration(), dsCfg)) + .map(s -> cacheProc.cacheDescriptor(s).cacheConfiguration()) + .map(FilePageStoreManager::cacheDirName) + .distinct() + .collect(Collectors.toList()); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTaskArg.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTaskArg.java new file mode 100644 index 0000000000000..c48f936764193 --- /dev/null +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTaskArg.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.persistence; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; + +import org.apache.ignite.internal.dto.IgniteDataTransferObject; +import org.apache.ignite.internal.util.typedef.internal.U; + +/** + * + */ +public class PersistenceTaskArg extends IgniteDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** */ + private PersistenceOperation op; + + /** */ + private PersistenceCleanAndBackupSettings cleanAndBackupSettings; + + /** + * Default constructor. + */ + public PersistenceTaskArg() { + // No-op. + } + + /** + * @param op {@link PersistenceOperation} requested for execution. + * @param cleanAndBackupSettings {@link PersistenceCleanAndBackupSettings} specific settings for clean and backup + * commands. 
+ */ + public PersistenceTaskArg(PersistenceOperation op, PersistenceCleanAndBackupSettings cleanAndBackupSettings) { + this.op = op; + this.cleanAndBackupSettings = cleanAndBackupSettings; + } + + /** + * @return {@link PersistenceOperation} operation requested for execution. + */ + public PersistenceOperation operation() { + return op; + } + + /** + * @return {@link PersistenceCleanAndBackupSettings} specific settings for clean and backup commands. + */ + public PersistenceCleanAndBackupSettings cleanAndBackupSettings() { + return cleanAndBackupSettings; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + U.writeEnum(out, op); + out.writeObject(cleanAndBackupSettings); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte protoVer, ObjectInput in) throws IOException, ClassNotFoundException { + op = PersistenceOperation.fromOrdinal(in.readByte()); + cleanAndBackupSettings = (PersistenceCleanAndBackupSettings) in.readObject(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTaskResult.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTaskResult.java new file mode 100644 index 0000000000000..5a0a0fedb19fd --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTaskResult.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.persistence; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.List; +import java.util.Map; + +import org.apache.ignite.internal.dto.IgniteDataTransferObject; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteBiTuple; + +public class PersistenceTaskResult extends IgniteDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** */ + private boolean inMaintenanceMode; + + /** */ + private boolean maintenanceTaskCompleted; + + /** */ + private List handledCaches; + + /** */ + private List failedToHandleCaches; + + /** */ + private Map> cachesInfo; + + /** */ + public PersistenceTaskResult() { + // No-op. 
+ } + + /** + * + */ + public PersistenceTaskResult(boolean inMaintenanceMode) { + this.inMaintenanceMode = inMaintenanceMode; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + out.writeBoolean(inMaintenanceMode); + out.writeBoolean(maintenanceTaskCompleted); + U.writeCollection(out, handledCaches); + U.writeCollection(out, failedToHandleCaches); + U.writeMap(out, cachesInfo); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte protoVer, ObjectInput in) throws IOException, ClassNotFoundException { + inMaintenanceMode = in.readBoolean(); + maintenanceTaskCompleted = in.readBoolean(); + handledCaches = U.readList(in); + failedToHandleCaches = U.readList(in); + cachesInfo = U.readMap(in); + } + + /** */ + public boolean inMaintenanceMode() { + return inMaintenanceMode; + } + + /** */ + public boolean maintenanceTaskCompleted() { + return maintenanceTaskCompleted; + } + + /** */ + public void maintenanceTaskCompleted(boolean maintenanceTaskCompleted) { + this.maintenanceTaskCompleted = maintenanceTaskCompleted; + } + + /** */ + public List handledCaches() { + return handledCaches; + } + + /** */ + public void handledCaches(List handledCaches) { + this.handledCaches = handledCaches; + } + + /** */ + public List failedCaches() { + return failedToHandleCaches; + } + + /** */ + public void failedCaches(List failedToHandleCaches) { + this.failedToHandleCaches = failedToHandleCaches; + } + + /** */ + public Map> cachesInfo() { + return cachesInfo; + } + + /** */ + public void cachesInfo(Map> cachesInfo) { + this.cachesInfo = cachesInfo; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceTask.java b/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceTask.java index 49795f1a92cf7..c600952866592 100644 --- a/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceTask.java +++ 
b/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceTask.java @@ -24,8 +24,8 @@ /** * Represents request to handle maintenance situation. * - * It can be created automatically or by user request by any component needed maintenance and should be registered - * in Maintenance Registry with the method {@link MaintenanceRegistry#registerMaintenanceTask(MaintenanceTask)}. + * Maintenance request can be created programmatically + * with {@link MaintenanceRegistry#registerMaintenanceTask(MaintenanceTask)} public API call. * * Lifecycle of Maintenance Task is managed by {@link MaintenanceRegistry}. * diff --git a/modules/core/src/main/resources/META-INF/classnames.properties b/modules/core/src/main/resources/META-INF/classnames.properties index deac9f8e57689..92d01dda9d92d 100644 --- a/modules/core/src/main/resources/META-INF/classnames.properties +++ b/modules/core/src/main/resources/META-INF/classnames.properties @@ -350,20 +350,7 @@ org.apache.ignite.internal.commandline.cache.argument.IdleVerifyCommandArg org.apache.ignite.internal.commandline.cache.argument.ListCommandArg org.apache.ignite.internal.commandline.cache.argument.PartitionReconciliationCommandArg org.apache.ignite.internal.commandline.cache.argument.ValidateIndexesCommandArg -org.apache.ignite.internal.commandline.cache.check_indexes_inline_size.CheckIndexInlineSizesResult -org.apache.ignite.internal.commandline.cache.check_indexes_inline_size.CheckIndexInlineSizesTask org.apache.ignite.internal.commandline.cache.check_indexes_inline_size.CheckIndexInlineSizesTask$CheckIndexInlineSizesJob -org.apache.ignite.internal.commandline.cache.distribution.CacheDistributionGroup -org.apache.ignite.internal.commandline.cache.distribution.CacheDistributionNode -org.apache.ignite.internal.commandline.cache.distribution.CacheDistributionPartition -org.apache.ignite.internal.commandline.cache.distribution.CacheDistributionTask 
-org.apache.ignite.internal.commandline.cache.distribution.CacheDistributionTask$CacheDistributionJob -org.apache.ignite.internal.commandline.cache.distribution.CacheDistributionTaskArg -org.apache.ignite.internal.commandline.cache.distribution.CacheDistributionTaskResult -org.apache.ignite.internal.commandline.cache.reset_lost_partitions.CacheResetLostPartitionsTask -org.apache.ignite.internal.commandline.cache.reset_lost_partitions.CacheResetLostPartitionsTask$CacheResetLostPartitionsJob -org.apache.ignite.internal.commandline.cache.reset_lost_partitions.CacheResetLostPartitionsTaskArg -org.apache.ignite.internal.commandline.cache.reset_lost_partitions.CacheResetLostPartitionsTaskResult org.apache.ignite.internal.commandline.diagnostic.DiagnosticSubCommand org.apache.ignite.internal.commandline.diagnostic.PageLocksCommand$PageLocksCommandArg org.apache.ignite.internal.commandline.dr.DrSubCommandsList @@ -373,16 +360,8 @@ org.apache.ignite.internal.commandline.dr.subcommands.DrCacheCommand$SenderGroup org.apache.ignite.internal.commandline.management.ManagementCommandList org.apache.ignite.internal.commandline.management.ManagementURLCommandArg org.apache.ignite.internal.commandline.meta.subcommands.MetadataAbstractSubCommand.VoidDto -org.apache.ignite.internal.commandline.meta.tasks.MetadataListResult -org.apache.ignite.internal.commandline.meta.tasks.MetadataInfoTask -org.apache.ignite.internal.commandline.meta.tasks.MetadataInfoTask.MetadataListJob -org.apache.ignite.internal.commandline.meta.tasks.MetadataMarshalled -org.apache.ignite.internal.commandline.meta.tasks.MetadataRemoveTask org.apache.ignite.internal.commandline.meta.tasks.MetadataRemoveTask$MetadataRemoveJob org.apache.ignite.internal.commandline.meta.tasks.MetadataRemoveTask$DropAllThinSessionsJob -org.apache.ignite.internal.commandline.meta.tasks.MetadataTypeArgs -org.apache.ignite.internal.commandline.meta.tasks.MetadataUpdateTask 
-org.apache.ignite.internal.commandline.meta.tasks.MetadataUpdateTask.MetadataUpdateJob org.apache.ignite.internal.commandline.property.tasks.PropertiesListResult org.apache.ignite.internal.commandline.property.tasks.PropertiesListTask org.apache.ignite.internal.commandline.property.tasks.PropertyOperationResult @@ -2285,6 +2264,7 @@ org.apache.ignite.internal.visor.query.VisorRunningQuery org.apache.ignite.internal.visor.query.VisorScanQueryTask org.apache.ignite.internal.visor.query.VisorScanQueryTask$VisorScanQueryJob org.apache.ignite.internal.visor.query.VisorScanQueryTaskArg +org.apache.ignite.internal.visor.persistence.PersistenceTaskResult org.apache.ignite.internal.visor.service.VisorCancelServiceTask org.apache.ignite.internal.visor.service.VisorCancelServiceTask$VisorCancelServiceJob org.apache.ignite.internal.visor.service.VisorCancelServiceTaskArg diff --git a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output index 2806cd60552cc..60ef386e8f938 100644 --- a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output +++ b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output @@ -197,6 +197,30 @@ If the file name isn't specified the output file name is: '.bin' name - Name of the metric which value should be printed. If name of the metric registry is specified, value of all its metrics will be printed. node_id - ID of the node to get the metric values from. If not set, random node will be chosen. 
+ Print information about potentially corrupted caches on local node: + control.(sh|bat) --persistence + + The same information is printed when info subcommand is passed: + control.(sh|bat) --persistence info + + Clean directories of caches with corrupted data files: + control.(sh|bat) --persistence clean corrupted + + Clean directories of all caches: + control.(sh|bat) --persistence clean all + + Clean directories of only given caches: + control.(sh|bat) --persistence clean caches cache1,cache2,cache3 + + Backup data files of corrupted caches only: + control.(sh|bat) --persistence backup corrupted + + Backup data files of all caches: + control.(sh|bat) --persistence backup all + + Backup data files of only given caches: + control.(sh|bat) --persistence backup caches cache1,cache2,cache3 + By default commands affecting the cluster require interactive confirmation. Use --yes option to disable it. diff --git a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output index 2806cd60552cc..60ef386e8f938 100644 --- a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output +++ b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output @@ -197,6 +197,30 @@ If the file name isn't specified the output file name is: '.bin' name - Name of the metric which value should be printed. If name of the metric registry is specified, value of all its metrics will be printed. node_id - ID of the node to get the metric values from. If not set, random node will be chosen. 
+ Print information about potentially corrupted caches on local node: + control.(sh|bat) --persistence + + The same information is printed when info subcommand is passed: + control.(sh|bat) --persistence info + + Clean directories of caches with corrupted data files: + control.(sh|bat) --persistence clean corrupted + + Clean directories of all caches: + control.(sh|bat) --persistence clean all + + Clean directories of only given caches: + control.(sh|bat) --persistence clean caches cache1,cache2,cache3 + + Backup data files of corrupted caches only: + control.(sh|bat) --persistence backup corrupted + + Backup data files of all caches: + control.(sh|bat) --persistence backup all + + Backup data files of only given caches: + control.(sh|bat) --persistence backup caches cache1,cache2,cache3 + By default commands affecting the cluster require interactive confirmation. Use --yes option to disable it. From 3bc59a892b3abdca2343968d81661eb58a565546 Mon Sep 17 00:00:00 2001 From: Nikolay Date: Tue, 3 Nov 2020 17:28:10 +0300 Subject: [PATCH 017/110] IGNITE-13627 Metric registry remove not thread safe. (#8410) --- .../processors/metric/GridMetricManager.java | 36 ++++---- .../internal/metric/MetricsSelfTest.java | 89 +++++++++++++++++++ 2 files changed, 109 insertions(+), 16 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/metric/GridMetricManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/metric/GridMetricManager.java index 17e69678ccb70..fddf7ff91fab2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/metric/GridMetricManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/metric/GridMetricManager.java @@ -410,28 +410,32 @@ private T readFromMetastorage(String key) { * @param regName Metric registry name. 
*/ public void remove(String regName) { - ReadOnlyMetricRegistry mreg = registries.remove(regName); + GridCompoundFuture opsFut = new GridCompoundFuture<>(); - if (mreg == null) - return; - - notifyListeners(mreg, metricRegRemoveLsnrs, log); + registries.computeIfPresent(regName, (key, mreg) -> { + notifyListeners(mreg, metricRegRemoveLsnrs, log); - DistributedMetaStorage metastorage0 = metastorage; + DistributedMetaStorage metastorage0 = metastorage; - if (metastorage0 == null) - return; + if (metastorage0 == null) + return null; - try { - GridCompoundFuture opsFut = new GridCompoundFuture<>(); - - for (Metric m : mreg) { - if (m instanceof HitRateMetric) - opsFut.add(metastorage0.removeAsync(metricName(HITRATE_CFG_PREFIX, m.name()))); - else if (m instanceof HistogramMetric) - opsFut.add(metastorage0.removeAsync(metricName(HISTOGRAM_CFG_PREFIX, m.name()))); + try { + for (Metric m : mreg) { + if (m instanceof HitRateMetric) + opsFut.add(metastorage0.removeAsync(metricName(HITRATE_CFG_PREFIX, m.name()))); + else if (m instanceof HistogramMetric) + opsFut.add(metastorage0.removeAsync(metricName(HISTOGRAM_CFG_PREFIX, m.name()))); + } + } + catch (IgniteCheckedException e) { + throw new IgniteException(e); } + return null; + }); + + try { opsFut.markInitialized(); opsFut.get(); } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/metric/MetricsSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/metric/MetricsSelfTest.java index 45ac6a2b5f3ce..4ac203dceaa14 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/metric/MetricsSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/metric/MetricsSelfTest.java @@ -22,7 +22,9 @@ import java.util.List; import java.util.Set; import java.util.Spliterators; +import java.util.concurrent.CountDownLatch; import java.util.stream.StreamSupport; +import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.IgniteInternalFuture; import 
org.apache.ignite.internal.processors.metric.MetricRegistry; import org.apache.ignite.internal.processors.metric.impl.AtomicLongMetric; @@ -34,14 +36,20 @@ import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; import org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.spi.IgniteSpiException; import org.apache.ignite.spi.metric.BooleanMetric; import org.apache.ignite.spi.metric.DoubleMetric; import org.apache.ignite.spi.metric.IntMetric; import org.apache.ignite.spi.metric.LongMetric; import org.apache.ignite.spi.metric.Metric; +import org.apache.ignite.spi.metric.MetricExporterSpi; import org.apache.ignite.spi.metric.ObjectMetric; +import org.apache.ignite.spi.metric.ReadOnlyMetricManager; +import org.apache.ignite.spi.metric.noop.NoopMetricExporterSpi; import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.GridTestKernalContext; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.jetbrains.annotations.Nullable; import org.junit.Before; import org.junit.Test; @@ -361,6 +369,87 @@ public void testFromFullName() { assertEquals(new T2<>("org", "apache"), fromFullName("org.apache")); } + /** */ + @Test + public void testAddBeforeRemoveCompletes() throws Exception { + MetricExporterSpi checkSpi = new NoopMetricExporterSpi() { + private ReadOnlyMetricManager registry; + + private Set names = new HashSet<>(); + + @Override public void spiStart(@Nullable String igniteInstanceName) throws IgniteSpiException { + registry.addMetricRegistryCreationListener(mreg -> { + assertFalse(mreg.name() + " should be unique", names.contains(mreg.name())); + + names.add(mreg.name()); + }); + + registry.addMetricRegistryRemoveListener(mreg -> names.remove(mreg.name())); + } + + @Override public void setMetricRegistry(ReadOnlyMetricManager registry) { + this.registry = registry; + } + }; + + CountDownLatch 
rmvStarted = new CountDownLatch(1); + CountDownLatch rmvCompleted = new CountDownLatch(1); + + MetricExporterSpi blockingSpi = new NoopMetricExporterSpi() { + private ReadOnlyMetricManager registry; + + @Override public void spiStart(@Nullable String igniteInstanceName) throws IgniteSpiException { + registry.addMetricRegistryRemoveListener(mreg -> { + rmvStarted.countDown(); + try { + rmvCompleted.await(); + } + catch (InterruptedException e) { + throw new RuntimeException(e); + } + }); + } + + @Override public void setMetricRegistry(ReadOnlyMetricManager registry) { + this.registry = registry; + } + }; + + IgniteConfiguration cfg = new IgniteConfiguration().setMetricExporterSpi(blockingSpi, checkSpi); + + GridTestKernalContext ctx = new GridTestKernalContext(log(), cfg); + + ctx.start(); + + // Add metric registry. + ctx.metric().registry("test"); + + // Removes it async, blockingSpi will block remove procedure. + IgniteInternalFuture rmvFut = runAsync(() -> ctx.metric().remove("test")); + + rmvStarted.await(); + + CountDownLatch addStarted = new CountDownLatch(1); + + IgniteInternalFuture addFut = runAsync(() -> { + addStarted.countDown(); + + ctx.metric().registry("test"); + }); + + // Waiting for creation to start. + addStarted.await(); + + Thread.sleep(100); + + // Complete removal. 
+ rmvCompleted.countDown(); + + rmvFut.get(getTestTimeout()); + + addFut.get(getTestTimeout()); + } + /** */ private void run(Runnable r, int cnt) throws org.apache.ignite.IgniteCheckedException { List futs = new ArrayList<>(); From fcc76f65835b41cd952b5bfe23a897977e94b6f7 Mon Sep 17 00:00:00 2001 From: Pavel Tupitsyn Date: Tue, 3 Nov 2020 18:31:01 +0300 Subject: [PATCH 018/110] IGNITE-2890 .NET: Add CacheConfiguration.NodeFilter * Add CacheConfiguration.NodeFilter * Add AttributeNodeFilter - the only allowed IClusterNodeFilter implementation for cache node filter, maps to the same class in Java --- .../utils/PlatformConfigurationUtils.java | 49 +++- .../ignite/util/AttributeNodeFilter.java | 14 +- ...Apache.Ignite.Core.Tests.DotNetCore.csproj | 1 + .../Apache.Ignite.Core.Tests.csproj | 4 + .../Cache/CacheNodeFilterTest.cs | 274 ++++++++++++++++++ .../Config/cache-attribute-node-filter.xml | 81 ++++++ .../Config/full-config.xml | 7 + .../IgniteConfigurationSerializerTest.cs | 9 + .../Apache.Ignite.Core.csproj | 1 + .../Cache/Configuration/CacheConfiguration.cs | 32 +- .../Cluster/AttributeNodeFilter.cs | 129 +++++++++ .../IgniteConfigurationSection.xsd | 12 + 12 files changed, 609 insertions(+), 4 deletions(-) create mode 100644 modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheNodeFilterTest.cs create mode 100644 modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/cache-attribute-node-filter.xml create mode 100644 modules/platforms/dotnet/Apache.Ignite.Core/Cluster/AttributeNodeFilter.cs diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java index 9cbef1264f559..750c3511a223e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java @@ -111,6 +111,7 @@ import org.apache.ignite.ssl.SslContextFactory; import org.apache.ignite.transactions.TransactionConcurrency; import org.apache.ignite.transactions.TransactionIsolation; +import org.apache.ignite.util.AttributeNodeFilter; /** * Configuration utils. @@ -246,6 +247,8 @@ public static CacheConfiguration readCacheConfiguration(BinaryRawReaderEx in) { ccfg.setAffinity(readAffinityFunction(in)); ccfg.setExpiryPolicyFactory(readExpiryPolicyFactory(in)); + ccfg.setNodeFilter(readAttributeNodeFilter(in)); + int keyCnt = in.readInt(); if (keyCnt > 0) { @@ -346,6 +349,48 @@ public static PlatformCacheConfiguration readPlatformCacheConfiguration(BinaryRa .setKeepBinary(in.readBoolean()); } + /** + * Reads the node filter config. + * + * @param in Stream. + * @return AttributeNodeFilter. + */ + public static AttributeNodeFilter readAttributeNodeFilter(BinaryRawReader in) { + if (!in.readBoolean()) + return null; + + int cnt = in.readInt(); + + Map attrs = new HashMap<>(cnt); + for (int i = 0; i < cnt; i++) + attrs.put(in.readString(), in.readObject()); + + return new AttributeNodeFilter(attrs); + } + + /** + * Writes the node filter. + * @param out Stream. + * @param nodeFilter IgnitePredicate. + */ + private static void writeAttributeNodeFilter(BinaryRawWriter out, IgnitePredicate nodeFilter) { + if (!(nodeFilter instanceof AttributeNodeFilter)) { + out.writeBoolean(false); + return; + } + + out.writeBoolean(true); + + Map attrs = ((AttributeNodeFilter) nodeFilter).getAttrs(); + + out.writeInt(attrs.size()); + + for (Map.Entry entry : attrs.entrySet()) { + out.writeString(entry.getKey()); + out.writeObject(entry.getValue()); + } + } + /** * Reads the eviction policy. * @@ -417,7 +462,7 @@ public static PlatformAffinityFunction readAffinityFunction(BinaryRawReaderEx in } /** - * Reads the near config. + * Writes the near config. 
* * @param out Stream. * @param cfg NearCacheConfiguration. @@ -1084,6 +1129,8 @@ public static void writeCacheConfiguration(BinaryRawWriter writer, CacheConfigur writeAffinityFunction(writer, ccfg.getAffinity()); writeExpiryPolicyFactory(writer, ccfg.getExpiryPolicyFactory()); + writeAttributeNodeFilter(writer, ccfg.getNodeFilter()); + CacheKeyConfiguration[] keys = ccfg.getKeyConfiguration(); if (keys != null) { diff --git a/modules/core/src/main/java/org/apache/ignite/util/AttributeNodeFilter.java b/modules/core/src/main/java/org/apache/ignite/util/AttributeNodeFilter.java index fed0d43f26019..70c5a2998e599 100644 --- a/modules/core/src/main/java/org/apache/ignite/util/AttributeNodeFilter.java +++ b/modules/core/src/main/java/org/apache/ignite/util/AttributeNodeFilter.java @@ -18,6 +18,7 @@ package org.apache.ignite.util; import java.util.Collections; +import java.util.HashMap; import java.util.Map; import org.apache.ignite.cluster.ClusterGroup; import org.apache.ignite.cluster.ClusterNode; @@ -42,7 +43,7 @@ * attribute set to value {@code data}: *
  * <property name="nodeFilter">
- *     <bean class="org.apache.ignite.util.ClusterAttributeNodeFilter">
+ *     <bean class="org.apache.ignite.util.AttributeNodeFilter">
  *         <constructor-arg value="group"/>
  *         <constructor-arg value="data"/>
  *     </bean>
@@ -51,7 +52,7 @@
  * You can also specify multiple attributes for the filter:
  * 
  * <property name="nodeFilter">
- *     <bean class="org.apache.ignite.util.ClusterAttributeNodeFilter">
+ *     <bean class="org.apache.ignite.util.AttributeNodeFilter">
  *         <constructor-arg>
  *             <map>
  *                 <entry key="cpu-group" value="high"/>
@@ -105,4 +106,13 @@ public AttributeNodeFilter(Map attrs) {
 
         return true;
     }
+
+    /**
+     * Gets attributes.
+     *
+     * @return Attributes collection.
+     */
+    public Map<String, Object> getAttrs() {
+        return new HashMap<>(attrs);
+    }
 }
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.DotNetCore.csproj b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.DotNetCore.csproj
index ef97e9ab2dea8..884009a4ccf72 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.DotNetCore.csproj
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.DotNetCore.csproj
@@ -89,6 +89,7 @@
     
     
     
+    
     
       PreserveNewest
     
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj
index 0f67ed407b754..b28268d01a47c 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj
@@ -112,6 +112,7 @@
     
     
     
+    
     
     
     
@@ -439,6 +440,9 @@
     
   
   
+    
+      PreserveNewest
+    
     
       PreserveNewest
     
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheNodeFilterTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheNodeFilterTest.cs
new file mode 100644
index 0000000000000..2c94775f6f447
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheNodeFilterTest.cs
@@ -0,0 +1,274 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Apache.Ignite.Core.Tests.Cache
+{
+    using System;
+    using System.Collections.Generic;
+    using System.IO;
+    using System.Linq;
+    using Apache.Ignite.Core.Cache.Configuration;
+    using Apache.Ignite.Core.Cluster;
+    using NUnit.Framework;
+
+    /// 
+    /// Cache node filter tests.
+    /// 
+    [TestFixture]
+    public class CacheNodeFilterTest
+    {
+        /** */
+        private const string AttrKey2 = "attr2";
+
+        /**  */
+        private const int AttrVal2 = 3;
+
+        /** */
+        private const string AttrKey3 = "my-key";
+
+        /**  */
+        private const string AttrVal3 = "my-val";
+
+        /** Grid instances. */
+        private IIgnite _grid1, _grid2, _grid3;
+
+        /// 
+        ///  Fixture setup.
+        /// 
+        [TestFixtureSetUp]
+        public void TestFixtureSetUp()
+        {
+            var springConfig = new IgniteConfiguration(TestUtils.GetTestConfiguration())
+            {
+                SpringConfigUrl = Path.Combine("Config", "cache-attribute-node-filter.xml"),
+                IgniteInstanceName = "springGrid"
+            };
+            _grid1 = Ignition.Start(springConfig);
+
+            _grid2 = Ignition.Start(GetTestConfiguration("Ignite2",
+                new Dictionary
+                {
+                    {AttrKey2, AttrVal2}
+                }));
+
+            _grid3 = Ignition.Start(GetTestConfiguration("Ignite3",
+                new Dictionary
+                {
+                    {AttrKey2, AttrVal2},
+                    {AttrKey3, AttrVal3}
+                }));
+        }
+
+        /// 
+        ///  Fixture tear down.
+        /// 
+        [TestFixtureTearDown]
+        public void TestFixtureTearDown()
+        {
+            Ignition.StopAll(true);
+        }
+
+        /// 
+        /// Gets a test configuration.
+        /// 
+        /// Grid name.
+        /// User attributes.
+        /// 
+        private IgniteConfiguration GetTestConfiguration(string gridName, Dictionary userAttributes)
+        {
+            IgniteConfiguration cfg = TestUtils.GetTestConfiguration(name: gridName);
+            cfg.UserAttributes = userAttributes;
+            return cfg;
+        }
+
+        /// 
+        /// Tests that an attribute node filter with a custom user attribute
+        /// name and a null value matches all nodes.
+        /// 
+        [Test]
+        public void TestUserAttributeWithNullValueMatchesAllNodes()
+        {
+            const int replicatedPartitionsCount = 512;
+
+            var cacheCfg = new CacheConfiguration
+            {
+                Name = Guid.NewGuid().ToString(),
+                NodeFilter = new AttributeNodeFilter("my.custom.attr", null),
+                CacheMode = CacheMode.Replicated,
+            };
+            var cache = _grid1.CreateCache(cacheCfg);
+
+            var affinity = _grid1.GetAffinity(cache.Name);
+
+            Assert.AreEqual(3, _grid1.GetCluster().ForDataNodes(cache.Name).GetNodes().Count);
+
+            var parts1 = affinity.GetAllPartitions(_grid1.GetCluster().GetLocalNode());
+            var parts2 = affinity.GetAllPartitions(_grid2.GetCluster().GetLocalNode());
+            var parts3 = affinity.GetAllPartitions(_grid3.GetCluster().GetLocalNode());
+
+            Assert.AreEqual(replicatedPartitionsCount, parts1.Length);
+            Assert.AreEqual(parts1, parts2);
+            Assert.AreEqual(parts2, parts3);
+        }
+
+        /// 
+        /// Tests that the attribute node filter matches the specified attribute.
+        /// 
+        [Test]
+        public void TestAttributeNodeFilterMatchesCustomNode()
+        {
+            const int itemsCount = 10;
+
+            var cacheCfg = new CacheConfiguration
+            {
+                Name = Guid.NewGuid().ToString(),
+                NodeFilter = new AttributeNodeFilter(AttrKey2, AttrVal2),
+                CacheMode = CacheMode.Replicated,
+            };
+            var cache = _grid1.CreateCache(cacheCfg);
+
+            for (int i = 0; i < itemsCount; i++)
+            {
+                cache.Put(i, i);
+            }
+
+            Assert.AreEqual(2, _grid1.GetCluster().ForDataNodes(cache.Name).GetNodes().Count);
+
+            Assert.AreEqual(0, cache.GetLocalEntries().Count());
+
+            var cache2 = _grid2.GetCache(cache.Name);
+            var cache3 = _grid3.GetCache(cache.Name);
+
+            Assert.AreEqual(itemsCount, cache2.GetLocalEntries().Count());
+            Assert.AreEqual(itemsCount, cache3.GetLocalEntries().Count());
+        }
+
+        /// 
+        /// Tests that a node filter with multiple attributes matches a single node.
+        /// 
+        [Test]
+        public void TestNodeFilterWithMultipleUserAttributes()
+        {
+            var cacheCfg = new CacheConfiguration
+            {
+                Name = Guid.NewGuid().ToString(),
+                NodeFilter = new AttributeNodeFilter
+                {
+                    Attributes = new Dictionary
+                    {
+                        {AttrKey2, AttrVal2},
+                        {AttrKey3, AttrVal3}
+                    }
+                },
+                CacheMode = CacheMode.Replicated,
+            };
+            var cache = _grid1.CreateCache(cacheCfg);
+
+            ICollection dataNodes = _grid1.GetCluster().ForDataNodes(cache.Name).GetNodes();
+            Assert.AreEqual(1, dataNodes.Count);
+            Assert.AreEqual(_grid3.GetCluster().GetLocalNode(), dataNodes.Single());
+        }
+
+        /// 
+        /// Tests Java and .NET nodes can utilize the same
+        /// attribute node filter configuration.
+        /// 
+        [Test]
+        public void TestSpringAttributeNodeFilter()
+        {
+            var cache = _grid1.GetCache("cache");
+            Assert.AreEqual(2, _grid1.GetCluster().ForDataNodes(cache.Name).GetNodes().Count);
+
+            var nodeFilter = cache.GetConfiguration().NodeFilter as AttributeNodeFilter;
+            Assert.IsNotNull(nodeFilter);
+
+            Assert.AreEqual(1, nodeFilter.Attributes.Count);
+
+            var expected = new KeyValuePair(AttrKey3, AttrVal3);
+            Assert.AreEqual(expected, nodeFilter.Attributes.Single());
+        }
+
+        /// 
+        /// Tests that a Java-side node filter is not read on the .NET side.
+        /// 
+        [Test]
+        public void TestJavaNodeFilterIsNotAccessedByNetConfig()
+        {
+            var cache = _grid1.GetCache("cacheWithJavaFilter");
+
+            Assert.IsNull(cache.GetConfiguration().NodeFilter);
+        }
+
+        /// 
+        /// Tests that custom node filter is not supported.
+        /// 
+        [Test]
+        public void TestCustomFilterIsNotSupported()
+        {
+            var cacheCfg = new CacheConfiguration
+            {
+                Name = Guid.NewGuid().ToString(),
+                CacheMode = CacheMode.Replicated,
+                NodeFilter = new CustomFilter()
+            };
+
+            TestDelegate action = () => { _grid1.CreateCache(cacheCfg); };
+
+            var ex = Assert.Throws(action);
+            Assert.AreEqual("Unsupported CacheConfiguration.NodeFilter: " +
+                            "'CustomFilter'. " +
+                            "Only predefined implementations are supported: " +
+                            "'AttributeNodeFilter'", ex.Message);
+        }
+
+        /// 
+        /// Tests that an attribute node filter with a null
+        /// Attributes value is not supported.
+        /// 
+        [Test]
+        public void TestAttributeFilterWithNullValues()
+        {
+            TestDelegate action = () =>
+            {
+                var _ = new CacheConfiguration
+                {
+                    NodeFilter = new AttributeNodeFilter
+                    {
+                        Attributes = null
+                    },
+                };
+            };
+
+            var ex = Assert.Throws(action);
+            StringAssert.Contains("value", ex.Message);
+        }
+
+        /// 
+        /// Custom node filter.
+        /// 
+        public class CustomFilter : IClusterNodeFilter
+        {
+            /// 
+            /// 
+            /// 
+            public bool Invoke(IClusterNode node)
+            {
+                return true;
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/cache-attribute-node-filter.xml b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/cache-attribute-node-filter.xml
new file mode 100644
index 0000000000000..8b1c543035806
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/cache-attribute-node-filter.xml
@@ -0,0 +1,81 @@
+
+
+
+
+
+  
+    
+    
+      
+    
+
+    
+      
+        
+      
+    
+
+
+    
+      
+        
+          
+          
+            
+              
+                
+                  
+                
+              
+            
+          
+        
+        
+          
+          
+            
+            
+          
+        
+      
+    
+
+    
+      
+        
+          
+            
+              
+                
+                127.0.0.1:47500
+              
+            
+          
+        
+        
+      
+    
+  
+
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml
index e9e5ff461b66f..32eed223a00a9 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml
@@ -91,6 +91,13 @@
             
                 
             
+            
+              
+                
+                
+                null
+              
+            
         
         
     
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs
index 7941175af4d46..1211d3832ff5c 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs
@@ -39,6 +39,7 @@ namespace Apache.Ignite.Core.Tests
     using Apache.Ignite.Core.Cache.Eviction;
     using Apache.Ignite.Core.Cache.Expiry;
     using Apache.Ignite.Core.Cache.Store;
+    using Apache.Ignite.Core.Cluster;
     using Apache.Ignite.Core.Ssl;
     using Apache.Ignite.Core.Common;
     using Apache.Ignite.Core.Communication.Tcp;
@@ -159,6 +160,14 @@ public void TestPredefinedXml()
             Assert.IsNotNull(nearCfg);
             Assert.AreEqual(7, nearCfg.NearStartSize);
 
+            var nodeFilter = (AttributeNodeFilter)cacheCfg.NodeFilter;
+            Assert.IsNotNull(nodeFilter);
+            var attributes = nodeFilter.Attributes.ToList();
+            Assert.AreEqual(3, nodeFilter.Attributes.Count);
+            Assert.AreEqual(new KeyValuePair("myNode", "true"), attributes[0]);
+            Assert.AreEqual(new KeyValuePair("foo", null), attributes[1]);
+            Assert.AreEqual(new KeyValuePair("baz", null), attributes[2]);
+
             var plc = nearCfg.EvictionPolicy as FifoEvictionPolicy;
             Assert.IsNotNull(plc);
             Assert.AreEqual(10, plc.BatchSize);
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj b/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj
index 99bf6fd202bc7..50e3db755ad6d 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj
@@ -71,6 +71,7 @@
     
     
     
+    
     
     
     
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/CacheConfiguration.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/CacheConfiguration.cs
index 82eb16c801805..56681cf7457fe 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/CacheConfiguration.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/CacheConfiguration.cs
@@ -34,6 +34,7 @@ namespace Apache.Ignite.Core.Cache.Configuration
     using Apache.Ignite.Core.Cache.Eviction;
     using Apache.Ignite.Core.Cache.Expiry;
     using Apache.Ignite.Core.Cache.Store;
+    using Apache.Ignite.Core.Cluster;
     using Apache.Ignite.Core.Common;
     using Apache.Ignite.Core.Configuration;
     using Apache.Ignite.Core.Impl;
@@ -249,7 +250,7 @@ public CacheConfiguration(string name, params QueryEntity[] queryEntities) : thi
         /// Initializes a new instance of the  class,
         /// performing a deep copy of specified cache configuration.
         /// 
-        /// The other configuration to perfrom deep copy from.
+        /// The other configuration to perform deep copy from.
         public CacheConfiguration(CacheConfiguration other)
         {
             if (other != null)
@@ -340,6 +341,8 @@ private void Read(BinaryReader reader)
             AffinityFunction = AffinityFunctionSerializer.Read(reader);
             ExpiryPolicyFactory = ExpiryPolicySerializer.ReadPolicyFactory(reader);
 
+            NodeFilter = reader.ReadBoolean() ? new AttributeNodeFilter(reader) : null;
+
             KeyConfiguration = reader.ReadCollectionRaw(r => new CacheKeyConfiguration(r));
             
             if (reader.ReadBoolean())
@@ -448,6 +451,26 @@ internal void Write(BinaryWriter writer)
             AffinityFunctionSerializer.Write(writer, AffinityFunction);
             ExpiryPolicySerializer.WritePolicyFactory(writer, ExpiryPolicyFactory);
 
+            if (NodeFilter != null)
+            {
+                writer.WriteBoolean(true);
+
+                var attributeNodeFilter = NodeFilter as AttributeNodeFilter;
+                if (attributeNodeFilter == null)
+                {
+                    throw new NotSupportedException(string.Format(
+                        "Unsupported CacheConfiguration.NodeFilter: '{0}'. " +
+                        "Only predefined implementations are supported: '{1}'",
+                        NodeFilter.GetType().Name, typeof(AttributeNodeFilter).Name));
+                }
+
+                attributeNodeFilter.Write(writer);
+            }
+            else
+            {
+                writer.WriteBoolean(false);
+            }
+
             writer.WriteCollectionRaw(KeyConfiguration);
             
             if (PlatformCacheConfiguration != null)
@@ -949,5 +972,12 @@ public string MemoryPolicyName
         /// 
         [IgniteExperimental]
         public PlatformCacheConfiguration PlatformCacheConfiguration { get; set; }
+
+        /// 
+        /// Gets or sets the cluster node filter. Cache will be started only on nodes that match the filter.
+        /// 
+        /// Only predefined implementations are supported: .
+        /// 
+        public IClusterNodeFilter NodeFilter { get; set; }
     }
 }
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cluster/AttributeNodeFilter.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cluster/AttributeNodeFilter.cs
new file mode 100644
index 0000000000000..4c01c0ea5c102
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cluster/AttributeNodeFilter.cs
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Apache.Ignite.Core.Cluster
+{
+    using System;
+    using System.Collections.Generic;
+    using System.Diagnostics;
+    using System.Diagnostics.CodeAnalysis;
+    using Apache.Ignite.Core.Binary;
+    using Apache.Ignite.Core.Impl.Common;
+
+    /// 
+    /// Attribute node filter.
+    /// 
+    /// The filter will evaluate to true if a node has all specified attributes with corresponding values.
+    /// 
+    /// You can set node attributes using  property. 
+    /// 
+    public sealed class AttributeNodeFilter : IClusterNodeFilter
+    {
+        /** */
+        private IDictionary _attributes;
+
+        /// 
+        /// Attributes dictionary match.
+        /// 
+        [SuppressMessage("Microsoft.Usage", "CA2227:CollectionPropertiesShouldBeReadOnly")]
+        public IDictionary Attributes
+        {
+            get { return _attributes; }
+            set
+            {
+                if (value == null)
+                {
+                    throw new ArgumentNullException("value");
+                }
+
+                _attributes = value;
+            }
+        }
+
+        /// 
+        /// Initializes a new instance of .
+        /// 
+        public AttributeNodeFilter()
+        {
+            // No-op.
+        }
+
+        /// 
+        /// Initializes a new instance of .
+        /// 
+        /// Attribute name.
+        /// Attribute value.
+        public AttributeNodeFilter(string attrName, object attrValue)
+        {
+            IgniteArgumentCheck.NotNullOrEmpty(attrName, "attrName");
+
+            Attributes = new Dictionary(1)
+            {
+                {attrName, attrValue}
+            };
+        }
+
+        /**  */
+        public bool Invoke(IClusterNode node)
+        {
+            throw new NotSupportedException("Should not be called from .NET side.");
+        }
+
+        /// 
+        /// Initializes a new instance of  from a binary reader.
+        /// 
+        /// Reader.
+        internal AttributeNodeFilter(IBinaryRawReader reader)
+        {
+            IgniteArgumentCheck.NotNull(reader, "reader");
+
+            int count = reader.ReadInt();
+
+            Debug.Assert(count > 0);
+
+            Attributes = new Dictionary(count);
+
+            while (count > 0)
+            {
+                string attrKey = reader.ReadString();
+                object attrVal = reader.ReadObject();
+
+                Debug.Assert(attrKey != null);
+
+                Attributes[attrKey] = attrVal;
+
+                count--;
+            }
+        }
+
+        /// 
+        /// Writes the instance to a writer.
+        /// 
+        /// Writer.
+        internal void Write(IBinaryRawWriter writer)
+        {
+            writer.WriteInt(Attributes.Count);
+
+            // Does not preserve ordering, it's fine.
+            foreach (KeyValuePair attr in Attributes)
+            {
+                writer.WriteString(attr.Key);
+                writer.WriteObject(attr.Value);
+            }
+        }
+    }
+}
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd b/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd
index 0ab59ec78bea8..314b839312e45 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd
@@ -623,6 +623,18 @@
                                                 
                                             
                                         
+                                        
+                                          
+                                            Node filter to match selected nodes. Only predefined AttributeNodeFilter is supported.
+                                          
+                                          
+                                            
+                                              
+                                                Assembly-qualified type name.
+                                              
+                                            
+                                          
+                                        
                                         
                                             
                                                 Cache key configuration collection.

From 1da8c31375ca2f1334b03eb2840285b5071340f9 Mon Sep 17 00:00:00 2001
From: Denis Magda 
Date: Tue, 3 Nov 2020 08:16:46 -0800
Subject: [PATCH 019/110] Update README.adoc

ignite docs: added a troubleshooting section for the Jekyll installation
---
 docs/README.adoc | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/docs/README.adoc b/docs/README.adoc
index 856b993f9f45c..710f7847fc678 100644
--- a/docs/README.adoc
+++ b/docs/README.adoc
@@ -68,6 +68,46 @@ $ docker run -v "$PWD:/srv/jekyll" -p 4000:4000 jekyll/jekyll:latest jekyll s
 
 Open `http://localhost:4000/docs[window=_blank]` in your browser.
 
+=== Troubleshooting
+
+Below are some issues you might hit during an installation of the Jekyll environment or while building the tutorials.
+Let us know if you come across a new issue and find a workaround.
+
+==== MacOS: Issues with FFI library during Jekyll installation
+
+You should see an error trace similar to this: https://github.com/ffi/ffi/issues/653
+
+Attempt to fix the problem by following this sequence of commands (typically it's the last command only):
+
+[source, text]
+----
+brew reinstall libffi
+export LDFLAGS="-L/usr/local/opt/libffi/lib"
+export CPPFLAGS="-I/usr/local/opt/libffi/include"
+export PKG_CONFIG_PATH="/usr/local/opt/libffi/lib/pkgconfig"
+gem install --user-install bundler jekyll
+----
+
+==== MacOS: jekyll-asciidoc gem is not installed by default
+
+Try to follow this procedure to fix the issue.
+
+* Comment out the `rm -rf $tmp_dir` at the very end of the `build.sh` script, so that the temp folder is not deleted after the execution.
+* Run `build.sh` (fails with `Could not find gem 'jekyll-asciidoc'...` error).
+* Go to `tmp/web_site` folder.
+* Run `bundle install`.
+* Revert the `build.sh` script and run it again.
+
+==== MacOS: can't build project due to inability to load openssl
+
+You should see an error like this:
+
+`LoadError: dlopen(/Users/dmagda/.rbenv/versions/2.6.2/lib/ruby/2.6.0/x86_64-darwin18/digest/sha1.bundle, 9): Library not loaded: /usr/local/opt/openssl/lib/libssl.1.0.0.dylib
+   Referenced from: /Users/dmagda/.rbenv/versions/2.6.2/lib/ruby/2.6.0/x86_64-darwin18/digest/sha1.bundle`
+
+Try to upgrade Ruby, rbenv to the latest version (2.7.1) and then reinstall Jekyll. Use the official instructions:
+https://jekyllrb.com/docs/installation/
+
 == How to Contribute
 
 If you want to contribute to the documentation, add or modify the relevant page in the `docs/_docs` directory.

From 23b63b41fa3978e84e64541e0a215f695c30323c Mon Sep 17 00:00:00 2001
From: Denis Garus 
Date: Tue, 3 Nov 2020 22:41:37 +0300
Subject: [PATCH 020/110] IGNITE-13652 Wrong GitHub link for Apache Ignite With
 Spring Data/Example (#8420)

---
 docs/_docs/extensions-and-integrations/spring/spring-data.adoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/_docs/extensions-and-integrations/spring/spring-data.adoc b/docs/_docs/extensions-and-integrations/spring/spring-data.adoc
index ece798bd92b7d..65b1f23e43872 100644
--- a/docs/_docs/extensions-and-integrations/spring/spring-data.adoc
+++ b/docs/_docs/extensions-and-integrations/spring/spring-data.adoc
@@ -220,7 +220,7 @@ System.out.println("\n>>> Top Person with surname 'Smith': " +
 
 == Example
 
-The complete example is available on link:{githubUrl}/examples/src/main/java/org/apache/ignite/examples/springdata[GitHub, window=_blank].
+The complete example is available on link:https://github.com/apache/ignite-extensions/tree/master/modules/spring-data-2.0-ext/examples/main[GitHub, window=_blank].
 
 == Tutorial
 

From b986cf6f2250858b92e4eda52f9269077659229c Mon Sep 17 00:00:00 2001
From: Mark Andreev 
Date: Wed, 4 Nov 2020 12:52:24 +0300
Subject: [PATCH 021/110] IGNITE-13531: Code cleanup in Util classes (#8336)

---
 .../persistence/DistributedMetaStorageUtil.java   |  9 ++++++++-
 .../ml/clustering/kmeans/KMeansTrainer.java       |  9 +++++----
 .../ml/dataset/feature/ObjectHistogram.java       |  8 ++++----
 .../ml/dataset/impl/cache/util/ComputeUtils.java  | 15 +++++++++++----
 .../ml/inference/IgniteModelStorageUtil.java      |  9 ++++++++-
 .../main/java/org/apache/ignite/ml/math/Blas.java |  6 +++++-
 .../org/apache/ignite/ml/math/util/MapUtil.java   |  9 ++++++++-
 .../apache/ignite/ml/math/util/MatrixUtil.java    |  9 ++++++++-
 .../java/org/apache/ignite/ml/nn/Activators.java  |  6 +++---
 .../ml/preprocessing/imputing/ImputerTrainer.java |  7 +++----
 .../maxabsscaling/MaxAbsScalerTrainer.java        |  4 ++--
 .../minmaxscaling/MinMaxScalerTrainer.java        |  7 +++----
 .../java/org/apache/ignite/ml/util/Utils.java     |  9 ++++++++-
 13 files changed, 76 insertions(+), 31 deletions(-)

diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/metastorage/persistence/DistributedMetaStorageUtil.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/metastorage/persistence/DistributedMetaStorageUtil.java
index 01be742421895..309b7a09ddcb3 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/metastorage/persistence/DistributedMetaStorageUtil.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/metastorage/persistence/DistributedMetaStorageUtil.java
@@ -24,7 +24,14 @@
 import org.apache.ignite.marshaller.jdk.JdkMarshaller;
 
 /** */
-class DistributedMetaStorageUtil {
+final class DistributedMetaStorageUtil {
+    /**
+     *
+     */
+    private DistributedMetaStorageUtil() {
+        // No-op.
+    }
+
     /**
      * Common prefix for everything that is going to be written into {@link MetaStorage}. Something that has minimal
      * chance of collision with the existing keys.
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansTrainer.java
index 05f41b505955e..caec370e63472 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansTrainer.java
@@ -21,6 +21,7 @@
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Optional;
 import java.util.Random;
 import java.util.Set;
@@ -114,13 +115,13 @@ public class KMeansTrainer extends SingleLabelDatasetTrainer {
 
                 converged = true;
 
-                for (Integer ind : totalRes.sums.keySet()) {
-                    Vector massCenter = totalRes.sums.get(ind).times(1.0 / totalRes.counts.get(ind));
+                for (Map.Entry entry : totalRes.sums.entrySet()) {
+                    Vector massCenter = entry.getValue().times(1.0 / totalRes.counts.get(entry.getKey()));
 
-                    if (converged && distance.compute(massCenter, centers[ind]) > epsilon * epsilon)
+                    if (converged && distance.compute(massCenter, centers[entry.getKey()]) > epsilon * epsilon)
                         converged = false;
 
-                    newCentroids[ind] = massCenter;
+                    newCentroids[entry.getKey()] = massCenter;
                 }
 
                 iteration++;
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/ObjectHistogram.java b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/ObjectHistogram.java
index a66cb48b4523c..17c22ca1b2643 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/ObjectHistogram.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/ObjectHistogram.java
@@ -60,9 +60,9 @@ public abstract class ObjectHistogram implements Histogram res = new TreeMap<>();
 
         double accum = 0.0;
-        for (Integer bucket : hist.keySet()) {
-            accum += hist.get(bucket);
-            res.put(bucket, accum);
+        for (Map.Entry entry : hist.entrySet()) {
+            accum += entry.getValue();
+            res.put(entry.getKey(), accum);
         }
 
         return res;
@@ -71,7 +71,7 @@ public abstract class ObjectHistogram implements Histogram plus(ObjectHistogram other) {
         ObjectHistogram res = newInstance();
-        addTo(this.hist, res.hist);
+        addTo(hist, res.hist);
         addTo(other.hist, res.hist);
         return res;
     }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/dataset/impl/cache/util/ComputeUtils.java b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/impl/cache/util/ComputeUtils.java
index b53c27b6077f6..5b50c9428f709 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/dataset/impl/cache/util/ComputeUtils.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/impl/cache/util/ComputeUtils.java
@@ -56,7 +56,14 @@
 /**
  * Util class that provides common methods to perform computations on top of the Ignite Compute Grid.
  */
-public class ComputeUtils {
+public final class ComputeUtils {
+    /**
+     *
+     */
+    private ComputeUtils() {
+        // No-op.
+    }
+
     /** Template of the key used to store partition {@code data} in local storage. */
     private static final String DATA_STORAGE_KEY_TEMPLATE = "part_data_storage_%s";
 
@@ -110,11 +117,11 @@ public static  Collection affinityCallWithRetries(Ignite ignite, Collectio
                 }
 
             // Collects results.
-            for (int part : futures.keySet())
+            for (Map.Entry> entry : futures.entrySet())
                 try {
-                    R res = futures.get(part).get();
+                    R res = entry.getValue().get();
                     results.add(res);
-                    completionFlags.set(part);
+                    completionFlags.set(entry.getKey());
                 }
                 catch (IgniteException ignore) {
                 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/inference/IgniteModelStorageUtil.java b/modules/ml/src/main/java/org/apache/ignite/ml/inference/IgniteModelStorageUtil.java
index e751ecc71aca8..238d90074ccf7 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/inference/IgniteModelStorageUtil.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/inference/IgniteModelStorageUtil.java
@@ -42,7 +42,14 @@
 /**
  * Utils class that helps to operate with model storage and Ignite models.
  */
-public class IgniteModelStorageUtil {
+public final class IgniteModelStorageUtil {
+    /**
+     *
+     */
+    private IgniteModelStorageUtil(){
+        // No-op.
+    }
+
     /** Folder to be used to store Ignite models. */
     private static final String IGNITE_MDL_FOLDER = "/ignite_models";
 
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/Blas.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/Blas.java
index 137c64c1afcac..69a349b5a81ef 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/Blas.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/math/Blas.java
@@ -17,6 +17,7 @@
 
 package org.apache.ignite.ml.math;
 
+import java.io.Serializable;
 import java.util.Set;
 import com.github.fommil.netlib.BLAS;
 import com.github.fommil.netlib.F2jBLAS;
@@ -35,7 +36,10 @@
  * Useful subset of BLAS operations.
  * This class is based on 'BLAS' class from Apache Spark MLlib.
  */
-public class Blas {
+public class Blas implements Serializable {
+    /** */
+    private static final long serialVersionUID = 124309657712638021L;
+
     /** F2J implementation of BLAS. */
     private static transient BLAS f2jBlas = new F2jBLAS();
 
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/util/MapUtil.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/util/MapUtil.java
index 9190901bf215e..c632d1bd9293a 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/util/MapUtil.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/math/util/MapUtil.java
@@ -27,7 +27,14 @@
 /**
  * Some {@link Map} related utils.
  */
-public class MapUtil {
+public final class MapUtil {
+    /**
+     *
+     */
+    private MapUtil(){
+        // No-op.
+    }
+
     /** */
     public static > M mergeMaps(M m1, M m2, BinaryOperator op, Supplier mapSupplier) {
         return Stream.of(m1, m2)
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/util/MatrixUtil.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/util/MatrixUtil.java
index 7cc7f276f8e14..21a5f0b1fe65d 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/util/MatrixUtil.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/math/util/MatrixUtil.java
@@ -32,7 +32,14 @@
 /**
  * Utility class for various matrix operations.
  */
-public class MatrixUtil {
+public final class MatrixUtil {
+    /**
+     *
+     */
+    private MatrixUtil() {
+        // No-op.
+    }
+
     /**
      * Create the like matrix with read-only matrices support.
      *
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/nn/Activators.java b/modules/ml/src/main/java/org/apache/ignite/ml/nn/Activators.java
index 4c34cd2677247..7665164121dae 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/nn/Activators.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/nn/Activators.java
@@ -26,7 +26,7 @@ public class Activators {
     /**
      * Sigmoid activation function.
      */
-    public static IgniteDifferentiableDoubleToDoubleFunction SIGMOID = new IgniteDifferentiableDoubleToDoubleFunction() {
+    public static final IgniteDifferentiableDoubleToDoubleFunction SIGMOID = new IgniteDifferentiableDoubleToDoubleFunction() {
         /** {@inheritDoc} */
         @Override public double differential(double pnt) {
             double v = apply(pnt);
@@ -42,7 +42,7 @@ public class Activators {
     /**
      * Rectified linear unit (ReLU) activation function.
      */
-    public static IgniteDifferentiableDoubleToDoubleFunction RELU = new IgniteDifferentiableDoubleToDoubleFunction() {
+    public static final IgniteDifferentiableDoubleToDoubleFunction RELU = new IgniteDifferentiableDoubleToDoubleFunction() {
         /**
          * Differential of ReLU at pnt. Formally, function is not differentiable at 0, but we let differential at 0 be 0.
          *
@@ -62,7 +62,7 @@ public class Activators {
     /**
      * Linear unit activation function.
      */
-    public static IgniteDifferentiableDoubleToDoubleFunction LINEAR = new IgniteDifferentiableDoubleToDoubleFunction() {
+    public static final IgniteDifferentiableDoubleToDoubleFunction LINEAR = new IgniteDifferentiableDoubleToDoubleFunction() {
         /** {@inheritDoc} */
         @Override public double differential(double pnt) {
             return 1.0;
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/imputing/ImputerTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/imputing/ImputerTrainer.java
index e33504eae0509..449bb2228c5d8 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/imputing/ImputerTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/imputing/ImputerTrainer.java
@@ -17,6 +17,7 @@
 
 package org.apache.ignite.ml.preprocessing.imputing;
 
+import java.util.Arrays;
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Map;
@@ -442,8 +443,7 @@ private int[] updateTheCounts(LabeledVector row, int[] counts) {
     private double[] updateTheMins(LabeledVector row, double[] mins) {
         if (mins == null) {
             mins = new double[row.size()];
-            for (int i = 0; i < mins.length; i++)
-                mins[i] = Double.POSITIVE_INFINITY;
+            Arrays.fill(mins, Double.POSITIVE_INFINITY);
         }
 
         else
@@ -468,8 +468,7 @@ private double[] updateTheMins(LabeledVector row, double[] mins) {
     private double[] updateTheMaxs(LabeledVector row, double[] maxs) {
         if (maxs == null) {
             maxs = new double[row.size()];
-            for (int i = 0; i < maxs.length; i++)
-                maxs[i] = Double.NEGATIVE_INFINITY;
+            Arrays.fill(maxs, Double.NEGATIVE_INFINITY);
         }
 
         else
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/maxabsscaling/MaxAbsScalerTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/maxabsscaling/MaxAbsScalerTrainer.java
index b7678f9c7bc35..978fcb44bdfa0 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/maxabsscaling/MaxAbsScalerTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/maxabsscaling/MaxAbsScalerTrainer.java
@@ -17,6 +17,7 @@
 
 package org.apache.ignite.ml.preprocessing.maxabsscaling;
 
+import java.util.Arrays;
 import org.apache.ignite.ml.dataset.Dataset;
 import org.apache.ignite.ml.dataset.DatasetBuilder;
 import org.apache.ignite.ml.dataset.UpstreamEntry;
@@ -50,8 +51,7 @@ public class MaxAbsScalerTrainer implements PreprocessingTrainer {
 
                     if (maxAbs == null) {
                         maxAbs = new double[row.size()];
-                        for (int i = 0; i < maxAbs.length; i++)
-                            maxAbs[i] = .0;
+                        Arrays.fill(maxAbs, .0);
                     }
                     else
                         assert maxAbs.length == row.size() : "Base preprocessor must return exactly " + maxAbs.length
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/minmaxscaling/MinMaxScalerTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/minmaxscaling/MinMaxScalerTrainer.java
index 54a6d59d98ee0..272dab11587c6 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/minmaxscaling/MinMaxScalerTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/minmaxscaling/MinMaxScalerTrainer.java
@@ -17,6 +17,7 @@
 
 package org.apache.ignite.ml.preprocessing.minmaxscaling;
 
+import java.util.Arrays;
 import org.apache.ignite.ml.dataset.Dataset;
 import org.apache.ignite.ml.dataset.DatasetBuilder;
 import org.apache.ignite.ml.dataset.PartitionContextBuilder;
@@ -53,8 +54,7 @@ public class MinMaxScalerTrainer implements PreprocessingTrainer {
 
                     if (min == null) {
                         min = new double[row.size()];
-                        for (int i = 0; i < min.length; i++)
-                            min[i] = Double.MAX_VALUE;
+                        Arrays.fill(min, Double.MAX_VALUE);
                     }
                     else
                         assert min.length == row.size() : "Base preprocessor must return exactly " + min.length
@@ -62,8 +62,7 @@ public class MinMaxScalerTrainer implements PreprocessingTrainer {
 
                     if (max == null) {
                         max = new double[row.size()];
-                        for (int i = 0; i < max.length; i++)
-                            max[i] = -Double.MAX_VALUE;
+                        Arrays.fill(max, -Double.MAX_VALUE);
                     }
                     else
                         assert max.length == row.size() : "Base preprocessor must return exactly " + min.length
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/util/Utils.java b/modules/ml/src/main/java/org/apache/ignite/ml/util/Utils.java
index 8100f93799fbf..333ade4175497 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/util/Utils.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/util/Utils.java
@@ -34,7 +34,14 @@
 /**
  * Class with various utility methods.
  */
-public class Utils {
+public final class Utils {
+    /**
+     *
+     */
+    private Utils(){
+        // No-op.
+    }
+
     /**
      * Perform deep copy of an object.
      *

From f806b014c02f55490e09e01673aa729e8b67e6b2 Mon Sep 17 00:00:00 2001
From: zstan 
Date: Thu, 5 Nov 2020 17:50:20 +0300
Subject: [PATCH 022/110] IGNITE-13658 Introduce volatileDsMemPlc for volatile
 data structures caches

Implement checking for volatile data region feature.
Always start and use volatile data region, since only a subset of nodes may be persistent.

Fixes #8423.

Signed-off-by: Ilya Kasnacheev 
---
 .../GridCommandHandlerClusterByClassTest.java |  18 +-
 .../ignite/internal/IgniteFeatures.java       |   3 +
 .../dht/GridDhtTopologyFutureAdapter.java     |   9 +-
 .../IgniteCacheDatabaseSharedManager.java     |  34 +++-
 .../DataStructuresProcessor.java              |  29 ++-
 .../internal/GridNodeMetricsLogSelfTest.java  |   6 +-
 ...dCacheConfigurationValidationSelfTest.java |  10 +
 .../OutOfMemoryVolatileRegionTest.java        | 175 ++++++++++++++++++
 .../MemoryPolicyInitializationTest.java       |  12 +-
 ...gniteCacheDataStructuresSelfTestSuite.java |   2 +
 .../Cache/DataRegionMetricsTest.cs            |  13 +-
 .../Cache/MemoryMetricsTest.cs                |  10 +-
 12 files changed, 293 insertions(+), 28 deletions(-)
 create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/OutOfMemoryVolatileRegionTest.java

diff --git a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerClusterByClassTest.java b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerClusterByClassTest.java
index 4403e38ed3411..a8dc39878157c 100644
--- a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerClusterByClassTest.java
+++ b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerClusterByClassTest.java
@@ -891,15 +891,18 @@ public void testCacheIdleVerifyDumpForCorruptedDataOnSystemCache() throws Except
         corruptDataEntry(storedSysCacheCtx.caches().get(0), new GridCacheInternalKeyImpl("sq" + parts / 2,
             "default-ds-group"), false, true);
 
-        CacheGroupContext memorySysCacheCtx = ignite.context().cache().cacheGroup(CU.cacheId("default-volatile-ds-group"));
+        CacheGroupContext memoryVolatileCacheCtx = ignite.context().cache().cacheGroup(CU.cacheId(
+            "default-volatile-ds-group@volatileDsMemPlc"));
 
-        assertNotNull(memorySysCacheCtx);
+        assertNotNull(memoryVolatileCacheCtx);
+        assertEquals("volatileDsMemPlc", memoryVolatileCacheCtx.dataRegion().config().getName());
+        assertEquals(false, memoryVolatileCacheCtx.dataRegion().config().isPersistenceEnabled());
 
-        corruptDataEntry(memorySysCacheCtx.caches().get(0), new GridCacheInternalKeyImpl("s0",
-            "default-volatile-ds-group"), true, false);
+        corruptDataEntry(memoryVolatileCacheCtx.caches().get(0), new GridCacheInternalKeyImpl("s0",
+            "default-volatile-ds-group@volatileDsMemPlc"), true, false);
 
-        corruptDataEntry(memorySysCacheCtx.caches().get(0), new GridCacheInternalKeyImpl("s" + parts / 2,
-            "default-volatile-ds-group"), false, true);
+        corruptDataEntry(memoryVolatileCacheCtx.caches().get(0), new GridCacheInternalKeyImpl("s" + parts / 2,
+            "default-volatile-ds-group@volatileDsMemPlc"), false, true);
 
         assertEquals(EXIT_CODE_OK, execute("--cache", "idle_verify", "--dump", "--cache-filter", "SYSTEM"));
 
@@ -910,7 +913,8 @@ public void testCacheIdleVerifyDumpForCorruptedDataOnSystemCache() throws Except
 
             U.log(log, dumpWithConflicts);
 
-            assertContains(log, dumpWithConflicts, "found 4 conflict partitions: [counterConflicts=2, " +
+            // Non-persistent caches do not have counter conflicts
+            assertContains(log, dumpWithConflicts, "found 3 conflict partitions: [counterConflicts=1, " +
                 "hashConflicts=2]");
         }
         else
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteFeatures.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteFeatures.java
index e1f09e5b7f59a..f9704aecadc30 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteFeatures.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteFeatures.java
@@ -108,6 +108,9 @@ public enum IgniteFeatures {
     /** Distributed change timeout for dump long operations. */
     DISTRIBUTED_CHANGE_LONG_OPERATIONS_DUMP_TIMEOUT(30),
 
+    /** New region for volatile data. */
+    VOLATILE_DATA_STRUCTURES_REGION(33),
+
     /** Check secondary indexes inline size on join/by control utility request. */
     CHECK_INDEX_INLINE_SIZES(36),
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTopologyFutureAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTopologyFutureAdapter.java
index b5e55906af35c..3f637c525200e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTopologyFutureAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTopologyFutureAdapter.java
@@ -33,7 +33,6 @@
 import static java.lang.String.format;
 import static org.apache.ignite.cache.PartitionLossPolicy.READ_ONLY_ALL;
 import static org.apache.ignite.cache.PartitionLossPolicy.READ_ONLY_SAFE;
-import static org.apache.ignite.internal.processors.cache.GridCacheProcessor.CLUSTER_READ_ONLY_MODE_ERROR_MSG_FORMAT;
 import static org.apache.ignite.internal.processors.cache.GridCacheUtils.isSystemCache;
 
 /**
@@ -41,6 +40,10 @@
  */
 public abstract class GridDhtTopologyFutureAdapter extends GridFutureAdapter
     implements GridDhtTopologyFuture {
+    /** Error message format used when the cluster is in read-only mode and a write operation is attempted. */
+    private static final String CLUSTER_READ_ONLY_ERROR_MSG =
+        "Failed to perform cache operation (cluster is in read-only mode) [cacheGrp=%s, cache=%s]";
+
     /** Cache groups validation results. */
     protected volatile Map grpValidRes = Collections.emptyMap();
 
@@ -85,7 +88,7 @@ protected final CacheGroupValidation validateCacheGroup(CacheGroupContext grp, C
 
         if (!clusterIsActive) {
             return new CacheInvalidStateException(
-                    "Failed to perform cache operation (cluster is not activated): " + cctx.name());
+                "Failed to perform cache operation (cluster is not activated): " + cctx.name());
         }
 
         if (cctx.cache() == null)
@@ -96,7 +99,7 @@ protected final CacheGroupValidation validateCacheGroup(CacheGroupContext grp, C
 
         if (cctx.shared().readOnlyMode() && !read && !isSystemCache(cctx.name())) {
             return new CacheInvalidStateException(new IgniteClusterReadOnlyException(
-                format(CLUSTER_READ_ONLY_MODE_ERROR_MSG_FORMAT, "cache", cctx.group().name(), cctx.name())
+                format(CLUSTER_READ_ONLY_ERROR_MSG, grp.name(), cctx.name())
             ));
         }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java
index 5f937342dc7a4..1d775669c07b8 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java
@@ -103,6 +103,7 @@
 import static org.apache.ignite.configuration.DataStorageConfiguration.DFLT_WAL_HISTORY_SIZE;
 import static org.apache.ignite.internal.processors.cache.mvcc.txlog.TxLog.TX_LOG_CACHE_NAME;
 import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.METASTORE_DATA_REGION_NAME;
+import static org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor.VOLATILE_DATA_REGION_NAME;
 
 /**
  *
@@ -363,6 +364,8 @@ protected void initDataRegions(DataStorageConfiguration memCfg) throws IgniteChe
     protected void initDataRegions0(DataStorageConfiguration memCfg) throws IgniteCheckedException {
         DataRegionConfiguration[] dataRegionCfgs = memCfg.getDataRegionConfigurations();
 
+        boolean persistenceEnabled = CU.isPersistenceEnabled(memCfg);
+
         if (dataRegionCfgs != null) {
             for (DataRegionConfiguration dataRegionCfg : dataRegionCfgs)
                 addDataRegion(memCfg, dataRegionCfg, dataRegionCfg.isPersistenceEnabled());
@@ -379,9 +382,18 @@ protected void initDataRegions0(DataStorageConfiguration memCfg) throws IgniteCh
             createSystemDataRegion(
                 memCfg.getSystemRegionInitialSize(),
                 memCfg.getSystemRegionMaxSize(),
-                CU.isPersistenceEnabled(memCfg)
+                persistenceEnabled
+            ),
+            persistenceEnabled
+        );
+
+        addDataRegion(
+            memCfg,
+            createVolatileDataRegion(
+                memCfg.getSystemRegionInitialSize(),
+                memCfg.getSystemRegionMaxSize()
             ),
-            CU.isPersistenceEnabled(memCfg)
+            false
         );
 
         for (DatabaseLifecycleListener lsnr : getDatabaseListeners(cctx.kernalContext()))
@@ -533,8 +545,24 @@ private DataRegionConfiguration createSystemDataRegion(
     }
 
     /**
-     * Validation of memory configuration.
+     * @param volatileCacheInitSize Initial size of PageMemory to be created for volatile cache.
+     * @param volatileCacheMaxSize Maximum size of PageMemory to be created for volatile cache.
      *
+     * @return {@link DataRegionConfiguration configuration} of DataRegion for volatile cache.
+     */
+    private DataRegionConfiguration createVolatileDataRegion(long volatileCacheInitSize, long volatileCacheMaxSize) {
+        DataRegionConfiguration res = new DataRegionConfiguration();
+
+        res.setName(VOLATILE_DATA_REGION_NAME);
+        res.setInitialSize(volatileCacheInitSize);
+        res.setMaxSize(volatileCacheMaxSize);
+        res.setPersistenceEnabled(false);
+        res.setLazyMemoryAllocation(true);
+
+        return res;
+    }
+
+    /**
      * @param memCfg configuration to validate.
      * @throws IgniteCheckedException In case of validation violation.
      */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/DataStructuresProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/DataStructuresProcessor.java
index e72692d6de314..232f5fc54e1e2 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/DataStructuresProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/DataStructuresProcessor.java
@@ -105,6 +105,9 @@
  * Manager of data structures.
  */
 public final class DataStructuresProcessor extends GridProcessorAdapter implements IgniteChangeGlobalStateSupport {
+    /** DataRegionConfiguration name reserved for volatile caches. */
+    public static final String VOLATILE_DATA_REGION_NAME = "volatileDsMemPlc";
+
     /** */
     public static final String DEFAULT_VOLATILE_DS_GROUP_NAME = "default-volatile-ds-group";
 
@@ -359,8 +362,9 @@ public static boolean isDataStructureCache(String cacheName) {
      * @return {@code True} if group name is reserved to store data structures.
      */
     public static boolean isReservedGroup(@Nullable String grpName) {
-        return DEFAULT_DS_GROUP_NAME.equals(grpName) ||
-            DEFAULT_VOLATILE_DS_GROUP_NAME.equals(grpName);
+        return grpName != null &&
+            (DEFAULT_DS_GROUP_NAME.equals(grpName) ||
+            grpName.startsWith(DEFAULT_VOLATILE_DS_GROUP_NAME));
     }
 
     /**
@@ -511,11 +515,18 @@ public final IgniteAtomicLong atomicLong(final String name,
             cfg = dfltAtomicCfg;
         }
 
+        String dataRegionName = null;
         final String grpName;
 
-        if (type.isVolatile())
-            grpName = DEFAULT_VOLATILE_DS_GROUP_NAME;
-        else if (cfg.getGroupName() != null)
+        if (type.isVolatile()) {
+            String volatileGrpName = DEFAULT_VOLATILE_DS_GROUP_NAME;
+
+            dataRegionName = VOLATILE_DATA_REGION_NAME;
+
+            volatileGrpName += "@" + dataRegionName;
+
+            grpName = volatileGrpName;
+        } else if (cfg.getGroupName() != null)
             grpName = cfg.getGroupName();
         else
             grpName = DEFAULT_DS_GROUP_NAME;
@@ -528,7 +539,7 @@ else if (cfg.getGroupName() != null)
             if (!create && ctx.cache().cacheDescriptor(cacheName) == null)
                 return null;
 
-            ctx.cache().dynamicStartCache(cacheConfiguration(cfg, cacheName, grpName),
+            ctx.cache().dynamicStartCache(cacheConfiguration(cfg, cacheName, grpName, dataRegionName),
                 cacheName,
                 null,
                 CacheType.DATA_STRUCTURES,
@@ -888,9 +899,12 @@ private boolean isCollocated(CollectionConfiguration cfg) {
      * @param cfg Atomic configuration.
      * @param name Cache name.
      * @param grpName Group name.
+     * @param dataRegionName Name of data region for this cache.
+     *
      * @return Cache configuration.
      */
-    private CacheConfiguration cacheConfiguration(AtomicConfiguration cfg, String name, String grpName) {
+    private CacheConfiguration cacheConfiguration(AtomicConfiguration cfg, String name, String grpName,
+        String dataRegionName) {
         CacheConfiguration ccfg = new CacheConfiguration();
 
         ccfg.setName(name);
@@ -901,6 +915,7 @@ private CacheConfiguration cacheConfiguration(AtomicConfiguration cfg, String na
         ccfg.setCacheMode(cfg.getCacheMode());
         ccfg.setNodeFilter(CacheConfiguration.ALL_NODES);
         ccfg.setAffinity(cfg.getAffinity());
+        ccfg.setDataRegionName(dataRegionName);
 
         if (cfg.getCacheMode() == PARTITIONED)
             ccfg.setBackups(cfg.getBackups());
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/GridNodeMetricsLogSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/GridNodeMetricsLogSelfTest.java
index 35579949c3730..57a159941e933 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/GridNodeMetricsLogSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/GridNodeMetricsLogSelfTest.java
@@ -25,6 +25,7 @@
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.configuration.ExecutorConfiguration;
 import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.testframework.GridStringLogger;
 import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
@@ -163,11 +164,14 @@ protected void checkDataRegionsMetrics(String logOutput) {
             } else
                 assertTrue(F.isEmpty(matcher.group("total")));
 
-            regions.add(matcher.group("name").trim());
+            String regName = matcher.group("name").trim();
+
+            regions.add(regName);
         }
 
         Set expRegions = grid(0).context().cache().context().database().dataRegions().stream()
             .map(v -> v.config().getName().trim())
+            .filter(regName -> !DataStructuresProcessor.VOLATILE_DATA_REGION_NAME.equals(regName))
             .collect(Collectors.toSet());
 
         assertFalse("No data regions in the log.", regions.isEmpty());
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheConfigurationValidationSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheConfigurationValidationSelfTest.java
index b57c208084203..2a084f591fe91 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheConfigurationValidationSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheConfigurationValidationSelfTest.java
@@ -63,6 +63,10 @@ public class GridCacheConfigurationValidationSelfTest extends GridCommonAbstract
     private static final String RESERVED_FOR_DATASTRUCTURES_CACHE_GROUP_NAME_IGNITE_INSTANCE_NAME =
             "reservedForDsCacheGroupNameCheckFails";
 
+    /** */
+    private static final String RESERVED_FOR_VOLATILE_DATASTRUCTURES_CACHE_GROUP_NAME_IGNITE_INSTANCE_NAME =
+        "reservedForVolatileDsCacheGroupNameCheckFails";
+
     /** */
     private static final String CACHE_NAME_WITH_SPECIAL_CHARACTERS_REPLICATED = "--â„–=+:(replicated)";
 
@@ -135,6 +139,9 @@ else if (igniteInstanceName.contains(DUP_DFLT_CACHES_IGNITE_INSTANCE_NAME))
         if (igniteInstanceName.contains(RESERVED_FOR_DATASTRUCTURES_CACHE_GROUP_NAME_IGNITE_INSTANCE_NAME))
             namedCacheCfg.setGroupName("default-ds-group");
 
+        if (igniteInstanceName.contains(RESERVED_FOR_VOLATILE_DATASTRUCTURES_CACHE_GROUP_NAME_IGNITE_INSTANCE_NAME))
+            namedCacheCfg.setGroupName("default-volatile-ds-group@volatileDsMemPlc");
+
         return cfg;
     }
 
@@ -178,6 +185,9 @@ public void testCacheAttributesValidation() throws Exception {
             // This grid should not start.
             startInvalidGrid(RESERVED_FOR_DATASTRUCTURES_CACHE_GROUP_NAME_IGNITE_INSTANCE_NAME);
 
+            // This grid should not start.
+            startInvalidGrid(RESERVED_FOR_VOLATILE_DATASTRUCTURES_CACHE_GROUP_NAME_IGNITE_INSTANCE_NAME);
+
             // This grid will start normally.
             startGrid(1);
         }
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/OutOfMemoryVolatileRegionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/OutOfMemoryVolatileRegionTest.java
new file mode 100644
index 0000000000000..58e6290d4fb0c
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/OutOfMemoryVolatileRegionTest.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.datastructures;
+
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.DataRegionConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.failure.AbstractFailureHandler;
+import org.apache.ignite.failure.FailureContext;
+import org.apache.ignite.internal.mem.IgniteOutOfMemoryException;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.junit.Test;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC;
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
+
+/**
+ * Tests behavior of volatile data structures and regular caches
+ * when {@link IgniteOutOfMemoryException} is thrown.
+ */
+public class OutOfMemoryVolatileRegionTest extends GridCommonAbstractTest {
+    /** Minimal region size. */
+    private static final long DATA_REGION_SIZE = 15L * 1024 * 1024;
+
+    /** */
+    private static final int ATTEMPTS_NUM = 3;
+
+    /** Failure handler triggered. */
+    private static volatile boolean failure;
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        cfg.setDataStorageConfiguration(new DataStorageConfiguration()
+            .setPageSize(4096)
+            .setSystemRegionInitialSize(DATA_REGION_SIZE)
+            .setSystemRegionMaxSize(DATA_REGION_SIZE)
+            .setDefaultDataRegionConfiguration(
+                new DataRegionConfiguration()
+                    .setPersistenceEnabled(true)
+                    .setMetricsEnabled(true)));
+
+        cfg.setFailureHandler(new AbstractFailureHandler() {
+            /** {@inheritDoc} */
+            @Override protected boolean handle(Ignite ignite, FailureContext failureCtx) {
+                failure = true;
+
+                // Do not invalidate a node context.
+                return false;
+            }
+        });
+
+        cfg.setCacheConfiguration(cacheConfiguration(ATOMIC), cacheConfiguration(TRANSACTIONAL));
+
+        return cfg;
+    }
+
+    /**
+     * Creates a new cache configuration with the given cache atomicity mode.
+     *
+     * @param mode Cache atomicity mode.
+     * @return Cache configuration.
+     */
+    private CacheConfiguration cacheConfiguration(CacheAtomicityMode mode) {
+        return new CacheConfiguration(mode.name())
+            .setAtomicityMode(mode)
+            .setAffinity(new RendezvousAffinityFunction(false, 32));
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        cleanPersistenceDir();
+
+        startGrid(0);
+        startGrid(1);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        stopAllGrids();
+
+        cleanPersistenceDir();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    @Test
+    public void testLoadAndClearAtomicCache() throws Exception {
+        loadAndClearCache(ATOMIC, ATTEMPTS_NUM);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    @Test
+    public void testLoadAndClearTransactionalCache() throws Exception {
+        loadAndClearCache(TRANSACTIONAL, ATTEMPTS_NUM);
+    }
+
+    /**
+     * Creates a new cache with the given atomicity mode and tries to load & clear it in a loop.
+     * It is assumed that {@link IgniteOutOfMemoryException} is thrown during loading the cache,
+     * however {@link IgniteCache#clear()} should return the cache to the operable state.
+     *
+     * @param mode Cache atomicity mode.
+     * @param attempts Number of attempts to load and clear the cache.
+     */
+    private void loadAndClearCache(CacheAtomicityMode mode, int attempts) {
+        grid(0).cluster().active(true);
+
+        failure = false;
+
+        IgniteCache cache = grid(0).cache(mode.name());
+
+        for (int i = 0; i < attempts; ++i) {
+            for (int key = 0; key < 5_000; ++key)
+                cache.put(key, new byte[40]);
+
+            cache.clear();
+        }
+
+        assertFalse("Failure handler should not be notified", failure);
+
+        try {
+            for (int j = 0; j < 100000; j++) {
+                grid(0).reentrantLock("l" + getClass().getName() + j,
+                    j % 2 == 0, j % 3 == 0, true);
+                grid(1).semaphore("s" + getClass().getName() + j,
+                    1 + (j % 7), j % 3 == 0, true);
+                grid(0).countDownLatch("c" + getClass().getName() + j,
+                    1 + (j % 13), j % 2 == 0, true);
+            }
+
+            fail("OutOfMemoryException hasn't been thrown");
+        }
+        catch (Exception e) {
+            if (!X.hasCause(e, IgniteOutOfMemoryException.class))
+                fail("Unexpected exception" + e);
+
+            log.info("Expected exception, n: " + e);
+        }
+
+        assertTrue("Failure handler wasn't notified", failure);
+
+        for (int i = 0; i < attempts; ++i) {
+            for (int key = 0; key < 5_000; ++key)
+                cache.put(key, new byte[40]);
+
+            cache.clear();
+        }
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MemoryPolicyInitializationTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MemoryPolicyInitializationTest.java
index 1864c0b53886b..cee869f66e93e 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MemoryPolicyInitializationTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MemoryPolicyInitializationTest.java
@@ -30,6 +30,7 @@
 import org.junit.Test;
 
 import static org.apache.ignite.configuration.MemoryConfiguration.DFLT_MEM_PLC_DEFAULT_NAME;
+import static org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor.VOLATILE_DATA_REGION_NAME;
 
 /**
  *
@@ -77,7 +78,7 @@ public void testNoConfigProvided() throws Exception {
 
         Collection allMemPlcs = ignite.context().cache().context().database().dataRegions();
 
-        assertEquals(3, allMemPlcs.size());
+        assertEquals(4, allMemPlcs.size());
 
         verifyDefaultAndSystemMemoryPolicies(allMemPlcs);
     }
@@ -94,7 +95,7 @@ public void testCustomConfigNoDefault() throws Exception {
 
         Collection allMemPlcs = ignite.context().cache().context().database().dataRegions();
 
-        assertEquals(4, allMemPlcs.size());
+        assertEquals(5, allMemPlcs.size());
 
         verifyDefaultAndSystemMemoryPolicies(allMemPlcs);
 
@@ -116,7 +117,7 @@ public void testCustomConfigOverridesDefault() throws Exception {
 
         Collection allMemPlcs = dbMgr.dataRegions();
 
-        assertEquals(3, allMemPlcs.size());
+        assertEquals(4, allMemPlcs.size());
 
         verifyDefaultAndSystemMemoryPolicies(allMemPlcs);
 
@@ -141,7 +142,7 @@ public void testCustomConfigOverridesDefaultNameAndDeclaresDefault() throws Exce
 
         Collection allMemPlcs = dbMgr.dataRegions();
 
-        assertEquals(4, allMemPlcs.size());
+        assertEquals(5, allMemPlcs.size());
 
         verifyDefaultAndSystemMemoryPolicies(allMemPlcs);
 
@@ -290,6 +291,9 @@ private void verifyDefaultAndSystemMemoryPolicies(Collection allMemP
 
         assertTrue("System memory policy is not presented",
                 isMemoryPolicyPresented(allMemPlcs, IgniteCacheDatabaseSharedManager.SYSTEM_DATA_REGION_NAME));
+
+        assertTrue("Volatile memory policy is not presented",
+                isMemoryPolicyPresented(allMemPlcs, VOLATILE_DATA_REGION_NAME));
     }
 
     /**
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheDataStructuresSelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheDataStructuresSelfTestSuite.java
index 0e09d4cb62e01..4a1abc1350047 100644
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheDataStructuresSelfTestSuite.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheDataStructuresSelfTestSuite.java
@@ -41,6 +41,7 @@
 import org.apache.ignite.internal.processors.cache.datastructures.IgniteQueueClusterReadOnlyTest;
 import org.apache.ignite.internal.processors.cache.datastructures.IgniteSequenceInternalCleanupTest;
 import org.apache.ignite.internal.processors.cache.datastructures.IgniteSetClusterReadOnlyTest;
+import org.apache.ignite.internal.processors.cache.datastructures.OutOfMemoryVolatileRegionTest;
 import org.apache.ignite.internal.processors.cache.datastructures.SemaphoreFailoverNoWaitingAcquirerTest;
 import org.apache.ignite.internal.processors.cache.datastructures.SemaphoreFailoverSafeReleasePermitsTest;
 import org.apache.ignite.internal.processors.cache.datastructures.local.GridCacheLocalAtomicQueueApiSelfTest;
@@ -134,6 +135,7 @@
     IgniteReplicatedLockSelfTest.class,
     IgniteCacheAtomicReplicatedNodeRestartSelfTest.class,
     GridCacheReplicatedQueueRemoveSelfTest.class,
+    OutOfMemoryVolatileRegionTest.class,
 
     GridCachePartitionedSequenceApiSelfTest.class,
     GridCachePartitionedSequenceMultiNodeSelfTest.class,
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/DataRegionMetricsTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/DataRegionMetricsTest.cs
index d0b7332cec6ae..4c26d0ab6edc0 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/DataRegionMetricsTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/DataRegionMetricsTest.cs
@@ -73,7 +73,8 @@ public void TestMemoryMetrics()
                     RegionWithMetrics,
                     RegionWithMetricsAndPersistence,
                     "sysMemPlc",
-                    "TxLog"
+                    "TxLog",
+                    "volatileDsMemPlc"
                 },
                 names,
                 string.Join(", ", names));
@@ -96,11 +97,15 @@ public void TestMemoryMetrics()
                 memMetrics.PhysicalMemoryPages * (memMetrics.PageSize + PageOverhead));
             Assert.Greater(memMetrics.OffHeapSize, memMetrics.PhysicalMemoryPages);
             Assert.Greater(memMetrics.OffheapUsedSize, memMetrics.PhysicalMemoryPages);
-            
+
             var sysMetrics = metrics[4];
             Assert.AreEqual("sysMemPlc", sysMetrics.Name);
             AssertMetricsAreEmpty(sysMetrics);
 
+            var volatileMetrics = metrics[6];
+            Assert.AreEqual("volatileDsMemPlc", volatileMetrics.Name);
+            AssertMetricsAreEmpty(volatileMetrics);
+
             // Metrics by name.
             // In-memory region.
             emptyMetrics = ignite.GetDataRegionMetrics(RegionNoMetrics);
@@ -120,6 +125,10 @@ public void TestMemoryMetrics()
             Assert.AreEqual("sysMemPlc", sysMetrics.Name);
             AssertMetricsAreEmpty(sysMetrics);
 
+            volatileMetrics = ignite.GetDataRegionMetrics("volatileDsMemPlc");
+            Assert.AreEqual("volatileDsMemPlc", volatileMetrics.Name);
+            AssertMetricsAreEmpty(volatileMetrics);
+
             // Invalid name.
             Assert.IsNull(ignite.GetDataRegionMetrics("boo"));
         }
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/MemoryMetricsTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/MemoryMetricsTest.cs
index 352c1e9dfaa61..a2b1c872eee39 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/MemoryMetricsTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/MemoryMetricsTest.cs
@@ -44,7 +44,7 @@ public void TestMemoryMetrics()
 
             // Verify metrics.
             var metrics = ignite.GetMemoryMetrics().OrderBy(x => x.Name).ToArray();
-            Assert.AreEqual(4, metrics.Length);  // two defined plus system and plus TxLog.
+            Assert.AreEqual(5, metrics.Length);  // two defined plus system, TxLog and volatile.
 
             var emptyMetrics = metrics[0];
             Assert.AreEqual(MemoryPolicyNoMetrics, emptyMetrics.Name);
@@ -62,6 +62,14 @@ public void TestMemoryMetrics()
             Assert.AreEqual("sysMemPlc", sysMetrics.Name);
             AssertMetricsAreEmpty(sysMetrics);
 
+            var txLogMetrics = metrics[3];
+            Assert.AreEqual("TxLog", txLogMetrics.Name);
+            AssertMetricsAreEmpty(txLogMetrics);
+
+            var volatileMetrics = metrics[4];
+            Assert.AreEqual("volatileDsMemPlc", volatileMetrics.Name);
+            AssertMetricsAreEmpty(volatileMetrics);
+
             // Metrics by name.
             emptyMetrics = ignite.GetMemoryMetrics(MemoryPolicyNoMetrics);
             Assert.AreEqual(MemoryPolicyNoMetrics, emptyMetrics.Name);

From 8ac005f94b6f15a82cd7b9dc3b5ced2352e389eb Mon Sep 17 00:00:00 2001
From: Aleksey Plekhanov 
Date: Thu, 5 Nov 2020 17:57:41 +0300
Subject: [PATCH 023/110] IGNITE-13676 Java thin client: Fix message read after
 SECURITY_VIOLATION error - Fixes #8428.

Signed-off-by: Aleksey Plekhanov 
---
 .../ignite/internal/client/thin/TcpClientChannel.java    | 6 ++++--
 .../security/client/ThinClientPermissionCheckTest.java   | 9 +++++++++
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java
index 4f3ee40985122..c3576720092b9 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java
@@ -461,9 +461,11 @@ private void processNextMessage() throws ClientProtocolError, ClientConnectionEx
             if (msgSize > hdrSize)
                 res = dataInput.spinRead(msgSize - hdrSize);
         }
-        else if (status == ClientStatus.SECURITY_VIOLATION)
+        else if (status == ClientStatus.SECURITY_VIOLATION) {
+            dataInput.spinRead(msgSize - hdrSize); // Read message to the end.
+
             err = new ClientAuthorizationException();
-        else {
+        } else {
             resIn = new BinaryHeapInputStream(dataInput.spinRead(msgSize - hdrSize));
 
             String errMsg = ClientUtils.createBinaryReader(null, resIn).readString();
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/security/client/ThinClientPermissionCheckTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/security/client/ThinClientPermissionCheckTest.java
index 1230aba3ebca5..152e2570a38c6 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/security/client/ThinClientPermissionCheckTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/security/client/ThinClientPermissionCheckTest.java
@@ -262,6 +262,15 @@ public void testSysOperation() throws Exception {
             assertThrowsWithCause(() -> runOperation(CLIENT, op), ClientAuthorizationException.class);
     }
 
+    /** */
+    @Test
+    public void testAllowedOperationAfterSecurityViolation() throws Exception {
+        try (IgniteClient client = startClient(CLIENT_READ)) {
+            assertThrowsWithCause(() -> client.cache(CACHE).put("key", "value"), ClientAuthorizationException.class);
+            assertNull(client.cache(CACHE).get("key"));
+        }
+    }
+
     /**
      * Gets all operations.
      *

From 3e24202e51700a48e21774b35586a7fb3a50c8fe Mon Sep 17 00:00:00 2001
From: Slava Koptilin 
Date: Fri, 6 Nov 2020 16:17:05 +0300
Subject: [PATCH 024/110] IGNITE-13664 Quoting of File.separator to support
 Windows-style FS separators. - Fixes #8427.

Signed-off-by: Sergey Chugunov 
---
 .../cache/persistence/GridCacheDatabaseSharedManager.java     | 3 ++-
 .../ignite/internal/visor/persistence/PersistenceTask.java    | 4 ++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java
index 0789a0cd9f9f6..37bd4642eb762 100755
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java
@@ -50,6 +50,7 @@
 import java.util.function.Consumer;
 import java.util.function.Predicate;
 import java.util.function.ToLongFunction;
+import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 
 import org.apache.ignite.DataRegionMetricsProvider;
@@ -1595,7 +1596,7 @@ private CheckpointStatus readCheckpointStatus() throws IgniteCheckedException {
 
             mntcRegistry.registerWorkflowCallback(CORRUPTED_DATA_FILES_MNTC_TASK_NAME,
                 new CorruptedPdsMaintenanceCallback(workDir,
-                    Arrays.asList(mntcTask.parameters().split(File.separator)))
+                    Arrays.asList(mntcTask.parameters().split(Pattern.quote(File.separator))))
             );
 
             return;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTask.java
index 1ac23f4e2c0d7..8f18115f5a55f 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTask.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTask.java
@@ -28,8 +28,8 @@
 import java.util.Map;
 import java.util.Optional;
 import java.util.concurrent.atomic.AtomicReference;
+import java.util.regex.Pattern;
 import java.util.stream.Collectors;
-
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.IgniteException;
 import org.apache.ignite.configuration.DataStorageConfiguration;
@@ -359,7 +359,7 @@ private PersistenceTaskResult info() {
         private List corruptedCacheDirectories(MaintenanceTask task) {
             String params = task.parameters();
 
-            String[] namesArr = params.split(File.separator);
+            String[] namesArr = params.split(Pattern.quote(File.separator));
 
             return Arrays.asList(namesArr);
         }

From 7c4121f6a427f15af3083186bcb4e55ce554e5a2 Mon Sep 17 00:00:00 2001
From: Vladislav Pyatkov 
Date: Sun, 8 Nov 2020 20:32:41 +0300
Subject: [PATCH 025/110] IGNITE-13594 Fixed an issue where compute jobs could
 not load user-defined classes through peer class loading.

Signed-off-by: Slava Koptilin 
---
 .../ignite/internal/GridJobSessionImpl.java   |  10 +
 .../internal/binary/BinaryEnumObjectImpl.java |   7 +-
 .../internal/binary/BinaryObjectImpl.java     |  13 +-
 .../binary/BinaryObjectOffheapImpl.java       |   7 +-
 .../GridDeploymentPerVersionStore.java        |   2 +-
 .../processors/cache/CacheLazyEntry.java      |  58 ++---
 .../processors/cache/CacheObject.java         |  10 +
 .../cache/CacheObjectByteArrayImpl.java       |   5 +
 .../processors/cache/CacheObjectContext.java  |   6 +-
 .../processors/cache/CacheObjectImpl.java     |  23 +-
 .../processors/cache/CacheObjectUtils.java    |  45 ++--
 .../processors/cache/GridCacheAdapter.java    |  20 +-
 .../processors/cache/GridCacheContext.java    |  28 ++-
 .../cache/GridCacheDeploymentManager.java     | 155 ++++----------
 .../cache/GridCacheEventManager.java          |  12 +-
 .../processors/cache/GridCacheMapEntry.java   |  20 +-
 .../processors/cache/GridCacheReturn.java     |  41 +++-
 .../cache/IgniteCacheOffheapManagerImpl.java  |   4 +-
 .../processors/cache/KeyCacheObjectImpl.java  |   5 +
 .../GridDistributedTxRemoteAdapter.java       |   2 +-
 .../dht/CacheDistributedGetFutureAdapter.java |   6 +
 .../dht/GridDhtTxAbstractEnlistFuture.java    |   5 +
 .../dht/GridDhtTxEnlistFuture.java            |   3 +-
 .../dht/GridDhtTxPrepareFuture.java           |  33 ++-
 .../dht/GridPartitionedGetFuture.java         |   6 +-
 .../dht/GridPartitionedSingleGetFuture.java   |  13 +-
 .../dht/atomic/GridDhtAtomicCache.java        |  14 +-
 .../GridNearAtomicAbstractUpdateFuture.java   |  19 +-
 .../GridNearAtomicSingleUpdateFuture.java     |   2 +-
 .../atomic/GridNearAtomicUpdateFuture.java    |   6 +-
 .../dht/colocated/GridDhtColocatedCache.java  |   6 +-
 .../distributed/near/GridNearGetFuture.java   |   9 +-
 .../distributed/near/GridNearTxLocal.java     |  55 +++--
 .../GridNearReadRepairCheckOnlyFuture.java    |   6 +-
 .../consistency/GridNearReadRepairFuture.java |   6 +-
 .../local/atomic/GridLocalAtomicCache.java    |  16 +-
 .../cache/query/GridCacheQueryManager.java    |   8 +-
 .../continuous/CacheContinuousQueryEvent.java |   6 +-
 .../store/GridCacheStoreManagerAdapter.java   |  14 +-
 .../cache/transactions/IgniteTxAdapter.java   |  17 +-
 .../transactions/IgniteTxLocalAdapter.java    |  13 +-
 .../GridCacheLazyPlainVersionedEntry.java     |   4 +-
 .../UserCacheObjectByteArrayImpl.java         |   7 +-
 .../datastreamer/DataStreamerEntry.java       |   4 +-
 .../processors/job/GridJobProcessor.java      |  18 +-
 .../ignite/internal/util/IgniteUtils.java     |  68 ++++++
 .../internal/GridAffinityNoCacheSelfTest.java |   4 +
 .../P2PCacheOperationIntoComputeTest.java     | 146 +++++++++++++
 .../IgniteIncompleteCacheObjectSelfTest.java  |   5 +
 .../database/CacheFreeListSelfTest.java       |   4 +
 .../testsuites/IgniteP2PSelfTestSuite.java    |   4 +-
 .../compute/AveragePersonSalaryCallable.java  | 200 ++++++++++++++++++
 52 files changed, 902 insertions(+), 298 deletions(-)
 create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/managers/deployment/P2PCacheOperationIntoComputeTest.java
 create mode 100644 modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/AveragePersonSalaryCallable.java

diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridJobSessionImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/GridJobSessionImpl.java
index 75434d39eb6d0..6f1fb32f976db 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/GridJobSessionImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/GridJobSessionImpl.java
@@ -25,6 +25,7 @@
 import org.apache.ignite.compute.ComputeJobSibling;
 import org.apache.ignite.compute.ComputeTaskSessionAttributeListener;
 import org.apache.ignite.compute.ComputeTaskSessionScope;
+import org.apache.ignite.internal.managers.deployment.GridDeployment;
 import org.apache.ignite.internal.util.future.IgniteFinishedFutureImpl;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.apache.ignite.internal.util.typedef.internal.U;
@@ -62,6 +63,15 @@ public GridJobSessionImpl(GridKernalContext ctx, GridTaskSessionImpl ses, Ignite
         this.jobId = jobId;
     }
 
+    /**
+     * Grid job deployment.
+     *
+     * @return Grid deployment.
+     */
+    public GridDeployment deployment() {
+        return ses.deployment();
+    }
+
     /** {@inheritDoc} */
     @Override public GridTaskSessionInternal session() {
         return ses;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryEnumObjectImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryEnumObjectImpl.java
index 5d5eb3ef3636f..b8a3bc24068f7 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryEnumObjectImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryEnumObjectImpl.java
@@ -320,7 +320,12 @@ private  T uncachedValue(Class cls) throws BinaryObjectException {
 
     /** {@inheritDoc} */
     @Nullable @Override public  T value(CacheObjectValueContext ctx, boolean cpy) {
-        return deserialize();
+        return value(ctx, cpy, null);
+    }
+
+    /** {@inheritDoc} */
+    @Nullable @Override public  T value(CacheObjectValueContext ctx, boolean cpy, ClassLoader ldr) {
+        return deserialize(ldr);
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectImpl.java
index dfef1b900065c..6b4eea249b778 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectImpl.java
@@ -136,10 +136,19 @@ public BinaryObjectImpl(BinaryContext ctx, byte[] arr, int start) {
 
     /** {@inheritDoc} */
     @Nullable @Override public  T value(CacheObjectValueContext ctx, boolean cpy) {
+        return value(ctx, cpy, null);
+    }
+
+    /** {@inheritDoc} */
+    @Nullable @Override public  T value(CacheObjectValueContext ctx, boolean cpy, ClassLoader ldr) {
         Object obj0 = obj;
 
-        if (obj0 == null || (cpy && needCopy(ctx)))
-            obj0 = deserializeValue(ctx);
+        if (obj0 == null || (cpy && needCopy(ctx))) {
+            if (ldr != null)
+                obj0 = deserialize(ldr);
+            else
+                obj0 = deserializeValue(ctx);
+        }
 
         return (T)obj0;
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectOffheapImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectOffheapImpl.java
index efe1c20723595..b9ff9c2a61886 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectOffheapImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectOffheapImpl.java
@@ -456,7 +456,12 @@ else if (fieldOffLen == BinaryUtils.OFFSET_2)
 
     /** {@inheritDoc} */
     @Nullable @Override public  T value(CacheObjectValueContext ctx, boolean cpy) {
-        return (T)deserializeValue();
+        return value(ctx, cpy, null);
+    }
+
+    /** {@inheritDoc} */
+    @Nullable @Override public  T value(CacheObjectValueContext ctx, boolean cpy, ClassLoader ldr) {
+        return deserialize(ldr);
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java
index f317024ade88d..5d536d0bf5bd2 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java
@@ -328,7 +328,7 @@ else if (log.isDebugEnabled())
                 if (isDeadClassLoader(meta))
                     return null;
 
-                if (meta.participants() != null && !meta.participants().isEmpty()) {
+                if (!F.isEmpty(meta.participants())) {
                     Map participants = new LinkedHashMap<>();
 
                     for (Map.Entry e : meta.participants().entrySet()) {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheLazyEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheLazyEntry.java
index 1a1645fd0b67d..45adbfebf7598 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheLazyEntry.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheLazyEntry.java
@@ -21,13 +21,17 @@
 import org.apache.ignite.cache.CacheInterceptorEntry;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
 import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
 
 /**
  *
  */
 public class CacheLazyEntry extends CacheInterceptorEntry {
     /** Cache context. */
-    protected GridCacheContext cctx;
+    protected final GridCacheContext cctx;
+
+    /** Keep binary flag. */
+    private final boolean keepBinary;
 
     /** Key cache object. */
     protected KeyCacheObject keyObj;
@@ -43,9 +47,6 @@ public class CacheLazyEntry extends CacheInterceptorEntry {
     @GridToStringInclude(sensitive = true)
     protected V val;
 
-    /** Keep binary flag. */
-    private boolean keepBinary;
-
     /** Update counter. */
     private Long updateCntr;
 
@@ -56,10 +57,13 @@ public class CacheLazyEntry extends CacheInterceptorEntry {
      * @param keepBinary Keep binary flag.
      */
     public CacheLazyEntry(GridCacheContext cctx, KeyCacheObject keyObj, CacheObject valObj, boolean keepBinary) {
-        this.cctx = cctx;
-        this.keyObj = keyObj;
-        this.valObj = valObj;
-        this.keepBinary = keepBinary;
+        this(cctx,
+            keyObj,
+            null,
+            valObj,
+            null,
+            keepBinary,
+            null);
     }
 
     /**
@@ -69,10 +73,13 @@ public CacheLazyEntry(GridCacheContext cctx, KeyCacheObject keyObj, CacheObject
      * @param cctx Cache context.
      */
     public CacheLazyEntry(GridCacheContext cctx, KeyCacheObject keyObj, V val, boolean keepBinary) {
-        this.cctx = cctx;
-        this.keyObj = keyObj;
-        this.val = val;
-        this.keepBinary = keepBinary;
+        this(cctx,
+            keyObj,
+            null,
+            null,
+            val,
+            keepBinary,
+            null);
     }
 
     /**
@@ -81,7 +88,6 @@ public CacheLazyEntry(GridCacheContext cctx, KeyCacheObject keyObj, V val, boole
      * @param key Key value.
      * @param valObj Cache object
      * @param keepBinary Keep binary flag.
-     * @param updateCntr Partition update counter.
      * @param val Cache value.
      */
     public CacheLazyEntry(GridCacheContext ctx,
@@ -89,16 +95,15 @@ public CacheLazyEntry(GridCacheContext ctx,
         K key,
         CacheObject valObj,
         V val,
-        boolean keepBinary,
-        Long updateCntr
+        boolean keepBinary
     ) {
-        this.cctx = ctx;
-        this.keyObj = keyObj;
-        this.key = key;
-        this.valObj = valObj;
-        this.val = val;
-        this.keepBinary = keepBinary;
-        this.updateCntr = updateCntr;
+        this(ctx,
+            keyObj,
+            key,
+            valObj,
+            val,
+            keepBinary,
+            null);
     }
 
     /**
@@ -107,6 +112,7 @@ public CacheLazyEntry(GridCacheContext ctx,
      * @param key Key value.
      * @param valObj Cache object
      * @param keepBinary Keep binary flag.
+     * @param updateCntr Partition update counter.
      * @param val Cache value.
      */
     public CacheLazyEntry(GridCacheContext ctx,
@@ -114,7 +120,8 @@ public CacheLazyEntry(GridCacheContext ctx,
         K key,
         CacheObject valObj,
         V val,
-        boolean keepBinary
+        boolean keepBinary,
+        Long updateCntr
     ) {
         this.cctx = ctx;
         this.keyObj = keyObj;
@@ -122,12 +129,13 @@ public CacheLazyEntry(GridCacheContext ctx,
         this.valObj = valObj;
         this.val = val;
         this.keepBinary = keepBinary;
+        this.updateCntr = updateCntr;
     }
 
     /** {@inheritDoc} */
     @Override public K getKey() {
         if (key == null)
-            key = (K)cctx.unwrapBinaryIfNeeded(keyObj, keepBinary);
+            key = (K)cctx.unwrapBinaryIfNeeded(keyObj, keepBinary, U.deploymentClassLoader(cctx.kernalContext(), U.contextDeploymentClassLoaderId(cctx.kernalContext())));
 
         return key;
     }
@@ -145,7 +153,7 @@ public CacheLazyEntry(GridCacheContext ctx,
      */
     public V getValue(boolean keepBinary) {
         if (val == null)
-            val = (V)cctx.unwrapBinaryIfNeeded(valObj, keepBinary, true);
+            val = (V)cctx.unwrapBinaryIfNeeded(valObj, keepBinary, true, U.deploymentClassLoader(cctx.kernalContext(), U.contextDeploymentClassLoaderId(cctx.kernalContext())));
 
         return val;
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObject.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObject.java
index f9f384a7f9702..0f21c77c43852 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObject.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObject.java
@@ -45,6 +45,16 @@ public interface CacheObject extends Message {
      */
     @Nullable public  T value(CacheObjectValueContext ctx, boolean cpy);
 
+    /**
+     * Deserializes a value from an internal representation.
+     *
+     * @param ctx Context.
+     * @param cpy If {@code true}, the value will be copied.
+     * @param ldr Class loader; if {@code null}, the default class loader will be used.
+     * @return Value.
+     */
+    @Nullable public  T value(CacheObjectValueContext ctx, boolean cpy, ClassLoader ldr);
+
     /**
      * @param ctx Context.
      * @return Value bytes.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectByteArrayImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectByteArrayImpl.java
index de5a9191950c0..5c033b6421208 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectByteArrayImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectByteArrayImpl.java
@@ -63,6 +63,11 @@ public CacheObjectByteArrayImpl(byte[] val) {
 
     /** {@inheritDoc} */
     @Nullable @Override public  T value(CacheObjectValueContext ctx, boolean cpy) {
+        return value(ctx, cpy, null);
+    }
+
+    /** {@inheritDoc} */
+    @Nullable @Override public  T value(CacheObjectValueContext ctx, boolean cpy, ClassLoader ldr) {
         if (cpy)
             return (T)Arrays.copyOf(val, val.length);
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectContext.java
index d121a5ecc3b99..01f751b426e16 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectContext.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectContext.java
@@ -19,6 +19,7 @@
 
 import org.apache.ignite.cache.affinity.AffinityKeyMapper;
 import org.apache.ignite.internal.GridKernalContext;
+import org.jetbrains.annotations.Nullable;
 
 /**
  *
@@ -127,12 +128,13 @@ public boolean customAffinityMapper() {
      * @param o Object to unwrap.
      * @param keepBinary Keep binary flag.
      * @param cpy Copy value flag.
+     * @param ldr Class loader, used for deserialization from binary representation.
      * @return Unwrapped object.
      */
-    public Object unwrapBinaryIfNeeded(Object o, boolean keepBinary, boolean cpy) {
+    public Object unwrapBinaryIfNeeded(Object o, boolean keepBinary, boolean cpy, @Nullable ClassLoader ldr) {
         if (o == null)
             return null;
 
-        return CacheObjectUtils.unwrapBinaryIfNeeded(this, o, keepBinary, cpy);
+        return CacheObjectUtils.unwrapBinaryIfNeeded(this, o, keepBinary, cpy, ldr);
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectImpl.java
index 6ca700b6efffc..46ec782ee9c48 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectImpl.java
@@ -54,7 +54,12 @@ public CacheObjectImpl(Object val, byte[] valBytes) {
     }
 
     /** {@inheritDoc} */
-    @Nullable @Override public  T value(CacheObjectValueContext ctx, boolean cpy) {
+    @Override public  @Nullable T value(CacheObjectValueContext ctx, boolean cpy) {
+        return value(ctx, cpy, null);
+    }
+
+    /** {@inheritDoc} */
+    @Nullable @Override public  T value(CacheObjectValueContext ctx, boolean cpy, ClassLoader ldr) {
         cpy = cpy && needCopy(ctx);
 
         try {
@@ -69,16 +74,14 @@ public CacheObjectImpl(Object val, byte[] valBytes) {
                     valBytes = proc.marshal(ctx, val);
                 }
 
-                ClassLoader clsLdr;
-
-                if (val != null)
-                    clsLdr = val.getClass().getClassLoader();
-                else if (kernalCtx.config().isPeerClassLoadingEnabled())
-                    clsLdr = kernalCtx.cache().context().deploy().globalLoader();
-                else
-                    clsLdr = null;
+                if (ldr == null) {
+                    if (val != null)
+                        ldr = val.getClass().getClassLoader();
+                    else if (kernalCtx.config().isPeerClassLoadingEnabled())
+                        ldr = kernalCtx.cache().context().deploy().globalLoader();
+                }
 
-                return (T)proc.unmarshal(ctx, valBytes, clsLdr);
+                return (T)proc.unmarshal(ctx, valBytes, ldr);
             }
 
             if (val != null)
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectUtils.java
index ed2d32491f0e7..ed278b1e0dd93 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectUtils.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectUtils.java
@@ -24,6 +24,7 @@
 import org.apache.ignite.internal.util.MutableSingletonList;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.U;
+import org.jetbrains.annotations.Nullable;
 
 /**
  * Cache object utility methods.
@@ -36,16 +37,24 @@ public class CacheObjectUtils {
      * @return Unwrapped object.
      */
     public static Object unwrapBinaryIfNeeded(CacheObjectValueContext ctx, CacheObject o, boolean keepBinary, boolean cpy) {
-        return unwrapBinary(ctx, o, keepBinary, cpy);
+        return unwrapBinary(ctx, o, keepBinary, cpy, null);
     }
 
     /**
+     * @param ctx Cache object context.
      * @param o Object to unwrap.
      * @param keepBinary Keep binary flag.
      * @param cpy Copy value flag.
+     * @param ldr Class loader, used for deserialization from binary representation.
      * @return Unwrapped object.
      */
-    public static Object unwrapBinaryIfNeeded(CacheObjectValueContext ctx, Object o, boolean keepBinary, boolean cpy) {
+    public static Object unwrapBinaryIfNeeded(
+        CacheObjectValueContext ctx,
+        Object o,
+        boolean keepBinary,
+        boolean cpy,
+        @Nullable ClassLoader ldr
+    ) {
         if (o == null)
             return null;
 
@@ -55,16 +64,16 @@ public static Object unwrapBinaryIfNeeded(CacheObjectValueContext ctx, Object o,
 
             Object key = entry.getKey();
 
-            Object uKey = unwrapBinary(ctx, key, keepBinary, cpy);
+            Object uKey = unwrapBinary(ctx, key, keepBinary, cpy, ldr);
 
             Object val = entry.getValue();
 
-            Object uVal = unwrapBinary(ctx, val, keepBinary, cpy);
+            Object uVal = unwrapBinary(ctx, val, keepBinary, cpy, ldr);
 
             return (key != uKey || val != uVal) ? F.t(uKey, uVal) : o;
         }
 
-        return unwrapBinary(ctx, o, keepBinary, cpy);
+        return unwrapBinary(ctx, o, keepBinary, cpy, ldr);
     }
 
     /**
@@ -90,7 +99,7 @@ private static Collection unwrapKnownCollection(CacheObjectValueContext
         assert col0 != null;
 
         for (Object obj : col)
-            col0.add(unwrapBinary(ctx, obj, keepBinary, cpy));
+            col0.add(unwrapBinary(ctx, obj, keepBinary, cpy, null));
 
         return (col0 instanceof MutableSingletonList) ? U.convertToSingletonList(col0) : col0;
     }
@@ -112,8 +121,8 @@ private static Map unwrapBinariesIfNeeded(CacheObjectValueContex
         for (Map.Entry e : map.entrySet())
             // TODO why don't we use keepBinary parameter here?
             map0.put(
-                unwrapBinary(ctx, e.getKey(), false, cpy),
-                unwrapBinary(ctx, e.getValue(), false, cpy));
+                unwrapBinary(ctx, e.getKey(), false, cpy, null),
+                unwrapBinary(ctx, e.getValue(), false, cpy, null));
 
         return map0;
     }
@@ -132,7 +141,7 @@ private static Collection unwrapBinariesIfNeeded(CacheObjectValueContext
             col0 = new ArrayList<>(col.size());
 
         for (Object obj : col)
-            col0.add(unwrapBinaryIfNeeded(ctx, obj, keepBinary, cpy));
+            col0.add(unwrapBinaryIfNeeded(ctx, obj, keepBinary, cpy, null));
 
         return col0;
     }
@@ -153,16 +162,28 @@ private static Object[] unwrapBinariesInArrayIfNeeded(CacheObjectValueContext ct
         Object[] res = new Object[arr.length];
 
         for (int i = 0; i < arr.length; i++)
-            res[i] = unwrapBinary(ctx, arr[i], keepBinary, cpy);
+            res[i] = unwrapBinary(ctx, arr[i], keepBinary, cpy, null);
 
         return res;
     }
 
     /**
+     * Unwraps an object for end user.
+     *
+     * @param ctx Cache object context.
      * @param o Object to unwrap.
+     * @param keepBinary If {@code false}, the object is deserialized from its binary form; otherwise it is kept binary.
+     * @param cpy If {@code true}, the object is copied before being returned.
+     * @param ldr Class loader, used for deserialization from binary representation.
      * @return Unwrapped object.
      */
-    private static Object unwrapBinary(CacheObjectValueContext ctx, Object o, boolean keepBinary, boolean cpy) {
+    private static Object unwrapBinary(
+        CacheObjectValueContext ctx,
+        Object o,
+        boolean keepBinary,
+        boolean cpy,
+        @Nullable ClassLoader ldr
+    ) {
         if (o == null)
             return o;
 
@@ -173,7 +194,7 @@ private static Object unwrapBinary(CacheObjectValueContext ctx, Object o, boolea
                 return o;
 
             // It may be a collection of binaries
-            o = co.value(ctx, cpy);
+            o = co.value(ctx, cpy, ldr);
         }
 
         if (BinaryUtils.knownCollection(o))
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java
index fa956bc9fc00a..2a06f1a92400d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java
@@ -952,7 +952,7 @@ else if (modes.heap) {
             }
         }
 
-        Object val = ctx.unwrapBinaryIfNeeded(cacheVal, ctx.keepBinary(), false);
+        Object val = ctx.unwrapBinaryIfNeeded(cacheVal, ctx.keepBinary(), false, null);
 
         return (V)val;
     }
@@ -1479,7 +1479,7 @@ private boolean evictx(K key, GridCacheVersion ver,
         V val = repairableGet(key, !keepBinary, false);
 
         if (ctx.config().getInterceptor() != null) {
-            key = keepBinary ? (K)ctx.unwrapBinaryIfNeeded(key, true, false) : key;
+            key = keepBinary ? (K)ctx.unwrapBinaryIfNeeded(key, true, false, null) : key;
 
             val = (V)ctx.config().getInterceptor().onGet(key, val);
         }
@@ -1507,13 +1507,13 @@ private boolean evictx(K key, GridCacheVersion ver,
             = (EntryGetResult)repairableGet(key, !keepBinary, true);
 
         CacheEntry val = t != null ? new CacheEntryImplEx<>(
-            keepBinary ? (K)ctx.unwrapBinaryIfNeeded(key, true, false) : key,
+            keepBinary ? (K)ctx.unwrapBinaryIfNeeded(key, true, false, null) : key,
             (V)t.value(),
             t.version())
             : null;
 
         if (ctx.config().getInterceptor() != null) {
-            key = keepBinary ? (K)ctx.unwrapBinaryIfNeeded(key, true, false) : key;
+            key = keepBinary ? (K)ctx.unwrapBinaryIfNeeded(key, true, false, null) : key;
 
             V val0 = (V)ctx.config().getInterceptor().onGet(key, t != null ? val.getValue() : null);
 
@@ -1549,7 +1549,7 @@ private boolean evictx(K key, GridCacheVersion ver,
         if (ctx.config().getInterceptor() != null)
             fut = fut.chain(new CX1, V>() {
                 @Override public V applyx(IgniteInternalFuture f) throws IgniteCheckedException {
-                    K key = keepBinary ? (K)ctx.unwrapBinaryIfNeeded(key0, true, false) : key0;
+                    K key = keepBinary ? (K)ctx.unwrapBinaryIfNeeded(key0, true, false, null) : key0;
 
                     return (V)ctx.config().getInterceptor().onGet(key, f.get());
                 }
@@ -1590,7 +1590,7 @@ private boolean evictx(K key, GridCacheVersion ver,
                     throws IgniteCheckedException {
                     EntryGetResult t = f.get();
 
-                    K key = keepBinary ? (K)ctx.unwrapBinaryIfNeeded(key0, true, false) : key0;
+                    K key = keepBinary ? (K)ctx.unwrapBinaryIfNeeded(key0, true, false, null) : key0;
 
                     CacheEntry val = t != null ? new CacheEntryImplEx<>(
                         key,
@@ -3173,7 +3173,7 @@ protected V getAndRemove0(final K key) throws IgniteCheckedException {
                 V ret = fut.get().value();
 
                 if (ctx.config().getInterceptor() != null) {
-                    K key = keepBinary ? (K)ctx.unwrapBinaryIfNeeded(key0, true, false) : key0;
+                    K key = keepBinary ? (K)ctx.unwrapBinaryIfNeeded(key0, true, false, null) : key0;
 
                     return (V)ctx.config().getInterceptor().onBeforeRemove(new CacheEntryImpl(key, ret)).get2();
                 }
@@ -5374,8 +5374,8 @@ private void advance() {
 
         KeyCacheObject key = entry.key();
 
-        Object key0 = ctx.unwrapBinaryIfNeeded(key, !deserializeBinary, true);
-        Object val0 = ctx.unwrapBinaryIfNeeded(val, !deserializeBinary, true);
+        Object key0 = ctx.unwrapBinaryIfNeeded(key, !deserializeBinary, true, null);
+        Object val0 = ctx.unwrapBinaryIfNeeded(val, !deserializeBinary, true, null);
 
         return new CacheEntryImpl<>((K)key0, (V)val0, entry.version());
     }
@@ -7237,7 +7237,7 @@ private KeySetIterator(Iterator internalIterator, boolean kee
         @Override public K next() {
             current = internalIterator.next();
 
-            return (K)ctx.unwrapBinaryIfNeeded(current.key(), keepBinary, true);
+            return (K)ctx.unwrapBinaryIfNeeded(current.key(), keepBinary, true, null);
         }
 
         /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java
index a8def49124f45..613a6be330a4b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java
@@ -1805,10 +1805,11 @@ public Collection unwrapBinariesIfNeeded(Collection col, boolean
      *
      * @param o Object to unwrap.
      * @param keepBinary Keep binary flag.
+     * @param ldr Class loader, used for deserialization from binary representation.
      * @return Unwrapped object.
      */
-    public Object unwrapBinaryIfNeeded(Object o, boolean keepBinary) {
-        return unwrapBinaryIfNeeded(o, keepBinary, true);
+    public Object unwrapBinaryIfNeeded(Object o, boolean keepBinary, @Nullable ClassLoader ldr) {
+        return unwrapBinaryIfNeeded(o, keepBinary, true, ldr);
     }
 
     /**
@@ -1817,10 +1818,11 @@ public Object unwrapBinaryIfNeeded(Object o, boolean keepBinary) {
      * @param o Object to unwrap.
      * @param keepBinary Keep binary flag.
      * @param cpy Copy value flag.
+     * @param ldr Class loader, used for deserialization from binary representation.
      * @return Unwrapped object.
      */
-    public Object unwrapBinaryIfNeeded(Object o, boolean keepBinary, boolean cpy) {
-        return cacheObjCtx.unwrapBinaryIfNeeded(o, keepBinary, cpy);
+    public Object unwrapBinaryIfNeeded(Object o, boolean keepBinary, boolean cpy, @Nullable ClassLoader ldr) {
+        return cacheObjCtx.unwrapBinaryIfNeeded(o, keepBinary, cpy, ldr);
     }
 
     /**
@@ -1836,7 +1838,7 @@ public Map unwrapInvokeResult(@Nullable Map resMap
 
                     if (invokeRes.result() != null)
                         res = CacheInvokeResult.fromResult(unwrapBinaryIfNeeded(invokeRes.result(),
-                            keepBinary, false));
+                            keepBinary, false, null));
                 }
 
                 return res;
@@ -1924,6 +1926,7 @@ public void validateKeyAndValue(KeyCacheObject key, CacheObject val) throws Igni
      * @param deserializeBinary Deserialize binary flag.
      * @param cpy Copy flag.
      * @param ver GridCacheVersion.
+     * @param ldr Class loader, used for deserialization from binary representation.
      */
     public  void addResult(Map map,
         KeyCacheObject key,
@@ -1934,10 +1937,11 @@ public  void addResult(Map map,
         boolean cpy,
         final GridCacheVersion ver,
         final long expireTime,
-        final long ttl) {
+        final long ttl,
+        @Nullable ClassLoader ldr) {
         // Creates EntryGetResult
         addResult(map, key, val, skipVals, keepCacheObjects, deserializeBinary, cpy, null,
-            ver, expireTime, ttl, ver != null);
+            ver, expireTime, ttl, ver != null, ldr);
     }
 
     /**
@@ -1960,7 +1964,7 @@ public  void addResult(Map map,
         boolean needVer) {
         // Uses getRes as result.
         addResult(map, key, getRes.value(), skipVals, keepCacheObjects, deserializeBinary, cpy, getRes,
-            null, 0, 0, needVer);
+            null, 0, 0, needVer, null);
     }
 
     /**
@@ -1976,6 +1980,7 @@ public  void addResult(Map map,
      * @param expireTime Entry expire time.
      * @param ttl Entry TTL.
      * @param needVer Need version flag.
+     * @param ldr Class loader, used for deserialization from binary representation.
      */
     public  void addResult(Map map,
         KeyCacheObject key,
@@ -1988,14 +1993,15 @@ public  void addResult(Map map,
         final GridCacheVersion ver,
         final long expireTime,
         final long ttl,
-        boolean needVer) {
+        boolean needVer,
+        @Nullable ClassLoader ldr) {
         assert key != null;
         assert val != null || skipVals;
 
         if (!keepCacheObjects) {
-            Object key0 = unwrapBinaryIfNeeded(key, !deserializeBinary, cpy);
+            Object key0 = unwrapBinaryIfNeeded(key, !deserializeBinary, cpy, ldr);
 
-            Object val0 = skipVals ? true : unwrapBinaryIfNeeded(val, !deserializeBinary, cpy);
+            Object val0 = skipVals ? true : unwrapBinaryIfNeeded(val, !deserializeBinary, cpy, ldr);
 
             assert key0 != null : key;
             assert val0 != null : val;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheDeploymentManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheDeploymentManager.java
index 971de98985c95..afd67b27dda05 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheDeploymentManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheDeploymentManager.java
@@ -36,11 +36,9 @@
 import org.apache.ignite.events.DiscoveryEvent;
 import org.apache.ignite.events.Event;
 import org.apache.ignite.internal.managers.deployment.GridDeployment;
-import org.apache.ignite.internal.managers.deployment.GridDeploymentInfo;
 import org.apache.ignite.internal.managers.deployment.GridDeploymentInfoBean;
 import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearCacheAdapter;
-import org.apache.ignite.internal.util.IgniteUtils;
 import org.apache.ignite.internal.util.lang.GridPeerDeployAware;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
 import org.apache.ignite.internal.util.typedef.CA;
@@ -73,9 +71,6 @@ public class GridCacheDeploymentManager extends GridCacheSharedManagerAdap
     /** Per-thread deployment context. */
     private ConcurrentMap> deps = new ConcurrentHashMap<>();
 
-    /** Collection of all known participants (Node ID -> Loader ID). */
-    private Map allParticipants = new ConcurrentHashMap<>();
-
     /** Discovery listener. */
     private GridLocalEventListener discoLsnr;
 
@@ -93,7 +88,7 @@ public class GridCacheDeploymentManager extends GridCacheSharedManagerAdap
     private boolean depEnabled;
 
     /** Class loader id for local thread. */
-    private ThreadLocal localLdrId = new ThreadLocal<>();
+    private final ThreadLocal localLdrId = new ThreadLocal<>();
 
     /** {@inheritDoc} */
     @Override public void start0() throws IgniteCheckedException {
@@ -124,8 +119,6 @@ public class GridCacheDeploymentManager extends GridCacheSharedManagerAdap
                                 log.debug("Removed cached info [d=" + d + ", deps=" + deps + ']');
                         }
                     }
-
-                    allParticipants.remove(id);
                 }
             };
 
@@ -454,104 +447,27 @@ public void p2pContext(
             break;
         }
 
-        if (cctx.discovery().node(sndId) == null) {
-            // Sender has left.
+        // Sender has left.
+        if (cctx.discovery().node(sndId) == null)
             deps.remove(ldrId, depInfo);
 
-            allParticipants.remove(sndId);
-        }
-
         if (participants != null) {
             for (UUID id : participants.keySet()) {
                 if (cctx.discovery().node(id) == null) {
                     if (depInfo.removeParticipant(id))
                         deps.remove(ldrId, depInfo);
-
-                    allParticipants.remove(id);
                 }
             }
         }
     }
 
     /**
-     * Adds deployment info to deployment contexts queue.
+     * Gets a local class loader id.
      *
-     * @param info Info to add.
-     */
-    public void addDeploymentContext(GridDeploymentInfo info) {
-        IgniteUuid ldrId = info.classLoaderId();
-
-        while (true) {
-            CachedDeploymentInfo depInfo = deps.get(ldrId);
-
-            if (depInfo == null) {
-                depInfo = new CachedDeploymentInfo<>(ldrId.globalId(), ldrId, info.userVersion(), info.deployMode(),
-                    info.participants());
-
-                CachedDeploymentInfo old = deps.putIfAbsent(ldrId, depInfo);
-
-                if (old != null)
-                    depInfo = old;
-                else
-                    break;
-            }
-
-            Map participants = info.participants();
-
-            if (participants != null) {
-                if (!depInfo.addParticipants(participants, cctx)) {
-                    deps.remove(ldrId, depInfo);
-
-                    continue;
-                }
-            }
-
-            break;
-        }
-    }
-
-    /**
-     * @param sndNodeId Sender node ID.
-     * @param sndLdrId Sender loader ID.
-     * @param participants Participants.
-     * @param locDepOwner {@code True} if local deployment owner.
-     * @return Added participants.
+     * @return Class loader UUID.
      */
-    @Nullable private Map addGlobalParticipants(UUID sndNodeId, IgniteUuid sndLdrId,
-        Map participants, boolean locDepOwner) {
-        Map added = null;
-
-        if (participants != null) {
-            for (Map.Entry entry : participants.entrySet()) {
-                UUID nodeId = entry.getKey();
-                IgniteUuid ldrVer = entry.getValue();
-
-                if (!ldrVer.equals(allParticipants.get(nodeId))) {
-                    allParticipants.put(nodeId, ldrVer);
-
-                    if (added == null)
-                        added = IgniteUtils.newHashMap(participants.size());
-
-                    added.put(nodeId, ldrVer);
-                }
-            }
-        }
-
-        if (locDepOwner) {
-            assert sndNodeId != null;
-            assert sndLdrId != null;
-
-            if (!sndLdrId.equals(allParticipants.get(sndNodeId))) {
-                allParticipants.put(sndNodeId, sndLdrId);
-
-                if (added == null)
-                    added = U.newHashMap(1);
-
-                added.put(sndNodeId, sndLdrId);
-            }
-        }
-
-        return added;
+    public IgniteUuid locLoaderId() {
+        return localLdrId.get();
     }
 
     /**
@@ -730,25 +646,42 @@ private void checkDeploymentIsCorrect(GridDeploymentInfoBean deployment, GridCac
         if (cctx.gridConfig().getDeploymentMode() == CONTINUOUS)
             return null;
 
+        IgniteUuid localLdrId0 = localLdrId.get();
+
+        if (localLdrId0 != null) {
+            GridDeploymentInfoBean deploymentInfoBean = getDepBean(deps.get(localLdrId.get()));
+
+            if (deploymentInfoBean != null)
+                return deploymentInfoBean;
+        }
+
         for (CachedDeploymentInfo d : deps.values()) {
-            if (cctx.discovery().node(d.senderId()) == null)
-                // Sender has left.
-                continue;
+            GridDeploymentInfoBean deploymentInfoBean = getDepBean(d);
+            if (deploymentInfoBean != null)
+                return deploymentInfoBean;
+        }
+
+        return null;
+    }
 
-            // Participants map.
-            Map participants = d.participants();
+    @Nullable private GridDeploymentInfoBean getDepBean(CachedDeploymentInfo d) {
+        if (d == null || cctx.discovery().node(d.senderId()) == null)
+            // Sender has left.
+            return null;
 
-            if (participants != null) {
-                for (UUID id : participants.keySet()) {
-                    if (cctx.discovery().node(id) != null) {
-                        // At least 1 participant is still in the grid.
-                        return new GridDeploymentInfoBean(
-                            d.loaderId(),
-                            d.userVersion(),
-                            d.mode(),
-                            participants
-                        );
-                    }
+        // Participants map.
+        Map participants = d.participants();
+
+        if (participants != null) {
+            for (UUID id : participants.keySet()) {
+                if (cctx.discovery().node(id) != null) {
+                    // At least 1 participant is still in the grid.
+                    return new GridDeploymentInfoBean(
+                        d.loaderId(),
+                        d.userVersion(),
+                        d.mode(),
+                        participants
+                    );
                 }
             }
         }
@@ -762,7 +695,6 @@ private void checkDeploymentIsCorrect(GridDeploymentInfoBean deployment, GridCac
         X.println(">>> Cache deployment manager memory stats [igniteInstanceName=" + cctx.igniteInstanceName() + ']');
         X.println(">>>   Undeploys: " + undeploys.size());
         X.println(">>>   Cached deployments: " + deps.size());
-        X.println(">>>   All participants: " + allParticipants.size());
     }
 
     /**
@@ -955,8 +887,7 @@ private CachedDeploymentInfo(UUID sndId, IgniteUuid ldrId, String userVer, Deplo
             this.ldrId = ldrId;
             this.userVer = userVer;
             this.depMode = depMode;
-            this.participants = participants == null || participants.isEmpty() ? null :
-                new ConcurrentLinkedHashMap<>(participants);
+            this.participants = F.isEmpty(participants) ? null : new ConcurrentLinkedHashMap<>(participants);
         }
 
         /**
@@ -974,8 +905,8 @@ boolean addParticipants(Map newParticipants, GridCacheSharedCo
                 for (Map.Entry e : newParticipants.entrySet()) {
                     assert e.getKey().equals(e.getValue().globalId());
 
-                    if (cctx.discovery().node(e.getKey()) != null)
-                        // Participant has left.
+                    // Participant has left.
+                    if (cctx.discovery().node(e.getKey()) == null)
                         continue;
 
                     if (participants == null)
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java
index c095ebe27fdc7..e17d4cd16742c 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java
@@ -313,9 +313,9 @@ public void addEvent(
             Object oldVal0;
 
             try {
-                key0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(key, keepBinary, false);
-                val0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(newVal, keepBinary, false);
-                oldVal0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(oldVal, keepBinary, false);
+                key0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(key, keepBinary, false, null);
+                val0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(newVal, keepBinary, false, null);
+                oldVal0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(oldVal, keepBinary, false, null);
             }
             catch (Exception e) {
                 if (!cctx.cacheObjectContext().kernalContext().cacheObjects().isBinaryEnabled(cctx.config()))
@@ -330,9 +330,9 @@ public void addEvent(
 
                 forceKeepBinary = true;
 
-                key0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(key, true, false);
-                val0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(newVal, true, false);
-                oldVal0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(oldVal, true, false);
+                key0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(key, true, false, null);
+                val0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(newVal, true, false, null);
+                oldVal0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(oldVal, true, false, null);
             }
 
             IgniteUuid xid = tx == null ? null : tx.xid();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java
index b38b8944f6cfe..5f39b50ac2b3b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java
@@ -1520,7 +1520,7 @@ else if (res.resultType() == ResultType.LOCKED) {
                 intercept = !skipInterceptor(explicitVer);
 
             if (intercept) {
-                val0 = cctx.unwrapBinaryIfNeeded(val, keepBinary, false);
+                val0 = cctx.unwrapBinaryIfNeeded(val, keepBinary, false, null);
 
                 CacheLazyEntry e = new CacheLazyEntry(cctx, key, old, keepBinary);
 
@@ -2005,7 +2005,7 @@ else if (ttl == CU.TTL_NOT_CHANGED)
                         updateTtl(expiryPlc);
 
                     Object val = retval ?
-                        cctx.cacheObjectContext().unwrapBinaryIfNeeded(CU.value(old, cctx, false), keepBinary, false)
+                        cctx.cacheObjectContext().unwrapBinaryIfNeeded(CU.value(old, cctx, false), keepBinary, false, null)
                         : null;
 
                     return new T3<>(false, val, null);
@@ -2244,7 +2244,7 @@ else if (op == DELETE && transformOp)
         return new GridTuple3<>(res,
             cctx.unwrapTemporary(interceptorRes != null ?
                 interceptorRes.get2() :
-                cctx.cacheObjectContext().unwrapBinaryIfNeeded(old, keepBinary, false)),
+                cctx.cacheObjectContext().unwrapBinaryIfNeeded(old, keepBinary, false, null)),
             invokeRes);
     }
 
@@ -2565,7 +2565,7 @@ else if (updateMetrics && REMOVE_NO_VAL.equals(updateRes.outcome())
         if (val != null)
             return val;
 
-        return cctx.unwrapBinaryIfNeeded(cacheObj, keepBinary, cpy);
+        return cctx.unwrapBinaryIfNeeded(cacheObj, keepBinary, cpy, null);
     }
 
     /**
@@ -5726,12 +5726,12 @@ private LazyValueEntry(KeyCacheObject key, boolean keepBinary) {
 
         /** {@inheritDoc} */
         @Override public K getKey() {
-            return (K)cctx.cacheObjectContext().unwrapBinaryIfNeeded(key, keepBinary, true);
+            return (K)cctx.cacheObjectContext().unwrapBinaryIfNeeded(key, keepBinary, true, null);
         }
 
         /** {@inheritDoc} */
         @Override public V getValue() {
-            return (V)cctx.cacheObjectContext().unwrapBinaryIfNeeded(peekVisibleValue(), keepBinary, true);
+            return (V)cctx.cacheObjectContext().unwrapBinaryIfNeeded(peekVisibleValue(), keepBinary, true, null);
         }
 
         /** {@inheritDoc} */
@@ -6430,7 +6430,7 @@ else if (newSysTtl == CU.TTL_ZERO) {
             }
 
             if (intercept && (conflictVer == null || !skipInterceptorOnConflict)) {
-                Object updated0 = cctx.unwrapBinaryIfNeeded(updated, keepBinary, false);
+                Object updated0 = cctx.unwrapBinaryIfNeeded(updated, keepBinary, false, null);
 
                 CacheLazyEntry interceptEntry =
                     new CacheLazyEntry<>(cctx, entry.key, null, oldVal, null, keepBinary);
@@ -6781,6 +6781,12 @@ private IgniteBiTuple runEntryProcessor(CacheInvokeEntry res0 = res.error() == null ?
-                    CacheInvokeResult.fromResult(ctx.cacheObjectContext().unwrapBinaryIfNeeded(res.result(), true, false)) :
+                    CacheInvokeResult.fromResult(ctx.cacheObjectContext().unwrapBinaryIfNeeded(res.result(), true, false, null)) :
                     CacheInvokeResult.fromError(res.error());
 
-                map0.put(ctx.cacheObjectContext().unwrapBinaryIfNeeded(res.key(), true, false), res0);
+                map0.put(ctx.cacheObjectContext().unwrapBinaryIfNeeded(res.key(), true, false, null), res0);
             }
 
             v = map0;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java
index a8ead2c0dfd27..048179af505c6 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java
@@ -805,8 +805,8 @@ private Iterator cacheData(boolean primary, boolean backup, Affi
                     KeyCacheObject key = nextRow.key();
                     CacheObject val = nextRow.value();
 
-                    Object key0 = cctx.unwrapBinaryIfNeeded(key, keepBinary, false);
-                    Object val0 = cctx.unwrapBinaryIfNeeded(val, keepBinary, false);
+                    Object key0 = cctx.unwrapBinaryIfNeeded(key, keepBinary, false, null);
+                    Object val0 = cctx.unwrapBinaryIfNeeded(val, keepBinary, false, null);
 
                     next = new CacheEntryImplEx(key0, val0, nextRow.version());
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/KeyCacheObjectImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/KeyCacheObjectImpl.java
index 553c6aae7d8af..1d8595f3fb47b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/KeyCacheObjectImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/KeyCacheObjectImpl.java
@@ -96,6 +96,11 @@ public KeyCacheObjectImpl(Object val, byte[] valBytes, int part) {
 
     /** {@inheritDoc} */
     @Nullable @Override public  T value(CacheObjectValueContext ctx, boolean cpy) {
+        return value(ctx, cpy, null);
+    }
+
+    /** {@inheritDoc} */
+    @Nullable @Override public  T value(CacheObjectValueContext ctx, boolean cpy, ClassLoader ldr) {
         assert val != null;
 
         return (T)val;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java
index 231d8e6e8316a..173888bd0025e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java
@@ -484,7 +484,7 @@ private void commitIfLocked() throws IgniteCheckedException {
 
                     if (!near() && !local() && onePhaseCommit()) {
                         if (needReturnValue()) {
-                            ret = new GridCacheReturn(null, cctx.localNodeId().equals(otherNodeId()), true, null, true);
+                            ret = new GridCacheReturn(null, cctx.localNodeId().equals(otherNodeId()), true, null, null, true);
 
                             UUID origNodeId = otherNodeId(); // Originating node.
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/CacheDistributedGetFutureAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/CacheDistributedGetFutureAdapter.java
index 15730b2574a49..96147362d5b2e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/CacheDistributedGetFutureAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/CacheDistributedGetFutureAdapter.java
@@ -43,6 +43,7 @@
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearGetRequest;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearGetResponse;
 import org.apache.ignite.internal.util.future.GridFutureAdapter;
+import org.apache.ignite.internal.util.tostring.GridToStringExclude;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
 import org.apache.ignite.internal.util.typedef.C1;
 import org.apache.ignite.internal.util.typedef.CIX1;
@@ -128,6 +129,10 @@ public abstract class CacheDistributedGetFutureAdapter
     /** */
     protected final boolean recovery;
 
+    /** Deployment class loader id which will be used for deserialization of entries on a distributed task. */
+    @GridToStringExclude
+    protected final IgniteUuid deploymentLdrId;
+
     /** */
     protected Map>> invalidNodes = Collections.emptyMap();
 
@@ -175,6 +180,7 @@ protected CacheDistributedGetFutureAdapter(
         this.needVer = needVer;
         this.keepCacheObjects = keepCacheObjects;
         this.recovery = recovery;
+        this.deploymentLdrId = U.contextDeploymentClassLoaderId(cctx.kernalContext());
 
         futId = IgniteUuid.randomUuid();
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxAbstractEnlistFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxAbstractEnlistFuture.java
index 8d45780db4dce..21b895d9d3698 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxAbstractEnlistFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxAbstractEnlistFuture.java
@@ -187,6 +187,10 @@ public abstract class GridDhtTxAbstractEnlistFuture extends GridCacheFutureAd
     /** Map for tracking nodes to which first request was already sent in order to send smaller subsequent requests. */
     private final Set firstReqSent = new HashSet<>();
 
+    /** Deployment class loader id which will be used for deserialization of entries on a distributed task. */
+    @GridToStringExclude
+    protected final IgniteUuid deploymentLdrId;
+
     /**
      * @param nearNodeId Near node ID.
      * @param nearLockVer Near lock version.
@@ -227,6 +231,7 @@ protected GridDhtTxAbstractEnlistFuture(UUID nearNodeId,
         this.tx = tx;
         this.filter = filter;
         this.keepBinary = keepBinary;
+        this.deploymentLdrId = U.contextDeploymentClassLoaderId(cctx.kernalContext());
 
         lockVer = tx.xidVersion();
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxEnlistFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxEnlistFuture.java
index 1b14cf359a110..5f7a3332dd790 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxEnlistFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxEnlistFuture.java
@@ -32,6 +32,7 @@
 import org.apache.ignite.internal.processors.query.EnlistOperation;
 import org.apache.ignite.internal.processors.query.UpdateSourceIterator;
 import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.lang.IgniteUuid;
 import org.jetbrains.annotations.Nullable;
 
@@ -130,7 +131,7 @@ public GridDhtTxEnlistFuture(UUID nearNodeId,
                 res.addEntryProcessResult(cctx, key, null, invokeRes.result(), invokeRes.error(), keepBinary);
         }
         else if (needRes)
-            res.set(cctx, txRes.prevValue(), txRes.success(), keepBinary);
+            res.set(cctx, txRes.prevValue(), txRes.success(), keepBinary, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId));
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java
index 0aa3579622c46..5afa379aad4b2 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java
@@ -226,6 +226,10 @@ public final class GridDhtTxPrepareFuture extends GridCacheCompoundFuture invokeEntry = new CacheInvokeEntry<>(key, val,
                                     txEntry.cached().version(), keepBinary, txEntry.cached());
 
+                                EntryProcessor processor = t.get1();
+
                                 IgniteThread.onEntryProcessorEntered(false);
 
-                                try {
-                                    EntryProcessor processor = t.get1();
+                                if (cctx.kernalContext().deploy().enabled() &&
+                                    cctx.kernalContext().deploy().isGlobalLoader(processor.getClass().getClassLoader())) {
+                                    U.restoreDeploymentContext(cctx.kernalContext(), cctx.kernalContext()
+                                        .deploy().getClassLoaderId(processor.getClass().getClassLoader()));
+                                }
 
+                                try {
                                     procRes = processor.process(invokeEntry, t.get2());
 
                                     val = cacheCtx.toCacheObject(invokeEntry.getValue(true));
@@ -495,7 +506,7 @@ private void onEntriesLocked() {
                             }
                         }
                         else if (retVal)
-                            ret.value(cacheCtx, val, keepBinary);
+                            ret.value(cacheCtx, val, keepBinary, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId));
                     }
 
                     if (hasFilters && !cacheCtx.isAll(cached, txEntry.filters())) {
@@ -1237,7 +1248,7 @@ private IgniteTxOptimisticCheckedException versionCheckError(IgniteTxEntry entry
         GridCacheContext cctx = entry.context();
 
         try {
-            Object key = cctx.unwrapBinaryIfNeeded(entry.key(), entry.keepBinary(), false);
+            Object key = cctx.unwrapBinaryIfNeeded(entry.key(), entry.keepBinary(), false, null);
 
             assert key != null : entry.key();
 
@@ -1253,7 +1264,7 @@ private IgniteTxOptimisticCheckedException versionCheckError(IgniteTxEntry entry
 
             CacheObject cacheVal = entryEx != null ? entryEx.rawGet() : null;
 
-            Object val = cacheVal != null ? cctx.unwrapBinaryIfNeeded(cacheVal, entry.keepBinary(), false) : null;
+            Object val = cacheVal != null ? cctx.unwrapBinaryIfNeeded(cacheVal, entry.keepBinary(), false, null) : null;
 
             if (val != null) {
                 if (S.includeSensitive())
@@ -1980,8 +1991,14 @@ void onResult(GridDhtTxPrepareResponse res) {
                                         null, null, EVT_CACHE_REBALANCE_OBJECT_LOADED, info.value(), true, null,
                                         false, null, null, null, false);
 
-                                if (retVal && !invoke)
-                                    ret.value(cacheCtx, info.value(), false);
+                                if (retVal && !invoke) {
+                                    ret.value(
+                                        cacheCtx,
+                                        info.value(),
+                                        false,
+                                        U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)
+                                    );
+                                }
                             }
 
                             break;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedGetFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedGetFuture.java
index e3ba39ab8fbfe..84c3a3af71dea 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedGetFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedGetFuture.java
@@ -567,7 +567,8 @@ private boolean localGet(AffinityTopologyVersion topVer, KeyCacheObject key, int
                         ver,
                         0,
                         0,
-                        needVer);
+                        needVer,
+                        U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId));
 
                     return true;
                 }
@@ -650,7 +651,8 @@ private Map createResultMap(Collection infos) {
                     false,
                     needVer ? info.version() : null,
                     0,
-                    0);
+                    0,
+                    U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId));
             }
 
             return map;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedSingleGetFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedSingleGetFuture.java
index 57cfbd1b4fb0d..1dcf4e4f0c5e4 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedSingleGetFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedSingleGetFuture.java
@@ -57,6 +57,7 @@
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.util.lang.GridPlainRunnable;
+import org.apache.ignite.internal.util.tostring.GridToStringExclude;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.CU;
@@ -145,6 +146,10 @@ public class GridPartitionedSingleGetFuture extends GridCacheFutureAdapter IgniteInternalFuture> invoke0(
 
                         if (invokeRes.result() != null)
                             res = CacheInvokeResult.fromResult((T)ctx.unwrapBinaryIfNeeded(invokeRes.result(),
-                                keepBinary, false));
+                                keepBinary, false, null));
                     }
 
                     return res;
@@ -1566,7 +1566,8 @@ private IgniteInternalFuture> getAllAsync0(@Nullable Collection> getAllAsync0(@Nullable Collection> loadAsync(
                                     row.version(),
                                     0,
                                     0,
-                                    needVer);
+                                    needVer,
+                                    U.deploymentClassLoader(ctx.kernalContext(), U.contextDeploymentClassLoaderId(ctx.kernalContext())));
 
                                 if (evt) {
                                     ctx.events().readEvent(key,
@@ -651,7 +652,8 @@ public final IgniteInternalFuture> loadAsync(
                                             ver,
                                             0,
                                             0,
-                                            needVer);
+                                            needVer,
+                                            U.deploymentClassLoader(ctx.kernalContext(), U.contextDeploymentClassLoaderId(ctx.kernalContext())));
                                     }
                                 }
                                 else
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetFuture.java
index 1c7fa8151eeaa..4b4f194b4cd04 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetFuture.java
@@ -586,13 +586,13 @@ private void addResult(KeyCacheObject key, CacheObject v, GridCacheVersion ver)
             add(new GridFinishedFuture<>(Collections.singletonMap(key0, val0)));
         }
         else {
-            K key0 = (K)cctx.unwrapBinaryIfNeeded(key, !deserializeBinary, false);
+            K key0 = (K)cctx.unwrapBinaryIfNeeded(key, !deserializeBinary, false, null);
             V val0 = needVer ?
                 (V)new EntryGetResult(!skipVals ?
-                    (V)cctx.unwrapBinaryIfNeeded(v, !deserializeBinary, false) :
+                    (V)cctx.unwrapBinaryIfNeeded(v, !deserializeBinary, false, null) :
                     (V)Boolean.TRUE, ver) :
                 !skipVals ?
-                    (V)cctx.unwrapBinaryIfNeeded(v, !deserializeBinary, false) :
+                    (V)cctx.unwrapBinaryIfNeeded(v, !deserializeBinary, false, null) :
                     (V)Boolean.TRUE;
 
             add(new GridFinishedFuture<>(Collections.singletonMap(key0, val0)));
@@ -676,7 +676,8 @@ private Map loadEntries(
                         false,
                         needVer ? info.version() : null,
                         0,
-                        0);
+                        0,
+                        U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId));
                 }
                 catch (GridCacheEntryRemovedException ignore) {
                     if (log.isDebugEnabled())
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java
index 8b45677332111..f1f99d37e507c 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java
@@ -1493,7 +1493,13 @@ private boolean enlistWriteEntry(GridCacheContext cacheCtx,
                         entryProcessor != null ? TRANSFORM : old != null ? UPDATE : CREATE;
 
                     if (old != null && hasFilters && !filter(entry.context(), cacheKey, old, filter)) {
-                        ret.set(cacheCtx, old, false, keepBinary);
+                        ret.set(
+                            cacheCtx,
+                            old,
+                            false,
+                            keepBinary,
+                            U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)
+                        );
 
                         if (!readCommitted()) {
                             if (optimistic() && serializable()) {
@@ -1581,7 +1587,7 @@ private boolean enlistWriteEntry(GridCacheContext cacheCtx,
                                 assert txEntry.op() != TRANSFORM : txEntry;
 
                                 if (retval)
-                                    ret.set(cacheCtx, null, true, keepBinary);
+                                    ret.set(cacheCtx, null, true, keepBinary, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId));
                                 else
                                     ret.success(true);
                             }
@@ -1594,7 +1600,7 @@ private boolean enlistWriteEntry(GridCacheContext cacheCtx,
                             }
 
                             if (retval && !transform)
-                                ret.set(cacheCtx, old, true, keepBinary);
+                                ret.set(cacheCtx, old, true, keepBinary, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId));
                             else {
                                 if (txEntry.op() == TRANSFORM) {
                                     GridCacheVersion ver;
@@ -1622,7 +1628,7 @@ private boolean enlistWriteEntry(GridCacheContext cacheCtx,
                     // Pessimistic.
                     else {
                         if (retval && !transform)
-                            ret.set(cacheCtx, old, true, keepBinary);
+                            ret.set(cacheCtx, old, true, keepBinary, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId));
                         else
                             ret.success(true);
                     }
@@ -1648,7 +1654,7 @@ private boolean enlistWriteEntry(GridCacheContext cacheCtx,
 
             if (!del) {
                 if (hasFilters && !filter(entry.context(), cacheKey, v, filter)) {
-                    ret.set(cacheCtx, v, false, keepBinary);
+                    ret.set(cacheCtx, v, false, keepBinary, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId));
 
                     return loadMissed;
                 }
@@ -1702,7 +1708,7 @@ private boolean enlistWriteEntry(GridCacheContext cacheCtx,
                 txEntry.markValid();
 
                 if (retval && !transform)
-                    ret.set(cacheCtx, v, true, keepBinary);
+                    ret.set(cacheCtx, v, true, keepBinary, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId));
                 else
                     ret.success(true);
             }
@@ -1924,7 +1930,7 @@ private  IgniteInternalFuture removeAllAsync0(
                         try {
                             txFut.get();
 
-                            return new GridCacheReturn(cacheCtx, true, keepBinary,
+                            return new GridCacheReturn(cacheCtx, true, keepBinary, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId),
                                 implicitRes.value(), implicitRes.success());
                         }
                         catch (IgniteCheckedException | RuntimeException e) {
@@ -2147,7 +2153,7 @@ private IgniteInternalFuture updateAsync(GridCacheContext cache
                         val = cacheCtx.unwrapInvokeResult((Map)val, keepBinary);
                     }
 
-                    return new GridCacheReturn(cacheCtx, true, keepBinary, val, futRes.success());
+                    return new GridCacheReturn(cacheCtx, true, keepBinary, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId), val, futRes.success());
                 }
             }));
         }
@@ -2313,7 +2319,7 @@ public  IgniteInternalFuture> getAllAsync(
                             K keyVal = (K)
                                 (keepCacheObjects ? cacheKey :
                                     cacheCtx.cacheObjectContext().unwrapBinaryIfNeeded(cacheKey, !deserializeBinary,
-                                        true));
+                                        true, null));
 
                             if (retMap.containsKey(keyVal))
                                 // We already have a return value.
@@ -2391,7 +2397,8 @@ public  IgniteInternalFuture> getAllAsync(
                                             readVer,
                                             0,
                                             0,
-                                            needVer);
+                                            needVer,
+                                            U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId));
 
                                         if (readVer != null)
                                             txEntry.entryReadVersion(readVer);
@@ -2480,7 +2487,8 @@ public  IgniteInternalFuture> getAllAsync(
                                                     getRes.version(),
                                                     0,
                                                     0,
-                                                    needVer);
+                                                    needVer,
+                                                    U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId));
                                             }
 
                                             return Collections.emptyMap();
@@ -2541,7 +2549,7 @@ public  IgniteInternalFuture> getAllAsync(
 
                             K keyVal = (K)(keepCacheObjects ? cacheKey
                                 : cacheCtx.cacheObjectContext()
-                                .unwrapBinaryIfNeeded(cacheKey, !deserializeBinary, false));
+                                .unwrapBinaryIfNeeded(cacheKey, !deserializeBinary, false, null));
 
                             if (retMap.containsKey(keyVal))
                                 it.remove();
@@ -2685,7 +2693,7 @@ private  Collection enlistRead(
                             }
 
                             cacheCtx.addResult(map, key, val, skipVals, keepCacheObjects, deserializeBinary, false,
-                                ver, 0, 0);
+                                ver, 0, 0, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId));
                         }
                     }
                     else {
@@ -2751,7 +2759,8 @@ private  Collection enlistRead(
                                         readVer,
                                         0,
                                         0,
-                                        needVer);
+                                        needVer,
+                                        U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId));
                                 }
                                 else
                                     missed.put(key, txEntry.cached().version());
@@ -2831,7 +2840,8 @@ private  Collection enlistRead(
                                         readVer,
                                         0,
                                         0,
-                                        needVer);
+                                        needVer,
+                                        U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId));
                                 }
                                 else
                                     missed.put(key, ver);
@@ -2944,7 +2954,7 @@ private IgniteInternalFuture loadMissing(
                         assert !hasFilters && !retval;
                         assert val == null || Boolean.TRUE.equals(val) : val;
 
-                        ret.set(cacheCtx, null, val != null, keepBinary);
+                        ret.set(cacheCtx, null, val != null, keepBinary, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId));
                     }
                     else {
                         CacheObject cacheVal = cacheCtx.toCacheObject(val);
@@ -2983,7 +2993,7 @@ private IgniteInternalFuture loadMissing(
                             else
                                 success = true;
 
-                            ret.set(cacheCtx, cacheVal, success, keepBinary);
+                            ret.set(cacheCtx, cacheVal, success, keepBinary, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId));
                         }
                     }
                 }
@@ -3044,7 +3054,7 @@ private IgniteInternalFuture optimisticPutFuture(
                                 res = cacheCtx.unwrapInvokeResult((Map)res, keepBinary);
                             }
 
-                            return new GridCacheReturn(cacheCtx, true, keepBinary, res, implicitRes.success());
+                            return new GridCacheReturn(cacheCtx, true, keepBinary, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId), res, implicitRes.success());
                         }
                         catch (IgniteCheckedException | RuntimeException e) {
                             if (!(e instanceof NodeStoppingException))
@@ -4077,6 +4087,9 @@ public IgniteInternalFuture prepareNearTxLocal() {
 
         mapExplicitLocks();
 
+        if (cctx.kernalContext().deploy().enabled() && deploymentLdrId != null)
+            U.restoreDeploymentContext(cctx.kernalContext(), deploymentLdrId);
+
         fut.prepare();
 
         return fut;
@@ -4950,7 +4963,8 @@ private  IgniteInternalFuture> checkMissed(
                                     false,
                                     needVer ? loadVer : null,
                                     0,
-                                    0);
+                                    0,
+                                    U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId));
                             }
                         }
                         else {
@@ -4974,7 +4988,8 @@ private  IgniteInternalFuture> checkMissed(
                                     false,
                                     needVer ? loadVer : null,
                                     0,
-                                    0);
+                                    0,
+                                    U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId));
                             }
                         }
                     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/consistency/GridNearReadRepairCheckOnlyFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/consistency/GridNearReadRepairCheckOnlyFuture.java
index e04ea2704b42b..63638e103f496 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/consistency/GridNearReadRepairCheckOnlyFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/consistency/GridNearReadRepairCheckOnlyFuture.java
@@ -138,7 +138,8 @@ public  IgniteInternalFuture single() {
                         getRes.version(),
                         0,
                         0,
-                        needVer);
+                        needVer,
+                        null);
                 }
 
                 if (skipVals) {
@@ -179,7 +180,8 @@ public  IgniteInternalFuture> multi() {
                         getRes.version(),
                         0,
                         0,
-                        needVer);
+                        needVer,
+                        null);
                 }
 
                 return map;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/consistency/GridNearReadRepairFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/consistency/GridNearReadRepairFuture.java
index f546fd76ec086..21c748826421b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/consistency/GridNearReadRepairFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/consistency/GridNearReadRepairFuture.java
@@ -132,7 +132,8 @@ private void recordConsistencyViolation(Map fixe
                 false,
                 null,
                 0,
-                0);
+                0,
+                null);
         }
 
         Map> originalMap = new HashMap<>();
@@ -162,7 +163,8 @@ private void recordConsistencyViolation(Map fixe
                         false,
                         null,
                         0,
-                        0);
+                        0,
+                        null);
                 }
             }
         }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java
index e430a7183d5d9..cde847ba3207c 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java
@@ -428,7 +428,8 @@ private Map getAllInternal(@Nullable Collection keys,
                                 row.version(),
                                 0,
                                 0,
-                                needVer);
+                                needVer,
+                                null);
 
                             if (ctx.statisticsEnabled() && !skipVals)
                                 metrics0().onRead(true);
@@ -510,7 +511,8 @@ private Map getAllInternal(@Nullable Collection keys,
                                             true,
                                             null,
                                             0,
-                                            0);
+                                            0,
+                                            null);
                                     }
                                     else
                                         success = false;
@@ -1011,8 +1013,14 @@ else if (res == null)
         if (err != null)
             throw err;
 
-        Object ret = res == null ? null : rawRetval ? new GridCacheReturn(ctx, true, keepBinary, res.get2(), res.get1()) :
-            (retval || op == TRANSFORM) ? res.get2() : res.get1();
+        Object ret = res == null ? null : rawRetval ? new GridCacheReturn(
+            ctx,
+            true,
+            keepBinary,
+            U.deploymentClassLoader(ctx.kernalContext(), U.contextDeploymentClassLoaderId(ctx.kernalContext())),
+            res.get2(),
+            res.get1()
+        ) : (retval || op == TRANSFORM) ? res.get2() : res.get1();
 
         if (op == TRANSFORM && ret == null)
             ret = Collections.emptyMap();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java
index 5462ed20e054a..f36ebb2caf33d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java
@@ -1239,8 +1239,8 @@ protected void runQuery(GridCacheQueryInfo qryInfo) {
                         V val0 = null;
 
                         if (readEvt && cctx.gridEvents().hasListener(EVT_CACHE_QUERY_OBJECT_READ)) {
-                            key0 = (K)CacheObjectUtils.unwrapBinaryIfNeeded(objCtx, key, qry.keepBinary(), false);
-                            val0 = (V)CacheObjectUtils.unwrapBinaryIfNeeded(objCtx, val, qry.keepBinary(), false);
+                            key0 = (K)CacheObjectUtils.unwrapBinaryIfNeeded(objCtx, key, qry.keepBinary(), false, null);
+                            val0 = (V)CacheObjectUtils.unwrapBinaryIfNeeded(objCtx, val, qry.keepBinary(), false, null);
 
                             switch (type) {
                                 case SQL:
@@ -1289,9 +1289,9 @@ protected void runQuery(GridCacheQueryInfo qryInfo) {
 
                         if (rdc != null) {
                             if (key0 == null)
-                                key0 = (K)CacheObjectUtils.unwrapBinaryIfNeeded(objCtx, key, qry.keepBinary(), false);
+                                key0 = (K)CacheObjectUtils.unwrapBinaryIfNeeded(objCtx, key, qry.keepBinary(), false, null);
                             if (val0 == null)
-                                val0 = (V)CacheObjectUtils.unwrapBinaryIfNeeded(objCtx, val, qry.keepBinary(), false);
+                                val0 = (V)CacheObjectUtils.unwrapBinaryIfNeeded(objCtx, val, qry.keepBinary(), false, null);
 
                             Cache.Entry entry = new CacheEntryImpl(key0, val0);
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryEvent.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryEvent.java
index e66455499318a..58aadb13c28b8 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryEvent.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryEvent.java
@@ -65,17 +65,17 @@ public int partitionId() {
 
     /** {@inheritDoc} */
     @Override public K getKey() {
-        return (K)cctx.cacheObjectContext().unwrapBinaryIfNeeded(e.key(), e.isKeepBinary(), false);
+        return (K)cctx.cacheObjectContext().unwrapBinaryIfNeeded(e.key(), e.isKeepBinary(), false, null);
     }
 
     /** {@inheritDoc} */
     @Override public V getValue() {
-        return (V)cctx.cacheObjectContext().unwrapBinaryIfNeeded(e.value(), e.isKeepBinary(), false);
+        return (V)cctx.cacheObjectContext().unwrapBinaryIfNeeded(e.value(), e.isKeepBinary(), false, null);
     }
 
     /** {@inheritDoc} */
     @Override public V getOldValue() {
-        return (V)cctx.cacheObjectContext().unwrapBinaryIfNeeded(e.oldValue(), e.isKeepBinary(), false);
+        return (V)cctx.cacheObjectContext().unwrapBinaryIfNeeded(e.oldValue(), e.isKeepBinary(), false, null);
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/store/GridCacheStoreManagerAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/store/GridCacheStoreManagerAdapter.java
index ad7fa076e48c9..24ac8dc8675ca 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/store/GridCacheStoreManagerAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/store/GridCacheStoreManagerAdapter.java
@@ -311,7 +311,7 @@ private CacheStore cacheStoreWrapper(GridKernalContext ctx,
                 // Never load internal keys from store as they are never persisted.
                 return null;
 
-            Object storeKey = cctx.unwrapBinaryIfNeeded(key, !convertBinary());
+            Object storeKey = cctx.unwrapBinaryIfNeeded(key, !convertBinary(), null);
 
             if (log.isDebugEnabled())
                 log.debug(S.toString("Loading value from store for key",
@@ -446,7 +446,7 @@ private void loadAllFromStore(@Nullable IgniteInternalTx tx,
             Collection keys0 = F.viewReadOnly(keys,
                 new C1() {
                     @Override public Object apply(KeyCacheObject key) {
-                        return cctx.unwrapBinaryIfNeeded(key, !convertBinary());
+                        return cctx.unwrapBinaryIfNeeded(key, !convertBinary(), null);
                     }
                 });
 
@@ -568,8 +568,8 @@ private void loadAllFromStore(@Nullable IgniteInternalTx tx,
             if (key instanceof GridCacheInternal)
                 return true;
 
-            Object key0 = cctx.unwrapBinaryIfNeeded(key, !convertBinary());
-            Object val0 = cctx.unwrapBinaryIfNeeded(val, !convertBinary());
+            Object key0 = cctx.unwrapBinaryIfNeeded(key, !convertBinary(), null);
+            Object val0 = cctx.unwrapBinaryIfNeeded(val, !convertBinary(), null);
 
             if (log.isDebugEnabled()) {
                 log.debug(S.toString("Storing value in cache store",
@@ -680,7 +680,7 @@ private void loadAllFromStore(@Nullable IgniteInternalTx tx,
             if (key instanceof GridCacheInternal)
                 return false;
 
-            Object key0 = cctx.unwrapBinaryIfNeeded(key, !convertBinary());
+            Object key0 = cctx.unwrapBinaryIfNeeded(key, !convertBinary(), null);
 
             if (log.isDebugEnabled())
                 log.debug(S.toString("Removing value from cache store", "key", key0, true));
@@ -1200,8 +1200,8 @@ private void checkNext() {
 
                         Object v = locStore ? e.getValue() : e.getValue().get1();
 
-                        k = cctx.unwrapBinaryIfNeeded(k, !convertBinary());
-                        v = cctx.unwrapBinaryIfNeeded(v, !convertBinary());
+                        k = cctx.unwrapBinaryIfNeeded(k, !convertBinary(), null);
+                        v = cctx.unwrapBinaryIfNeeded(v, !convertBinary(), null);
 
                         if (rmvd != null && rmvd.contains(k))
                             continue;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxAdapter.java
index a69f17ee9cad5..5a552a92d93c9 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxAdapter.java
@@ -186,6 +186,10 @@ public abstract class IgniteTxAdapter extends GridMetadataAwareAdapter implement
     @GridToStringInclude
     protected long timeout;
 
+    /** Deployment class loader id which will be used for deserialization of entries on a distributed task. */
+    @GridToStringExclude
+    protected IgniteUuid deploymentLdrId;
+
     /** Invalidate flag. */
     protected volatile boolean invalidate;
 
@@ -323,6 +327,7 @@ protected IgniteTxAdapter(
         this.txSize = txSize;
         this.subjId = subjId;
         this.taskNameHash = taskNameHash;
+        this.deploymentLdrId = U.contextDeploymentClassLoaderId(cctx.kernalContext());
 
         nodeId = cctx.discovery().localNode().id();
 
@@ -1486,7 +1491,7 @@ protected final void batchStoreCommit(Iterable writeEntries) thro
                                         key,
                                         e.cached().rawGet(),
                                         e.keepBinary()),
-                                    cacheCtx.cacheObjectContext().unwrapBinaryIfNeeded(val, e.keepBinary(), false));
+                                    cacheCtx.cacheObjectContext().unwrapBinaryIfNeeded(val, e.keepBinary(), false, null));
 
                                 if (interceptorVal == null)
                                     continue;
@@ -1624,8 +1629,14 @@ protected IgniteBiTuple applyTransformClosures(
             return F.t(cacheCtx.writeThrough() ? RELOAD : DELETE, null);
 
         if (F.isEmpty(txEntry.entryProcessors())) {
-            if (ret != null)
-                ret.value(cacheCtx, txEntry.value(), txEntry.keepBinary());
+            if (ret != null) {
+                ret.value(
+                    cacheCtx,
+                    txEntry.value(),
+                    txEntry.keepBinary(),
+                    U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)
+                );
+            }
 
             return F.t(txEntry.op(), txEntry.value());
         }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalAdapter.java
index bd9ec342dc2b7..7b6cd7f10dc98 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalAdapter.java
@@ -1240,8 +1240,14 @@ protected final void postLockWrite(
                                 addInvokeResult(txEntry, v, ret, ver);
                             }
                         }
-                        else
-                            ret.value(cacheCtx, v, txEntry.keepBinary());
+                        else {
+                            ret.value(
+                                cacheCtx,
+                                v,
+                                txEntry.keepBinary(),
+                                U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId)
+                            );
+                        }
                     }
 
                     boolean pass = F.isEmpty(filter) || cacheCtx.isAll(cached, filter);
@@ -1312,6 +1318,9 @@ protected final void addInvokeResult(IgniteTxEntry txEntry,
 
         IgniteThread.onEntryProcessorEntered(true);
 
+        if (cctx.kernalContext().deploy().enabled() && deploymentLdrId != null)
+            U.restoreDeploymentContext(cctx.kernalContext(), deploymentLdrId);
+
         try {
             Object res = null;
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/version/GridCacheLazyPlainVersionedEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/version/GridCacheLazyPlainVersionedEntry.java
index 84dfae15d497e..7cd779318f3b3 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/version/GridCacheLazyPlainVersionedEntry.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/version/GridCacheLazyPlainVersionedEntry.java
@@ -82,7 +82,7 @@ public GridCacheLazyPlainVersionedEntry(GridCacheContext cctx,
     /** {@inheritDoc} */
     @Override public K key() {
         if (key == null)
-            key = (K)cctx.unwrapBinaryIfNeeded(keyObj, keepBinary);
+            key = (K)cctx.unwrapBinaryIfNeeded(keyObj, keepBinary, null);
 
         return key;
     }
@@ -100,7 +100,7 @@ public GridCacheLazyPlainVersionedEntry(GridCacheContext cctx,
      */
     public V value(boolean keepBinary) {
         if (val == null)
-            val = (V)cctx.unwrapBinaryIfNeeded(valObj, keepBinary, true);
+            val = (V)cctx.unwrapBinaryIfNeeded(valObj, keepBinary, true, null);
 
         return val;
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cacheobject/UserCacheObjectByteArrayImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cacheobject/UserCacheObjectByteArrayImpl.java
index aa4d5f573f2e6..dcb72b72dc437 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cacheobject/UserCacheObjectByteArrayImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cacheobject/UserCacheObjectByteArrayImpl.java
@@ -47,7 +47,12 @@ public UserCacheObjectByteArrayImpl(byte[] val) {
 
     /** {@inheritDoc} */
     @Nullable @Override public  T value(CacheObjectValueContext ctx, boolean cpy) {
-        return super.value(ctx, false); // Do not need copy since user value is not in cache.
+        return value(ctx, cpy, null);
+    }
+
+    /** {@inheritDoc} */
+    @Nullable @Override public  T value(CacheObjectValueContext ctx, boolean cpy, ClassLoader ldr) {
+        return super.value(ctx, false, ldr); // Do not need copy since user value is not in cache.
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerEntry.java
index 27a4520f908a4..f3fdc35481f35 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerEntry.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerEntry.java
@@ -85,7 +85,7 @@ public DataStreamerEntry(KeyCacheObject key, CacheObject val) {
     public  Map.Entry toEntry(final GridCacheContext ctx, final boolean keepBinary) {
         return new Map.Entry() {
             @Override public K getKey() {
-                return (K)ctx.cacheObjectContext().unwrapBinaryIfNeeded(key, keepBinary, false);
+                return (K)ctx.cacheObjectContext().unwrapBinaryIfNeeded(key, keepBinary, false, null);
             }
 
             @Override public V setValue(V val) {
@@ -93,7 +93,7 @@ public  Map.Entry toEntry(final GridCacheContext ctx, final boolean
             }
 
             @Override public V getValue() {
-                return (V)ctx.cacheObjectContext().unwrapBinaryIfNeeded(val, keepBinary, false);
+                return (V)ctx.cacheObjectContext().unwrapBinaryIfNeeded(val, keepBinary, false, null);
             }
         };
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobProcessor.java
index 3d52487ba7c8b..2833f38fa8fbf 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobProcessor.java
@@ -291,7 +291,7 @@ public class GridJobProcessor extends GridProcessorAdapter {
     };
 
     /** Current session. */
-    private final ThreadLocal currSess = new ThreadLocal<>();
+    private final ThreadLocal currSess = new ThreadLocal<>();
 
     /**
      * @param ctx Kernal context.
@@ -1377,7 +1377,7 @@ else if (jobAlwaysActivate) {
      *
      * @param ses Session.
      */
-    public void currentTaskSession(ComputeTaskSession ses) {
+    public void currentTaskSession(GridJobSessionImpl ses) {
         currSess.set(ses);
     }
 
@@ -1409,6 +1409,20 @@ public String currentTaskName() {
         return ses.getTaskName();
     }
 
+    /**
+     * Returns current deployment.
+     *
+     * @return Deployment.
+     */
+    public GridDeployment currentDeployment() {
+        GridJobSessionImpl session = currSess.get();
+
+        if (session == null || session.deployment() == null)
+            return null;
+
+        return session.deployment();
+    }
+
     /**
      * @param jobWorker Worker.
      * @return {@code True} if job has not been cancelled and should be activated.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java
index 6bdc6fab34138..4f1af3833be30 100755
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java
@@ -212,6 +212,7 @@
 import org.apache.ignite.internal.events.DiscoveryCustomEvent;
 import org.apache.ignite.internal.managers.communication.GridIoManager;
 import org.apache.ignite.internal.managers.communication.GridIoPolicy;
+import org.apache.ignite.internal.managers.deployment.GridDeployment;
 import org.apache.ignite.internal.managers.deployment.GridDeploymentInfo;
 import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager;
 import org.apache.ignite.internal.mxbean.IgniteStandardMXBean;
@@ -219,6 +220,7 @@
 import org.apache.ignite.internal.processors.cache.GridCacheAttributes;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
+import org.apache.ignite.internal.processors.cache.IgnitePeerToPeerClassLoadingException;
 import org.apache.ignite.internal.processors.cluster.BaselineTopology;
 import org.apache.ignite.internal.transactions.IgniteTxAlreadyCompletedCheckedException;
 import org.apache.ignite.internal.transactions.IgniteTxDuplicateKeyCheckedException;
@@ -7611,6 +7613,72 @@ public static boolean p2pLoader(ClassLoader ldr) {
         return ldr instanceof GridDeploymentInfo;
     }
 
+    /**
+     * Returns Deployment class loader id if method was invoked in the job context
+     * (it may be the context of a cache's operation which was triggered by the distributed job)
+     * or {@code null} if no context was found or Deployment is switched off.
+     *
+     * @param ctx Kernal context.
+     * @return Deployment class loader id or {@code null}.
+     */
+    public static IgniteUuid contextDeploymentClassLoaderId(GridKernalContext ctx) {
+        if (ctx == null || !ctx.deploy().enabled())
+            return null;
+
+        if (ctx.job() != null && ctx.job().currentDeployment() != null)
+            return ctx.job().currentDeployment().classLoaderId();
+
+        if (ctx.cache() != null && ctx.cache().context() != null)
+            return ctx.cache().context().deploy().locLoaderId();
+
+        return null;
+    }
+
+    /**
+     * Gets the deployment class loader matching the specified id, or {@code null}
+     * if the class loader was not found.
+     *
+     * @param ctx Kernal context.
+     * @param ldrId Class loader id.
+     * @return Deployment class loader or {@code null}.
+     */
+    public static ClassLoader deploymentClassLoader(GridKernalContext ctx, IgniteUuid ldrId) {
+        if (ldrId == null || !ctx.deploy().enabled())
+            return null;
+
+        GridDeployment dep = ctx.deploy().getDeployment(ldrId);
+
+        return dep == null ? null : dep.classLoader();
+    }
+
+    /**
+     * Restores a deployment context for cache deployment.
+     *
+     * @param ctx Kernal context.
+     * @param ldrId Class loader id.
+     */
+    public static void restoreDeploymentContext(GridKernalContext ctx, IgniteUuid ldrId) {
+        if (ctx.deploy().enabled() && ldrId != null) {
+            GridDeployment dep = ctx.deploy().getDeployment(ldrId);
+
+            if (dep != null) {
+                try {
+                    ctx.cache().context().deploy().p2pContext(
+                        dep.classLoaderId().globalId(),
+                        dep.classLoaderId(),
+                        dep.userVersion(),
+                        dep.deployMode(),
+                        dep.participants()
+                    );
+                }
+                catch (IgnitePeerToPeerClassLoadingException e) {
+                    ctx.log(ctx.cache().context().deploy().getClass())
+                        .error("Could not restore P2P context [ldrId=" + ldrId + ']', e);
+                }
+            }
+        }
+    }
+
     /**
      * Formats passed date with specified pattern.
      *
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/GridAffinityNoCacheSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/GridAffinityNoCacheSelfTest.java
index 8faa98f6cbe9b..3208d2e992852 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/GridAffinityNoCacheSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/GridAffinityNoCacheSelfTest.java
@@ -235,6 +235,10 @@ private TestCacheObject(Object val) {
 
         /** {@inheritDoc} */
         @Nullable @Override public  T value(CacheObjectValueContext ctx, boolean cpy) {
+            return value(ctx, cpy, null);
+        }
+
+        @Override public  @Nullable T value(CacheObjectValueContext ctx, boolean cpy, ClassLoader ldr) {
             A.notNull(ctx, "ctx");
 
             return (T)val;
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/managers/deployment/P2PCacheOperationIntoComputeTest.java b/modules/core/src/test/java/org/apache/ignite/internal/managers/deployment/P2PCacheOperationIntoComputeTest.java
new file mode 100644
index 0000000000000..8062e1f60a4f0
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/managers/deployment/P2PCacheOperationIntoComputeTest.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.managers.deployment;
+
+import java.lang.reflect.Constructor;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.DeploymentMode;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.lang.IgniteCallable;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.junit.Test;
+
+/**
+ * Using cache API in P2P tasks.
+ */
+public class P2PCacheOperationIntoComputeTest extends GridCommonAbstractTest {
+    /** Person class name. */
+    private static final String PERSON_CLASS_NAME = "org.apache.ignite.tests.p2p.cache.Person";
+
+    /** Deployment task name. */
+    private static final String AVERAGE_PERSON_SALARY_CLOSURE_NAME = "org.apache.ignite.tests.p2p.compute.AveragePersonSalaryCallable";
+
+    /** Transactional cache name. */
+    private static final String DEFAULT_TX_CACHE_NAME = DEFAULT_CACHE_NAME + "_tx";
+
+    /** Deployment mode for node configuration. */
+    public DeploymentMode deplymentMode;
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
+        return super.getConfiguration(igniteInstanceName)
+            .setConsistentId(igniteInstanceName)
+            .setPeerClassLoadingEnabled(true)
+            .setDeploymentMode(deplymentMode)
+            .setCacheConfiguration(new CacheConfiguration(DEFAULT_CACHE_NAME),
+                new CacheConfiguration(DEFAULT_TX_CACHE_NAME)
+                    .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL));
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+
+        super.afterTest();
+    }
+
+    /**
+     * Checks cache API in the deployed tasks with SHARED mode.
+     *
+     * @throws Exception If failed.
+     */
+    @Test
+    public void testShared() throws Exception {
+        deplymentMode = DeploymentMode.SHARED;
+
+        Ignite ignite0 = startGrids(2);
+
+        awaitPartitionMapExchange();
+
+        Ignite client = startClientGrid(2);
+
+        calculateAverageSalary(client, DEFAULT_CACHE_NAME);
+        calculateAverageSalary(client, DEFAULT_TX_CACHE_NAME);
+    }
+
+    /**
+     * Checks cache API in the deployed tasks with CONTINUOUS mode.
+     *
+     * @throws Exception If failed.
+     */
+    @Test
+    public void testContinuous() throws Exception {
+        deplymentMode = DeploymentMode.CONTINUOUS;
+
+        Ignite ignite0 = startGrids(2);
+
+        awaitPartitionMapExchange();
+
+        Ignite client = startClientGrid(2);
+
+        calculateAverageSalary(client, DEFAULT_CACHE_NAME);
+        calculateAverageSalary(client, DEFAULT_TX_CACHE_NAME);
+    }
+
+    /**
+     * Launches a closure which is initiated on a client node but executed on a server. The closure manipulates
+     * the data through user's classes.
+     *
+     * @param client Client node.
+     * @param cacheName Cache name.
+     * @throws Exception If failed.
+     */
+    private void calculateAverageSalary(
+        Ignite client,
+        String cacheName
+    ) throws Exception {
+        Constructor personCtor = getExternalClassLoader().loadClass(PERSON_CLASS_NAME).getConstructor(String.class);
+
+        IgniteCallable avgSalaryClosure = (IgniteCallable)getExternalClassLoader().loadClass(AVERAGE_PERSON_SALARY_CLOSURE_NAME)
+            .getConstructor(String.class, int.class, int.class).newInstance(cacheName, 0, 10);
+
+        IgniteCache cache = client.cache(cacheName);
+
+        for (int i = 0; i < 10; i++)
+            cache.put(i, createPerson(personCtor, i));
+
+        Double avg = client.compute().call(avgSalaryClosure);
+
+        info("Average salary is " + avg);
+    }
+
+    /**
+     * Creates a new person instance.
+     *
+     * @param personConst Constructor.
+     * @param id Person id.
+     * @return A person instance.
+     * @throws Exception If failed.
+     */
+    private Object createPerson(Constructor personConst, int id) throws Exception {
+        Object person = personConst.newInstance("Person" + id);
+        GridTestUtils.setFieldValue(person, "id", id);
+        GridTestUtils.setFieldValue(person, "lastName", "Last name " + id);
+        GridTestUtils.setFieldValue(person, "salary", id * Math.PI);
+        return person;
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteIncompleteCacheObjectSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteIncompleteCacheObjectSelfTest.java
index e0507ce6cba0c..72d9f9ca6a1b4 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteIncompleteCacheObjectSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteIncompleteCacheObjectSelfTest.java
@@ -104,6 +104,11 @@ private TestCacheObject(final byte type) {
 
         /** {@inheritDoc} */
         @Nullable @Override public  T value(final CacheObjectValueContext ctx, final boolean cpy) {
+            return value(ctx, cpy, null);
+        }
+
+        /** {@inheritDoc} */
+        @Nullable @Override public  T value(final CacheObjectValueContext ctx, final boolean cpy, ClassLoader ldr) {
             return null;
         }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListSelfTest.java
index 68f1668cd121b..a6ce732d5b325 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListSelfTest.java
@@ -705,6 +705,10 @@ private TestCacheObject(int size) {
 
         /** {@inheritDoc} */
         @Nullable @Override public  T value(CacheObjectValueContext ctx, boolean cpy) {
+            return value(ctx, cpy, null);
+        }
+
+        @Override public  @Nullable T value(CacheObjectValueContext ctx, boolean cpy, ClassLoader ldr) {
             return (T)data;
         }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteP2PSelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteP2PSelfTestSuite.java
index eafb75a7c6550..301aee7fdff52 100644
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteP2PSelfTestSuite.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteP2PSelfTestSuite.java
@@ -20,6 +20,7 @@
 import org.apache.ignite.internal.GridP2PAffinitySelfTest;
 import org.apache.ignite.internal.RaceOnDeployClassesWithSameAliases;
 import org.apache.ignite.internal.managers.deployment.GridDeploymentMessageCountSelfTest;
+import org.apache.ignite.internal.managers.deployment.P2PCacheOperationIntoComputeTest;
 import org.apache.ignite.p2p.DeploymentClassLoaderCallableTest;
 import org.apache.ignite.p2p.GridP2PClassLoadingSelfTest;
 import org.apache.ignite.p2p.GridP2PComputeWithNestedEntryProcessorTest;
@@ -72,7 +73,8 @@
     GridP2PComputeWithNestedEntryProcessorTest.class,
     GridP2PCountTiesLoadClassDirectlyFromClassLoaderTest.class,
     RaceOnDeployClassesWithSameAliases.class,
-    GridP2PScanQueryWithTransformerTest.class
+    GridP2PScanQueryWithTransformerTest.class,
+    P2PCacheOperationIntoComputeTest.class
 })
 public class IgniteP2PSelfTestSuite {
 }
diff --git a/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/AveragePersonSalaryCallable.java b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/AveragePersonSalaryCallable.java
new file mode 100644
index 0000000000000..b7ab2c31bd77f
--- /dev/null
+++ b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/AveragePersonSalaryCallable.java
@@ -0,0 +1,200 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.tests.p2p.compute;
+
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import javax.cache.processor.MutableEntry;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.lang.IgniteCallable;
+import org.apache.ignite.resources.IgniteInstanceResource;
+import org.apache.ignite.resources.LoggerResource;
+import org.apache.ignite.tests.p2p.cache.Person;
+import org.apache.ignite.transactions.Transaction;
+import org.apache.ignite.transactions.TransactionConcurrency;
+import org.apache.ignite.transactions.TransactionIsolation;
+
+/**
+ * This closure calculates average salary of person in the defined key range.
+ */
+public class AveragePersonSalaryCallable implements IgniteCallable {
+    /** Ignite instance. */
+    @IgniteInstanceResource
+    private Ignite ignite;
+
+    /** Logger. */
+    @LoggerResource
+    private IgniteLogger log;
+
+    /** Cache name. */
+    private final String cacheName;
+
+    /** Left range border. */
+    private final int from;
+
+    /** Right range border. */
+    private final int to;
+
+    /**
+     * @param cacheName Cache name.
+     * @param from First entry key.
+     * @param to Up border of keys.
+     */
+    public AveragePersonSalaryCallable(String cacheName, int from, int to) {
+        this.cacheName = cacheName;
+        this.from = from;
+        this.to = to;
+    }
+
+    /** {@inheritDoc} */
+    @Override public Double call() {
+        log.info("Job was started with parameters: [node=" + ignite.name() +
+            ", cache=" + cacheName +
+            ", from=" + from +
+            ", to=" + to + ']');
+
+        IgniteCache cache = ignite.cache(cacheName);
+
+        if (cache == null)
+            return 0D;
+
+        double avgSalary = calculateAverageSalary(cache);
+
+        addPersonWithAverageSalary(cache, avgSalary);
+
+        checkAverageSalaryThroughInvoke(cache, avgSalary);
+
+        if (isTxCache(cache)) {
+            log.info("Transaction cache checks was triggered here.");
+
+            for (TransactionIsolation isolation : TransactionIsolation.values()) {
+                for (TransactionConcurrency concurrency : TransactionConcurrency.values()) {
+                    try (Transaction tx = ignite.transactions().txStart(concurrency, isolation)) {
+                        double txAvgSalary = calculateAverageSalary(cache);
+
+                        assert Double.compare(txAvgSalary, avgSalary) == 0;
+                    }
+                }
+            }
+
+            for (TransactionIsolation isolation : TransactionIsolation.values()) {
+                for (TransactionConcurrency concurrency : TransactionConcurrency.values()) {
+                    try (Transaction tx = ignite.transactions().txStart(concurrency, isolation)) {
+                        addPersonWithAverageSalary(cache, avgSalary);
+
+                        checkAverageSalaryThroughInvoke(cache, avgSalary);
+                    }
+                }
+            }
+        }
+
+        return avgSalary;
+    }
+
+    /**
+     * @param cache Ignite cache.
+     * @param avgSalary Average salary calculated previously.
+     */
+    private void checkAverageSalaryThroughInvoke(IgniteCache cache, double avgSalary) {
+        double amount = 0;
+
+        for (int i = from; i < to; i++) {
+            amount += cache.invoke(i, (MutableEntry entry, Object... arguments) ->
+                entry.getValue().getSalary());
+        }
+
+        assert Double.compare(avgSalary, amount / (to - from)) == 0;
+    }
+
+    private boolean isTxCache(IgniteCache cache) {
+        CacheConfiguration ccfg = cache.getConfiguration(CacheConfiguration.class);
+
+        return ccfg.getAtomicityMode() == CacheAtomicityMode.TRANSACTIONAL;
+    }
+
+    /**
+     * Adds some person with average salary.
+     *
+     * @param cache Ignite cache.
+     * @param avgSalary Average salary.
+     */
+    private void addPersonWithAverageSalary(IgniteCache cache, double avgSalary) {
+        Map persons = IntStream.range(from, to).boxed().map(id -> createAveragePerson(avgSalary, to + id))
+            .collect(Collectors.toMap(Person::getId, Function.identity(), (u, v) -> {
+                    throw new IllegalStateException(String.format("Duplicate key %s", u));
+                }, TreeMap::new));
+
+        cache.putAll(persons);
+
+        for (Integer key : persons.keySet()) {
+            Person p = cache.getAndPut(to + key, createAveragePerson(avgSalary, to + key));
+
+            assert p == null || Double.compare(avgSalary, p.getSalary()) == 0;
+        }
+    }
+
+    /**
+     * Calculates average salary.
+     *
+     * @param cache Ignite cache.
+     * @return Average salary.
+     */
+    private double calculateAverageSalary(IgniteCache cache) {
+        double amount = 0;
+
+        Set keys = IntStream.range(from, to).boxed().collect(Collectors.toSet());
+
+        Map entries = cache.getAll(keys);
+
+        for (Integer key : keys) {
+            Person p = cache.get(key);
+
+            Person p1 = entries.get(key);
+
+            assert p.equals(p1);
+
+            amount += p.getSalary();
+        }
+
+        return amount / (to - from);
+    }
+
+    /**
+     * Creates average person.
+     *
+     * @param avgSalary Average salary.
+     * @param id Id.
+     */
+    private Person createAveragePerson(double avgSalary, Integer id) {
+        Person p = new Person("John " + id);
+
+        p.setId(id);
+        p.setLastName("Smith");
+        p.setSalary(avgSalary);
+
+        return p;
+    }
+}

From ececdfd7b3dddfbf7611a71e2b951a82610296c4 Mon Sep 17 00:00:00 2001
From: Aleksey Plekhanov 
Date: Sun, 8 Nov 2020 22:05:02 +0300
Subject: [PATCH 026/110] IGNITE-13685 Java thin client: Fix transactional
 tests - Fixes #8436.

Signed-off-by: Aleksey Plekhanov 
---
 .../apache/ignite/client/FunctionalTest.java  | 22 ++++++++-----------
 1 file changed, 9 insertions(+), 13 deletions(-)

diff --git a/modules/core/src/test/java/org/apache/ignite/client/FunctionalTest.java b/modules/core/src/test/java/org/apache/ignite/client/FunctionalTest.java
index caa39e963f0d3..5291baca6f949 100644
--- a/modules/core/src/test/java/org/apache/ignite/client/FunctionalTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/client/FunctionalTest.java
@@ -34,8 +34,6 @@
 import java.util.concurrent.BrokenBarrierException;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.CyclicBarrier;
-import java.util.concurrent.ForkJoinPool;
-import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
@@ -612,14 +610,14 @@ private void testPessimisticTxLocking(TransactionIsolation isolation) throws Exc
             );
             cache.put(0, "value0");
 
-            Future fut;
+            IgniteInternalFuture fut;
 
             try (ClientTransaction tx = client.transactions().txStart(PESSIMISTIC, isolation)) {
                 assertEquals("value0", cache.get(0));
 
                 CyclicBarrier barrier = new CyclicBarrier(2);
 
-                fut = ForkJoinPool.commonPool().submit(() -> {
+                fut = GridTestUtils.runAsync(() -> {
                     try (ClientTransaction tx2 = client.transactions().txStart(OPTIMISTIC, REPEATABLE_READ, 500)) {
                         cache.put(0, "value2");
                         tx2.commit();
@@ -663,7 +661,7 @@ public void testOptimitsticSerializableTransactionHoldsLock() throws Exception {
             try (ClientTransaction tx = client.transactions().txStart(OPTIMISTIC, SERIALIZABLE)) {
                 assertEquals("value0", cache.get(0));
 
-                Future fut = ForkJoinPool.commonPool().submit(() -> {
+                IgniteInternalFuture fut = GridTestUtils.runAsync(() -> {
                     try (ClientTransaction tx2 = client.transactions().txStart(OPTIMISTIC, REPEATABLE_READ)) {
                         cache.put(0, "value2");
                         tx2.commit();
@@ -708,15 +706,13 @@ public void testOptimitsticRepeatableReadUpdatesValue() throws Exception {
 
                 cache.put(0, "value1");
 
-                Future f = ForkJoinPool.commonPool().submit(() -> {
+                GridTestUtils.runAsync(() -> {
                     assertEquals("value0", cache.get(0));
 
                     cache.put(0, "value2");
 
                     assertEquals("value2", cache.get(0));
-                });
-
-                f.get();
+                }).get();
 
                 tx.commit();
             }
@@ -962,7 +958,7 @@ public void testTransactions() throws Exception {
 
                 cache.put(0, "value18");
 
-                Future fut = ForkJoinPool.commonPool().submit(() -> {
+                IgniteInternalFuture fut = GridTestUtils.runAsync(() -> {
                     try (ClientTransaction tx1 = client.transactions().txStart(PESSIMISTIC, READ_COMMITTED)) {
                         cache.put(1, "value19");
 
@@ -1002,7 +998,7 @@ public void testTransactions() throws Exception {
             try (ClientTransaction tx = client.transactions().txStart(PESSIMISTIC, READ_COMMITTED)) {
                 cache.put(0, "value20");
 
-                ForkJoinPool.commonPool().submit(() -> {
+                GridTestUtils.runAsync(() -> {
                     // Implicit transaction started here.
                     cache.put(1, "value21");
 
@@ -1041,7 +1037,7 @@ public void testTransactions() throws Exception {
                 // Start implicit transaction after explicit transaction has been closed by another thread.
                 cache.put(0, "value22");
 
-                ForkJoinPool.commonPool().submit(() -> assertEquals("value22", cache.get(0))).get();
+                GridTestUtils.runAsync(() -> assertEquals("value22", cache.get(0))).get();
 
                 // New explicit transaction can be started after current transaction has been closed by another thread.
                 try (ClientTransaction tx1 = client.transactions().txStart(PESSIMISTIC, READ_COMMITTED)) {
@@ -1092,7 +1088,7 @@ public void testTransactions() throws Exception {
             // Test that implicit transaction started after commit of previous one without closing.
             cache.put(0, "value24");
 
-            ForkJoinPool.commonPool().submit(() -> assertEquals("value24", cache.get(0))).get();
+            GridTestUtils.runAsync(() -> assertEquals("value24", cache.get(0))).get();
         }
     }
 

From 6b14606f227bd11f4a876f28cc023d7358eb7788 Mon Sep 17 00:00:00 2001
From: Vladsz83 
Date: Mon, 9 Nov 2020 14:24:52 +0300
Subject: [PATCH 027/110] IGNITE-13662 : Describe soLinger setting in TCP
 Discovery and SSL issues. (#8430)

---
 docs/_docs/clustering/network-configuration.adoc | 1 +
 docs/_docs/security/ssl-tls.adoc                 | 6 ++++++
 2 files changed, 7 insertions(+)

diff --git a/docs/_docs/clustering/network-configuration.adoc b/docs/_docs/clustering/network-configuration.adoc
index d656b0c8040c8..9d9c904a1a9c9 100644
--- a/docs/_docs/clustering/network-configuration.adoc
+++ b/docs/_docs/clustering/network-configuration.adoc
@@ -56,6 +56,7 @@ You can find the complete list of properties in the javadoc:org.apache.ignite.sp
 | `localPort`  | The port that the node binds to. If set to a non-default value, other cluster nodes must know this port to be able to discover the node. | `47500`
 | `localPortRange`| If the `localPort` is busy, the node attempts to bind to the next port (incremented by 1) and continues this process until it finds a free port. The `localPortRange` property defines the number of ports the node will try (starting from `localPort`).
    | `100`
+| `soLinger`| Setting linger-on-close can help with socket deadlocks caused by SSL issues like JDK-8219658, but it costs longer detection of node failure. | `0`
 | `reconnectCount` | The number of times the node tries to (re)establish connection to another node. |`10`
 | `networkTimeout` |  The maximum network timeout in milliseconds for network operations. |`5000`
 | `socketTimeout` |  The socket operations timeout. This timeout is used to limit connection time and write-to-socket time. |`5000`
diff --git a/docs/_docs/security/ssl-tls.adoc b/docs/_docs/security/ssl-tls.adoc
index bf5a90e2bfd1d..a5edc0bf22034 100644
--- a/docs/_docs/security/ssl-tls.adoc
+++ b/docs/_docs/security/ssl-tls.adoc
@@ -32,6 +32,12 @@ To enable SSL/TLS for cluster nodes, configure an `SSLContext` factory in the no
 You can use the `org.apache.ignite.ssl.SslContextFactory`, which is the default factory that uses a configurable keystore to initialize the SSL context.
 //You can also implement your own `SSLContext` factory.
 
+[NOTE]
+====
+There is a known socket deadlock issue on various JREs when SSL is enabled (for example, JDK-8219658). It is recommended to use
+the latest version of your JRE, or you can enable linger options like `TcpDiscoverySpi.soLinger`.
+====
+
 Below is an example of `SslContextFactory` configuration:
 
 [tabs]

From d7364c2862713f9ea5f3e9e11b83e8125bfd96be Mon Sep 17 00:00:00 2001
From: Ivan Bessonov 
Date: Mon, 9 Nov 2020 14:34:23 +0300
Subject: [PATCH 028/110] IGNITE-13682 Type parameters added to
 MaintenanceAction usages around the code - Fixes #8434.

Signed-off-by: Sergey Chugunov 
---
 .../java/org/apache/ignite/internal/IgniteKernal.java |  2 +-
 .../internal/maintenance/MaintenanceFileStore.java    |  3 +--
 .../internal/maintenance/MaintenanceProcessor.java    | 11 +++++------
 .../persistence/CorruptedPdsMaintenanceCallback.java  |  5 ++---
 .../internal/visor/persistence/PersistenceTask.java   | 11 ++++++-----
 .../ignite/maintenance/MaintenanceRegistry.java       |  3 +--
 .../maintenance/MaintenanceWorkflowCallback.java      |  5 ++---
 .../LocalWalModeChangeDuringRebalancingSelfTest.java  |  5 ++---
 .../persistence/MaintenanceRegistrySimpleTest.java    |  7 +++----
 9 files changed, 23 insertions(+), 29 deletions(-)

diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java
index bd35e7a6c6471..588ae7d52ba4c 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java
@@ -1295,7 +1295,7 @@ public void start(
                 throw e;
             }
 
-            // All components exept Discovery are started, time to check if maintenance is still needed
+            // All components except Discovery are started, time to check if maintenance is still needed.
             mntcProcessor.prepareAndExecuteMaintenance();
 
             gw.writeLock();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceFileStore.java b/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceFileStore.java
index 02698a1c8a891..4695cbf6b1a62 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceFileStore.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceFileStore.java
@@ -23,7 +23,6 @@
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.stream.Collectors;
-
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.IgniteLogger;
 import org.apache.ignite.internal.processors.cache.persistence.file.FileIO;
@@ -52,7 +51,7 @@
  */
 public class MaintenanceFileStore {
     /** */
-    private static final String MAINTENANCE_FILE_NAME = "maintenance_tasks.mntc";
+    public static final String MAINTENANCE_FILE_NAME = "maintenance_tasks.mntc";
 
     /** */
     private static final String TASKS_SEPARATOR = System.lineSeparator();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceProcessor.java
index 6bc3e8ecaae94..347b328935ae0 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceProcessor.java
@@ -23,7 +23,6 @@
 import java.util.Optional;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.stream.Collectors;
-
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.IgniteException;
 import org.apache.ignite.internal.GridKernalContext;
@@ -191,11 +190,11 @@ else if (isMaintenanceMode()) {
      */
     private void proceedWithMaintenance() {
         for (Map.Entry cbE : workflowCallbacks.entrySet()) {
-            MaintenanceAction mntcAction = cbE.getValue().automaticAction();
+            MaintenanceAction mntcAct = cbE.getValue().automaticAction();
 
-            if (mntcAction != null) {
+            if (mntcAct != null) {
                 try {
-                    mntcAction.execute();
+                    mntcAct.execute();
                 }
                 catch (Throwable t) {
                     log.warning("Failed to execute automatic action for maintenance task: " +
@@ -245,7 +244,7 @@ private void proceedWithMaintenance() {
         if (inMemoryMode)
             throw new IgniteException(IN_MEMORY_MODE_ERR_MSG);
 
-        List actions = cb.allActions();
+        List> actions = cb.allActions();
 
         if (actions == null || actions.isEmpty())
             throw new IgniteException("Maintenance workflow callback should provide at least one mainetance action");
@@ -272,7 +271,7 @@ private void proceedWithMaintenance() {
     }
 
     /** {@inheritDoc} */
-    @Override public List actionsForMaintenanceTask(String maintenanceTaskName) {
+    @Override public List> actionsForMaintenanceTask(String maintenanceTaskName) {
         if (inMemoryMode)
             throw new IgniteException(IN_MEMORY_MODE_ERR_MSG);
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CorruptedPdsMaintenanceCallback.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CorruptedPdsMaintenanceCallback.java
index 0173bca7facf7..7dfbd5c6cd058 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CorruptedPdsMaintenanceCallback.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CorruptedPdsMaintenanceCallback.java
@@ -20,7 +20,6 @@
 import java.io.File;
 import java.util.Arrays;
 import java.util.List;
-
 import org.apache.ignite.maintenance.MaintenanceAction;
 import org.apache.ignite.maintenance.MaintenanceWorkflowCallback;
 import org.jetbrains.annotations.NotNull;
@@ -68,14 +67,14 @@ public CorruptedPdsMaintenanceCallback(@NotNull File workDir,
     }
 
     /** {@inheritDoc} */
-    @Override public List allActions() {
+    @Override public List> allActions() {
         return Arrays.asList(
             new CleanCacheStoresMaintenanceAction(workDir, cacheStoreDirs.toArray(new String[0])),
             new CheckCorruptedCacheStoresCleanAction(workDir, cacheStoreDirs.toArray(new String[0])));
     }
 
     /** {@inheritDoc} */
-    @Override public MaintenanceAction automaticAction() {
+    @Override public MaintenanceAction automaticAction() {
         return null;
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTask.java
index 8f18115f5a55f..823126d3b5b61 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTask.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/persistence/PersistenceTask.java
@@ -249,13 +249,14 @@ private PersistenceTaskResult cleanCaches(
             if (!failedToCleanCaches.isEmpty())
                 res.failedCaches(failedToCleanCaches);
 
-            List actions = mntcReg.actionsForMaintenanceTask(CORRUPTED_DATA_FILES_MNTC_TASK_NAME);
+            List> actions = mntcReg.actionsForMaintenanceTask(CORRUPTED_DATA_FILES_MNTC_TASK_NAME);
 
-            Optional checkActionOpt = actions.stream().filter(a -> a.name().equals(CheckCorruptedCacheStoresCleanAction.ACTION_NAME))
+            Optional> checkActionOpt = actions.stream()
+                .filter(a -> a.name().equals(CheckCorruptedCacheStoresCleanAction.ACTION_NAME))
                 .findFirst();
 
             if (checkActionOpt.isPresent()) {
-                MaintenanceAction action = checkActionOpt.get();
+                MaintenanceAction action = (MaintenanceAction)checkActionOpt.get();
 
                 Boolean mntcTaskCompleted = action.execute();
 
@@ -296,10 +297,10 @@ private PersistenceTaskResult cleanAll(GridCacheProcessor cacheProc, Maintenance
         private PersistenceTaskResult cleanCorrupted(MaintenanceRegistry mntcReg) {
             PersistenceTaskResult res = new PersistenceTaskResult(true);
 
-            List actions = mntcReg
+            List> actions = mntcReg
                 .actionsForMaintenanceTask(CORRUPTED_DATA_FILES_MNTC_TASK_NAME);
 
-            Optional cleanCorruptedActionOpt = actions
+            Optional> cleanCorruptedActionOpt = actions
                 .stream()
                 .filter(a -> a.name().equals(CleanCacheStoresMaintenanceAction.ACTION_NAME))
                 .findFirst();
diff --git a/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceRegistry.java b/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceRegistry.java
index b264700e969e6..3ce1aea825fd7 100644
--- a/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceRegistry.java
+++ b/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceRegistry.java
@@ -18,7 +18,6 @@
 package org.apache.ignite.maintenance;
 
 import java.util.List;
-
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.IgniteException;
 import org.apache.ignite.lang.IgniteExperimental;
@@ -142,7 +141,7 @@ public interface MaintenanceRegistry {
      *
      * @throws IgniteException if no Maintenance Tasks are registered for provided name.
      */
-    public List actionsForMaintenanceTask(String maintenanceTaskName);
+    public List> actionsForMaintenanceTask(String maintenanceTaskName);
 
     /**
      * Examine all components if they need to execute maintenance actions.
diff --git a/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceWorkflowCallback.java b/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceWorkflowCallback.java
index 26ba2a1ff14b3..340bb530fcfbd 100644
--- a/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceWorkflowCallback.java
+++ b/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceWorkflowCallback.java
@@ -18,7 +18,6 @@
 package org.apache.ignite.maintenance;
 
 import java.util.List;
-
 import org.apache.ignite.lang.IgniteExperimental;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
@@ -52,7 +51,7 @@ public interface MaintenanceWorkflowCallback {
      *
      * @return Not null and non-empty {@link List} of {@link MaintenanceAction}.
      */
-    @NotNull public List allActions();
+    @NotNull public List> allActions();
 
     /**
      * Component can provide optional {@link MaintenanceAction} that will be executed automatically
@@ -64,5 +63,5 @@ public interface MaintenanceWorkflowCallback {
      * @return {@link MaintenanceAction} for automatic execution or null if maintenance situation
      * should not be fixed automatically.
      */
-    @Nullable public MaintenanceAction automaticAction();
+    @Nullable public MaintenanceAction automaticAction();
 }
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/LocalWalModeChangeDuringRebalancingSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/LocalWalModeChangeDuringRebalancingSelfTest.java
index 03ad4dd062bcb..b2b2bde699efc 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/LocalWalModeChangeDuringRebalancingSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/LocalWalModeChangeDuringRebalancingSelfTest.java
@@ -30,7 +30,6 @@
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
-
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.IgniteCheckedException;
@@ -739,10 +738,10 @@ public void testPdsWithBrokenBinaryConsistencyIsClearedAfterRestartWithDisabledW
             @Override public void run() {
                 MaintenanceRegistry mntcRegistry = ((IgniteEx) ig).context().maintenanceRegistry();
 
-                List actions = mntcRegistry
+                List> actions = mntcRegistry
                     .actionsForMaintenanceTask(CORRUPTED_DATA_FILES_MNTC_TASK_NAME);
 
-                Optional optional = actions
+                Optional> optional = actions
                     .stream()
                     .filter(a -> a.name().equals(CleanCacheStoresMaintenanceAction.ACTION_NAME)).findFirst();
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MaintenanceRegistrySimpleTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MaintenanceRegistrySimpleTest.java
index 5866d43d74a8e..f64d5684b9800 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MaintenanceRegistrySimpleTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MaintenanceRegistrySimpleTest.java
@@ -21,7 +21,6 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
-
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.IgniteException;
 import org.apache.ignite.IgniteLogger;
@@ -295,9 +294,9 @@ public void testMaintenanceActionNameSymbols() throws IgniteCheckedException {
     /** */
     private final class SimpleMaintenanceCallback implements MaintenanceWorkflowCallback {
         /** */
-        private final List actions = new ArrayList<>();
+        private final List> actions = new ArrayList<>();
 
-        SimpleMaintenanceCallback(List actions) {
+        SimpleMaintenanceCallback(List> actions) {
             this.actions.addAll(actions);
         }
 
@@ -307,7 +306,7 @@ private final class SimpleMaintenanceCallback implements MaintenanceWorkflowCall
         }
 
         /** {@inheritDoc} */
-        @Override public @NotNull List allActions() {
+        @Override public @NotNull List> allActions() {
             return actions;
         }
 

From 223074926dab0ce0508eb0c32b941ed4419898c6 Mon Sep 17 00:00:00 2001
From: Aleksey Plekhanov 
Date: Mon, 9 Nov 2020 15:07:45 +0300
Subject: [PATCH 029/110] IGNITE-13653 Remove warning for unordered bulk
 operation on ATOMIC cache.

Improve test coverage.
Fixes #8414.

Signed-off-by: Ilya Kasnacheev 
---
 .../processors/cache/GridCacheAdapter.java    |  6 +++
 .../GridCacheHashMapPutAllWarningsTest.java   | 48 ++++++++++++++++++-
 ...tomicConcurrentUnorderedUpdateAllTest.java | 23 ++++++---
 3 files changed, 68 insertions(+), 9 deletions(-)

diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java
index 2a06f1a92400d..a6fcee81de904 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java
@@ -5214,6 +5214,9 @@ protected final void validateCacheKeys(Iterable keys) {
      * @param m Map to examine.
      */
     protected void warnIfUnordered(Map m, BulkOperation op) {
+        if (ctx.atomic())
+            return;
+
         if (m == null || m.size() <= 1)
             return;
 
@@ -5238,6 +5241,9 @@ protected void warnIfUnordered(Map m, BulkOperation op) {
      * @param coll Collection to examine.
      */
     protected void warnIfUnordered(Collection coll, BulkOperation op) {
+        if (ctx.atomic())
+            return;
+
         if (coll == null || coll.size() <= 1)
             return;
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/GridCacheHashMapPutAllWarningsTest.java b/modules/core/src/test/java/org/apache/ignite/internal/GridCacheHashMapPutAllWarningsTest.java
index 1e49d909f7bc3..9b59bd536d0b2 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/GridCacheHashMapPutAllWarningsTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/GridCacheHashMapPutAllWarningsTest.java
@@ -31,6 +31,7 @@
 import javax.cache.processor.MutableEntry;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteSystemProperties;
 import org.apache.ignite.cache.CacheAtomicityMode;
 import org.apache.ignite.cache.CacheMode;
 import org.apache.ignite.configuration.CacheConfiguration;
@@ -41,6 +42,7 @@
 import org.apache.ignite.transactions.Transaction;
 import org.apache.ignite.transactions.TransactionConcurrency;
 import org.apache.ignite.transactions.TransactionIsolation;
+import org.junit.Assume;
 import org.junit.Test;
 
 /**
@@ -82,7 +84,8 @@ public void testHashMapPutAllExactMessage() throws Exception {
 
         Ignite ignite = startGrid(0);
 
-        IgniteCache c = ignite.getOrCreateCache(new CacheConfiguration<>("exact"));
+        IgniteCache c = ignite.getOrCreateCache(new CacheConfiguration("exact")
+            .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL));
 
         HashMap m = new HashMap<>();
 
@@ -150,6 +153,9 @@ public void testHashMapPutAllExplicitOptimistic() throws Exception {
      */
     @Test
     public void testHashMapInvokeAllLocal() throws Exception {
+        Assume.assumeFalse( "Local transactional caches not supported by MVCC",
+            IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_FORCE_MVCC_MODE_IN_TESTS, false));
+
         List messages = Collections.synchronizedList(new ArrayList<>());
 
         testLog = new ListeningTestLogger(false, log());
@@ -162,7 +168,7 @@ public void testHashMapInvokeAllLocal() throws Exception {
         Ignite ignite = startGrid(0);
 
         IgniteCache c = ignite.getOrCreateCache(new CacheConfiguration("invoke")
-            .setCacheMode(CacheMode.LOCAL));
+            .setCacheMode(CacheMode.LOCAL).setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL));
 
         c.put(1, "foo");
         c.put(2, "bar");
@@ -381,4 +387,42 @@ public void testHashSetGetAllTx() throws Exception {
 
         assertEquals(1, found);
     }
+
+    /**
+     * @throws Exception If failed.
+     */
+    @Test
+    public void testHashMapAtomic() throws Exception {
+        List messages = Collections.synchronizedList(new ArrayList<>());
+
+        testLog = new ListeningTestLogger(log());
+
+        testLog.registerListener((s) -> {
+            if (s.contains("deadlock"))
+                messages.add(s);
+        });
+
+        Ignite ignite = startGrid(0);
+
+        IgniteCache c = ignite.getOrCreateCache(new CacheConfiguration("atomic")
+            .setAtomicityMode(CacheAtomicityMode.ATOMIC));
+
+        HashMap m = new HashMap<>();
+
+        m.put(1, "foo");
+        m.put(2, "bar");
+
+        c.putAll(m);
+        c.invokeAll(m.keySet(), (k, v) -> v);
+        c.removeAll(m.keySet());
+        c.removeAll();
+
+        assertEquals(0, c.size());
+
+        for (String message : messages) {
+            assertFalse(message.contains("Unordered "));
+
+            assertFalse(message.contains("operation on cache"));
+        }
+    }
 }
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAtomicConcurrentUnorderedUpdateAllTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAtomicConcurrentUnorderedUpdateAllTest.java
index 6863c94930ee1..be95c9e63846d 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAtomicConcurrentUnorderedUpdateAllTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAtomicConcurrentUnorderedUpdateAllTest.java
@@ -29,6 +29,7 @@
 import org.apache.ignite.cache.CacheMode;
 import org.apache.ignite.cache.store.CacheStore;
 import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.NearCacheConfiguration;
 import org.apache.ignite.testframework.GridTestUtils;
 import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
 import org.junit.Test;
@@ -53,15 +54,16 @@ public class IgniteCacheAtomicConcurrentUnorderedUpdateAllTest extends GridCommo
     private static final int CACHE_SIZE = 1_000;
 
     /** Parameters. */
-    @Parameterized.Parameters(name = "cacheMode={0}, writeThrough={1}")
+    @Parameterized.Parameters(name = "cacheMode={0}, writeThrough={1}, near={2}")
     public static Iterable data() {
         return Arrays.asList(
-            new Object[] {CacheMode.PARTITIONED, Boolean.FALSE},
-            new Object[] {CacheMode.PARTITIONED, Boolean.TRUE},
-            new Object[] {CacheMode.REPLICATED, Boolean.FALSE},
-            new Object[] {CacheMode.REPLICATED, Boolean.TRUE},
-            new Object[] {CacheMode.LOCAL, Boolean.FALSE},
-            new Object[] {CacheMode.LOCAL, Boolean.TRUE}
+            new Object[] {CacheMode.PARTITIONED, Boolean.FALSE, Boolean.FALSE},
+            new Object[] {CacheMode.PARTITIONED, Boolean.TRUE, Boolean.FALSE},
+            new Object[] {CacheMode.PARTITIONED, Boolean.FALSE, Boolean.TRUE},
+            new Object[] {CacheMode.REPLICATED, Boolean.FALSE, Boolean.FALSE},
+            new Object[] {CacheMode.REPLICATED, Boolean.TRUE, Boolean.FALSE},
+            new Object[] {CacheMode.LOCAL, Boolean.FALSE, Boolean.FALSE},
+            new Object[] {CacheMode.LOCAL, Boolean.TRUE, Boolean.FALSE}
         );
     }
 
@@ -73,6 +75,10 @@ public static Iterable data() {
     @Parameterized.Parameter(1)
     public Boolean writeThrough;
 
+    /** Near cache. */
+    @Parameterized.Parameter(2)
+    public Boolean near;
+
     /** {@inheritDoc} */
     @Override protected void afterTest() throws Exception {
         super.afterTest();
@@ -92,6 +98,7 @@ public void testConcurrentUpdateAll() throws Exception {
 
         IgniteCache cache = ignite.createCache(new CacheConfiguration<>(CACHE_NAME)
             .setWriteThrough(writeThrough).setCacheStoreFactory(cacheStoreFactory)
+            .setNearConfiguration(near ? new NearCacheConfiguration<>() : null)
             .setCacheMode(cacheMode).setAtomicityMode(ATOMIC).setBackups(1));
 
         CyclicBarrier barrier = new CyclicBarrier(THREADS_CNT);
@@ -122,6 +129,8 @@ public void testConcurrentUpdateAll() throws Exception {
 
                 cache0.putAll(map);
 
+                cache0.invokeAll(map.keySet(), (k, v) -> v);
+
                 cache0.removeAll(map.keySet());
 
                 log.info("Thread " + threadIdx + " iteration " + i + " finished");

From cb7448eecf1ae05c2062e24d9c342d8ae9d92149 Mon Sep 17 00:00:00 2001
From: Vladsz83 
Date: Mon, 9 Nov 2020 15:57:06 +0300
Subject: [PATCH 030/110] IGNITE-13643 : Disable socket linger by default in
 Tcp Discovery Spi (#8407)

---
 .../org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java
index f1e8406a9afe9..c18715ece79ce 100644
--- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java
+++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java
@@ -287,7 +287,7 @@ public class TcpDiscoverySpi extends IgniteSpiAdapter implements IgniteDiscovery
     public static final long DFLT_MAX_ACK_TIMEOUT = 10 * 60 * 1000;
 
     /** Default SO_LINGER to set for socket, 0 means enabled with 0 timeout. */
-    public static final int DFLT_SO_LINGER = 5;
+    public static final int DFLT_SO_LINGER = 0;
 
     /** Default connection recovery timeout in ms. */
     public static final long DFLT_CONNECTION_RECOVERY_TIMEOUT = IgniteConfiguration.DFLT_FAILURE_DETECTION_TIMEOUT;

From f6ba904a3fcb6cbaacc19001eaa2a7b71836fbce Mon Sep 17 00:00:00 2001
From: Alexey Scherbakov 
Date: Mon, 9 Nov 2020 20:10:24 +0300
Subject: [PATCH 031/110] IGNITE-13244 Throw CacheInvalidStateException if all
 owners for a partition have failed on commit.

Signed-off-by: Alexey Scherbakov 
---
 .../near/GridNearTxFinishFuture.java          |  23 ++
 .../cache/IgniteCacheGroupsTest.java          |   9 +-
 .../near/IgniteTxExceptionNodeFailTest.java   | 197 ++++++++++++++++++
 .../testsuites/IgniteCacheTestSuite3.java     |   2 +
 4 files changed, 229 insertions(+), 2 deletions(-)
 create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteTxExceptionNodeFailTest.java

diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFinishFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFinishFuture.java
index fc239da9c5f0a..8a25f86154e69 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFinishFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFinishFuture.java
@@ -31,6 +31,7 @@
 import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.NodeStoppingException;
 import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;
+import org.apache.ignite.internal.processors.cache.CacheInvalidStateException;
 import org.apache.ignite.internal.processors.cache.GridCacheCompoundIdentityFuture;
 import org.apache.ignite.internal.processors.cache.GridCacheFuture;
 import org.apache.ignite.internal.processors.cache.GridCacheReturn;
@@ -58,11 +59,16 @@
 import org.apache.ignite.lang.IgniteUuid;
 import org.apache.ignite.transactions.TransactionRollbackException;
 
+import static java.util.Collections.emptySet;
+import static java.util.stream.Stream.concat;
+import static java.util.stream.Stream.of;
 import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_ASYNC;
 import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
 import static org.apache.ignite.internal.processors.tracing.MTC.TraceSurroundings;
 import static org.apache.ignite.internal.processors.tracing.MTC.support;
 import static org.apache.ignite.internal.processors.tracing.SpanType.TX_NEAR_FINISH;
+import static org.apache.ignite.transactions.TransactionState.COMMITTED;
+import static org.apache.ignite.transactions.TransactionState.COMMITTING;
 import static org.apache.ignite.transactions.TransactionState.UNKNOWN;
 
 /**
@@ -73,6 +79,10 @@ public final class GridNearTxFinishFuture extends GridCacheCompoundIdentit
     /** */
     private static final long serialVersionUID = 0L;
 
+    /** All owners left grid message. */
+    public static final String ALL_PARTITION_OWNERS_LEFT_GRID_MSG =
+        "Failed to commit a transaction (all partition owners have left the grid, partition data has been lost)";
+
     /** Tracing span. */
     private Span span;
 
@@ -979,6 +989,19 @@ public GridDistributedTxMapping mapping() {
 
         /** {@inheritDoc} */
         @Override boolean onNodeLeft(UUID nodeId, boolean discoThread) {
+            if (tx.state() == COMMITTING || tx.state() == COMMITTED) {
+                if (concat(of(m.primary().id()), tx.transactionNodes().getOrDefault(nodeId, emptySet()).stream())
+                    .noneMatch(uuid -> cctx.discovery().alive(uuid))) {
+                    onDone(new CacheInvalidStateException(ALL_PARTITION_OWNERS_LEFT_GRID_MSG +
+                        m.entries().stream().map(e -> " [cacheName=" + e.cached().context().name() +
+                            ", partition=" + e.key().partition() +
+                            (S.includeSensitive() ? ", key=" + e.key() : "") +
+                            "]").findFirst().orElse("")));
+
+                    return true;
+                }
+            }
+
             if (nodeId.equals(m.primary().id())) {
                 if (msgLog.isDebugEnabled()) {
                     msgLog.debug("Near finish fut, mini future node left [txId=" + tx.nearXidVersion() +
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheGroupsTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheGroupsTest.java
index cd28022f788d4..db64131315861 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheGroupsTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheGroupsTest.java
@@ -99,6 +99,7 @@
 import org.apache.ignite.testframework.GridTestUtils.SF;
 import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
 import org.apache.ignite.transactions.Transaction;
+import org.apache.ignite.transactions.TransactionRollbackException;
 import org.jetbrains.annotations.Nullable;
 import org.junit.Ignore;
 import org.junit.Test;
@@ -4106,10 +4107,14 @@ public void testRestartsAndCacheCreateDestroy() throws Exception {
                                             cacheOperation(rnd, cache);
                                     }
                                     catch (Exception e) {
-                                        if (X.hasCause(e, CacheStoppedException.class)) {
+                                        if (X.hasCause(e, CacheStoppedException.class) ||
+                                            (X.hasCause(e, CacheInvalidStateException.class) &&
+                                                X.hasCause(e, TransactionRollbackException.class))
+                                        ) {
                                             // Cache operation can be blocked on
                                             // awaiting new topology version and cancelled with CacheStoppedException cause.
-
+                                            // Cache operation can fail
+                                            // if a node was stopped during transaction.
                                             continue;
                                         }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteTxExceptionNodeFailTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteTxExceptionNodeFailTest.java
new file mode 100644
index 0000000000000..f16c6eab561e2
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteTxExceptionNodeFailTest.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.near;
+
+import java.util.Arrays;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import org.apache.ignite.ShutdownPolicy;
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.apache.ignite.cache.CacheWriteSynchronizationMode;
+import org.apache.ignite.cache.affinity.Affinity;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.DataRegionConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.IgnitionEx;
+import org.apache.ignite.internal.TestRecordingCommunicationSpi;
+import org.apache.ignite.internal.processors.cache.CacheInvalidStateException;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.apache.ignite.transactions.Transaction;
+import org.apache.ignite.transactions.TransactionHeuristicException;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.springframework.util.Assert;
+
+import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
+import static org.apache.ignite.cache.CacheWriteSynchronizationMode.PRIMARY_SYNC;
+import static org.apache.ignite.internal.TestRecordingCommunicationSpi.spi;
+import static org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxFinishFuture.ALL_PARTITION_OWNERS_LEFT_GRID_MSG;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.mvccEnabled;
+
+/**
+ * Tests check the result of a commit when a node fails before
+ * sending {@link GridNearTxFinishResponse} to the transaction coordinator.
+ */
+@RunWith(Parameterized.class)
+public class IgniteTxExceptionNodeFailTest extends GridCommonAbstractTest {
+    /** Parameters. */
+    @Parameterized.Parameters(name = "syncMode={0}")
+    public static Iterable data() {
+        return Arrays.asList(new Object[][] {
+            { PRIMARY_SYNC },
+            { FULL_SYNC },
+        });
+    }
+
+    /** Cache write synchronization mode. */
+    @Parameterized.Parameter()
+    public CacheWriteSynchronizationMode syncMode;
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
+
+        DataStorageConfiguration dsConfig = new DataStorageConfiguration()
+            .setDefaultDataRegionConfiguration(new DataRegionConfiguration().setMaxSize(100L * 1024 * 1024)
+                .setPersistenceEnabled(true));
+
+        cfg.setCommunicationSpi(new TestRecordingCommunicationSpi());
+
+        return cfg
+            .setDataStorageConfiguration(dsConfig)
+            .setCacheConfiguration(new CacheConfiguration("cache")
+                .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL)
+                .setWriteSynchronizationMode(syncMode).setBackups(0));
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        super.beforeTest();
+
+        cleanPersistenceDir();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        super.afterTest();
+
+        stopAllGrids();
+    }
+
+    /**
+     * <ul>
+     *     <li>Start 2 nodes with a transactional cache, without backups, with
+     *     {@link IgniteTxExceptionNodeFailTest#syncMode}</li>
+     *     <li>Start a transaction:
+     *     <ul>
+     *         <li>put a key to a partition on the transaction coordinator</li>
+     *         <li>put a key to a partition on the other node</li>
+     *         <li>try to commit the transaction</li>
+     *     </ul>
+     *     </li>
+     *     <li>Stop the other node when it tries to send {@code GridNearTxFinishResponse}</li>
+     *     <li>Check that {@link Transaction#commit()} throws {@link TransactionHeuristicException}</li>
+ * + * @throws Exception If failed + */ + @Test + public void testNodeFailBeforeSendGridNearTxFinishResponse() throws Exception { + startGrids(2); + + grid(0).cluster().active(true); + + IgniteEx grid0 = grid(0); + IgniteEx grid1 = grid(1); + + int key0 = 0; + int key1 = 0; + + Affinity aff = grid1.affinity("cache"); + + for (int i = 1; i < 1000; i++) { + if (grid0.equals(grid(aff.mapKeyToNode(i)))) { + key0 = i; + + break; + } + } + + for (int i = key0; i < 1000; i++) { + if (grid1.equals(grid(aff.mapKeyToNode(i))) && !aff.mapKeyToNode(key0).equals(aff.mapKeyToNode(i))) { + key1 = i; + + break; + } + } + + assert !aff.mapKeyToNode(key0).equals(aff.mapKeyToNode(key1)); + + try (Transaction tx = grid1.transactions().txStart()) { + grid1.cache("cache").put(key0, 100); + grid1.cache("cache").put(key1, 200); + + spi(grid0).blockMessages((node, msg) -> { + if (msg instanceof GridNearTxFinishResponse) { + new Thread( + new Runnable() { + @Override public void run() { + log().info("Stopping node: [" + grid0.name() + "]"); + + IgnitionEx.stop(grid0.name(), true, ShutdownPolicy.IMMEDIATE, false); + } + }, + "node-stopper" + ).start(); + + return true; + } + + return false; + } + ); + + boolean passed = false; + + try { + tx.commit(); + } + catch (Throwable e) { + String msg = e.getMessage(); + + Assert.isTrue(e.getCause() instanceof CacheInvalidStateException); + + Assert.isTrue(msg.contains(ALL_PARTITION_OWNERS_LEFT_GRID_MSG)); + + if (!mvccEnabled(grid1.context())) { + Pattern msgPtrn = Pattern.compile(" \\[cacheName=cache, partition=\\d+, " + "key=KeyCacheObjectImpl \\[part=\\d+, val=" + key0 + + ", hasValBytes=true\\]\\]"); + + Matcher matcher = msgPtrn.matcher(msg); + + Assert.isTrue(matcher.find()); + } + + passed = true; + } + + Assert.isTrue(passed); + } + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite3.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite3.java index 
ebe0b5a2b6dd0..1e4f59cdcbdbd 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite3.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite3.java @@ -52,6 +52,7 @@ import org.apache.ignite.internal.processors.cache.distributed.near.GridCacheNearPartitionedP2PDisabledByteArrayValuesSelfTest; import org.apache.ignite.internal.processors.cache.distributed.near.GridCacheNearPartitionedP2PEnabledByteArrayValuesSelfTest; import org.apache.ignite.internal.processors.cache.distributed.near.GridCachePutArrayValueSelfTest; +import org.apache.ignite.internal.processors.cache.distributed.near.IgniteTxExceptionNodeFailTest; import org.apache.ignite.internal.processors.cache.distributed.near.IgniteTxReentryNearSelfTest; import org.apache.ignite.internal.processors.cache.distributed.replicated.GridCacheDaemonNodeReplicatedSelfTest; import org.apache.ignite.internal.processors.cache.distributed.replicated.GridCacheReplicatedAtomicGetAndTransformStoreSelfTest; @@ -152,6 +153,7 @@ public static List> suite(Collection ignoredTests) { GridTestUtils.addTestIfNeeded(suite, IgniteTxReentryNearSelfTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, IgniteTxReentryColocatedSelfTest.class, ignoredTests); + GridTestUtils.addTestIfNeeded(suite, IgniteTxExceptionNodeFailTest.class, ignoredTests); // Test for byte array value special case. GridTestUtils.addTestIfNeeded(suite, GridCacheLocalByteArrayValuesSelfTest.class, ignoredTests); From ff55943167bfde8096b608b223e42d81108d7dfb Mon Sep 17 00:00:00 2001 From: sergeyuttsel Date: Mon, 9 Nov 2020 23:11:29 +0300 Subject: [PATCH 032/110] IGNITE-13678 Extended test coverage for persistence files directory. 
Fixes #8429 Signed-off-by: Slava Koptilin --- ...tartupWithUndefinedIgniteHomeSelfTest.java | 14 +- .../util/IgniteUtilsWorkDirectoryTest.java | 257 ++++++++++++++++++ .../testsuites/IgniteKernalSelfTestSuite.java | 2 + 3 files changed, 266 insertions(+), 7 deletions(-) create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/util/IgniteUtilsWorkDirectoryTest.java diff --git a/modules/core/src/test/java/org/apache/ignite/internal/util/GridStartupWithUndefinedIgniteHomeSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/util/GridStartupWithUndefinedIgniteHomeSelfTest.java index 75711a6cefb9c..d09d2725b41f3 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/util/GridStartupWithUndefinedIgniteHomeSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/util/GridStartupWithUndefinedIgniteHomeSelfTest.java @@ -34,13 +34,13 @@ import static org.apache.ignite.IgniteSystemProperties.IGNITE_HOME; import static org.apache.ignite.internal.util.IgniteUtils.nullifyHomeDirectory; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; /** * Checks that node can be started without operations with undefined IGNITE_HOME. *

- * Notes: - * 1. The test is intentionally made independent from {@link GridCommonAbstractTest} stuff. - * 2. Do not replace native Java asserts with JUnit ones - test won't fall on TeamCity. + * The test is intentionally made independent from {@link GridCommonAbstractTest} stuff. */ public class GridStartupWithUndefinedIgniteHomeSelfTest { /** */ @@ -67,13 +67,13 @@ public void testStartStopWithUndefinedIgniteHome() { // it will initialize cached value which is forbidden to override. String igniteHome = IgniteSystemProperties.getString(IGNITE_HOME); - assert igniteHome != null; + assertNotNull(igniteHome); U.setIgniteHome(null); String igniteHome0 = U.getIgniteHome(); - assert igniteHome0 == null; + assertNull(igniteHome0); IgniteLogger log = new JavaLogger(); @@ -93,11 +93,11 @@ public void testStartStopWithUndefinedIgniteHome() { cfg.setConnectorConfiguration(null); try (Ignite ignite = G.start(cfg)) { - assert ignite != null; + assertNotNull(ignite); igniteHome0 = U.getIgniteHome(); - assert igniteHome0 == null; + assertNull(igniteHome0); X.println("Stopping grid " + ignite.cluster().localNode().id()); } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/util/IgniteUtilsWorkDirectoryTest.java b/modules/core/src/test/java/org/apache/ignite/internal/util/IgniteUtilsWorkDirectoryTest.java new file mode 100644 index 0000000000000..03092b4200bb0 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/util/IgniteUtilsWorkDirectoryTest.java @@ -0,0 +1,257 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.util; + +import java.io.BufferedReader; +import java.io.File; +import java.io.InputStreamReader; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteSystemProperties; +import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import static java.lang.String.join; +import static java.lang.System.clearProperty; +import static java.lang.System.getProperty; +import static java.lang.System.setProperty; +import static org.apache.ignite.internal.util.IgniteUtils.workDirectory; +import static org.apache.ignite.internal.util.typedef.internal.U.getIgniteHome; +import static org.apache.ignite.internal.util.typedef.internal.U.nullifyHomeDirectory; +import static org.apache.ignite.testframework.GridTestUtils.assertThrows; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +/** */ +public class IgniteUtilsWorkDirectoryTest { + /** */ + private static final String USER_WORK_DIR = join(File.separator, + getIgniteHome(), "userWorkDirTest"); + + /** */ + private static final String USER_IGNITE_HOME = join(File.separator, + getIgniteHome(), "userIgniteHomeTest"); + + /** */ + private static final String USER_DIR_PROPERTY_VALUE = join(File.separator, + new File(getIgniteHome()).getParent(), "userDirPropertyTest"); + + /** */ + private static String dfltIgniteHome; + + /** */ + private static 
String dfltUserDir; + + /** */ + @Before + public void setup() { + dfltIgniteHome = getProperty(IgniteSystemProperties.IGNITE_HOME); + dfltUserDir = getProperty("user.dir"); + clearProperty(IgniteSystemProperties.IGNITE_HOME); + clearProperty("user.dir"); + } + + /** */ + @After + public void tearDown() { + if (dfltIgniteHome != null) + setProperty(IgniteSystemProperties.IGNITE_HOME, dfltIgniteHome); + if (dfltUserDir != null) + setProperty("user.dir", dfltUserDir); + } + + /** + * The work directory specified by the user has the highest priority + */ + @Test + public void testWorkDirectory1() { + executeGenericTest(true, false, false, + USER_WORK_DIR); + } + + /** + * The work directory specified by the user has the highest priority + */ + @Test + public void testWorkDirectory2() { + executeGenericTest(true, false, true, + USER_WORK_DIR); + } + + /** + * The work directory specified by the user has the highest priority + */ + @Test + public void testWorkDirectory3() { + executeGenericTest(true, true, false, + USER_WORK_DIR); + } + + /** + * The work directory specified by the user has the highest priority + */ + @Test + public void testWorkDirectory4() { + executeGenericTest(true, true, true, + USER_WORK_DIR); + } + + /** + * The method set/clear "user.dir" system property and invoke + * {@link IgniteUtils#workDirectory(java.lang.String, java.lang.String)} + * with ignite work directory and ignite home directory provided by user + * + * @param userWorkDirFlag need or not to pass userWorkDir to {@link IgniteUtils#workDirectory(java.lang.String, java.lang.String)} + * @param userIgniteHomeFlag need or not to pass userIgniteHome to {@link IgniteUtils#workDirectory(java.lang.String, java.lang.String)} + * @param userDirPropFlag need to set or clear "user.dir" system property + * @param expWorkDir expected Ignite work directory that will be returned by {@link IgniteUtils#workDirectory(java.lang.String, java.lang.String)} + */ + private void executeGenericTest(boolean 
userWorkDirFlag, boolean userIgniteHomeFlag, + boolean userDirPropFlag, String expWorkDir) { + if (userDirPropFlag) + setProperty("user.dir", USER_DIR_PROPERTY_VALUE); + else + clearProperty("user.dir"); + + String userWorkDir = ""; + if (userWorkDirFlag) + userWorkDir = USER_WORK_DIR; + + nullifyHomeDirectory(); + clearProperty(IgniteSystemProperties.IGNITE_HOME); + String userIgniteHome = ""; + if (userIgniteHomeFlag) + userIgniteHome = USER_IGNITE_HOME; + + String actualWorkDir = null; + try { + actualWorkDir = workDirectory(userWorkDir, userIgniteHome); + } + catch (Throwable e) { + fail(); + } + + assertEquals(expWorkDir, actualWorkDir); + + } + + /** */ + @Test + public void testNonAbsolutePathWorkDir() { + genericPathExceptionTest("nonAbsolutePathTestDirectory", + "Work directory path must be absolute: nonAbsolutePathTestDirectory"); + } + + /** + * This test only makes sense on Linux platform. + */ + @Test + public void testDisabledWriteToWorkDir() { + String strDir = join(File.separator, USER_WORK_DIR, "CannotWriteTestDirectory"); + File dir = new File(strDir); + + if (dir.exists()) { + resetPermission(strDir); + boolean deleted = U.delete(dir); + assertTrue("cannot delete file", deleted); + } + + dir.mkdirs(); + + try { + executeCommand("chmod 444 " + strDir); + executeCommand("chattr +i " + strDir); + + genericPathExceptionTest(strDir, "Cannot write to work directory: " + strDir); + } + finally { + resetPermission(strDir); + } + } + + /** + * This test only makes sense on Linux platform. 
+ */ + @Test + public void testDisabledWorkDirCreation() { + String strDirParent = join(File.separator, USER_WORK_DIR, "CannotWriteTestDirectory"); + File dirParent = new File(strDirParent); + + if (dirParent.exists()) { + resetPermission(strDirParent); + boolean deleted = U.delete(dirParent); + assertTrue("cannot delete file", deleted); + } + dirParent.mkdirs(); + + try { + executeCommand("chmod 444 " + strDirParent); + executeCommand("chattr +i " + strDirParent); + + String strDir = join(File.separator, strDirParent, "newDirectory"); + + genericPathExceptionTest(strDir, "Work directory does not exist and cannot be created: " + strDir); + } + finally { + resetPermission(strDirParent); + } + } + + /** */ + private static void resetPermission(String dir) { + executeCommand("chattr -i " + dir); + executeCommand("chmod 777 " + dir); + } + + /** */ + private static void executeCommand(String cmd) { + X.println("Command to execute: " + cmd); + + try { + Process proc = Runtime.getRuntime().exec(cmd); + + BufferedReader stdInput = new BufferedReader(new + InputStreamReader(proc.getInputStream())); + BufferedReader stdError = new BufferedReader(new + InputStreamReader(proc.getErrorStream())); + + String s; + + while ((s = stdInput.readLine()) != null) + X.println("stdInput: " + s); + while ((s = stdError.readLine()) != null) + X.println("stdError:" + s); + } + catch (Exception e) { + fail(); + } + } + + /** */ + private void genericPathExceptionTest(String userWorkDir, String expMsg) { + assertThrows(null, + () -> workDirectory(userWorkDir, null), + IgniteCheckedException.class, + expMsg + ); + } + +} diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteKernalSelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteKernalSelfTestSuite.java index 4c388ff846800..432ee923bce19 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteKernalSelfTestSuite.java +++ 
b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteKernalSelfTestSuite.java @@ -58,6 +58,7 @@ import org.apache.ignite.internal.processors.cluster.GridUpdateNotifierSelfTest; import org.apache.ignite.internal.processors.port.GridPortProcessorSelfTest; import org.apache.ignite.internal.util.GridStartupWithUndefinedIgniteHomeSelfTest; +import org.apache.ignite.internal.util.IgniteUtilsWorkDirectoryTest; import org.apache.ignite.spi.communication.GridCacheMessageSelfTest; import org.junit.runner.RunWith; import org.junit.runners.Suite; @@ -91,6 +92,7 @@ GridPortProcessorSelfTest.class, GridHomePathSelfTest.class, GridStartupWithUndefinedIgniteHomeSelfTest.class, + IgniteUtilsWorkDirectoryTest.class, GridVersionSelfTest.class, GridListenActorSelfTest.class, GridNodeLocalSelfTest.class, From 2a25ad92b1d9dc11f77834766f654002803938ba Mon Sep 17 00:00:00 2001 From: Semyon Danilov Date: Tue, 10 Nov 2020 14:32:30 +0300 Subject: [PATCH 033/110] IGNITE-13683 Support of MVCC-enabled caches added to index validation feature. - Fixes #8432. 
Signed-off-by: Sergey Chugunov --- .../internal/processors/query/h2/H2Utils.java | 2 +- .../visor/verify/ValidateIndexesClosure.java | 265 ++++++++++++------ .../database/RebuildIndexWithMVCCTest.java | 39 +++ .../IgnitePdsWithIndexingTestSuite.java | 2 + 4 files changed, 215 insertions(+), 93 deletions(-) create mode 100644 modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/RebuildIndexWithMVCCTest.java diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Utils.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Utils.java index fcd27ce653ad2..233b27215721a 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Utils.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Utils.java @@ -114,7 +114,7 @@ */ public class H2Utils { /** Query context H2 variable name. */ - static final String QCTX_VARIABLE_NAME = "_IGNITE_QUERY_CONTEXT"; + public static final String QCTX_VARIABLE_NAME = "_IGNITE_QUERY_CONTEXT"; /** * The default precision for a char/varchar value. 
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/visor/verify/ValidateIndexesClosure.java b/modules/indexing/src/main/java/org/apache/ignite/internal/visor/verify/ValidateIndexesClosure.java index 139468715c835..d0107027f729f 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/visor/verify/ValidateIndexesClosure.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/visor/verify/ValidateIndexesClosure.java @@ -48,6 +48,9 @@ import org.apache.ignite.internal.processors.cache.KeyCacheObject; import org.apache.ignite.internal.processors.cache.PartitionUpdateCounter; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition; +import org.apache.ignite.internal.processors.cache.mvcc.MvccQueryTracker; +import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot; +import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils; import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager; import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; @@ -58,11 +61,14 @@ import org.apache.ignite.internal.processors.query.GridQueryProcessor; import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor; import org.apache.ignite.internal.processors.query.QueryTypeDescriptorImpl; +import org.apache.ignite.internal.processors.query.h2.ConnectionManager; +import org.apache.ignite.internal.processors.query.h2.H2Utils; import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing; import org.apache.ignite.internal.processors.query.h2.database.H2TreeIndexBase; import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table; import org.apache.ignite.internal.processors.query.h2.opt.H2CacheRow; +import 
org.apache.ignite.internal.processors.query.h2.opt.QueryContext; import org.apache.ignite.internal.util.lang.GridIterator; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.T2; @@ -76,6 +82,7 @@ import org.h2.engine.Session; import org.h2.index.Cursor; import org.h2.index.Index; +import org.h2.jdbc.JdbcConnection; import org.h2.message.DbException; import org.jetbrains.annotations.Nullable; @@ -519,141 +526,215 @@ private Map processPartition( PartitionUpdateCounter updateCntrBefore = updCntr == null ? null : updCntr.copy(); - GridIterator it = grpCtx.offheap().partitionIterator(part.id()); - partRes = new ValidateIndexesPartitionResult(); - boolean enoughIssues = false; + boolean hasMvcc = grpCtx.caches().stream().anyMatch(GridCacheContext::mvccEnabled); - GridQueryProcessor qryProcessor = ignite.context().query(); + if (hasMvcc) { + for (GridCacheContext context : grpCtx.caches()) { + try (Session session = mvccSession(context)) { + MvccSnapshot mvccSnapshot = null; - final boolean skipConditions = checkFirst > 0 || checkThrough > 0; - final boolean bothSkipConditions = checkFirst > 0 && checkThrough > 0; + boolean mvccEnabled = context.mvccEnabled(); - long current = 0; - long processedNumber = 0; + if (mvccEnabled) + mvccSnapshot = ((QueryContext) session.getVariable(H2Utils.QCTX_VARIABLE_NAME).getObject()).mvccSnapshot(); - while (it.hasNextX()) { - if (enoughIssues) - break; + GridIterator iterator = grpCtx.offheap().cachePartitionIterator( + context.cacheId(), + part.id(), + mvccSnapshot, + null + ); - CacheDataRow row = it.nextX(); + processPartIterator(grpCtx, partRes, session, iterator); + } + } + } + else + processPartIterator(grpCtx, partRes, null, grpCtx.offheap().partitionIterator(part.id())); - if (skipConditions) { - if (bothSkipConditions) { - if (processedNumber > checkFirst) + PartitionUpdateCounter updateCntrAfter = part.dataStore().partUpdateCounter(); + + if (updateCntrAfter != null && 
!updateCntrAfter.equals(updateCntrBefore)) { + throw new GridNotIdleException(GRID_NOT_IDLE_MSG + "[grpName=" + grpCtx.cacheOrGroupName() + + ", grpId=" + grpCtx.groupId() + ", partId=" + part.id() + "] changed during index validation " + + "[before=" + updateCntrBefore + ", after=" + updateCntrAfter + "]"); + } + } + catch (IgniteCheckedException e) { + error(log, "Failed to process partition [grpId=" + grpCtx.groupId() + + ", partId=" + part.id() + "]", e); + + return emptyMap(); + } + finally { + part.release(); + + printProgressOfIndexValidationIfNeeded(); + } + + PartitionKey partKey = new PartitionKey(grpCtx.groupId(), part.id(), grpCtx.cacheOrGroupName()); + + processedPartitions.incrementAndGet(); + + return Collections.singletonMap(partKey, partRes); + } + + /** + * Process partition iterator. + * + * @param grpCtx Cache group context. + * @param partRes Result object. + * @param session H2 session. + * @param it Partition iterator. + * @throws IgniteCheckedException + */ + private void processPartIterator( + CacheGroupContext grpCtx, + ValidateIndexesPartitionResult partRes, + Session session, + GridIterator it + ) throws IgniteCheckedException { + boolean enoughIssues = false; + + GridQueryProcessor qryProcessor = ignite.context().query(); + + final boolean skipConditions = checkFirst > 0 || checkThrough > 0; + final boolean bothSkipConditions = checkFirst > 0 && checkThrough > 0; + + long current = 0; + long processedNumber = 0; + + while (it.hasNextX()) { + if (enoughIssues) + break; + + CacheDataRow row = it.nextX(); + + if (skipConditions) { + if (bothSkipConditions) { + if (processedNumber > checkFirst) + break; + else if (current++ % checkThrough > 0) + continue; + else + processedNumber++; + } else { + if (checkFirst > 0) { + if (current++ > checkFirst) break; - else if (current++ % checkThrough > 0) + } else { + if (current++ % checkThrough > 0) continue; - else - processedNumber++; - } - else { - if (checkFirst > 0) { - if (current++ > 
checkFirst) - break; - } - else { - if (current++ % checkThrough > 0) - continue; - } } } + } - int cacheId = row.cacheId() == 0 ? grpCtx.groupId() : row.cacheId(); + int cacheId = row.cacheId() == 0 ? grpCtx.groupId() : row.cacheId(); - GridCacheContext cacheCtx = row.cacheId() == 0 ? - grpCtx.singleCacheContext() : grpCtx.shared().cacheContext(row.cacheId()); + GridCacheContext cacheCtx = row.cacheId() == 0 ? + grpCtx.singleCacheContext() : grpCtx.shared().cacheContext(row.cacheId()); - if (cacheCtx == null) - throw new IgniteException("Unknown cacheId of CacheDataRow: " + cacheId); + if (cacheCtx == null) + throw new IgniteException("Unknown cacheId of CacheDataRow: " + cacheId); - if (row.link() == 0L) { - String errMsg = "Invalid partition row, possibly deleted"; + if (row.link() == 0L) { + String errMsg = "Invalid partition row, possibly deleted"; - log.error(errMsg); + log.error(errMsg); - IndexValidationIssue is = new IndexValidationIssue(null, cacheCtx.name(), null, - new IgniteCheckedException(errMsg)); + IndexValidationIssue is = new IndexValidationIssue(null, cacheCtx.name(), null, + new IgniteCheckedException(errMsg)); - enoughIssues |= partRes.reportIssue(is); + enoughIssues |= partRes.reportIssue(is); - continue; - } + continue; + } - QueryTypeDescriptorImpl res = qryProcessor.typeByValue( - cacheCtx.name(), - cacheCtx.cacheObjectContext(), - row.key(), - row.value(), - true - ); + QueryTypeDescriptorImpl res = qryProcessor.typeByValue( + cacheCtx.name(), + cacheCtx.cacheObjectContext(), + row.key(), + row.value(), + true + ); - if (res == null) - continue; // Tolerate - (k, v) is just not indexed. + if (res == null) + continue; // Tolerate - (k, v) is just not indexed. 
- IgniteH2Indexing indexing = (IgniteH2Indexing)qryProcessor.getIndexing(); + IgniteH2Indexing indexing = (IgniteH2Indexing) qryProcessor.getIndexing(); - GridH2Table gridH2Tbl = indexing.schemaManager().dataTable(cacheCtx.name(), res.tableName()); + GridH2Table gridH2Tbl = indexing.schemaManager().dataTable(cacheCtx.name(), res.tableName()); - if (gridH2Tbl == null) - continue; // Tolerate - (k, v) is just not indexed. + if (gridH2Tbl == null) + continue; // Tolerate - (k, v) is just not indexed. - GridH2RowDescriptor gridH2RowDesc = gridH2Tbl.rowDescriptor(); + GridH2RowDescriptor gridH2RowDesc = gridH2Tbl.rowDescriptor(); - H2CacheRow h2Row = gridH2RowDesc.createRow(row); + H2CacheRow h2Row = gridH2RowDesc.createRow(row); - ArrayList indexes = gridH2Tbl.getIndexes(); + ArrayList indexes = gridH2Tbl.getIndexes(); - for (Index idx : indexes) { - if (!(idx instanceof H2TreeIndexBase)) - continue; + for (Index idx : indexes) { + if (!(idx instanceof H2TreeIndexBase)) + continue; - try { - Cursor cursor = idx.find((Session)null, h2Row, h2Row); + try { + Cursor cursor = idx.find(session, h2Row, h2Row); - if (cursor == null || !cursor.next()) - throw new IgniteCheckedException("Key is present in CacheDataTree, but can't be found in SQL index."); - } - catch (Throwable t) { - Object o = CacheObjectUtils.unwrapBinaryIfNeeded( + if (cursor == null || !cursor.next()) + throw new IgniteCheckedException("Key is present in CacheDataTree, but can't be found in SQL index."); + } catch (Throwable t) { + Object o = CacheObjectUtils.unwrapBinaryIfNeeded( grpCtx.cacheObjectContext(), row.key(), true, true); - IndexValidationIssue is = new IndexValidationIssue( + IndexValidationIssue is = new IndexValidationIssue( o.toString(), cacheCtx.name(), idx.getName(), t); - log.error("Failed to lookup key: " + is.toString(), t); + log.error("Failed to lookup key: " + is.toString(), t); - enoughIssues |= partRes.reportIssue(is); - } + enoughIssues |= partRes.reportIssue(is); } } + } + } - 
PartitionUpdateCounter updateCntrAfter = part.dataStore().partUpdateCounter(); + /** + * Get session with MVCC snapshot and QueryContext. + * + * @param cctx Cache context. + * @return Session with QueryContext and MVCC snapshot. + * @throws IgniteCheckedException If failed. + */ + private Session mvccSession(GridCacheContext cctx) throws IgniteCheckedException { + Session session = null; - if (updateCntrAfter != null && !updateCntrAfter.equals(updateCntrBefore)) { - throw new GridNotIdleException(GRID_NOT_IDLE_MSG + "[grpName=" + grpCtx.cacheOrGroupName() + - ", grpId=" + grpCtx.groupId() + ", partId=" + part.id() + "] changed during index validation " + - "[before=" + updateCntrBefore + ", after=" + updateCntrAfter + "]"); - } - } - catch (IgniteCheckedException e) { - error(log, "Failed to process partition [grpId=" + grpCtx.groupId() + - ", partId=" + part.id() + "]", e); + boolean mvccEnabled = cctx.mvccEnabled(); - return emptyMap(); - } - finally { - part.release(); + if (mvccEnabled) { + ConnectionManager connMgr = ((IgniteH2Indexing) ignite.context().query().getIndexing()).connections(); - printProgressOfIndexValidationIfNeeded(); - } + JdbcConnection connection = (JdbcConnection) connMgr.connection().connection(); - PartitionKey partKey = new PartitionKey(grpCtx.groupId(), part.id(), grpCtx.cacheOrGroupName()); + session = (Session) connection.getSession(); - processedPartitions.incrementAndGet(); + MvccQueryTracker tracker = MvccUtils.mvccTracker(cctx, true); - return Collections.singletonMap(partKey, partRes); + MvccSnapshot mvccSnapshot = tracker.snapshot(); + + final QueryContext qctx = new QueryContext( + 0, + cacheName -> null, + null, + mvccSnapshot, + null, + true + ); + + session.setVariable(H2Utils.QCTX_VARIABLE_NAME, new H2Utils.ValueRuntimeSimpleObject<>(qctx)); + } + return session; } /** @@ -713,8 +794,8 @@ private Map processIndex( Cursor cursor = null; - try { - cursor = idx.find((Session)null, null, null); + try (Session session = 
mvccSession(cacheCtxWithIdx.get1())) { + cursor = idx.find(session, null, null); if (cursor == null) throw new IgniteCheckedException("Can't iterate through index: " + idx); diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/RebuildIndexWithMVCCTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/RebuildIndexWithMVCCTest.java new file mode 100644 index 0000000000000..f5c2440acd98c --- /dev/null +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/RebuildIndexWithMVCCTest.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.database; + +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; + +/** + * Test index rebuild with MVCC enabled. 
+ */ +public class RebuildIndexWithMVCCTest extends RebuildIndexTest { + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { + final IgniteConfiguration configuration = super.getConfiguration(gridName); + + for (CacheConfiguration cacheConfiguration : configuration.getCacheConfiguration()) + cacheConfiguration.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT); + + return configuration; + } + +} diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingTestSuite.java index 1074af312ed60..f4a0ac95e7a97 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingTestSuite.java +++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingTestSuite.java @@ -34,6 +34,7 @@ import org.apache.ignite.internal.processors.database.IgniteTwoRegionsRebuildIndexTest; import org.apache.ignite.internal.processors.database.RebuildIndexTest; import org.apache.ignite.internal.processors.database.RebuildIndexWithHistoricalRebalanceTest; +import org.apache.ignite.internal.processors.database.RebuildIndexWithMVCCTest; import org.junit.runner.RunWith; import org.junit.runners.Suite; @@ -53,6 +54,7 @@ IndexingMultithreadedLoadContinuousRestartTest.class, LongDestroyDurableBackgroundTaskTest.class, RebuildIndexTest.class, + RebuildIndexWithMVCCTest.class, IgniteClusterSnapshotWithIndexesTest.class, ClientReconnectWithSqlTableConfiguredTest.class, MultipleParallelCacheDeleteDeadlockTest.class, From 071726f82e683d8f2cc402dda6cb3d9797697673 Mon Sep 17 00:00:00 2001 From: zstan Date: Tue, 10 Nov 2020 17:35:18 +0300 Subject: [PATCH 034/110] IGNITE-13686 Data structures smoke test with page compression - Fixes #8437. 
Signed-off-by: Ilya Kasnacheev --- .../internal/processors/cache/CacheCompressionManager.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheCompressionManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheCompressionManager.java index a3900c3bfa349..3bf2b8bbb48b8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheCompressionManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheCompressionManager.java @@ -25,6 +25,7 @@ import org.apache.ignite.configuration.DiskPageCompression; import org.apache.ignite.internal.pagemem.store.PageStore; import org.apache.ignite.internal.processors.compress.CompressionProcessor; +import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.U; import static org.apache.ignite.internal.processors.compress.CompressionProcessor.checkCompressionLevelBounds; @@ -45,7 +46,9 @@ public class CacheCompressionManager extends GridCacheManagerAdapter { /** {@inheritDoc} */ @Override protected void start0() throws IgniteCheckedException { - if (cctx.kernalContext().clientNode()) { + CacheConfiguration cfg = cctx.config(); + + if (cctx.kernalContext().clientNode() || !CU.isPersistentCache(cfg, cctx.gridConfig().getDataStorageConfiguration())) { diskPageCompression = DiskPageCompression.DISABLED; return; @@ -53,8 +56,6 @@ public class CacheCompressionManager extends GridCacheManagerAdapter { compressProc = cctx.kernalContext().compress(); - CacheConfiguration cfg = cctx.config(); - diskPageCompression = cctx.kernalContext().config().isClientMode() ? 
null : cfg.getDiskPageCompression(); if (diskPageCompression != DiskPageCompression.DISABLED) { From 1734bc10ab9cd968d96ca2590af3d959ae7a835e Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Tue, 10 Nov 2020 23:33:14 +0300 Subject: [PATCH 035/110] IGNITE-12126 ODBC SQLNumResultCols() works after SQLPrepare() --- .../odbc/odbc/OdbcMessageParser.java | 42 ++++-- .../odbc/OdbcQueryGetParamsMetaRequest.java | 34 +---- .../odbc/OdbcQueryGetQueryMetaRequest.java | 62 +++++++++ .../OdbcQueryGetResultsetMetaRequest.java | 31 +++++ .../odbc/OdbcQueryGetResultsetMetaResult.java | 42 ++++++ .../processors/odbc/odbc/OdbcRequest.java | 3 + .../odbc/odbc/OdbcRequestHandler.java | 39 +++++- .../processors/odbc/odbc/OdbcResultSet.java | 37 ++--- .../processors/odbc/odbc/OdbcUtils.java | 24 ++++ .../cpp/odbc-test/include/odbc_test_suite.h | 8 ++ .../cpp/odbc-test/src/meta_queries_test.cpp | 128 ++++++++++++++++++ .../cpp/odbc-test/src/odbc_test_suite.cpp | 9 +- .../odbc/diagnostic/diagnosable_adapter.h | 9 +- .../cpp/odbc/include/ignite/odbc/message.h | 73 +++++++++- .../include/ignite/odbc/query/batch_query.h | 4 +- .../ignite/odbc/query/column_metadata_query.h | 6 +- .../include/ignite/odbc/query/data_query.h | 36 ++++- .../ignite/odbc/query/foreign_keys_query.h | 4 +- .../ignite/odbc/query/internal_query.h | 10 +- .../ignite/odbc/query/primary_keys_query.h | 4 +- .../odbc/include/ignite/odbc/query/query.h | 8 +- .../ignite/odbc/query/special_columns_query.h | 4 +- .../ignite/odbc/query/streaming_query.h | 4 +- .../ignite/odbc/query/table_metadata_query.h | 4 +- .../ignite/odbc/query/type_info_query.h | 4 +- .../cpp/odbc/include/ignite/odbc/statement.h | 2 +- modules/platforms/cpp/odbc/src/connection.cpp | 9 +- .../src/diagnostic/diagnosable_adapter.cpp | 5 + modules/platforms/cpp/odbc/src/message.cpp | 35 +++++ .../cpp/odbc/src/query/batch_query.cpp | 8 +- .../odbc/src/query/column_metadata_query.cpp | 8 +- .../cpp/odbc/src/query/data_query.cpp | 100 +++++++++++--- 
.../cpp/odbc/src/query/foreign_keys_query.cpp | 6 +- .../cpp/odbc/src/query/primary_keys_query.cpp | 6 +- .../odbc/src/query/special_columns_query.cpp | 6 +- .../cpp/odbc/src/query/streaming_query.cpp | 8 +- .../odbc/src/query/table_metadata_query.cpp | 8 +- .../cpp/odbc/src/query/type_info_query.cpp | 6 +- modules/platforms/cpp/odbc/src/statement.cpp | 29 ++-- .../odbc/src/streaming/streaming_context.cpp | 2 +- 40 files changed, 687 insertions(+), 180 deletions(-) create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetQueryMetaRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetResultsetMetaRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetResultsetMetaResult.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java index c9b779f237bc3..33f1c4b464d1b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java @@ -211,6 +211,15 @@ public OdbcMessageParser(GridKernalContext ctx, ClientListenerProtocolVersion ve break; } + case OdbcRequest.META_RESULTSET: { + String schema = reader.readString(); + String sqlQuery = reader.readString(); + + res = new OdbcQueryGetResultsetMetaRequest(schema, sqlQuery); + + break; + } + case OdbcRequest.MORE_RESULTS: { long queryId = reader.readLong(); int pageSize = reader.readInt(); @@ -281,12 +290,7 @@ else if (res0 instanceof OdbcQueryExecuteResult) { Collection metas = res.columnsMetadata(); - assert metas != null; - - writer.writeInt(metas.size()); - - for (OdbcColumnMeta meta : metas) - meta.write(writer); + writeResultsetMeta(writer, 
metas); writeAffectedRows(writer, res.affectedRows()); } @@ -378,12 +382,7 @@ else if (res0 instanceof OdbcQueryGetColumnsMetaResult) { Collection columnsMeta = res.meta(); - assert columnsMeta != null; - - writer.writeInt(columnsMeta.size()); - - for (OdbcColumnMeta columnMeta : columnsMeta) - columnMeta.write(writer); + writeResultsetMeta(writer, columnsMeta); } else if (res0 instanceof OdbcQueryGetTablesMetaResult) { OdbcQueryGetTablesMetaResult res = (OdbcQueryGetTablesMetaResult) res0; @@ -404,12 +403,31 @@ else if (res0 instanceof OdbcQueryGetParamsMetaResult) { SqlListenerUtils.writeObject(writer, typeIds, true); } + else if (res0 instanceof OdbcQueryGetResultsetMetaResult) { + OdbcQueryGetResultsetMetaResult res = (OdbcQueryGetResultsetMetaResult) res0; + + writeResultsetMeta(writer, res.columnsMetadata()); + } else assert false : "Should not reach here."; return new ClientMessage(writer.array()); } + /** + * Write resultset columns metadata in a unified way. + * @param writer Writer. 
+ * @param meta Metadata + */ + private static void writeResultsetMeta(BinaryWriterExImpl writer, Collection meta) { + assert meta != null; + + writer.writeInt(meta.size()); + + for (OdbcColumnMeta columnMeta : meta) + columnMeta.write(writer); + } + /** {@inheritDoc} */ @Override public int decodeCommandType(ClientMessage msg) { assert msg != null; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetParamsMetaRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetParamsMetaRequest.java index 0ae29161b3846..072b4fc51c120 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetParamsMetaRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetParamsMetaRequest.java @@ -17,45 +17,15 @@ package org.apache.ignite.internal.processors.odbc.odbc; -import org.apache.ignite.internal.util.typedef.internal.S; - /** * ODBC query get params meta request. */ -public class OdbcQueryGetParamsMetaRequest extends OdbcRequest { - /** Schema. */ - private final String schema; - - /** Query. */ - private final String query; - +public class OdbcQueryGetParamsMetaRequest extends OdbcQueryGetQueryMetaRequest { /** * @param schema Schema. * @param query SQL Query. */ public OdbcQueryGetParamsMetaRequest(String schema, String query) { - super(META_PARAMS); - - this.schema = schema; - this.query = query; - } - - /** - * @return SQL Query. - */ - public String query() { - return query; - } - - /** - * @return Schema name. 
- */ - public String schema() { - return schema; - } - - /** {@inheritDoc} */ - @Override public String toString() { - return S.toString(OdbcQueryGetParamsMetaRequest.class, this); + super(META_PARAMS, schema, query); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetQueryMetaRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetQueryMetaRequest.java new file mode 100644 index 0000000000000..d606b14a1ab9b --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetQueryMetaRequest.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.odbc.odbc; + +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * ODBC query get query meta request. + */ +public class OdbcQueryGetQueryMetaRequest extends OdbcRequest { + /** Schema. */ + protected final String schema; + + /** Query. */ + protected final String query; + + /** + * @param cmd Command code. + * @param schema Schema. + * @param query SQL Query. 
+ */ + public OdbcQueryGetQueryMetaRequest(byte cmd, String schema, String query) { + super(cmd); + + this.schema = schema; + this.query = query; + } + + /** + * @return SQL Query. + */ + public String query() { + return query; + } + + /** + * @return Schema name. + */ + public String schema() { + return schema; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(OdbcQueryGetQueryMetaRequest.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetResultsetMetaRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetResultsetMetaRequest.java new file mode 100644 index 0000000000000..99a3263e19f3c --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetResultsetMetaRequest.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.odbc.odbc; + +/** + * ODBC query get params meta request. + */ +public class OdbcQueryGetResultsetMetaRequest extends OdbcQueryGetQueryMetaRequest { + /** + * @param schema Schema. + * @param query SQL Query. 
+ */ + public OdbcQueryGetResultsetMetaRequest(String schema, String query) { + super(META_RESULTSET, schema, query); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetResultsetMetaResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetResultsetMetaResult.java new file mode 100644 index 0000000000000..0cf0b24059f16 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryGetResultsetMetaResult.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.odbc.odbc; + +import java.util.Collection; + +/** + * SQL listener query resultset metadata result. + */ +public class OdbcQueryGetResultsetMetaResult { + /** Resultset columns metadata. */ + private final Collection columnsMetadata; + + /** + * @param columnsMetadata Columns metadata. + */ + public OdbcQueryGetResultsetMetaResult(Collection columnsMetadata) { + this.columnsMetadata = columnsMetadata; + } + + /** + * @return Columns metadata. 
+ */ + public Collection columnsMetadata() { + return columnsMetadata; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequest.java index 5b02cfe9924b5..bc4d179d8b6c3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequest.java @@ -50,6 +50,9 @@ public class OdbcRequest extends ClientListenerRequestNoId { /** Process ordered streaming batch. */ public static final byte STREAMING_BATCH = 10; + /** Get resultset columns meta. */ + public static final byte META_RESULTSET = 11; + /** Command. */ private final byte cmd; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java index 9bf2c12f8801a..a21caebf7e179 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java @@ -28,6 +28,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import javax.cache.configuration.Factory; + import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; @@ -48,6 +49,7 @@ import org.apache.ignite.internal.processors.odbc.ClientListenerResponseSender; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcParameterMeta; import org.apache.ignite.internal.processors.odbc.odbc.escape.OdbcEscapeUtils; +import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata; import org.apache.ignite.internal.processors.query.GridQueryProperty; import 
org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor; import org.apache.ignite.internal.processors.query.IgniteSQLException; @@ -69,6 +71,7 @@ import static org.apache.ignite.internal.processors.odbc.odbc.OdbcRequest.META_COLS; import static org.apache.ignite.internal.processors.odbc.odbc.OdbcRequest.META_PARAMS; +import static org.apache.ignite.internal.processors.odbc.odbc.OdbcRequest.META_RESULTSET; import static org.apache.ignite.internal.processors.odbc.odbc.OdbcRequest.META_TBLS; import static org.apache.ignite.internal.processors.odbc.odbc.OdbcRequest.MORE_RESULTS; import static org.apache.ignite.internal.processors.odbc.odbc.OdbcRequest.QRY_CLOSE; @@ -254,6 +257,9 @@ public ClientListenerResponse doHandle(OdbcRequest req) { case META_PARAMS: return getParamsMeta((OdbcQueryGetParamsMetaRequest)req); + case META_RESULTSET: + return getResultMeta((OdbcQueryGetResultsetMetaRequest)req); + case MORE_RESULTS: return moreResults((OdbcQueryMoreResultsRequest)req); } @@ -411,7 +417,7 @@ private ClientListenerResponse executeQuery(OdbcQueryExecuteRequest req) { if (set == null) fieldsMeta = new ArrayList<>(); else { - fieldsMeta = results.currentResultSet().fieldsMeta(); + fieldsMeta = set.fieldsMeta(); if (log.isDebugEnabled()) { for (OdbcColumnMeta meta : fieldsMeta) @@ -747,7 +753,8 @@ private ClientListenerResponse getTablesMeta(OdbcQueryGetTablesMetaRequest req) } /** - * {@link OdbcQueryGetParamsMetaRequest} command handler. + * {@link OdbcQueryGetQueryMetaRequest} command handler. + * Returns metadata for the parameters to be set. * * @param req Get params metadata request. * @return Response. @@ -780,6 +787,34 @@ private ClientListenerResponse getParamsMeta(OdbcQueryGetParamsMetaRequest req) } } + /** + * {@link OdbcQueryGetQueryMetaRequest} command handler. + * Returns metadata for a columns of the result set. + * + * @param req Get resultset metadata request. + * @return Response. 
+ */ + private ClientListenerResponse getResultMeta(OdbcQueryGetResultsetMetaRequest req) { + try { + String sql = OdbcEscapeUtils.parse(req.query()); + String schema = OdbcUtils.prepareSchema(req.schema()); + + SqlFieldsQueryEx qry = makeQuery(schema, sql); + + List columns = ctx.query().getIndexing().resultMetaData(schema, qry); + Collection meta = OdbcUtils.convertMetadata(columns, ver); + + OdbcQueryGetResultsetMetaResult res = new OdbcQueryGetResultsetMetaResult(meta); + + return new OdbcResponse(res); + } + catch (Exception e) { + U.error(log, "Failed to get resultset metadata [reqId=" + req.requestId() + ", req=" + req + ']', e); + + return exceptionToResult(e); + } + } + /** * {@link OdbcQueryMoreResultsRequest} command handler. * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcResultSet.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcResultSet.java index b319366b7071c..1eba63df1cdc3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcResultSet.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcResultSet.java @@ -24,7 +24,6 @@ import org.apache.ignite.cache.query.FieldsQueryCursor; import org.apache.ignite.internal.processors.cache.QueryCursorImpl; import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion; -import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata; /** * Represents single result set. @@ -39,6 +38,9 @@ public class OdbcResultSet { /** Client version. */ private ClientListenerProtocolVersion ver; + /** Result columns metadata. */ + private Collection meta; + /** * Constructor. * @param cursor Result set cursor. 
@@ -50,10 +52,14 @@ public class OdbcResultSet { this.cursor = (QueryCursorImpl>)cursor; this.ver = ver; - if (this.cursor.isQuery()) + if (this.cursor.isQuery()) { iter = this.cursor.iterator(); - else + meta = OdbcUtils.convertMetadata(this.cursor.fieldsMeta(), ver); + } + else { iter = null; + meta = new ArrayList<>(); + } } /** @@ -67,10 +73,7 @@ public boolean hasUnfetchedRows() { * @return Fields metadata of the current result set. */ public Collection fieldsMeta() { - if (!cursor.isQuery()) - return new ArrayList<>(); - - return convertMetadata(cursor.fieldsMeta(), ver); + return meta; } /** @@ -89,24 +92,4 @@ public List fetch(int maxSize) { return items; } - - /** - * Convert metadata in collection from {@link GridQueryFieldMetadata} to - * {@link OdbcColumnMeta}. - * - * @param meta Internal query field metadata. - * @param ver Client version. - * @return Odbc query field metadata. - */ - private static Collection convertMetadata(Collection meta, - ClientListenerProtocolVersion ver) { - List res = new ArrayList<>(); - - if (meta != null) { - for (GridQueryFieldMetadata info : meta) - res.add(new OdbcColumnMeta(info, ver)); - } - - return res; - } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcUtils.java index a687b9697997a..110f71c0dba74 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcUtils.java @@ -17,14 +17,18 @@ package org.apache.ignite.internal.processors.odbc.odbc; +import java.util.ArrayList; +import java.util.Collection; import java.util.Iterator; import java.util.List; import org.apache.ignite.IgniteException; import org.apache.ignite.cache.query.QueryCursor; import org.apache.ignite.internal.processors.cache.QueryCursorImpl; import 
org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; +import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion; import org.apache.ignite.internal.processors.odbc.SqlListenerDataTypes; import org.apache.ignite.internal.processors.odbc.SqlListenerUtils; +import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata; import org.apache.ignite.internal.processors.query.IgniteSQLException; import org.apache.ignite.internal.processors.query.QueryUtils; import org.apache.ignite.internal.util.typedef.F; @@ -242,4 +246,24 @@ public static long rowsAffected(QueryCursor> qryCur) { return 0; } + + /** + * Convert metadata in collection from {@link GridQueryFieldMetadata} to + * {@link OdbcColumnMeta}. + * + * @param meta Internal query field metadata. + * @param ver Client version. + * @return Odbc query field metadata. + */ + public static Collection convertMetadata(Collection meta, + ClientListenerProtocolVersion ver) { + List res = new ArrayList<>(); + + if (meta != null) { + for (GridQueryFieldMetadata info : meta) + res.add(new OdbcColumnMeta(info, ver)); + } + + return res; + } } diff --git a/modules/platforms/cpp/odbc-test/include/odbc_test_suite.h b/modules/platforms/cpp/odbc-test/include/odbc_test_suite.h index 2381130385af0..89133a98f1b83 100644 --- a/modules/platforms/cpp/odbc-test/include/odbc_test_suite.h +++ b/modules/platforms/cpp/odbc-test/include/odbc_test_suite.h @@ -177,6 +177,14 @@ namespace ignite */ SQLRETURN ExecQuery(const std::string& qry); + /** + * Prepares SQL query. + * + * @param qry Query. + * @return Result. + */ + SQLRETURN PrepareQuery(const std::string& qry); + /** ODBC Environment. 
*/ SQLHENV env; diff --git a/modules/platforms/cpp/odbc-test/src/meta_queries_test.cpp b/modules/platforms/cpp/odbc-test/src/meta_queries_test.cpp index bd6b31a47b4a1..733bf078d9ed6 100644 --- a/modules/platforms/cpp/odbc-test/src/meta_queries_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/meta_queries_test.cpp @@ -216,6 +216,101 @@ BOOST_AUTO_TEST_CASE(TestColAttributesColumnScale) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); } +BOOST_AUTO_TEST_CASE(TestColAttributesColumnLengthPrepare) +{ + StartAdditionalNode("Node2"); + + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + InsertTestStrings(1); + + SQLCHAR req[] = "select strField from TestType"; + SQLPrepare(stmt, req, SQL_NTS); + + SQLLEN intVal; + SQLCHAR strBuf[1024]; + SQLSMALLINT strLen; + + SQLRETURN ret = SQLColAttribute(stmt, 1, SQL_COLUMN_LENGTH, strBuf, sizeof(strBuf), &strLen, &intVal); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(intVal, 60); + + ret = SQLExecute(stmt); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLColAttribute(stmt, 1, SQL_COLUMN_LENGTH, strBuf, sizeof(strBuf), &strLen, &intVal); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(intVal, 60); +} + +BOOST_AUTO_TEST_CASE(TestColAttributesColumnPresicionPrepare) +{ + StartAdditionalNode("Node2"); + + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + InsertTestStrings(1); + + SQLCHAR req[] = "select strField from TestType"; + SQLPrepare(stmt, req, SQL_NTS); + + SQLLEN intVal; + SQLCHAR strBuf[1024]; + SQLSMALLINT strLen; + + SQLRETURN ret = SQLColAttribute(stmt, 1, SQL_COLUMN_PRECISION, strBuf, sizeof(strBuf), &strLen, &intVal); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(intVal, 60); + + ret = SQLExecute(stmt); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = 
SQLColAttribute(stmt, 1, SQL_COLUMN_PRECISION, strBuf, sizeof(strBuf), &strLen, &intVal); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(intVal, 60); +} + +BOOST_AUTO_TEST_CASE(TestColAttributesColumnScalePrepare) +{ + StartAdditionalNode("Node2"); + + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + InsertTestStrings(1); + + SQLCHAR req[] = "select strField from TestType"; + SQLPrepare(stmt, req, SQL_NTS); + + SQLLEN intVal; + SQLCHAR strBuf[1024]; + SQLSMALLINT strLen; + + SQLRETURN ret = SQLColAttribute(stmt, 1, SQL_COLUMN_SCALE, strBuf, sizeof(strBuf), &strLen, &intVal); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + ret = SQLExecute(stmt); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLColAttribute(stmt, 1, SQL_COLUMN_SCALE, strBuf, sizeof(strBuf), &strLen, &intVal); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); +} + BOOST_AUTO_TEST_CASE(TestGetDataWithGetTypeInfo) { Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); @@ -457,4 +552,37 @@ BOOST_AUTO_TEST_CASE(TestDdlColumnsMetaEscaped) BOOST_REQUIRE_EQUAL(ret, SQL_NO_DATA); } +BOOST_AUTO_TEST_CASE(TestSQLNumResultColsAfterSQLPrepare) +{ + StartAdditionalNode("Node2"); + + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=PUBLIC"); + + SQLRETURN ret = ExecQuery("create table TestSqlPrepare(id int primary key, test1 varchar, test2 long, test3 varchar)"); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLFreeStmt(stmt, SQL_CLOSE); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = PrepareQuery("select * from PUBLIC.TestSqlPrepare"); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + SQLSMALLINT columnCount = 0; + + ret = SQLNumResultCols(stmt, &columnCount); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + BOOST_CHECK_EQUAL(columnCount, 4); + + ret = 
SQLExecute(stmt); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + columnCount = 0; + + ret = SQLNumResultCols(stmt, &columnCount); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + BOOST_CHECK_EQUAL(columnCount, 4); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/modules/platforms/cpp/odbc-test/src/odbc_test_suite.cpp b/modules/platforms/cpp/odbc-test/src/odbc_test_suite.cpp index 68ea3164106e4..4ebec9f55598f 100644 --- a/modules/platforms/cpp/odbc-test/src/odbc_test_suite.cpp +++ b/modules/platforms/cpp/odbc-test/src/odbc_test_suite.cpp @@ -227,7 +227,14 @@ namespace ignite { std::vector sql = MakeQuery(qry); - return SQLExecDirect(stmt, &sql[0], static_cast(sql.size())); + return SQLExecDirect(stmt, sql.data(), static_cast(sql.size())); + } + + SQLRETURN OdbcTestSuite::PrepareQuery(const std::string& qry) + { + std::vector sql = MakeQuery(qry); + + return SQLPrepare(stmt, sql.data(), static_cast(sql.size())); } void OdbcTestSuite::InsertTestStrings(int recordsNum, bool merge) diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/diagnostic/diagnosable_adapter.h b/modules/platforms/cpp/odbc/include/ignite/odbc/diagnostic/diagnosable_adapter.h index d8cfeb71dd848..255cd421bbcb7 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/diagnostic/diagnosable_adapter.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/diagnostic/diagnosable_adapter.h @@ -103,6 +103,13 @@ namespace ignite */ virtual void AddStatusRecord(SqlState::Type sqlState, const std::string& message); + /** + * Add new status record with SqlState::SHY000_GENERAL_ERROR state. + * + * @param message Message. + */ + virtual void AddStatusRecord(const std::string& message); + /** * Add new status record. 
* @@ -129,4 +136,4 @@ namespace ignite } } -#endif //_IGNITE_ODBC_DIAGNOSTIC_DIAGNOSABLE_ADAPTER \ No newline at end of file +#endif //_IGNITE_ODBC_DIAGNOSTIC_DIAGNOSABLE_ADAPTER diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/message.h b/modules/platforms/cpp/odbc/include/ignite/odbc/message.h index 79e78c3e25c82..aa5a2c0d01e09 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/message.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/message.h @@ -71,7 +71,9 @@ namespace ignite QUERY_MORE_RESULTS = 9, - STREAMING_BATCH = 10 + STREAMING_BATCH = 10, + + META_RESULTSET = 11 }; }; @@ -309,6 +311,39 @@ namespace ignite std::string column; }; + /** + * Query get result set metadata request. + */ + class QueryGetResultsetMetaRequest + { + public: + /** + * Constructor. + * + * @param schema Schema. + * @param sqlQuery SQL query itself. + */ + QueryGetResultsetMetaRequest(const std::string& schema, const std::string& sqlQuery); + + /** + * Destructor. + */ + ~QueryGetResultsetMetaRequest(); + + /** + * Write request using provided writer. + * @param writer Writer. + */ + void Write(impl::binary::BinaryWriterImpl& writer, const ProtocolVersion&) const; + + private: + /** Schema. */ + std::string schema; + + /** SQL query. */ + std::string sqlQuery; + }; + /** * Query get tables metadata request. */ @@ -885,6 +920,42 @@ namespace ignite meta::ColumnMetaVector meta; }; + /** + * Query get resultset metadata response. + */ + class QueryGetResultsetMetaResponse : public Response + { + public: + /** + * Constructor. + */ + QueryGetResultsetMetaResponse(); + + /** + * Destructor. + */ + virtual ~QueryGetResultsetMetaResponse(); + + /** + * Get column metadata. + * @return Column metadata. + */ + const meta::ColumnMetaVector& GetMeta() const + { + return meta; + } + + private: + /** + * Read response using provided reader. + * @param reader Reader. 
+ */ + virtual void ReadOnSuccess(impl::binary::BinaryReaderImpl& reader, const ProtocolVersion&); + + /** Columns metadata. */ + meta::ColumnMetaVector meta; + }; + /** * Query get table metadata response. */ diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/batch_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/batch_query.h index af319abf398df..05603d63a1a77 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/batch_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/batch_query.h @@ -46,7 +46,7 @@ namespace ignite * @param params SQL params. * @param timeout Timeout in seconds. */ - BatchQuery(diagnostic::Diagnosable& diag, Connection& connection, const std::string& sql, + BatchQuery(diagnostic::DiagnosableAdapter& diag, Connection& connection, const std::string& sql, const app::ParameterSet& params, int32_t& timeout); /** @@ -66,7 +66,7 @@ namespace ignite * * @return Column metadata. */ - virtual const meta::ColumnMetaVector& GetMeta() const; + virtual const meta::ColumnMetaVector* GetMeta(); /** * Fetch next result row to application buffers. diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/column_metadata_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/column_metadata_query.h index 354393e9cd6fe..fe760a9b31ca7 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/column_metadata_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/column_metadata_query.h @@ -45,7 +45,7 @@ namespace ignite * @param table Table search pattern. * @param column Column search pattern. */ - ColumnMetadataQuery(diagnostic::Diagnosable& diag, + ColumnMetadataQuery(diagnostic::DiagnosableAdapter& diag, Connection& connection, const std::string& schema, const std::string& table, const std::string& column); @@ -66,7 +66,7 @@ namespace ignite * * @return Column metadata. 
*/ - virtual const meta::ColumnMetaVector& GetMeta() const; + virtual const meta::ColumnMetaVector* GetMeta(); /** * Fetch next result row to application buffers. @@ -153,4 +153,4 @@ namespace ignite } } -#endif //_IGNITE_ODBC_QUERY_COLUMN_METADATA_QUERY \ No newline at end of file +#endif //_IGNITE_ODBC_QUERY_COLUMN_METADATA_QUERY diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/data_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/data_query.h index 8c630c4d2912f..ea3ef244ce4c7 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/data_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/data_query.h @@ -46,7 +46,7 @@ namespace ignite * @param params SQL params. * @param timeout Timeout. */ - DataQuery(diagnostic::Diagnosable& diag, Connection& connection, const std::string& sql, + DataQuery(diagnostic::DiagnosableAdapter& diag, Connection& connection, const std::string& sql, const app::ParameterSet& params, int32_t& timeout); /** @@ -66,7 +66,7 @@ namespace ignite * * @return Column metadata. */ - virtual const meta::ColumnMetaVector& GetMeta() const; + virtual const meta::ColumnMetaVector* GetMeta(); /** * Fetch next result row to application buffers. @@ -133,6 +133,14 @@ namespace ignite */ bool IsClosedRemotely() const; + /** + * Make query prepare request and use response to set internal + * state. + * + * @return Result. + */ + SqlResult::Type MakeRequestPrepare(); + /** * Make query execute request and use response to set internal * state. @@ -162,6 +170,13 @@ namespace ignite */ SqlResult::Type MakeRequestMoreResults(); + /** + * Make result set metadata request. + * + * @return Result. + */ + SqlResult::Type MakeRequestResultsetMeta(); + /** * Process column conversion operation result. * @@ -171,7 +186,17 @@ namespace ignite * @return General SQL result. 
*/ SqlResult::Type ProcessConversionResult(app::ConversionResult::Type convRes, int32_t rowIdx, - int32_t columnIdx); + int32_t columnIdx);; + + /** + * Process column conversion operation result. + * + * @param convRes Conversion result. + * @param rowIdx Row index. + * @param columnIdx Column index. + * @return General SQL result. + */ + void SetResultsetMeta(const meta::ColumnMetaVector& value); /** * Close query. @@ -189,7 +214,10 @@ namespace ignite /** Parameter bindings. */ const app::ParameterSet& params; - /** Columns metadata. */ + /** Result set metadata is available */ + bool resultMetaAvailable; + + /** Result set metadata. */ meta::ColumnMetaVector resultMeta; /** Cursor. */ diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/foreign_keys_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/foreign_keys_query.h index 81e8093680e18..307decf89492e 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/foreign_keys_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/foreign_keys_query.h @@ -45,7 +45,7 @@ namespace ignite * @param foreignSchema Foreign key schema name. * @param foreignTable Foreign key table name. */ - ForeignKeysQuery(diagnostic::Diagnosable& diag, Connection& connection, + ForeignKeysQuery(diagnostic::DiagnosableAdapter& diag, Connection& connection, const std::string& primaryCatalog, const std::string& primarySchema, const std::string& primaryTable, const std::string& foreignCatalog, const std::string& foreignSchema, const std::string& foreignTable); @@ -67,7 +67,7 @@ namespace ignite * * @return Column metadata. */ - virtual const meta::ColumnMetaVector& GetMeta() const; + virtual const meta::ColumnMetaVector* GetMeta(); /** * Fetch next result row to application buffers. 
diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/internal_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/internal_query.h index 450420b850fc3..d6979997e85e8 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/internal_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/internal_query.h @@ -47,7 +47,7 @@ namespace ignite * @param sql SQL query. * @param cmd Parsed command. */ - InternalQuery(diagnostic::Diagnosable& diag, const std::string& sql, std::auto_ptr cmd) : + InternalQuery(diagnostic::DiagnosableAdapter& diag, const std::string& sql, std::auto_ptr cmd) : Query(diag, QueryType::INTERNAL), sql(sql), cmd(cmd) @@ -70,7 +70,7 @@ namespace ignite */ virtual SqlResult::Type Execute() { - diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, "Internal error."); + diag.AddStatusRecord("Internal error."); return SqlResult::AI_ERROR; } @@ -118,11 +118,9 @@ namespace ignite * * @return Column metadata. */ - virtual const meta::ColumnMetaVector& GetMeta() const + virtual const meta::ColumnMetaVector* GetMeta() { - static const meta::ColumnMetaVector empty; - - return empty; + return 0; } /** diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/primary_keys_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/primary_keys_query.h index 3650fcf36adda..51c233be5ab86 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/primary_keys_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/primary_keys_query.h @@ -43,7 +43,7 @@ namespace ignite * @param schema Schema name. * @param table Table name. */ - PrimaryKeysQuery(diagnostic::Diagnosable& diag, + PrimaryKeysQuery(diagnostic::DiagnosableAdapter& diag, Connection& connection, const std::string& catalog, const std::string& schema, const std::string& table); @@ -64,7 +64,7 @@ namespace ignite * * @return Column metadata. 
*/ - virtual const meta::ColumnMetaVector& GetMeta() const; + virtual const meta::ColumnMetaVector* GetMeta(); /** * Fetch next result row to application buffers. diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/query.h index 22503a1410f08..4e64a21b0c509 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/query.h @@ -22,7 +22,7 @@ #include -#include "ignite/odbc/diagnostic/diagnosable.h" +#include "ignite/odbc/diagnostic/diagnosable_adapter.h" #include "ignite/odbc/meta/column_meta.h" #include "ignite/odbc/common_types.h" #include "ignite/odbc/row.h" @@ -120,7 +120,7 @@ namespace ignite * * @return Column metadata. */ - virtual const meta::ColumnMetaVector& GetMeta() const = 0; + virtual const meta::ColumnMetaVector* GetMeta() = 0; /** * Check if data is available. @@ -157,7 +157,7 @@ namespace ignite /** * Constructor. */ - Query(diagnostic::Diagnosable& diag, QueryType::Type type) : + Query(diagnostic::DiagnosableAdapter& diag, QueryType::Type type) : diag(diag), type(type) { @@ -165,7 +165,7 @@ namespace ignite } /** Diagnostics collector. */ - diagnostic::Diagnosable& diag; + diagnostic::DiagnosableAdapter& diag; /** Query type. */ QueryType::Type type; diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/special_columns_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/special_columns_query.h index 919febfd86c7a..d6b244bcb9290 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/special_columns_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/special_columns_query.h @@ -44,7 +44,7 @@ namespace ignite * @param nullable Determines whether to return special columns * that can have a NULL value. 
*/ - SpecialColumnsQuery(diagnostic::Diagnosable& diag, int16_t type, + SpecialColumnsQuery(diagnostic::DiagnosableAdapter& diag, int16_t type, const std::string& catalog, const std::string& schema, const std::string& table, int16_t scope, int16_t nullable); @@ -89,7 +89,7 @@ namespace ignite * * @return Column metadata. */ - virtual const meta::ColumnMetaVector& GetMeta() const; + virtual const meta::ColumnMetaVector* GetMeta(); /** * Check if data is available. diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/streaming_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/streaming_query.h index cf87e80966cb9..285d3fb9fc2b8 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/streaming_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/streaming_query.h @@ -44,7 +44,7 @@ namespace ignite * @param params SQL params. */ StreamingQuery( - diagnostic::Diagnosable& diag, + diagnostic::DiagnosableAdapter& diag, Connection& connection, const app::ParameterSet& params); @@ -65,7 +65,7 @@ namespace ignite * * @return Column metadata. */ - virtual const meta::ColumnMetaVector& GetMeta() const; + virtual const meta::ColumnMetaVector* GetMeta(); /** * Fetch next result row to application buffers. diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/table_metadata_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/table_metadata_query.h index 776b747b65acc..fa9b720aeb7eb 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/table_metadata_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/table_metadata_query.h @@ -46,7 +46,7 @@ namespace ignite * @param table Table search pattern. * @param tableType Table type search pattern. 
*/ - TableMetadataQuery(diagnostic::Diagnosable& diag, Connection& connection, + TableMetadataQuery(diagnostic::DiagnosableAdapter& diag, Connection& connection, const std::string& catalog, const std::string& schema, const std::string& table, const std::string& tableType); @@ -67,7 +67,7 @@ namespace ignite * * @return Column metadata. */ - virtual const meta::ColumnMetaVector& GetMeta() const; + virtual const meta::ColumnMetaVector* GetMeta(); /** * Fetch next result row to application buffers. diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/type_info_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/type_info_query.h index 3f2e76c7b4aa2..b9638208ef750 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/type_info_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/type_info_query.h @@ -38,7 +38,7 @@ namespace ignite * @param diag Diagnostics collector. * @param sqlType SQL type. */ - TypeInfoQuery(diagnostic::Diagnosable& diag, int16_t sqlType); + TypeInfoQuery(diagnostic::DiagnosableAdapter& diag, int16_t sqlType); /** * Destructor. @@ -57,7 +57,7 @@ namespace ignite * * @return Column metadata. */ - virtual const meta::ColumnMetaVector& GetMeta() const; + virtual const meta::ColumnMetaVector* GetMeta(); /** * Fetch next result row to application buffers. diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/statement.h b/modules/platforms/cpp/odbc/include/ignite/odbc/statement.h index 56eea6c89fc94..19d29a93c8b3a 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/statement.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/statement.h @@ -255,7 +255,7 @@ namespace ignite * * @return Column metadata. */ - const meta::ColumnMetaVector* GetMeta() const; + const meta::ColumnMetaVector* GetMeta(); /** * Check if data is available. 
diff --git a/modules/platforms/cpp/odbc/src/connection.cpp b/modules/platforms/cpp/odbc/src/connection.cpp index a8a67f0b0037c..38f1bf3e4cc34 100644 --- a/modules/platforms/cpp/odbc/src/connection.cpp +++ b/modules/platforms/cpp/odbc/src/connection.cpp @@ -149,8 +149,7 @@ namespace ignite { LOG_MSG("Can not load OpenSSL library: " << err.GetText()); - AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, - "Can not load OpenSSL library (did you set OPENSSL_HOME environment variable?)."); + AddStatusRecord("Can not load OpenSSL library (did you set OPENSSL_HOME environment variable?)"); return SqlResult::AI_ERROR; } @@ -176,7 +175,7 @@ namespace ignite if (!config.IsHostSet() && config.IsAddressesSet() && config.GetAddresses().empty()) { - AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, "No valid address to connect."); + AddStatusRecord("No valid address to connect."); return SqlResult::AI_ERROR; } @@ -430,7 +429,7 @@ namespace ignite } catch (const IgniteError& err) { - AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, err.GetText()); + AddStatusRecord(err.GetText()); return SqlResult::AI_ERROR; } @@ -471,7 +470,7 @@ namespace ignite } catch (const IgniteError& err) { - AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, err.GetText()); + AddStatusRecord(err.GetText()); return SqlResult::AI_ERROR; } diff --git a/modules/platforms/cpp/odbc/src/diagnostic/diagnosable_adapter.cpp b/modules/platforms/cpp/odbc/src/diagnostic/diagnosable_adapter.cpp index 6de071615c9c6..eea7649086d07 100644 --- a/modules/platforms/cpp/odbc/src/diagnostic/diagnosable_adapter.cpp +++ b/modules/platforms/cpp/odbc/src/diagnostic/diagnosable_adapter.cpp @@ -48,6 +48,11 @@ namespace ignite AddStatusRecord(sqlState, message, 0, 0); } + void DiagnosableAdapter::AddStatusRecord(const std::string& message) + { + AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, message); + } + void DiagnosableAdapter::AddStatusRecord(const OdbcError& err) { AddStatusRecord(err.GetStatus(), err.GetErrorMessage(), 0, 0); diff 
--git a/modules/platforms/cpp/odbc/src/message.cpp b/modules/platforms/cpp/odbc/src/message.cpp index 00e0ae2d24cfe..946529de3d664 100644 --- a/modules/platforms/cpp/odbc/src/message.cpp +++ b/modules/platforms/cpp/odbc/src/message.cpp @@ -224,6 +224,26 @@ namespace ignite writer.WriteObject(column); } + QueryGetResultsetMetaRequest::QueryGetResultsetMetaRequest(const std::string &schema, const std::string &sqlQuery) : + schema(schema), + sqlQuery(sqlQuery) + { + // No-op. + } + + QueryGetResultsetMetaRequest::~QueryGetResultsetMetaRequest() + { + // No-op. + } + + void QueryGetResultsetMetaRequest::Write(impl::binary::BinaryWriterImpl &writer, const ProtocolVersion &) const + { + writer.WriteInt8(RequestType::META_RESULTSET); + + writer.WriteObject(schema); + writer.WriteObject(sqlQuery); + } + QueryGetTablesMetaRequest::QueryGetTablesMetaRequest(const std::string& catalog, const std::string& schema, const std::string& table, const std::string& tableTypes): catalog(catalog), @@ -478,6 +498,21 @@ namespace ignite meta::ReadColumnMetaVector(reader, meta, ver); } + QueryGetResultsetMetaResponse::QueryGetResultsetMetaResponse() + { + // No-op. + } + + QueryGetResultsetMetaResponse::~QueryGetResultsetMetaResponse() + { + // No-op. + } + + void QueryGetResultsetMetaResponse::ReadOnSuccess(impl::binary::BinaryReaderImpl &reader, const ProtocolVersion& ver) + { + meta::ReadColumnMetaVector(reader, meta, ver); + } + QueryGetTablesMetaResponse::QueryGetTablesMetaResponse() { // No-op. 
diff --git a/modules/platforms/cpp/odbc/src/query/batch_query.cpp b/modules/platforms/cpp/odbc/src/query/batch_query.cpp index c687672c2d9e9..8b26e0d59e39b 100644 --- a/modules/platforms/cpp/odbc/src/query/batch_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/batch_query.cpp @@ -27,7 +27,7 @@ namespace ignite { namespace query { - BatchQuery::BatchQuery(diagnostic::Diagnosable& diag, Connection& connection, const std::string& sql, + BatchQuery::BatchQuery(diagnostic::DiagnosableAdapter& diag, Connection& connection, const std::string& sql, const app::ParameterSet& params, int32_t& timeout) : Query(diag, QueryType::BATCH), connection(connection), @@ -75,9 +75,9 @@ namespace ignite return res; } - const meta::ColumnMetaVector& BatchQuery::GetMeta() const + const meta::ColumnMetaVector* BatchQuery::GetMeta() { - return resultMeta; + return &resultMeta; } SqlResult::Type BatchQuery::FetchNextRow(app::ColumnBindingMap&) @@ -170,7 +170,7 @@ namespace ignite } catch (const IgniteError& err) { - diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, err.GetText()); + diag.AddStatusRecord(err.GetText()); return SqlResult::AI_ERROR; } diff --git a/modules/platforms/cpp/odbc/src/query/column_metadata_query.cpp b/modules/platforms/cpp/odbc/src/query/column_metadata_query.cpp index 649fa3f9e84a1..fda92fe34a590 100644 --- a/modules/platforms/cpp/odbc/src/query/column_metadata_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/column_metadata_query.cpp @@ -75,7 +75,7 @@ namespace ignite { namespace query { - ColumnMetadataQuery::ColumnMetadataQuery(diagnostic::Diagnosable& diag, + ColumnMetadataQuery::ColumnMetadataQuery(diagnostic::DiagnosableAdapter& diag, Connection& connection, const std::string& schema, const std::string& table, const std::string& column) : Query(diag, QueryType::COLUMN_METADATA), @@ -135,9 +135,9 @@ namespace ignite return result; } - const meta::ColumnMetaVector& ColumnMetadataQuery::GetMeta() const + const meta::ColumnMetaVector* 
ColumnMetadataQuery::GetMeta() { - return columnsMeta; + return &columnsMeta; } SqlResult::Type ColumnMetadataQuery::FetchNextRow(app::ColumnBindingMap & columnBindings) @@ -311,7 +311,7 @@ namespace ignite } catch (const IgniteError& err) { - diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, err.GetText()); + diag.AddStatusRecord(err.GetText()); return SqlResult::AI_ERROR; } diff --git a/modules/platforms/cpp/odbc/src/query/data_query.cpp b/modules/platforms/cpp/odbc/src/query/data_query.cpp index 4ba354d0cecae..a93e5a379c180 100644 --- a/modules/platforms/cpp/odbc/src/query/data_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/data_query.cpp @@ -28,12 +28,13 @@ namespace ignite { namespace query { - DataQuery::DataQuery(diagnostic::Diagnosable& diag, Connection& connection, const std::string& sql, + DataQuery::DataQuery(diagnostic::DiagnosableAdapter& diag, Connection& connection, const std::string& sql, const app::ParameterSet& params, int32_t& timeout) : Query(diag, QueryType::DATA), connection(connection), sql(sql), params(params), + resultMetaAvailable(false), resultMeta(), cursor(), rowsAffected(), @@ -57,9 +58,17 @@ namespace ignite return MakeRequestExecute(); } - const meta::ColumnMetaVector & DataQuery::GetMeta() const + const meta::ColumnMetaVector* DataQuery::GetMeta() { - return resultMeta; + if (!resultMetaAvailable) + { + MakeRequestResultsetMeta(); + + if (!resultMetaAvailable) + return 0; + } + + return &resultMeta; } SqlResult::Type DataQuery::FetchNextRow(app::ColumnBindingMap& columnBindings) @@ -96,7 +105,7 @@ namespace ignite if (!row) { - diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, "Unknown error."); + diag.AddStatusRecord("Unknown error."); return SqlResult::AI_ERROR; } @@ -164,8 +173,6 @@ namespace ignite { cursor.reset(); - resultMeta.clear(); - rowsAffectedIdx = 0; rowsAffected.clear(); @@ -242,7 +249,7 @@ namespace ignite } catch (const IgniteError& err) { - diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, 
err.GetText()); + diag.AddStatusRecord(err.GetText()); return SqlResult::AI_ERROR; } @@ -256,21 +263,12 @@ namespace ignite return SqlResult::AI_ERROR; } - resultMeta.assign(rsp.GetMeta().begin(), rsp.GetMeta().end()); - rowsAffected = rsp.GetAffectedRows(); + SetResultsetMeta(rsp.GetMeta()); LOG_MSG("Query id: " << rsp.GetQueryId()); LOG_MSG("Affected Rows list size: " << rowsAffected.size()); - for (size_t i = 0; i < resultMeta.size(); ++i) - { - LOG_MSG("\n[" << i << "] SchemaName: " << resultMeta[i].GetSchemaName() - << "\n[" << i << "] TypeName: " << resultMeta[i].GetTableName() - << "\n[" << i << "] ColumnName: " << resultMeta[i].GetColumnName() - << "\n[" << i << "] ColumnType: " << static_cast(resultMeta[i].GetDataType())); - } - cursor.reset(new Cursor(rsp.GetQueryId())); rowsAffectedIdx = 0; @@ -295,7 +293,7 @@ namespace ignite } catch (const IgniteError& err) { - diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, err.GetText()); + diag.AddStatusRecord(err.GetText()); return SqlResult::AI_ERROR; } @@ -333,7 +331,7 @@ namespace ignite } catch (const IgniteError& err) { - diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, err.GetText()); + diag.AddStatusRecord(err.GetText()); return SqlResult::AI_ERROR; } @@ -374,7 +372,7 @@ namespace ignite } catch (const IgniteError& err) { - diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, err.GetText()); + diag.AddStatusRecord(err.GetText()); return SqlResult::AI_ERROR; } @@ -397,6 +395,53 @@ namespace ignite return SqlResult::AI_SUCCESS; } + SqlResult::Type DataQuery::MakeRequestResultsetMeta() + { + const std::string& schema = connection.GetSchema(); + + QueryGetResultsetMetaRequest req(schema, sql); + QueryGetResultsetMetaResponse rsp; + + try + { + // Setting connection timeout to 1 second more than query timeout itself. + int32_t connectionTimeout = timeout ? 
timeout + 1 : 0; + bool success = connection.SyncMessage(req, rsp, connectionTimeout); + + if (!success) + { + diag.AddStatusRecord(SqlState::SHYT00_TIMEOUT_EXPIRED, "Query timeout expired"); + + return SqlResult::AI_ERROR; + } + } + catch (const OdbcError& err) + { + diag.AddStatusRecord(err); + + return SqlResult::AI_ERROR; + } + catch (const IgniteError& err) + { + diag.AddStatusRecord(err.GetText()); + + return SqlResult::AI_ERROR; + } + + if (rsp.GetStatus() != ResponseStatus::SUCCESS) + { + LOG_MSG("Error: " << rsp.GetError()); + + diag.AddStatusRecord(ResponseStatusToSqlState(rsp.GetStatus()), rsp.GetError()); + + return SqlResult::AI_ERROR; + } + + SetResultsetMeta(rsp.GetMeta()); + + return SqlResult::AI_SUCCESS; + } + SqlResult::Type DataQuery::ProcessConversionResult(app::ConversionResult::Type convRes, int32_t rowIdx, int32_t columnIdx) { @@ -456,6 +501,21 @@ namespace ignite return SqlResult::AI_ERROR; } + + void DataQuery::SetResultsetMeta(const meta::ColumnMetaVector& value) + { + resultMeta.assign(value.begin(), value.end()); + resultMetaAvailable = true; + + for (size_t i = 0; i < resultMeta.size(); ++i) + { + meta::ColumnMeta& meta = resultMeta.at(i); + LOG_MSG("\n[" << i << "] SchemaName: " << meta.GetSchemaName() + << "\n[" << i << "] TypeName: " << meta.GetTableName() + << "\n[" << i << "] ColumnName: " << meta.GetColumnName() + << "\n[" << i << "] ColumnType: " << static_cast(meta.GetDataType())); + } + } } } } diff --git a/modules/platforms/cpp/odbc/src/query/foreign_keys_query.cpp b/modules/platforms/cpp/odbc/src/query/foreign_keys_query.cpp index 47ed89b2bec21..2520b2006cde0 100644 --- a/modules/platforms/cpp/odbc/src/query/foreign_keys_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/foreign_keys_query.cpp @@ -28,7 +28,7 @@ namespace ignite { namespace query { - ForeignKeysQuery::ForeignKeysQuery(diagnostic::Diagnosable& diag, Connection& connection, + ForeignKeysQuery::ForeignKeysQuery(diagnostic::DiagnosableAdapter& diag, Connection& 
connection, const std::string& primaryCatalog, const std::string& primarySchema, const std::string& primaryTable, const std::string& foreignCatalog, const std::string& foreignSchema, const std::string& foreignTable) : @@ -81,9 +81,9 @@ namespace ignite return SqlResult::AI_SUCCESS; } - const meta::ColumnMetaVector & ForeignKeysQuery::GetMeta() const + const meta::ColumnMetaVector* ForeignKeysQuery::GetMeta() { - return columnsMeta; + return &columnsMeta; } SqlResult::Type ForeignKeysQuery::FetchNextRow(app::ColumnBindingMap&) diff --git a/modules/platforms/cpp/odbc/src/query/primary_keys_query.cpp b/modules/platforms/cpp/odbc/src/query/primary_keys_query.cpp index bb6f90872969c..d179f3831e89a 100644 --- a/modules/platforms/cpp/odbc/src/query/primary_keys_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/primary_keys_query.cpp @@ -55,7 +55,7 @@ namespace ignite { namespace query { - PrimaryKeysQuery::PrimaryKeysQuery(diagnostic::Diagnosable& diag, + PrimaryKeysQuery::PrimaryKeysQuery(diagnostic::DiagnosableAdapter& diag, Connection& connection, const std::string& catalog, const std::string& schema, const std::string& table) : Query(diag, QueryType::PRIMARY_KEYS), @@ -103,9 +103,9 @@ namespace ignite return SqlResult::AI_SUCCESS; } - const meta::ColumnMetaVector & PrimaryKeysQuery::GetMeta() const + const meta::ColumnMetaVector* PrimaryKeysQuery::GetMeta() { - return columnsMeta; + return &columnsMeta; } SqlResult::Type PrimaryKeysQuery::FetchNextRow(app::ColumnBindingMap & columnBindings) diff --git a/modules/platforms/cpp/odbc/src/query/special_columns_query.cpp b/modules/platforms/cpp/odbc/src/query/special_columns_query.cpp index 3f176f275511d..88eca7169a60d 100644 --- a/modules/platforms/cpp/odbc/src/query/special_columns_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/special_columns_query.cpp @@ -26,7 +26,7 @@ namespace ignite { namespace query { - SpecialColumnsQuery::SpecialColumnsQuery(diagnostic::Diagnosable& diag, + 
SpecialColumnsQuery::SpecialColumnsQuery(diagnostic::DiagnosableAdapter& diag, int16_t type, const std::string& catalog, const std::string& schema, const std::string& table, int16_t scope, int16_t nullable) : Query(diag, QueryType::SPECIAL_COLUMNS), @@ -71,9 +71,9 @@ namespace ignite return SqlResult::AI_SUCCESS; } - const meta::ColumnMetaVector& SpecialColumnsQuery::GetMeta() const + const meta::ColumnMetaVector* SpecialColumnsQuery::GetMeta() { - return columnsMeta; + return &columnsMeta; } SqlResult::Type SpecialColumnsQuery::FetchNextRow(app::ColumnBindingMap&) diff --git a/modules/platforms/cpp/odbc/src/query/streaming_query.cpp b/modules/platforms/cpp/odbc/src/query/streaming_query.cpp index dd9302f772e9a..4bc19b166421d 100644 --- a/modules/platforms/cpp/odbc/src/query/streaming_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/streaming_query.cpp @@ -29,7 +29,7 @@ namespace ignite namespace query { StreamingQuery::StreamingQuery( - diagnostic::Diagnosable& diag, + diagnostic::DiagnosableAdapter& diag, Connection& connection, const app::ParameterSet& params) : Query(diag, QueryType::STREAMING), @@ -49,11 +49,9 @@ namespace ignite return connection.GetStreamingContext().Execute(sql, params); } - const meta::ColumnMetaVector& StreamingQuery::GetMeta() const + const meta::ColumnMetaVector* StreamingQuery::GetMeta() { - static meta::ColumnMetaVector empty; - - return empty; + return 0; } SqlResult::Type StreamingQuery::FetchNextRow(app::ColumnBindingMap&) diff --git a/modules/platforms/cpp/odbc/src/query/table_metadata_query.cpp b/modules/platforms/cpp/odbc/src/query/table_metadata_query.cpp index 53fe49d2ccf4b..d76576d418efb 100644 --- a/modules/platforms/cpp/odbc/src/query/table_metadata_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/table_metadata_query.cpp @@ -54,7 +54,7 @@ namespace ignite { namespace query { - TableMetadataQuery::TableMetadataQuery(diagnostic::Diagnosable& diag, + 
TableMetadataQuery::TableMetadataQuery(diagnostic::DiagnosableAdapter& diag, Connection& connection, const std::string& catalog,const std::string& schema, const std::string& table, const std::string& tableType) : Query(diag, QueryType::TABLE_METADATA), @@ -108,9 +108,9 @@ namespace ignite return result; } - const meta::ColumnMetaVector& TableMetadataQuery::GetMeta() const + const meta::ColumnMetaVector* TableMetadataQuery::GetMeta() { - return columnsMeta; + return &columnsMeta; } SqlResult::Type TableMetadataQuery::FetchNextRow(app::ColumnBindingMap& columnBindings) @@ -237,7 +237,7 @@ namespace ignite } catch (const IgniteError& err) { - diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, err.GetText()); + diag.AddStatusRecord(err.GetText()); return SqlResult::AI_ERROR; } diff --git a/modules/platforms/cpp/odbc/src/query/type_info_query.cpp b/modules/platforms/cpp/odbc/src/query/type_info_query.cpp index c47161bebc6ae..3cc5787a3e548 100644 --- a/modules/platforms/cpp/odbc/src/query/type_info_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/type_info_query.cpp @@ -118,7 +118,7 @@ namespace ignite { namespace query { - TypeInfoQuery::TypeInfoQuery(diagnostic::Diagnosable& diag, int16_t sqlType) : + TypeInfoQuery::TypeInfoQuery(diagnostic::DiagnosableAdapter& diag, int16_t sqlType) : Query(diag, QueryType::TYPE_INFO), columnsMeta(), executed(false), @@ -191,9 +191,9 @@ namespace ignite return SqlResult::AI_SUCCESS; } - const meta::ColumnMetaVector & TypeInfoQuery::GetMeta() const + const meta::ColumnMetaVector* TypeInfoQuery::GetMeta() { - return columnsMeta; + return &columnsMeta; } SqlResult::Type TypeInfoQuery::FetchNextRow(app::ColumnBindingMap & columnBindings) diff --git a/modules/platforms/cpp/odbc/src/statement.cpp b/modules/platforms/cpp/odbc/src/statement.cpp index 6e1a9c5feb3bd..d88d44907e389 100644 --- a/modules/platforms/cpp/odbc/src/statement.cpp +++ b/modules/platforms/cpp/odbc/src/statement.cpp @@ -137,11 +137,7 @@ namespace ignite const 
meta::ColumnMetaVector* meta = GetMeta(); if (!meta) - { - AddStatusRecord(SqlState::SHY010_SEQUENCE_ERROR, "Query is not executed."); - return SqlResult::AI_ERROR; - } res = static_cast(meta->size()); @@ -358,7 +354,7 @@ namespace ignite { if (!buf) { - AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, "Data buffer is NULL."); + AddStatusRecord("Data buffer is NULL."); return SqlResult::AI_ERROR; } @@ -889,7 +885,7 @@ namespace ignite { case SQL_DROP: { - AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, "Deprecated, call SQLFreeHandle instead"); + AddStatusRecord("Deprecated, call SQLFreeHandle instead"); return SqlResult::AI_ERROR; } @@ -1005,12 +1001,16 @@ namespace ignite return res; } - const meta::ColumnMetaVector* Statement::GetMeta() const + const meta::ColumnMetaVector* Statement::GetMeta() { if (!currentQuery.get()) + { + AddStatusRecord(SqlState::SHY010_SEQUENCE_ERROR, "Query is not executed."); + return 0; + } - return ¤tQuery->GetMeta(); + return currentQuery->GetMeta(); } bool Statement::DataAvailable() const @@ -1042,20 +1042,15 @@ namespace ignite strbuf, buflen, reslen, numbuf)); } - SqlResult::Type Statement::InternalGetColumnAttribute(uint16_t colIdx, - uint16_t attrId, char* strbuf, int16_t buflen, int16_t* reslen, - SqlLen* numbuf) + SqlResult::Type Statement::InternalGetColumnAttribute(uint16_t colIdx, uint16_t attrId, char* strbuf, + int16_t buflen, int16_t* reslen, SqlLen* numbuf) { const meta::ColumnMetaVector *meta = GetMeta(); if (!meta) - { - AddStatusRecord(SqlState::SHY010_SEQUENCE_ERROR, "Query is not executed."); - return SqlResult::AI_ERROR; - } - if (colIdx > meta->size() + 1 || colIdx < 1) + if (colIdx > meta->size() || colIdx < 1) { AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, "Column index is out of range.", 0, colIdx); @@ -1309,7 +1304,7 @@ namespace ignite } catch (const IgniteError& err) { - AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, err.GetText()); + AddStatusRecord(err.GetText()); return SqlResult::AI_ERROR; } diff 
--git a/modules/platforms/cpp/odbc/src/streaming/streaming_context.cpp b/modules/platforms/cpp/odbc/src/streaming/streaming_context.cpp index b9ee94ad2cd0a..54d5f88124799 100644 --- a/modules/platforms/cpp/odbc/src/streaming/streaming_context.cpp +++ b/modules/platforms/cpp/odbc/src/streaming/streaming_context.cpp @@ -123,7 +123,7 @@ namespace ignite } catch (const IgniteError& err) { - connection->AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, err.GetText()); + connection->AddStatusRecord(err.GetText()); return SqlResult::AI_ERROR; } From b293cd11259488fdd14d7abbc25f57567398b8f2 Mon Sep 17 00:00:00 2001 From: Pavel Tupitsyn Date: Wed, 11 Nov 2020 08:43:36 +0300 Subject: [PATCH 036/110] IGNITE-13357 .NET: Add IncludeExpired to ContinuousQuery and ContinuousQueryClient * Add `IncludeExpired` flag to thin and thick continuous query APIs: `ContinuousQuery`, `ContinuousQueryClient` * Thin client protocol not affected: the flag is already supported --- .../platform/cache/PlatformCache.java | 3 +- .../cache/query/PlatformContinuousQuery.java | 3 +- .../query/PlatformContinuousQueryImpl.java | 4 +- .../cpp/core/src/impl/cache/cache_impl.cpp | 1 + .../Continuous/ContinuousQueryAbstractTest.cs | 75 +++++++++++++++++ .../Query/Continuous/ContinuousQueryTest.cs | 5 +- .../Client/Cache/ContinuousQueryTest.cs | 71 ++++++++++++++++ .../Apache.Ignite.Core.csproj | 1 + .../Cache/Event/CacheEntryEventType.cs | 7 +- .../Cache/Query/Continuous/ContinuousQuery.cs | 11 +++ .../Query/Continuous/ContinuousQueryClient.cs | 11 +++ .../Impl/Cache/Event/CacheEntryExpireEvent.cs | 80 +++++++++++++++++++ .../Continuous/ContinuousQueryHandleImpl.cs | 1 + .../Query/Continuous/ContinuousQueryUtils.cs | 2 + .../Impl/Client/Cache/CacheClient.cs | 2 +- .../Apache.Ignite.DotNetCore.sln.DotSettings | 1 + 16 files changed, 270 insertions(+), 8 deletions(-) create mode 100644 modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Event/CacheEntryExpireEvent.cs diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/PlatformCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/PlatformCache.java index c67d8e01e561a..2ae500cc57e55 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/PlatformCache.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/PlatformCache.java @@ -977,6 +977,7 @@ private IgniteFuture loadCacheAsync0(BinaryRawReaderEx reader, boolean loc case OP_QRY_CONTINUOUS: { long ptr = reader.readLong(); boolean loc = reader.readBoolean(); + boolean includeExpired = reader.readBoolean(); boolean hasFilter = reader.readBoolean(); Object filter = reader.readObjectDetached(); int bufSize = reader.readInt(); @@ -986,7 +987,7 @@ private IgniteFuture loadCacheAsync0(BinaryRawReaderEx reader, boolean loc PlatformContinuousQuery qry = platformCtx.createContinuousQuery(ptr, hasFilter, filter); - qry.start(cache, loc, bufSize, timeInterval, autoUnsubscribe, initQry); + qry.start(cache, loc, bufSize, timeInterval, autoUnsubscribe, initQry, includeExpired); return new PlatformContinuousQueryProxy(platformCtx, qry); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/query/PlatformContinuousQuery.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/query/PlatformContinuousQuery.java index 2916da2e49cc2..4714760994d31 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/query/PlatformContinuousQuery.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/query/PlatformContinuousQuery.java @@ -36,10 +36,11 @@ public interface PlatformContinuousQuery extends CacheEntryUpdatedListener, Plat * @param timeInterval Time interval. * @param autoUnsubscribe Auto-unsubscribe flag. * @param initialQry Initial query. 
+ * @param includeExpired Whether to include expired events. * @throws org.apache.ignite.IgniteCheckedException If failed. */ public void start(IgniteCacheProxy cache, boolean loc, int bufSize, long timeInterval, boolean autoUnsubscribe, - Query initialQry) throws IgniteCheckedException; + Query initialQry, boolean includeExpired) throws IgniteCheckedException; /** * Close continuous query. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/query/PlatformContinuousQueryImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/query/PlatformContinuousQueryImpl.java index f9269a4f7957a..3adc296457e11 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/query/PlatformContinuousQueryImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/query/PlatformContinuousQueryImpl.java @@ -128,11 +128,12 @@ private static CacheEntryEventFilter getJavaFilter(Object filter, GridKernalCont * @param bufSize Buffer size. * @param timeInterval Time interval. * @param autoUnsubscribe Auto-unsubscribe flag. + * @param includeExpired Whether to include expired events. * @param initialQry Initial query. 
*/ @SuppressWarnings("unchecked") @Override public void start(IgniteCacheProxy cache, boolean loc, int bufSize, long timeInterval, - boolean autoUnsubscribe, Query initialQry) throws IgniteCheckedException { + boolean autoUnsubscribe, Query initialQry, boolean includeExpired) throws IgniteCheckedException { lock.writeLock().lock(); try { @@ -148,6 +149,7 @@ private static CacheEntryEventFilter getJavaFilter(Object filter, GridKernalCont qry.setTimeInterval(timeInterval); qry.setAutoUnsubscribe(autoUnsubscribe); qry.setInitialQuery(initialQry); + qry.setIncludeExpired(includeExpired); cursor = cache.query(qry.setLocal(loc)); diff --git a/modules/platforms/cpp/core/src/impl/cache/cache_impl.cpp b/modules/platforms/cpp/core/src/impl/cache/cache_impl.cpp index 1994a0799304f..e0bddd05326fd 100644 --- a/modules/platforms/cpp/core/src/impl/cache/cache_impl.cpp +++ b/modules/platforms/cpp/core/src/impl/cache/cache_impl.cpp @@ -450,6 +450,7 @@ namespace ignite rawWriter.WriteInt64(handle); rawWriter.WriteBool(qry0.GetLocal()); + rawWriter.WriteBool(false); // IncludeExpired event::CacheEntryEventFilterHolderBase& filterOp = qry0.GetFilterHolder(); diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Continuous/ContinuousQueryAbstractTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Continuous/ContinuousQueryAbstractTest.cs index e3b1fd15fb8d8..dd347cdcd24ab 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Continuous/ContinuousQueryAbstractTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Continuous/ContinuousQueryAbstractTest.cs @@ -29,6 +29,7 @@ namespace Apache.Ignite.Core.Tests.Cache.Query.Continuous using Apache.Ignite.Core.Binary; using Apache.Ignite.Core.Cache; using Apache.Ignite.Core.Cache.Event; + using Apache.Ignite.Core.Cache.Expiry; using Apache.Ignite.Core.Cache.Query; using Apache.Ignite.Core.Cache.Query.Continuous; using Apache.Ignite.Core.Common; @@ -266,6 +267,78 
@@ public void TestCallbackInjection() } } + /// + /// Tests that is false by default + /// and expiration events are not delivered. + /// + /// - Create a cache with expiry policy + /// - Start a continuous query with default settings + /// - Check that Created events are delivered, but Expired events are not + /// + [Test] + public void TestIncludeExpiredIsFalseByDefaultAndExpiredEventsAreSkipped() + { + var cache = cache1.WithExpiryPolicy(new ExpiryPolicy(TimeSpan.FromMilliseconds(100), null, null)); + var cb = new Listener(); + + var qry = new ContinuousQuery(cb); + Assert.IsFalse(qry.IncludeExpired); + + using (cache.QueryContinuous(qry)) + { + cache[1] = Entry(1); + + TestUtils.WaitForTrueCondition(() => !cache.ContainsKey(1)); + + cache[2] = Entry(2); + } + + var events = CB_EVTS.SelectMany(e => e.entries).ToList(); + Assert.AreEqual(2, events.Count); + + Assert.AreEqual(CacheEntryEventType.Created, events[0].EventType); + Assert.AreEqual(CacheEntryEventType.Created, events[1].EventType); + } + + /// + /// Tests that enabling causes + /// events to be delivered. 
+ /// + /// - Create a cache with expiry policy + /// - Start a continuous query with set to true + /// - Check that Expired events are delivered + /// + [Test] + public void TestExpiredEventsAreDeliveredWhenIncludeExpiredIsTrue() + { + var cache = cache1.WithExpiryPolicy(new ExpiryPolicy(TimeSpan.FromMilliseconds(100), null, null)); + var cb = new Listener(); + + var qry = new ContinuousQuery(cb) + { + IncludeExpired = true + }; + + using (cache.QueryContinuous(qry)) + { + cache[1] = Entry(2); + + TestUtils.WaitForTrueCondition(() => CB_EVTS.Count == 2, 5000); + } + + var events = CB_EVTS.SelectMany(e => e.entries).ToList(); + + Assert.AreEqual(2, events.Count); + Assert.AreEqual(CacheEntryEventType.Created, events[0].EventType); + Assert.AreEqual(CacheEntryEventType.Expired, events[1].EventType); + + Assert.IsTrue(events[1].HasValue); + Assert.IsTrue(events[1].HasOldValue); + Assert.AreEqual(2, ((BinarizableEntry)events[1].Value).val); + Assert.AreEqual(2, ((BinarizableEntry)events[1].Value).val); + Assert.AreEqual(1, events[1].Key); + } + /// /// Test binarizable filter logic. 
/// @@ -1051,6 +1124,8 @@ private static ICacheEntryEvent CreateEvent(ICacheEntryEve return new CacheEntryCreateEvent(e.Key, e.Value); case CacheEntryEventType.Updated: return new CacheEntryUpdateEvent(e.Key, e.OldValue, e.Value); + case CacheEntryEventType.Expired: + return new CacheEntryExpireEvent(e.Key, e.OldValue); default: return new CacheEntryRemoveEvent(e.Key, e.OldValue); } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Continuous/ContinuousQueryTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Continuous/ContinuousQueryTest.cs index 5148dccee443a..f188baef56f92 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Continuous/ContinuousQueryTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Continuous/ContinuousQueryTest.cs @@ -21,7 +21,6 @@ namespace Apache.Ignite.Core.Tests.Cache.Query.Continuous using System; using System.Collections.Concurrent; using System.Collections.Generic; - using System.Threading; using Apache.Ignite.Core.Cache; using Apache.Ignite.Core.Cache.Event; using Apache.Ignite.Core.Cache.Query.Continuous; @@ -69,7 +68,7 @@ private static void PutEntry(ICache cache) cache.Put(entry.Id, entry); // Wait for events. 
- Thread.Sleep(100); + TestUtils.WaitForTrueCondition(() => Listener.Events.Count == 2); ICacheEntryEvent e; @@ -100,7 +99,7 @@ private class Data private class Listener : ICacheEntryEventListener { - public static readonly ConcurrentStack> Events + public static readonly ConcurrentStack> Events = new ConcurrentStack>(); public void OnEvent(IEnumerable> evts) diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/ContinuousQueryTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/ContinuousQueryTest.cs index fad419111e0ac..960ede5164b9c 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/ContinuousQueryTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/ContinuousQueryTest.cs @@ -26,6 +26,7 @@ namespace Apache.Ignite.Core.Tests.Client.Cache using Apache.Ignite.Core.Binary; using Apache.Ignite.Core.Cache; using Apache.Ignite.Core.Cache.Event; + using Apache.Ignite.Core.Cache.Expiry; using Apache.Ignite.Core.Cache.Query.Continuous; using Apache.Ignite.Core.Client; using Apache.Ignite.Core.Client.Cache; @@ -638,6 +639,76 @@ public void TestCustomTimeIntervalCausesIncompleteBatches() }); } + /// + /// Tests that is false by default + /// and expiration events are not delivered. 
+ /// + /// - Create a cache with expiry policy + /// - Start a continuous query with default settings + /// - Check that Created events are delivered, but Expired events are not + /// + [Test] + public void TestIncludeExpiredIsFalseByDefaultAndExpiredEventsAreSkipped() + { + var cache = Client.GetOrCreateCache(TestUtils.TestName) + .WithExpiryPolicy(new ExpiryPolicy(TimeSpan.FromMilliseconds(100), null, null)); + + var events = new ConcurrentQueue>(); + var qry = new ContinuousQueryClient(new DelegateListener(events.Enqueue)); + Assert.IsFalse(qry.IncludeExpired); + + using (cache.QueryContinuous(qry)) + { + cache[1] = 2; + + TestUtils.WaitForTrueCondition(() => !cache.ContainsKey(1), 5000); + + cache[2] = 3; + } + + Assert.AreEqual(2, events.Count); + Assert.AreEqual(CacheEntryEventType.Created, events.First().EventType); + Assert.AreEqual(CacheEntryEventType.Created, events.Last().EventType); + } + + /// + /// Tests that enabling causes + /// events to be delivered. + /// + /// - Create a cache with expiry policy + /// - Start a continuous query with set to true + /// - Check that Expired events are delivered + /// + [Test] + public void TestExpiredEventsAreDeliveredWhenIncludeExpiredIsTrue() + { + var cache = Client.GetOrCreateCache(TestUtils.TestName) + .WithExpiryPolicy(new ExpiryPolicy(TimeSpan.FromMilliseconds(100), null, null)); + + var events = new ConcurrentQueue>(); + var qry = new ContinuousQueryClient(new DelegateListener(events.Enqueue)) + { + IncludeExpired = true + }; + + using (cache.QueryContinuous(qry)) + { + cache[1] = 2; + + TestUtils.WaitForTrueCondition(() => events.Count == 2, 5000); + } + + Assert.AreEqual(2, events.Count); + Assert.AreEqual(CacheEntryEventType.Created, events.First().EventType); + Assert.AreEqual(CacheEntryEventType.Expired, events.Last().EventType); + + Assert.IsTrue(events.Last().HasValue); + Assert.IsTrue(events.Last().HasOldValue); + Assert.AreEqual(2, events.Last().Value); + Assert.AreEqual(2, 
events.Last().OldValue); + Assert.AreEqual(1, events.Last().Key); + } + /// /// Tests batching behavior. /// diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj b/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj index 50e3db755ad6d..0dec27719260b 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj @@ -101,6 +101,7 @@ + diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Event/CacheEntryEventType.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Event/CacheEntryEventType.cs index 8339257710b9f..f712070b0a1dd 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Event/CacheEntryEventType.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Event/CacheEntryEventType.cs @@ -36,6 +36,11 @@ public enum CacheEntryEventType /// /// An event type indicating that the cache entry was removed. /// - Removed + Removed, + + /// + /// An event type indicating that the cache entry was removed by expiration policy. + /// + Expired } } \ No newline at end of file diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Query/Continuous/ContinuousQuery.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Query/Continuous/ContinuousQuery.cs index 4c471dc860d6f..b24dae6d06336 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Query/Continuous/ContinuousQuery.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Query/Continuous/ContinuousQuery.cs @@ -166,6 +166,17 @@ public ContinuousQuery(ICacheEntryEventListener lsnr, ICacheEntryEventFi /// Defaults to false. /// public bool Local { get; set; } + + /// + /// Gets or sets a value indicating whether to notify about events. + /// + /// If true, then the remote listener will get notifications about expired cache entries. + /// Otherwise, only , , and + /// events will be passed to the listener. + /// + /// Defaults to false. 
+ /// + public bool IncludeExpired { get; set; } /// /// Validate continuous query state. diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Client/Cache/Query/Continuous/ContinuousQueryClient.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Client/Cache/Query/Continuous/ContinuousQueryClient.cs index 6d5d6f349c079..e678930b9dbd9 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Client/Cache/Query/Continuous/ContinuousQueryClient.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Client/Cache/Query/Continuous/ContinuousQueryClient.cs @@ -97,5 +97,16 @@ public ContinuousQueryClient(ICacheEntryEventListener listener) : this() /// sent only when buffer is full. /// public TimeSpan TimeInterval { get; set; } + + /// + /// Gets or sets a value indicating whether to notify about events. + /// + /// If true, then the remote listener will get notifications about expired cache entries. + /// Otherwise, only , , and + /// events will be passed to the listener. + /// + /// Defaults to false. + /// + public bool IncludeExpired { get; set; } } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Event/CacheEntryExpireEvent.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Event/CacheEntryExpireEvent.cs new file mode 100644 index 0000000000000..2b4acc6da4e1c --- /dev/null +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Event/CacheEntryExpireEvent.cs @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +namespace Apache.Ignite.Core.Impl.Cache.Event +{ + using Apache.Ignite.Core.Cache.Event; + + /// + /// Cache entry expire event. + /// + internal class CacheEntryExpireEvent : ICacheEntryEvent + { + /** Key.*/ + private readonly TK _key; + + /** Old value.*/ + private readonly TV _oldVal; + + /// + /// Constructor. + /// + /// Key. + /// Old value. + public CacheEntryExpireEvent(TK key, TV oldVal) + { + _key = key; + _oldVal = oldVal; + } + + /** */ + public TK Key + { + get { return _key; } + } + + /** */ + public TV Value + { + get { return _oldVal; } + } + + /** */ + public TV OldValue + { + get { return _oldVal; } + } + + /** */ + public bool HasValue + { + get { return true; } + } + + /** */ + public bool HasOldValue + { + get { return true; } + } + + /** */ + public CacheEntryEventType EventType + { + get { return CacheEntryEventType.Expired; } + } + } +} \ No newline at end of file diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Query/Continuous/ContinuousQueryHandleImpl.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Query/Continuous/ContinuousQueryHandleImpl.cs index 6c9012bd1d1b7..7918d5b6835c7 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Query/Continuous/ContinuousQueryHandleImpl.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Query/Continuous/ContinuousQueryHandleImpl.cs @@ -108,6 +108,7 @@ public ContinuousQueryHandleImpl(ContinuousQuery qry, Marshaller marsh, { writer.WriteLong(_hnd); writer.WriteBoolean(qry.Local); + writer.WriteBoolean(qry.IncludeExpired); 
writer.WriteBoolean(_filter != null); var javaFilter = _filter as PlatformJavaObjectFactoryProxy; diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Query/Continuous/ContinuousQueryUtils.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Query/Continuous/ContinuousQueryUtils.cs index fc93c48473e7d..c34a25ee4233e 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Query/Continuous/ContinuousQueryUtils.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Query/Continuous/ContinuousQueryUtils.cs @@ -94,6 +94,8 @@ private static ICacheEntryEvent ReadEvent0(BinaryReader reader) return new CacheEntryUpdateEvent(key, oldVal, val); case 2: return new CacheEntryRemoveEvent(key, oldVal); + case 3: + return new CacheEntryExpireEvent(key, oldVal); default: throw new NotSupportedException(eventType.ToString()); } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheClient.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheClient.cs index 2f27bcfc3a8e0..4486f8209031a 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheClient.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheClient.cs @@ -1085,7 +1085,7 @@ private void WriteContinuousQuery(ClientRequestContext ctx, ContinuousQueryClien var w = ctx.Writer; w.WriteInt(continuousQuery.BufferSize); w.WriteLong((long) continuousQuery.TimeInterval.TotalMilliseconds); - w.WriteBoolean(false); // Include expired. 
+ w.WriteBoolean(continuousQuery.IncludeExpired); if (continuousQuery.Filter == null) { diff --git a/modules/platforms/dotnet/Apache.Ignite.DotNetCore.sln.DotSettings b/modules/platforms/dotnet/Apache.Ignite.DotNetCore.sln.DotSettings index 59bf2e5dd4a9b..c39aa648d7763 100644 --- a/modules/platforms/dotnet/Apache.Ignite.DotNetCore.sln.DotSettings +++ b/modules/platforms/dotnet/Apache.Ignite.DotNetCore.sln.DotSettings @@ -8,6 +8,7 @@ DO_NOT_SHOW True False + True True True True From 1a3fd112b02133892c7c95d4be607079ffa83211 Mon Sep 17 00:00:00 2001 From: Ilya Kasnacheev Date: Wed, 11 Nov 2020 14:25:59 +0300 Subject: [PATCH 037/110] IGNITE-13665 When system worker is blocked, output its stack trace - Fixes #8442. --- .../apache/ignite/internal/IgnitionEx.java | 14 +++- .../ignite/internal/util/IgniteUtils.java | 16 ---- .../internal/worker/WorkersRegistry.java | 2 - .../failure/SystemWorkersBlockingTest.java | 82 +++++++++++-------- 4 files changed, 59 insertions(+), 55 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java index a02c5ea525400..dfdae46fdc0eb 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java @@ -1849,11 +1849,16 @@ private void start0(GridStartContext startCtx, IgniteConfiguration cfg, TimeBag WorkersRegistry workerRegistry = new WorkersRegistry( new IgniteBiInClosure() { - @Override public void apply(GridWorker deadWorker, FailureType failureType) { + @Override public void apply(GridWorker worker, FailureType failureType) { + IgniteException ex = new IgniteException(S.toString(GridWorker.class, worker)); + + Thread runner = worker.runner(); + + if (runner != null && runner != Thread.currentThread()) + ex.setStackTrace(runner.getStackTrace()); + if (grid != null) - grid.context().failure().process(new FailureContext( - failureType, - 
new IgniteException(S.toString(GridWorker.class, deadWorker)))); + grid.context().failure().process(new FailureContext(failureType, ex)); } }, IgniteSystemProperties.getLong(IGNITE_SYSTEM_WORKER_BLOCKED_TIMEOUT, @@ -1899,6 +1904,7 @@ private void start0(GridStartContext startCtx, IgniteConfiguration cfg, TimeBag // Note, that we do not pre-start threads here as class loading pool may // not be needed. validateThreadPoolSize(cfg.getPeerClassLoadingThreadPoolSize(), "peer class loading"); + p2pExecSvc = new IgniteThreadPoolExecutor( "p2p", cfg.getIgniteInstanceName(), diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java index 4f1af3833be30..b3b644d6cafe3 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java @@ -1505,22 +1505,6 @@ private static void logMessage(@Nullable IgniteLogger log, String msg, boolean i warn(log, msg); } - /** - * Dumps stack trace of the thread to the given log at warning level. - * - * @param t Thread to be dumped. - * @param log Logger. - */ - public static void dumpThread(Thread t, @Nullable IgniteLogger log) { - ThreadMXBean mxBean = ManagementFactory.getThreadMXBean(); - - GridStringBuilder sb = new GridStringBuilder(); - - printThreadInfo(mxBean.getThreadInfo(t.getId()), sb, Collections.emptySet()); - - warn(log, sb.toString()); - } - /** * Get deadlocks from the thread bean. 
* @param mxBean the bean diff --git a/modules/core/src/main/java/org/apache/ignite/internal/worker/WorkersRegistry.java b/modules/core/src/main/java/org/apache/ignite/internal/worker/WorkersRegistry.java index 5829b3cd97dfa..7af557dc45433 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/worker/WorkersRegistry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/worker/WorkersRegistry.java @@ -229,8 +229,6 @@ public void setSystemWorkerBlockedTimeout(long val) { "[workerName=" + worker.name() + ", threadName=" + runner.getName() + ", blockedFor=" + heartbeatDelay / 1000 + "s]"); - U.dumpThread(worker.runner(), log); - workerFailedHnd.apply(worker, SYSTEM_WORKER_BLOCKED); } diff --git a/modules/core/src/test/java/org/apache/ignite/failure/SystemWorkersBlockingTest.java b/modules/core/src/test/java/org/apache/ignite/failure/SystemWorkersBlockingTest.java index 8455f878ef6a5..ccfc50750d6b7 100644 --- a/modules/core/src/test/java/org/apache/ignite/failure/SystemWorkersBlockingTest.java +++ b/modules/core/src/test/java/org/apache/ignite/failure/SystemWorkersBlockingTest.java @@ -17,9 +17,12 @@ package org.apache.ignite.failure; +import java.util.Arrays; import java.util.HashSet; import java.util.Set; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.LockSupport; @@ -30,8 +33,8 @@ import org.apache.ignite.internal.util.worker.GridWorker; import org.apache.ignite.internal.worker.WorkersRegistry; import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.GridAbstractTest; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; -import org.apache.ignite.thread.IgniteThread; import org.junit.Test; /** @@ -44,6 +47,12 @@ public class SystemWorkersBlockingTest extends 
GridCommonAbstractTest { /** Handler latch. */ private final CountDownLatch hndLatch = new CountDownLatch(1); + /** Blocking thread latch. */ + private final CountDownLatch blockLatch = new CountDownLatch(1); + + /** Worker executor. */ + private final ExecutorService workerExecutor = Executors.newSingleThreadExecutor(); + /** Reference to failure error. */ private final AtomicReference failureError = new AtomicReference<>(); @@ -81,6 +90,13 @@ public class SystemWorkersBlockingTest extends GridCommonAbstractTest { @Override protected void afterTest() throws Exception { super.afterTest(); + blockLatch.countDown(); + + if (workerExecutor.isTerminated()) { + workerExecutor.shutdownNow(); + workerExecutor.awaitTermination(2 * SYSTEM_WORKER_BLOCKED_TIMEOUT, TimeUnit.MILLISECONDS); + } + stopAllGrids(); } @@ -91,34 +107,23 @@ public class SystemWorkersBlockingTest extends GridCommonAbstractTest { public void testBlockingWorker() throws Exception { IgniteEx ignite = startGrid(0); - CountDownLatch blockLatch = new CountDownLatch(1); + GridWorker worker = new LatchingGridWorker(ignite); - GridWorker worker = new GridWorker(ignite.name(), "test-worker", log) { - @Override protected void body() throws InterruptedException { - blockLatch.await(); - } - }; + runWorker(worker); - IgniteThread runner = null; - try { - runner = runWorker(worker); + ignite.context().workersRegistry().register(worker); - ignite.context().workersRegistry().register(worker); + assertTrue(hndLatch.await(ignite.configuration().getFailureDetectionTimeout() * 2, + TimeUnit.MILLISECONDS)); - assertTrue(hndLatch.await(SYSTEM_WORKER_BLOCKED_TIMEOUT * 2, TimeUnit.MILLISECONDS)); + Throwable blockedExeption = failureError.get(); - Throwable err = failureError.get(); - - assertNotNull(err); - assertTrue(err.getMessage() != null && err.getMessage().contains("test-worker")); - } - finally { - if (runner != null) { - blockLatch.countDown(); + assertNotNull(blockedExeption); - 
runner.join(SYSTEM_WORKER_BLOCKED_TIMEOUT); - } - } + assertTrue(Arrays.stream(blockedExeption.getStackTrace()).anyMatch( + e -> CountDownLatch.class.getName().equals(e.getClassName()))); + assertTrue(Arrays.stream(blockedExeption.getStackTrace()).anyMatch( + e -> LatchingGridWorker.class.getName().equals(e.getClassName()))); } /** @@ -145,26 +150,37 @@ public void testSingleWorker_NotInInfiniteLoop() throws Exception { } }; - IgniteThread runner = runWorker(worker); + runWorker(worker); Thread.sleep(2 * SYSTEM_WORKER_BLOCKED_TIMEOUT); - runner.interrupt(); + workerExecutor.shutdownNow(); - assertTrue(finishLatch.await(SYSTEM_WORKER_BLOCKED_TIMEOUT, TimeUnit.MILLISECONDS)); + assertTrue(workerExecutor.awaitTermination(SYSTEM_WORKER_BLOCKED_TIMEOUT, TimeUnit.MILLISECONDS)); } /** - * @param worker Grid worker to run. - * @return Thread, running worker. + * Run worker and wait for its initialization. + * + * @param worker GridWorker to run. + * @throws IgniteInterruptedCheckedException If wait is interrupted. */ - private IgniteThread runWorker(GridWorker worker) throws IgniteInterruptedCheckedException { - IgniteThread runner = new IgniteThread(worker); - - runner.start(); + private void runWorker(GridWorker worker) throws IgniteInterruptedCheckedException { + workerExecutor.execute(worker); GridTestUtils.waitForCondition(() -> worker.runner() != null, 100); + } - return runner; + /** */ + private class LatchingGridWorker extends GridWorker { + /** */ + public LatchingGridWorker(IgniteEx ignite) { + super(ignite.name(), "test-worker", GridAbstractTest.log); + } + + /** */ + @Override protected void body() throws InterruptedException { + blockLatch.await(); + } } } From d8ffc2b2991e262b65dfb9ee32fd818852cd5aaf Mon Sep 17 00:00:00 2001 From: Anton Kalashnikov Date: Wed, 11 Nov 2020 15:12:20 +0300 Subject: [PATCH 038/110] IGNITE-13681 Lightweight checkpoint implementation, per data region checkpoint listeners. - Fixes #8433. 
Signed-off-by: Sergey Chugunov --- .../processors/cache/mvcc/txlog/TxLog.java | 9 +- .../GridCacheDatabaseSharedManager.java | 32 +- .../persistence/GridCacheOffheapManager.java | 2 +- .../IgniteCacheDatabaseSharedManager.java | 4 +- .../checkpoint/CheckpointManager.java | 7 +- .../checkpoint/CheckpointPagesWriter.java | 21 +- .../CheckpointPagesWriterFactory.java | 26 +- .../checkpoint/CheckpointWorkflow.java | 92 +++-- .../persistence/checkpoint/Checkpointer.java | 14 +- .../LightweightCheckpointManager.java | 337 ++++++++++++++++++ .../persistence/metastorage/MetaStorage.java | 4 +- .../CheckpointListenerForRegionTest.java | 191 ++++++++++ .../CheckpointStartLoggingTest.java | 2 +- .../checkpoint/LightweightCheckpointTest.java | 231 ++++++++++++ .../testsuites/IgnitePdsTestSuite2.java | 5 +- 15 files changed, 910 insertions(+), 67 deletions(-) create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/LightweightCheckpointManager.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/CheckpointListenerForRegionTest.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/LightweightCheckpointTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLog.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLog.java index 8adb6b30e53a4..9b8c73bf26ebf 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLog.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLog.java @@ -33,6 +33,7 @@ import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRecord; import org.apache.ignite.internal.processors.cache.CacheDiagnosticManager; import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils; +import 
org.apache.ignite.internal.processors.cache.persistence.DataRegion; import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager; import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointListener; @@ -102,6 +103,8 @@ private void init(GridKernalContext ctx) throws IgniteCheckedException { PageLockListener txLogLockLsnr = diagnosticMgr.pageLockTracker().createPageLockTracker(txLogName); + DataRegion txLogDataRegion = mgr.dataRegion(TX_LOG_CACHE_NAME); + if (CU.isPersistenceEnabled(ctx.config())) { String txLogReuseListName = TX_LOG_CACHE_NAME + "##ReuseList"; PageLockListener txLogReuseListLockLsnr = diagnosticMgr.pageLockTracker().createPageLockTracker(txLogReuseListName); @@ -110,7 +113,7 @@ private void init(GridKernalContext ctx) throws IgniteCheckedException { try { IgniteWriteAheadLogManager wal = ctx.cache().context().wal(); - PageMemoryEx pageMemory = (PageMemoryEx)mgr.dataRegion(TX_LOG_CACHE_NAME).pageMemory(); + PageMemoryEx pageMemory = (PageMemoryEx)txLogDataRegion.pageMemory(); long metaId = pageMemory.metaPageId(TX_LOG_CACHE_ID); long metaPage = pageMemory.acquirePage(TX_LOG_CACHE_ID, metaId); @@ -195,14 +198,14 @@ private void init(GridKernalContext ctx) throws IgniteCheckedException { txLogLockLsnr ); - ((GridCacheDatabaseSharedManager)mgr).addCheckpointListener(this); + ((GridCacheDatabaseSharedManager)mgr).addCheckpointListener(this, txLogDataRegion); } finally { mgr.checkpointReadUnlock(); } } else { - PageMemory pageMemory = mgr.dataRegion(TX_LOG_CACHE_NAME).pageMemory(); + PageMemory pageMemory = txLogDataRegion.pageMemory(); ReuseList reuseList1 = mgr.reuseList(TX_LOG_CACHE_NAME); long treeRoot; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java index 37bd4642eb762..6a0ecb233e7f3 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java @@ -52,7 +52,6 @@ import java.util.function.ToLongFunction; import java.util.regex.Pattern; import java.util.stream.Collectors; - import org.apache.ignite.DataRegionMetricsProvider; import org.apache.ignite.DataStorageMetrics; import org.apache.ignite.IgniteCheckedException; @@ -136,6 +135,7 @@ import org.apache.ignite.internal.processors.compress.CompressionProcessor; import org.apache.ignite.internal.processors.port.GridPortRecord; import org.apache.ignite.internal.processors.query.GridQueryProcessor; +import org.apache.ignite.internal.util.GridConcurrentHashSet; import org.apache.ignite.internal.util.GridCountDownCallback; import org.apache.ignite.internal.util.IgniteUtils; import org.apache.ignite.internal.util.StripedExecutor; @@ -317,6 +317,9 @@ public class GridCacheDatabaseSharedManager extends IgniteCacheDatabaseSharedMan /** Lock for releasing history for preloading. */ private ReentrantLock releaseHistForPreloadingLock = new ReentrantLock(); + /** Data regions which should be checkpointed. */ + protected final Set checkpointedDataRegions = new GridConcurrentHashSet<>(); + /** * @param ctx Kernal context. 
*/ @@ -471,7 +474,7 @@ private DataRegionConfiguration createMetastoreDataRegionConfig(DataStorageConfi persistenceCfg, storeMgr, this::isCheckpointInapplicableForWalRebalance, - this::dataRegions, + this::checkpointedDataRegions, this::cacheGroupContexts, this::getPageMemoryForCacheGroup, resolveThrottlingPolicy(), @@ -493,6 +496,11 @@ private DataRegionConfiguration createMetastoreDataRegionConfig(DataStorageConfi } } + /** */ + public Collection checkpointedDataRegions() { + return checkpointedDataRegions; + } + /** */ private Collection cacheGroupContexts() { return cctx.cache().cacheGroups(); @@ -594,6 +602,16 @@ private void releaseFileLock() { fileLockHolder.close(); } + /** {@inheritDoc} */ + @Override public DataRegion addDataRegion(DataStorageConfiguration dataStorageCfg, DataRegionConfiguration dataRegionCfg, + boolean trackable) throws IgniteCheckedException { + DataRegion region = super.addDataRegion(dataStorageCfg, dataRegionCfg, trackable); + + checkpointedDataRegions.add(region); + + return region; + } + /** */ private void readMetastore() throws IgniteCheckedException { try { @@ -1557,11 +1575,19 @@ public File checkpointDirectory() { return checkpointManager.checkpointDirectory(); } + /** + * @param lsnr Listener. + * @param dataRegion Data region for which listener is corresponded to. + */ + public void addCheckpointListener(CheckpointListener lsnr, DataRegion dataRegion) { + checkpointManager.addCheckpointListener(lsnr, dataRegion); + } + /** * @param lsnr Listener. 
*/ public void addCheckpointListener(CheckpointListener lsnr) { - checkpointManager.addCheckpointListener(lsnr); + checkpointManager.addCheckpointListener(lsnr, null); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java index fa4fabe05d4c3..4bfda53b0389b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java @@ -225,7 +225,7 @@ public class GridCacheOffheapManager extends IgniteCacheOffheapManagerImpl imple persStoreMetrics = databaseSharedManager.persistentStoreMetricsImpl(); - databaseSharedManager.addCheckpointListener(this); + databaseSharedManager.addCheckpointListener(this, grp.dataRegion()); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java index 1d775669c07b8..42ad2a9374a19 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java @@ -413,7 +413,7 @@ protected List getDatabaseListeners(GridKernalContext * @param dataRegionCfg Data region config. * @throws IgniteCheckedException If failed to initialize swap path. 
*/ - public void addDataRegion( + public DataRegion addDataRegion( DataStorageConfiguration dataStorageCfg, DataRegionConfiguration dataRegionCfg, boolean trackable @@ -441,6 +441,8 @@ public void addDataRegion( else if (dataRegionName.equals(DFLT_DATA_REG_DEFAULT_NAME)) U.warn(log, "Data Region with name 'default' isn't used as a default. " + "Please, check Data Region configuration."); + + return region; } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointManager.java index 415e2cfbe4d3d..2beac7b1c3f4b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointManager.java @@ -170,7 +170,7 @@ public CheckpointManager( checkpointPagesWriterFactory = new CheckpointPagesWriterFactory( logger, snapshotMgr, - (fullPage, buf, tag) -> pageStoreManager.writeInternal(fullPage.groupId(), fullPage.pageId(), buf, tag, true), + (pageMemEx, fullPage, buf, tag) -> pageStoreManager.writeInternal(fullPage.groupId(), fullPage.pageId(), buf, tag, true), persStoreMetrics, throttlingPolicy, threadBuf, pageMemoryGroupResolver @@ -231,9 +231,10 @@ public void threadBuf(ThreadLocal threadBuf) { /** * @param lsnr Listener. + * @param dataRegion Data region for which listener is corresponded to. 
*/ - public void addCheckpointListener(CheckpointListener lsnr) { - checkpointWorkflow.addCheckpointListener(lsnr); + public void addCheckpointListener(CheckpointListener lsnr, DataRegion dataRegion) { + checkpointWorkflow.addCheckpointListener(lsnr, dataRegion); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointPagesWriter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointPagesWriter.java index 77f9e2eb09ca3..79f774c087b73 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointPagesWriter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointPagesWriter.java @@ -178,7 +178,7 @@ private GridConcurrentMultiPairQueue writePages( CheckpointMetricsTracker tracker = persStoreMetrics.metricsEnabled() ? this.tracker : null; - PageStoreWriter pageStoreWriter = createPageStoreWriter(pagesToRetry); + Map pageStoreWriters = new HashMap<>(); ByteBuffer tmpWriteBuf = threadBuf.get(); @@ -201,6 +201,8 @@ private GridConcurrentMultiPairQueue writePages( tmpWriteBuf.rewind(); + PageStoreWriter pageStoreWriter = pageStoreWriters.computeIfAbsent(pageMem, pageMemEx -> createPageStoreWriter(pageMemEx, pagesToRetry)); + pageMem.checkpointWritePage(fullId, tmpWriteBuf, pageStoreWriter, tracker); if (throttlingEnabled) { @@ -227,18 +229,20 @@ private GridConcurrentMultiPairQueue writePages( /** * Factory method for create {@link PageStoreWriter}. * + * @param pageMemEx * @param pagesToRetry List pages for retry. * @return Checkpoint page write context. 
*/ - private PageStoreWriter createPageStoreWriter(Map> pagesToRetry) { + private PageStoreWriter createPageStoreWriter( + PageMemoryEx pageMemEx, + Map> pagesToRetry + ) { return new PageStoreWriter() { /** {@inheritDoc} */ @Override public void writePage(FullPageId fullPageId, ByteBuffer buf, int tag) throws IgniteCheckedException { if (tag == PageMemoryImpl.TRY_AGAIN_TAG) { - PageMemoryEx pageMem = pageMemoryGroupResolver.apply(fullPageId.groupId()); - - pagesToRetry.computeIfAbsent(pageMem, k -> new ArrayList<>()).add(fullPageId); + pagesToRetry.computeIfAbsent(pageMemEx, k -> new ArrayList<>()).add(fullPageId); return; } @@ -258,7 +262,7 @@ private PageStoreWriter createPageStoreWriter(Map curCpProgress.updateWrittenPages(1); - PageStore store = pageWriter.write(fullPageId, buf, tag); + PageStore store = pageWriter.write(pageMemEx, fullPageId, buf, tag); updStores.computeIfAbsent(store, k -> new LongAdder()).increment(); } @@ -268,12 +272,15 @@ private PageStoreWriter createPageStoreWriter(Map /** Interface which allows to write one page to page store. */ public interface CheckpointPageWriter { /** + * + * @param pageMemEx Page memory from which page should be written. * @param fullPageId Full page id. * @param buf Byte buffer. * @param tag Page tag. * @return {@link PageStore} which was used to write. * @throws IgniteCheckedException if fail. 
*/ - PageStore write(FullPageId fullPageId, ByteBuffer buf, int tag) throws IgniteCheckedException; + PageStore write(PageMemoryEx pageMemEx, FullPageId fullPageId, ByteBuffer buf, int tag) + throws IgniteCheckedException; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointPagesWriterFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointPagesWriterFactory.java index eb3607b76c89f..8c882e1473eeb 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointPagesWriterFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointPagesWriterFactory.java @@ -19,6 +19,8 @@ import java.nio.ByteBuffer; import java.util.Collection; +import java.util.HashMap; +import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.atomic.LongAdder; @@ -144,22 +146,13 @@ Runnable buildRecovery( AtomicInteger cpPagesCnt ) { return () -> { - PageStoreWriter pageStoreWriter = (fullPageId, buf, tag) -> { - assert tag != PageMemoryImpl.TRY_AGAIN_TAG : "Lock is held by other thread for page " + fullPageId; - - // Write buf to page store. - PageStore store = checkpointPageWriter.write(fullPageId, buf, tag); - - // Save store for future fsync. - updStores.add(store); - }; - GridConcurrentMultiPairQueue.Result res = new GridConcurrentMultiPairQueue.Result<>(); int pagesWritten = 0; ByteBuffer tmpWriteBuf = threadBuf.get(); + Map pageStoreWriters = new HashMap<>(); try { while (pages.next(res)) { // Fail-fast break if some exception occurred. 
@@ -168,6 +161,19 @@ Runnable buildRecovery( PageMemoryEx pageMem = res.getKey(); + PageStoreWriter pageStoreWriter = pageStoreWriters.computeIfAbsent( + pageMem, + (pageMemEx) -> (fullPageId, buf, tag) -> { + assert tag != PageMemoryImpl.TRY_AGAIN_TAG : "Lock is held by other thread for page " + fullPageId; + + // Write buf to page store. + PageStore store = checkpointPageWriter.write(pageMemEx, fullPageId, buf, tag); + + // Save store for future fsync. + updStores.add(store); + } + ); + // Write page content to page store via pageStoreWriter. // Tracker is null, because no need to track checkpoint metrics on recovery. pageMem.checkpointWritePage(res.getValue(), tmpWriteBuf, pageStoreWriter, null); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointWorkflow.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointWorkflow.java index 6a97a188eae4c..93b7fed55c8ef 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointWorkflow.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointWorkflow.java @@ -26,7 +26,6 @@ import java.util.Map; import java.util.Set; import java.util.UUID; -import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ExecutionException; import java.util.concurrent.ForkJoinPool; import java.util.concurrent.ForkJoinTask; @@ -39,6 +38,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import java.util.function.Supplier; +import java.util.stream.Collectors; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteInterruptedException; @@ -74,7 +74,9 @@ import org.apache.ignite.internal.util.worker.WorkProgressDispatcher; import org.apache.ignite.lang.IgniteFuture; import 
org.apache.ignite.thread.IgniteThreadPoolExecutor; +import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; +import org.jsr166.ConcurrentLinkedHashMap; import static org.apache.ignite.IgniteSystemProperties.getBoolean; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.LOST; @@ -106,6 +108,9 @@ public class CheckpointWorkflow { /** @see IgniteSystemProperties#CHECKPOINT_PARALLEL_SORT_THRESHOLD */ public static final int DFLT_CHECKPOINT_PARALLEL_SORT_THRESHOLD = 512 * 1024; + /****/ + private static final DataRegion NO_REGION = new DataRegion(null, null, null, null); + /** * Starting from this number of dirty pages in checkpoint, array will be sorted with {@link * Arrays#parallelSort(Comparable[])} in case of {@link CheckpointWriteOrder#SEQUENTIAL}. @@ -144,7 +149,7 @@ public class CheckpointWorkflow { private final CheckpointWriteOrder checkpointWriteOrder; /** Collections of checkpoint listeners. */ - private final Collection lsnrs = new CopyOnWriteArrayList<>(); + private final Map lsnrs = new ConcurrentLinkedHashMap<>(); /** Ignite instance name. */ private final String igniteInstanceName; @@ -228,7 +233,9 @@ public Checkpoint markCheckpointBegin( CheckpointMetricsTracker tracker, WorkProgressDispatcher workProgressDispatcher ) throws IgniteCheckedException { - List dbLsnrs = new ArrayList<>(lsnrs); + Collection checkpointedRegions = dataRegions.get(); + + List dbLsnrs = getRelevantCheckpointListeners(checkpointedRegions); CheckpointRecord cpRec = new CheckpointRecord(memoryRecoveryRecordPtr); @@ -283,7 +290,7 @@ curr, new PartitionAllocationMap(), checkpointCollectPagesInfoPool, workProgress fillCacheGroupState(cpRec); //There are allowable to replace pages only after checkpoint entry was stored to disk. 
- cpPagesHolder = beginAllCheckpoints(dataRegions.get(), curr.futureFor(MARKER_STORED_TO_DISK)); + cpPagesHolder = beginAllCheckpoints(checkpointedRegions, curr.futureFor(MARKER_STORED_TO_DISK)); curr.currentCheckpointPagesCount(cpPagesHolder.pagesNum()); @@ -293,7 +300,8 @@ curr, new PartitionAllocationMap(), checkpointCollectPagesInfoPool, workProgress if (dirtyPagesCount > 0 || curr.nextSnapshot() || hasPartitionsToDestroy) { // No page updates for this checkpoint are allowed from now on. - cpPtr = wal.log(cpRec); + if (wal != null) + cpPtr = wal.log(cpRec); if (cpPtr == null) cpPtr = CheckpointStatus.NULL_PTR; @@ -326,18 +334,22 @@ curr, new PartitionAllocationMap(), checkpointCollectPagesInfoPool, workProgress tracker.onWalCpRecordFsyncStart(); // Sync log outside the checkpoint write lock. - wal.flush(cpPtr, true); + if (wal != null) + wal.flush(cpPtr, true); tracker.onWalCpRecordFsyncEnd(); - CheckpointEntry checkpointEntry = checkpointMarkersStorage.writeCheckpointEntry( - cpTs, - cpRec.checkpointId(), - cpPtr, - cpRec, - CheckpointEntryType.START, - skipSync - ); + CheckpointEntry checkpointEntry = null; + + if (checkpointMarkersStorage != null) + checkpointEntry = checkpointMarkersStorage.writeCheckpointEntry( + cpTs, + cpRec.checkpointId(), + cpPtr, + cpRec, + CheckpointEntryType.START, + skipSync + ); curr.transitTo(MARKER_STORED_TO_DISK); @@ -351,7 +363,7 @@ curr, new PartitionAllocationMap(), checkpointCollectPagesInfoPool, workProgress return new Checkpoint(checkpointEntry, cpPages, curr); } else { - if (curr.nextSnapshot()) + if (curr.nextSnapshot() && wal != null) wal.flush(null, true); return new Checkpoint(null, GridConcurrentMultiPairQueue.EMPTY, curr); @@ -563,23 +575,28 @@ public void markCheckpointEnd(Checkpoint chp) throws IgniteCheckedException { } if (chp.hasDelta()) { - checkpointMarkersStorage.writeCheckpointEntry( - chp.cpEntry.timestamp(), - chp.cpEntry.checkpointId(), - chp.cpEntry.checkpointMark(), - null, - CheckpointEntryType.END, 
- skipSync - ); - - wal.notchLastCheckpointPtr(chp.cpEntry.checkpointMark()); + if (checkpointMarkersStorage != null) + checkpointMarkersStorage.writeCheckpointEntry( + chp.cpEntry.timestamp(), + chp.cpEntry.checkpointId(), + chp.cpEntry.checkpointMark(), + null, + CheckpointEntryType.END, + skipSync + ); + + if (wal != null) + wal.notchLastCheckpointPtr(chp.cpEntry.checkpointMark()); } - checkpointMarkersStorage.onCheckpointFinished(chp); + if (checkpointMarkersStorage != null) + checkpointMarkersStorage.onCheckpointFinished(chp); CheckpointContextImpl emptyCtx = new CheckpointContextImpl(chp.progress, null, null, null); - List dbLsnrs = new ArrayList<>(lsnrs); + Collection checkpointedRegions = dataRegions.get(); + + List dbLsnrs = getRelevantCheckpointListeners(checkpointedRegions); for (CheckpointListener lsnr : dbLsnrs) lsnr.afterCheckpointEnd(emptyCtx); @@ -587,6 +604,17 @@ public void markCheckpointEnd(Checkpoint chp) throws IgniteCheckedException { chp.progress.transitTo(FINISHED); } + /** + * @param checkpointedRegions Regions which will be checkpointed. + * @return Checkpoint listeners which should be handled. + */ + @NotNull private List getRelevantCheckpointListeners(Collection checkpointedRegions) { + return lsnrs.entrySet().stream() + .filter(entry -> entry.getValue() == NO_REGION || checkpointedRegions.contains(entry.getValue())) + .map(Map.Entry::getKey) + .collect(Collectors.toList()); + } + /** * This method makes sense if node was stopped during the checkpoint(Start marker was written to disk while end * marker are not). It is able to write all pages to disk and create end marker. @@ -687,10 +715,13 @@ public void memoryRecoveryRecordPtr(WALPointer memoryRecoveryRecordPtr) { } /** + * Adding the listener which will be called only when given data region will be checkpointed. + * * @param lsnr Listener. + * @param dataRegion Data region for which listener is corresponded to. 
*/ - public void addCheckpointListener(CheckpointListener lsnr) { - lsnrs.add(lsnr); + public void addCheckpointListener(CheckpointListener lsnr, DataRegion dataRegion) { + lsnrs.put(lsnr, dataRegion == null ? NO_REGION : dataRegion); } /** @@ -720,7 +751,8 @@ public void stop() { checkpointCollectPagesInfoPool = null; } - lsnrs.clear(); + for (CheckpointListener lsnr : lsnrs.keySet()) + lsnrs.remove(lsnr); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/Checkpointer.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/Checkpointer.java index c0368f410f7ed..9e39dfe143e09 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/Checkpointer.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/Checkpointer.java @@ -106,7 +106,8 @@ public class Checkpointer extends GridWorker { "walCpRecordFsyncDuration=%dms, " + "writeCheckpointEntryDuration=%dms, " + "splitAndSortCpPagesDuration=%dms, " + - "%s pages=%d, " + + "%s" + + "pages=%d, " + "reason='%s']"; /** Skip sync. */ @@ -408,8 +409,8 @@ private void doCheckpoint() { log.info( String.format( CHECKPOINT_STARTED_LOG_FORMAT, - chp.cpEntry.checkpointId(), - chp.cpEntry.checkpointMark(), + chp.cpEntry == null ? "" : chp.cpEntry.checkpointId(), + chp.cpEntry == null ? "" : chp.cpEntry.checkpointMark(), tracker.beforeLockDuration(), tracker.lockWaitDuration(), tracker.listenersExecuteDuration(), @@ -417,7 +418,7 @@ private void doCheckpoint() { tracker.walCpRecordFsyncDuration(), tracker.writeCheckpointEntryDuration(), tracker.splitAndSortCpPagesDuration(), - possibleJvmPauseDur > 0 ? "possibleJvmPauseDuration=" + possibleJvmPauseDur + "ms," : "", + possibleJvmPauseDur > 0 ? 
"possibleJvmPauseDuration=" + possibleJvmPauseDur + "ms, " : "", chp.pagesSize, chp.progress.reason() ) @@ -455,7 +456,7 @@ private void doCheckpoint() { if (chp.hasDelta() || destroyedPartitionsCnt > 0) { if (log.isInfoEnabled()) { - String walSegsCoveredMsg = prepareWalSegsCoveredMsg(chp.walSegsCoveredRange); + String walSegsCoveredMsg = chp.walSegsCoveredRange == null ? "" : prepareWalSegsCoveredMsg(chp.walSegsCoveredRange); log.info(String.format("Checkpoint finished [cpId=%s, pages=%d, markPos=%s, " + "walSegmentsCleared=%d, walSegmentsCovered=%s, markDuration=%dms, pagesWrite=%dms, fsync=%dms, " + @@ -852,6 +853,9 @@ public void shutdownNow() { * Restart worker in IgniteThread. */ public void start() { + if (runner() != null) + return; + assert runner() == null : "Checkpointer is running."; new IgniteThread(this).start(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/LightweightCheckpointManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/LightweightCheckpointManager.java new file mode 100644 index 0000000000000..7a329f7d3e3a7 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/LightweightCheckpointManager.java @@ -0,0 +1,337 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.checkpoint; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.util.Collection; +import java.util.Collections; +import java.util.UUID; +import java.util.function.Function; +import java.util.function.Supplier; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.IgniteSystemProperties; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.LongJVMPauseDetector; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.cache.GridCacheProcessor; +import org.apache.ignite.internal.processors.cache.persistence.DataRegion; +import org.apache.ignite.internal.processors.cache.persistence.DataStorageMetricsImpl; +import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryImpl; +import org.apache.ignite.internal.processors.cache.persistence.snapshot.IgniteCacheSnapshotManager; +import org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer; +import org.apache.ignite.internal.processors.failure.FailureProcessor; +import org.apache.ignite.internal.util.StripedExecutor; +import org.apache.ignite.internal.util.lang.IgniteThrowableFunction; 
+import org.apache.ignite.internal.worker.WorkersRegistry;
+import org.apache.ignite.lang.IgniteInClosure;
+import org.jetbrains.annotations.Nullable;
+
+import static org.apache.ignite.IgniteSystemProperties.IGNITE_CHECKPOINT_READ_LOCK_TIMEOUT;
+
+/**
+ * Like a sharp checkpoint algorithm implemented in {@link CheckpointManager} this checkpoint ensures that
+ * all pages marked dirty under {@link #checkpointTimeoutLock()} will be consistently saved to disk.
+ *
+ * But unlike {@link CheckpointManager} lightweight checkpoint doesn't store any checkpoint markers to disk
+ * nor write cp-related records to WAL log.
+ *
+ * This allows using it in situations where no recovery is needed after a crash in the middle of checkpoint
+ * but work can simply be replayed from the beginning.
+ *
+ * Such situations include defragmentation and node recovery after a crash
+ * (regular sharp checkpoint cannot be used during recovery).
+ */
+public class LightweightCheckpointManager {
+    /** Checkpoint worker. */
+    private volatile Checkpointer checkpointer;
+
+    /** Main checkpoint steps. */
+    private final CheckpointWorkflow checkpointWorkflow;
+
+    /** Timeout checkpoint lock which should be used while write to memory happened. */
+    final CheckpointTimeoutLock checkpointTimeoutLock;
+
+    /** Checkpoint page writer factory. */
+    private final CheckpointPagesWriterFactory checkpointPagesWriterFactory;
+
+    /** Checkpointer builder. It allows to create a new checkpointer on each call. */
+    private final Supplier checkpointerProvider;
+
+    /**
+     * @param logger Logger producer.
+     * @param igniteInstanceName Ignite instance name.
+     * @param checkpointThreadName Name of main checkpoint thread.
+     * @param workersRegistry Workers registry.
+     * @param persistenceCfg Persistence configuration.
+     * @param dataRegions Data regions.
+     * @param pageMemoryGroupResolver Page memory resolver.
+     * @param throttlingPolicy Throttling policy.
+     * @param snapshotMgr Snapshot manager. 
+ * @param persStoreMetrics Persistence metrics. + * @param longJvmPauseDetector Long JVM pause detector. + * @param failureProcessor Failure processor. + * @param cacheProcessor Cache processor. + * @throws IgniteCheckedException if fail. + */ + public LightweightCheckpointManager( + Function, IgniteLogger> logger, + String igniteInstanceName, + String checkpointThreadName, + WorkersRegistry workersRegistry, + DataStorageConfiguration persistenceCfg, + Supplier> dataRegions, + IgniteThrowableFunction pageMemoryGroupResolver, + PageMemoryImpl.ThrottlingPolicy throttlingPolicy, + IgniteCacheSnapshotManager snapshotMgr, + DataStorageMetricsImpl persStoreMetrics, + LongJVMPauseDetector longJvmPauseDetector, + FailureProcessor failureProcessor, + GridCacheProcessor cacheProcessor, + FilePageStoreManager pageStoreManager + ) throws IgniteCheckedException { + CheckpointReadWriteLock lock = new CheckpointReadWriteLock(logger); + + checkpointWorkflow = new CheckpointWorkflow( + logger, + null, + snapshotMgr, + null, + lock, + persistenceCfg.getCheckpointWriteOrder(), + dataRegions, + Collections::emptyList, + persistenceCfg.getCheckpointThreads(), + igniteInstanceName + ); + + ThreadLocal threadBuf = new ThreadLocal() { + /** {@inheritDoc} */ + @Override protected ByteBuffer initialValue() { + ByteBuffer tmpWriteBuf = ByteBuffer.allocateDirect(persistenceCfg.getPageSize()); + + tmpWriteBuf.order(ByteOrder.nativeOrder()); + + return tmpWriteBuf; + } + }; + + checkpointPagesWriterFactory = new CheckpointPagesWriterFactory( + logger, + snapshotMgr, + (pageMemEx, fullPage, buf, tag) -> + pageStoreManager.writeInternal(fullPage.groupId(), fullPage.pageId(), buf, tag, true), + persStoreMetrics, + throttlingPolicy, + threadBuf, + pageMemoryGroupResolver + ); + + checkpointerProvider = () -> new Checkpointer( + igniteInstanceName, + checkpointThreadName, + workersRegistry, + logger, + longJvmPauseDetector, + failureProcessor, + snapshotMgr, + persStoreMetrics, + cacheProcessor, + 
checkpointWorkflow, + checkpointPagesWriterFactory, + persistenceCfg.getCheckpointFrequency(), + persistenceCfg.getCheckpointThreads() + ); + + checkpointer = checkpointerProvider.get(); + + Long cfgCheckpointReadLockTimeout = persistenceCfg != null + ? persistenceCfg.getCheckpointReadLockTimeout() + : null; + + long checkpointReadLockTimeout = IgniteSystemProperties.getLong(IGNITE_CHECKPOINT_READ_LOCK_TIMEOUT, + cfgCheckpointReadLockTimeout != null + ? cfgCheckpointReadLockTimeout + : workersRegistry.getSystemWorkerBlockedTimeout()); + + checkpointTimeoutLock = new CheckpointTimeoutLock( + logger, + failureProcessor, + dataRegions, + lock, + checkpointer, + checkpointReadLockTimeout + ); + } + + /** + * @return Checkpoint lock which can be used for protection of writing to memory. + */ + public CheckpointTimeoutLock checkpointTimeoutLock() { + return checkpointTimeoutLock; + } + + /** + * Replace thread local with buffers. Thread local should provide direct buffer with one page in length. + * + * @param threadBuf new thread-local with buffers for the checkpoint threads. + */ + public void threadBuf(ThreadLocal threadBuf) { + checkpointPagesWriterFactory.threadBuf(threadBuf); + } + + /** + * @param lsnr Listener. + * @param dataRegion + */ + public void addCheckpointListener(CheckpointListener lsnr, DataRegion dataRegion) { + checkpointWorkflow.addCheckpointListener(lsnr, dataRegion); + } + + /** + * @param lsnr Listener. + */ + public void removeCheckpointListener(CheckpointListener lsnr) { + checkpointWorkflow.removeCheckpointListener(lsnr); + } + + /** + * @param memoryRecoveryRecordPtr Memory recovery record pointer. + */ + public void memoryRecoveryRecordPtr(WALPointer memoryRecoveryRecordPtr) { + checkpointWorkflow.memoryRecoveryRecordPtr(memoryRecoveryRecordPtr); + } + + /** + * Start the new checkpoint immediately. + * + * @param reason Reason. + * @param lsnr Listener which will be called on finish. + * @return Triggered checkpoint progress. 
+ */ + public CheckpointProgress forceCheckpoint( + String reason, + IgniteInClosure> lsnr + ) { + Checkpointer cp = this.checkpointer; + + if (cp == null) + return null; + + return cp.scheduleCheckpoint(0, reason, lsnr); + } + + /** + * + */ + public Checkpointer getCheckpointer() { + return checkpointer; + } + + /** + * @param context Group context. Can be {@code null} in case of crash recovery. + * @param groupId Group ID. + * @param partId Partition ID. + */ + public void schedulePartitionDestroy(@Nullable CacheGroupContext context, int groupId, int partId) { + Checkpointer cp = checkpointer; + + if (cp != null) + cp.schedulePartitionDestroy(context, groupId, partId); + } + + /** + * For test use only. + */ + public IgniteInternalFuture enableCheckpoints(boolean enable) { + return checkpointer.enableCheckpoints(enable); + } + + /** + * @throws IgniteCheckedException If failed. + */ + public void finalizeCheckpointOnRecovery( + long ts, + UUID id, + WALPointer ptr, + StripedExecutor exec + ) throws IgniteCheckedException { + assert checkpointer != null : "Checkpointer hasn't initialized yet"; + + checkpointer.finalizeCheckpointOnRecovery(ts, id, ptr, exec); + } + + /** + * @param grpId Group ID. + * @param partId Partition ID. + */ + public void cancelOrWaitPartitionDestroy(int grpId, int partId) throws IgniteCheckedException { + Checkpointer cp = checkpointer; + + if (cp != null) + checkpointer.cancelOrWaitPartitionDestroy(grpId, partId); + } + + /** + * @param cancel Cancel flag. + */ + public void stop(boolean cancel) { + checkpointTimeoutLock.stop(); + + Checkpointer cp = this.checkpointer; + + if (cp != null) + cp.shutdownCheckpointer(cancel); + + checkpointWorkflow.stop(); + + this.checkpointer = null; + } + + /** + * Initialize the checkpoint and prepare it to work. It should be called if the stop was called before. 
+ */ + public void init() { + if (this.checkpointer == null) { + checkpointWorkflow.start(); + + this.checkpointer = checkpointerProvider.get(); + } + } + + /** + * Checkpoint starts to do their work after this method. + */ + public void start() { + assert checkpointer != null : "Checkpointer can't be null during the start"; + + this.checkpointer.start(); + } + + /** + * Checkpoint lock blocks when stop method is called. This method allows continuing the work with a checkpoint lock + * if needed. + */ + public void unblockCheckpointLock() { + checkpointTimeoutLock.start(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java index 7dd1e80e80b3d..b88cad88f6a21 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java @@ -196,7 +196,7 @@ else if (db.temporaryMetaStorage() != null) { /** {@inheritDoc} */ @Override public void beforeCheckpointBegin(Context ctx) { } - }); + }, dataRegion); } } } @@ -291,7 +291,7 @@ else if (!readOnly || getOrAllocateMetas(partId = PageIdAllocator.OLD_METASTORE_ ); if (!readOnly) - ((GridCacheDatabaseSharedManager)db).addCheckpointListener(this); + ((GridCacheDatabaseSharedManager)db).addCheckpointListener(this, dataRegion); } } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/CheckpointListenerForRegionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/CheckpointListenerForRegionTest.java new file mode 100644 index 0000000000000..fb10775a438a0 --- /dev/null +++ 
b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/CheckpointListenerForRegionTest.java @@ -0,0 +1,191 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.db.checkpoint; + +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.persistence.DataRegion; +import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; +import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointListener; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.jetbrains.annotations.NotNull; +import org.junit.Test; + +import static 
org.apache.ignite.configuration.DataStorageConfiguration.DFLT_DATA_REG_DEFAULT_NAME; + +/** + * + */ +public class CheckpointListenerForRegionTest extends GridCommonAbstractTest { + /** This number show how many mandatory methods will be called on checkpoint listener during checkpoint. */ + private static final int CALLS_COUNT_PER_CHECKPOINT = 3; + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + stopAllGrids(); + + cleanPersistenceDir(); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + + cleanPersistenceDir(); + + super.afterTest(); + } + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + cfg.setConsistentId(igniteInstanceName); + + DataStorageConfiguration storageCfg = new DataStorageConfiguration(); + + storageCfg.setCheckpointFrequency(100_000); + storageCfg.getDefaultDataRegionConfiguration() + .setPersistenceEnabled(true) + .setMaxSize(300L * 1024 * 1024); + + cfg.setDataStorageConfiguration(storageCfg) + .setCacheConfiguration(new CacheConfiguration<>(DEFAULT_CACHE_NAME) + .setAffinity(new RendezvousAffinityFunction(false, 16))); + + return cfg; + } + + /** + * 1. Start the one node. + * 2. Configure the default cache. + * 3. Set the checkpoint listeners(for default region and for all regions) to watch the checkpoint. + * 4. Fill the data and trigger the checkpoint. + * 5. Expected: Both listeners should be called. + * 6. Remove the default region from the checkpoint. + * 7. Fill the data and trigger the checkpoint. + * 8. Expected: The only listener for all regions should be called. + * 9. Return default region back to the checkpoint. + * 10. Fill the data and trigger the checkpoint. + * 11. Expected: Both listeners should be called. + * + * @throws Exception if fail. 
+ */ + @Test + public void testCheckpointListenersInvokedOnlyIfRegionConfigured() throws Exception { + //given: One started node with default cache. + IgniteEx ignite0 = startGrid(0); + + ignite0.cluster().active(true); + + IgniteCache cache = ignite0.cache(DEFAULT_CACHE_NAME); + + GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager)(ignite0.context().cache().context().database()); + + DataRegion defaultRegion = db.checkpointedDataRegions().stream() + .filter(region -> DFLT_DATA_REG_DEFAULT_NAME.equals(region.config().getName())) + .findFirst() + .orElse(null); + + assertNotNull("Expected default data region in checkpoint list is not found.", defaultRegion); + + //and: Configure the listeners(for default region and for all regions) for watching for checkpoint. + AtomicInteger checkpointListenerDefaultRegionCounter = checkpointListenerWatcher(db, defaultRegion); + AtomicInteger checkpointListenerAllRegionCounter = checkpointListenerWatcher(db, null); + + //when: Checkpoint happened. + fillDataAndCheckpoint(ignite0, cache); + + //then: Both listeners should be called. + assertEquals(CALLS_COUNT_PER_CHECKPOINT, checkpointListenerDefaultRegionCounter.get()); + assertEquals(CALLS_COUNT_PER_CHECKPOINT, checkpointListenerAllRegionCounter.get()); + + //Remove the default region from checkpoint. + db.checkpointedDataRegions().remove(defaultRegion); + + //when: Checkpoint happened. + fillDataAndCheckpoint(ignite0, cache); + + //then: Only listener for all regions should be called. + assertEquals(CALLS_COUNT_PER_CHECKPOINT, checkpointListenerDefaultRegionCounter.get()); + assertEquals(2 * CALLS_COUNT_PER_CHECKPOINT, checkpointListenerAllRegionCounter.get()); + + assertTrue( + "Expected default data region in all regions list is not found.", + db.dataRegions().stream().anyMatch(region -> DFLT_DATA_REG_DEFAULT_NAME.equals(region.config().getName())) + ); + + //Return default region back to the checkpoint. 
+ db.checkpointedDataRegions().add(defaultRegion); + + //when: Checkpoint happened. + fillDataAndCheckpoint(ignite0, cache); + + //then: Both listeners should be called. + assertEquals(2 * CALLS_COUNT_PER_CHECKPOINT, checkpointListenerDefaultRegionCounter.get()); + assertEquals(3 * CALLS_COUNT_PER_CHECKPOINT, checkpointListenerAllRegionCounter.get()); + } + + /** + * Fill the data and trigger the checkpoint after that. + */ + private void fillDataAndCheckpoint( + IgniteEx ignite0, + IgniteCache cache + ) throws IgniteCheckedException { + for (int j = 0; j < 1024; j++) + cache.put(j, j); + + forceCheckpoint(ignite0); + } + + /** + * Add checkpoint listener which count the number of listener calls during each checkpoint. + * + * @param db Shared manager for manage the listeners. + * @param defaultRegion Region for which listener should be added. + * @return Integer which count the listener calls. + */ + @NotNull + private AtomicInteger checkpointListenerWatcher(GridCacheDatabaseSharedManager db, DataRegion defaultRegion) { + AtomicInteger checkpointListenerCounter = new AtomicInteger(); + + db.addCheckpointListener(new CheckpointListener() { + @Override public void onMarkCheckpointBegin(Context ctx) throws IgniteCheckedException { + checkpointListenerCounter.getAndIncrement(); + } + + @Override public void onCheckpointBegin(Context ctx) throws IgniteCheckedException { + checkpointListenerCounter.getAndIncrement(); + } + + @Override public void beforeCheckpointBegin(Context ctx) throws IgniteCheckedException { + checkpointListenerCounter.getAndIncrement(); + } + }, defaultRegion); + return checkpointListenerCounter; + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/CheckpointStartLoggingTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/CheckpointStartLoggingTest.java index 51a4073a71c58..5d75b1ff8a73a 100644 --- 
a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/CheckpointStartLoggingTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/CheckpointStartLoggingTest.java @@ -44,7 +44,7 @@ public class CheckpointStartLoggingTest extends GridCommonAbstractTest { "walCpRecordFsyncDuration=" + VALID_MS_PATTERN + ", " + "writeCheckpointEntryDuration=" + VALID_MS_PATTERN + ", " + "splitAndSortCpPagesDuration=" + VALID_MS_PATTERN + ", " + - ".* pages=[1-9][0-9]*, " + + ".*pages=[1-9][0-9]*, " + "reason=.*"; /** */ diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/LightweightCheckpointTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/LightweightCheckpointTest.java new file mode 100644 index 0000000000000..85da92c122bbe --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/LightweightCheckpointTest.java @@ -0,0 +1,231 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.db.checkpoint; + +import java.io.File; +import java.nio.file.Paths; +import java.util.Arrays; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.configuration.WALMode; +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.CacheGroupDescriptor; +import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxLog; +import org.apache.ignite.internal.processors.cache.persistence.CheckpointState; +import org.apache.ignite.internal.processors.cache.persistence.DataRegion; +import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; +import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointListener; +import org.apache.ignite.internal.processors.cache.persistence.checkpoint.LightweightCheckpointManager; +import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; +import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetaStorage; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryImpl; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; + +import static org.apache.ignite.configuration.DataStorageConfiguration.DFLT_DATA_REG_DEFAULT_NAME; +import static 
org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.METASTORE_DATA_REGION_NAME; +import static org.apache.ignite.testframework.GridTestUtils.waitForCondition; + +/** + * + */ +public class LightweightCheckpointTest extends GridCommonAbstractTest { + /** Data region which should not be checkpointed. */ + public static final String NOT_CHECKPOINTED_REGION = "NotCheckpointedRegion"; + + /** Cache which should not be checkpointed. */ + public static final String NOT_CHECKPOINTED_CACHE = "notCheckpointedCache"; + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + stopAllGrids(); + + cleanPersistenceDir(); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + +// cleanPersistenceDir(); + + super.afterTest(); + } + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + cfg.setConsistentId(igniteInstanceName); + + DataStorageConfiguration storageCfg = new DataStorageConfiguration(); + + storageCfg.setWalMode(WALMode.NONE); + storageCfg.setCheckpointFrequency(100_000); + storageCfg.setDataRegionConfigurations(new DataRegionConfiguration() + .setName(NOT_CHECKPOINTED_REGION) + .setPersistenceEnabled(true) + .setMaxSize(300L * 1024 * 1024) + + ); + storageCfg.getDefaultDataRegionConfiguration() + .setPersistenceEnabled(true) + .setMaxSize(300L * 1024 * 1024); + + cfg.setDataStorageConfiguration(storageCfg) + + .setCacheConfiguration( + new CacheConfiguration<>(DEFAULT_CACHE_NAME) + .setAffinity(new RendezvousAffinityFunction(false, 16)) + .setDataRegionName(DFLT_DATA_REG_DEFAULT_NAME), + new CacheConfiguration<>(NOT_CHECKPOINTED_CACHE) + .setAffinity(new RendezvousAffinityFunction(false, 16)) + .setDataRegionName(NOT_CHECKPOINTED_REGION) + ); + + return cfg; + } + + /** + * 1. 
Start the one node with disabled WAL and with two caches. + * 2. Disable default checkpoint. + * 3. Create light checkpoint for one cache and configure checkpoint listener for it. + * 4. Fill the both caches. + * 5. Trigger the light checkpoint and wait for the finish. + * 6. Stop the node and start it again. + * 7. Expected: Cache which was checkpointed would have the all data meanwhile second cache would be empty. + * + * @throws Exception if fail. + */ + @Test + public void testLightCheckpointAbleToStoreOnlyGivenDataRegion() throws Exception { + //given: One started node with default cache and cache which won't be checkpointed. + IgniteEx ignite0 = startGrid(0); + ignite0.cluster().active(true); + + IgniteCache checkpointedCache = ignite0.cache(DEFAULT_CACHE_NAME); + IgniteCache notCheckpointedCache = ignite0.cache(NOT_CHECKPOINTED_CACHE); + + GridKernalContext context = ignite0.context(); + GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager)(context.cache().context().database()); + + waitForCondition(() -> !db.getCheckpointer().currentProgress().inProgress(), 10_000); + + //and: disable the default checkpoint. + db.enableCheckpoints(false); + + DataRegion regionForCheckpoint = db.dataRegion(DFLT_DATA_REG_DEFAULT_NAME); + + //and: Create light checkpoint with only one region. 
+ LightweightCheckpointManager lightweightCheckpointManager = new LightweightCheckpointManager( + context::log, + context.igniteInstanceName(), + "light-test-checkpoint", + context.workersRegistry(), + context.config().getDataStorageConfiguration(), + () -> Arrays.asList(regionForCheckpoint), + grpId -> getPageMemoryForCacheGroup(grpId, db, context), + PageMemoryImpl.ThrottlingPolicy.CHECKPOINT_BUFFER_ONLY, + context.cache().context().snapshot(), + db.persistentStoreMetricsImpl(), + context.longJvmPauseDetector(), + context.failure(), + context.cache(), + (FilePageStoreManager)context.cache().context().pageStore() + ); + + //and: Add checkpoint listener for DEFAULT_CACHE in order of storing the meta pages. + lightweightCheckpointManager.addCheckpointListener( + (CheckpointListener)context.cache().cacheGroup(groupIdForCache(ignite0, DEFAULT_CACHE_NAME)).offheap(), + regionForCheckpoint + ); + + lightweightCheckpointManager.start(); + + //when: Fill the caches + for (int j = 0; j < 1024; j++) { + checkpointedCache.put(j, j); + notCheckpointedCache.put(j, j); + } + + //and: Trigger and wait for the checkpoint. + lightweightCheckpointManager.forceCheckpoint("test", null) + .futureFor(CheckpointState.FINISHED) + .get(); + + //and: Stop and start node. + stopAllGrids(); + + ignite0 = startGrid(0); + ignite0.cluster().active(true); + + checkpointedCache = ignite0.cache(DEFAULT_CACHE_NAME); + notCheckpointedCache = ignite0.cache(NOT_CHECKPOINTED_CACHE); + + //then: Checkpointed cache should have all data meanwhile uncheckpointed cache should be empty. 
+ for (int j = 1; j < 1024; j++) { + assertEquals(j, checkpointedCache.get(j)); + assertNull(notCheckpointedCache.get(j)); + } + + GridCacheDatabaseSharedManager db2 = (GridCacheDatabaseSharedManager) + (ignite0.context().cache().context().database()); + + waitForCondition(() -> !db2.getCheckpointer().currentProgress().inProgress(), 10_000); + + String nodeFolderName = ignite0.context().pdsFolderResolver().resolveFolders().folderName(); + File cpMarkersDir = Paths.get(U.defaultWorkDirectory(), "db", nodeFolderName, "cp").toFile(); + + //then: Expected only two pairs checkpoint markers - both from the start of node. + assertEquals(4, cpMarkersDir.listFiles().length); + } + + /** + * @return Page memory which corresponds to grpId. + */ + private PageMemoryEx getPageMemoryForCacheGroup( + int grpId, + GridCacheDatabaseSharedManager db, + GridKernalContext context + ) throws IgniteCheckedException { + if (grpId == MetaStorage.METASTORAGE_CACHE_ID) + return (PageMemoryEx)db.dataRegion(METASTORE_DATA_REGION_NAME).pageMemory(); + + if (grpId == TxLog.TX_LOG_CACHE_ID) + return (PageMemoryEx)db.dataRegion(TxLog.TX_LOG_CACHE_NAME).pageMemory(); + + CacheGroupDescriptor desc = context.cache().cacheGroupDescriptors().get(grpId); + + if (desc == null) + return null; + + String memPlcName = desc.config().getDataRegionName(); + + return (PageMemoryEx)context.cache().context().database().dataRegion(memPlcName).pageMemory(); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java index 11b9ba5a38010..5510c4be52786 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java @@ -20,7 +20,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; - import 
org.apache.ignite.internal.processors.cache.distributed.dht.preloader.HistoricalRebalanceHeuristicsTest; import org.apache.ignite.internal.processors.cache.persistence.IgniteDataStorageMetricsSelfTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsCacheStartStopWithFreqCheckpointTest; @@ -52,8 +51,10 @@ import org.apache.ignite.internal.processors.cache.persistence.db.SlowHistoricalRebalanceSmallHistoryTest; import org.apache.ignite.internal.processors.cache.persistence.db.checkpoint.CheckpointFailBeforeWriteMarkTest; import org.apache.ignite.internal.processors.cache.persistence.db.checkpoint.CheckpointFreeListTest; +import org.apache.ignite.internal.processors.cache.persistence.db.checkpoint.CheckpointListenerForRegionTest; import org.apache.ignite.internal.processors.cache.persistence.db.checkpoint.CheckpointStartLoggingTest; import org.apache.ignite.internal.processors.cache.persistence.db.checkpoint.IgniteCheckpointDirtyPagesForLowLoadTest; +import org.apache.ignite.internal.processors.cache.persistence.db.checkpoint.LightweightCheckpointTest; import org.apache.ignite.internal.processors.cache.persistence.db.filename.IgniteUidAsConsistentIdMigrationTest; import org.apache.ignite.internal.processors.cache.persistence.db.wal.FsyncWalRolloverDoesNotBlockTest; import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteLocalWalSizeTest; @@ -212,6 +213,8 @@ public static void addRealPageStoreTests(List> suite, Collection GridTestUtils.addTestIfNeeded(suite, IgnitePdsCorruptedStoreTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, CheckpointFailBeforeWriteMarkTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, CheckpointFreeListTest.class, ignoredTests); + GridTestUtils.addTestIfNeeded(suite, CheckpointListenerForRegionTest.class, ignoredTests); + GridTestUtils.addTestIfNeeded(suite, LightweightCheckpointTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, CheckpointStartLoggingTest.class, 
ignoredTests); GridTestUtils.addTestIfNeeded(suite, FreeListCachingTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, IgniteWalIteratorSwitchSegmentTest.class, ignoredTests); From 2b901e45266f934220f5500a5df659a7f682b60c Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Thu, 12 Nov 2020 03:49:50 +0300 Subject: [PATCH 039/110] IGNITE-13107: Fix memory leak in ODBC and C++ thin This closes #7890 --- .../include/ignite/common/concurrent_os.h | 8 + .../win/include/ignite/common/concurrent_os.h | 8 + .../platforms/cpp/odbc-test/CMakeLists.txt | 1 + .../cpp/odbc-test/include/test_server.h | 201 ++++++++++++++++++ .../odbc-test/project/vs/odbc-test.vcxproj | 2 + .../project/vs/odbc-test.vcxproj.filters | 6 + .../cpp/odbc-test/src/connection_test.cpp | 21 +- .../cpp/odbc-test/src/test_server.cpp | 182 ++++++++++++++++ modules/platforms/cpp/odbc/src/connection.cpp | 6 +- .../thin/transactions/transactions_proxy.h | 5 +- .../src/impl/transactions/transaction_impl.h | 14 +- .../impl/transactions/transactions_impl.cpp | 59 +++-- .../src/impl/transactions/transactions_impl.h | 20 +- 13 files changed, 484 insertions(+), 49 deletions(-) create mode 100644 modules/platforms/cpp/odbc-test/include/test_server.h create mode 100644 modules/platforms/cpp/odbc-test/src/test_server.cpp diff --git a/modules/platforms/cpp/common/os/linux/include/ignite/common/concurrent_os.h b/modules/platforms/cpp/common/os/linux/include/ignite/common/concurrent_os.h index 18ba54deb64f2..66f6656a7ddf9 100644 --- a/modules/platforms/cpp/common/os/linux/include/ignite/common/concurrent_os.h +++ b/modules/platforms/cpp/common/os/linux/include/ignite/common/concurrent_os.h @@ -402,6 +402,14 @@ namespace ignite // No-op. } + /** + * Destructor. + */ + ~ThreadLocalInstance() + { + Remove(); + } + /** * Get value. 
* diff --git a/modules/platforms/cpp/common/os/win/include/ignite/common/concurrent_os.h b/modules/platforms/cpp/common/os/win/include/ignite/common/concurrent_os.h index a4f6f58329104..b1e89164c49da 100644 --- a/modules/platforms/cpp/common/os/win/include/ignite/common/concurrent_os.h +++ b/modules/platforms/cpp/common/os/win/include/ignite/common/concurrent_os.h @@ -412,6 +412,14 @@ namespace ignite // No-op. } + /** + * Destructor. + */ + ~ThreadLocalInstance() + { + Remove(); + } + /** * Get value. * diff --git a/modules/platforms/cpp/odbc-test/CMakeLists.txt b/modules/platforms/cpp/odbc-test/CMakeLists.txt index 9b87a60f23c73..d9cefb68efe9d 100644 --- a/modules/platforms/cpp/odbc-test/CMakeLists.txt +++ b/modules/platforms/cpp/odbc-test/CMakeLists.txt @@ -65,6 +65,7 @@ set(SOURCES src/teamcity/teamcity_boost.cpp src/authentication_test.cpp src/sql_parsing_test.cpp src/streaming_test.cpp + src/test_server.cpp ../odbc/src/log.cpp ../odbc/src/cursor.cpp ../odbc/src/diagnostic/diagnostic_record.cpp diff --git a/modules/platforms/cpp/odbc-test/include/test_server.h b/modules/platforms/cpp/odbc-test/include/test_server.h new file mode 100644 index 0000000000000..c50cc709c73ce --- /dev/null +++ b/modules/platforms/cpp/odbc-test/include/test_server.h @@ -0,0 +1,201 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _IGNITE_ODBC_TEST_TEST_SERVER +#define _IGNITE_ODBC_TEST_TEST_SERVER + +#include + +#include + +#ifndef _WIN32_WINNT +#define _WIN32_WINNT 0x0601 +#endif // _WIN32_WINNT + +#include +#include + +namespace ignite +{ + +/** + * Test Server Session. + */ +class TestServerSession +{ +public: + /** + * Construct new instance of class. + * @param service Asio service. + * @param responses Responses to provide to requests. + */ + TestServerSession(boost::asio::io_service& service, const std::vector< std::vector >& responses); + + /** + * Get socket. + */ + boost::asio::ip::tcp::socket& GetSocket() + { + return socket; + } + + /** + * Start session. + */ + void Start(); + + /** + * Get response at index. + * @param idx Index. + * @return Response. + */ + const std::vector& GetResponse(size_t idx) const + { + return responses.at(idx); + } + +private: + /** + * Receive next request. + */ + void ReadNextRequest(); + + /** + * Handle received request size. + * @param error Error. + * @param bytesTransferred Bytes transferred. + */ + void HandleRequestSizeReceived(const boost::system::error_code& error, size_t bytesTransferred); + + /** + * Handle received request. + * @param error Error. + * @param bytesTransferred Bytes transferred. + */ + void HandleRequestReceived(const boost::system::error_code& error, size_t bytesTransferred); + + /** + * Handle received request. + * @param error Error. + * @param bytesTransferred Bytes transferred. + */ + void HandleResponseSent(const boost::system::error_code& error, size_t bytesTransferred); + + // The socket used to communicate with the client. + boost::asio::ip::tcp::socket socket; + + // Received requests. + std::vector< std::vector > requests; + + // Responses to provide. + const std::vector< std::vector > responses; + + // Number of requests answered. + size_t requestsResponded; +}; + +/** + * Test Server. 
+ */ +class TestServer +{ +public: + /** + * Constructor. + * @param port TCP port to listen. + */ + TestServer(uint16_t port = 11110); + + /** + * Destructor. + */ + ~TestServer(); + + /** + * Push new handshake response to send. + * @param accept Accept or reject response. + */ + void PushHandshakeResponse(bool accept) + { + std::vector rsp(4 + 1); + rsp[0] = 1; + rsp[4] = accept ? 1 : 0; + + PushResponse(rsp); + } + + /** + * Push new response to send. + * @param resp Response to push. + */ + void PushResponse(const std::vector& resp) + { + responses.push_back(resp); + } + + /** + * Get specified session. + * @param idx Index. + * @return Specified session. + */ + TestServerSession& GetSession(size_t idx = 0) + { + return *sessions.at(idx); + } + + /** + * Start server. + */ + void Start(); + + /** + * Stop server. + */ + void Stop(); + +private: + /** + * Start accepting connections. + */ + void StartAccept(); + + /** + * Handle accepted connection. + * @param session Accepted session. + * @param error Error. + */ + void HandleAccept(boost::shared_ptr session, const boost::system::error_code& error); + + // Service. + boost::asio::io_service service; + + // Acceptor. + boost::asio::ip::tcp::acceptor acceptor; + + // Reponses. + std::vector< std::vector > responses; + + // Sessions. + std::vector< boost::shared_ptr > sessions; + + // Server Thread. 
+ boost::shared_ptr serverThread; +}; + +} // namespace ignite + +#endif //_IGNITE_ODBC_TEST_TEST_SERVER \ No newline at end of file diff --git a/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj b/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj index 53a68b8925312..90abdc82a7404 100644 --- a/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj +++ b/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj @@ -209,6 +209,7 @@ + @@ -219,6 +220,7 @@ + diff --git a/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj.filters b/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj.filters index 3af88e20ea2a0..68c78cdcfc92f 100644 --- a/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj.filters +++ b/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj.filters @@ -190,6 +190,9 @@ Externals + + Code + @@ -210,6 +213,9 @@ Code + + Code + diff --git a/modules/platforms/cpp/odbc-test/src/connection_test.cpp b/modules/platforms/cpp/odbc-test/src/connection_test.cpp index ee54ed265c2a4..6d4ed9a8edce1 100644 --- a/modules/platforms/cpp/odbc-test/src/connection_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/connection_test.cpp @@ -15,6 +15,7 @@ * limitations under the License. */ +#include "test_server.h" #ifdef _WIN32 # include #endif @@ -29,7 +30,6 @@ #include "ignite/ignite.h" #include "ignite/ignition.h" -#include "test_type.h" #include "test_utils.h" #include "odbc_test_suite.h" @@ -50,7 +50,7 @@ struct ConnectionTestSuiteFixture: odbc::OdbcTestSuite ConnectionTestSuiteFixture() : OdbcTestSuite() { - StartNode(); + // No-op. } /** @@ -109,6 +109,8 @@ BOOST_FIXTURE_TEST_SUITE(ConnectionTestSuite, ConnectionTestSuiteFixture) BOOST_AUTO_TEST_CASE(TestConnectionRestore) { + StartNode(); + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); // Check that query was successfully executed. 
@@ -129,4 +131,19 @@ BOOST_AUTO_TEST_CASE(TestConnectionRestore) BOOST_CHECK_EQUAL(ExecQueryAndReturnError(), ""); } +BOOST_AUTO_TEST_CASE(TestConnectionMemoryLeak) +{ + TestServer testServer(11100); + + testServer.PushHandshakeResponse(true); + testServer.Start(); + + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11100;SCHEMA=cache"); + + ExecQuery("Select * from Test"); + + Disconnect(); + Disconnect(); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/modules/platforms/cpp/odbc-test/src/test_server.cpp b/modules/platforms/cpp/odbc-test/src/test_server.cpp new file mode 100644 index 0000000000000..cc6f877b3634c --- /dev/null +++ b/modules/platforms/cpp/odbc-test/src/test_server.cpp @@ -0,0 +1,182 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include + +#ifdef _MSC_VER +# pragma warning(push) +# pragma warning(disable : 4355) +#endif //_MSC_VER + +#ifndef _WIN32_WINNT +#define _WIN32_WINNT 0x0601 +#endif // _WIN32_WINNT + +#include +#include +#include + +#ifdef _MSC_VER +# pragma warning(pop) +#endif //_MSC_VER + +#include +#include + +#include "test_server.h" + +namespace ignite +{ + +TestServerSession::TestServerSession(boost::asio::io_service& service, const std::vector< std::vector >& responses) : + socket(service), + responses(responses), + requestsResponded(0) +{ + // No-op. +} + +void TestServerSession::Start() +{ + ReadNextRequest(); +} + +void TestServerSession::ReadNextRequest() +{ + requests.push_back(std::vector()); + + std::vector& newRequest = requests.back(); + newRequest.resize(4); + + async_read(socket, boost::asio::buffer(newRequest.data(), newRequest.size()), + boost::bind(&TestServerSession::HandleRequestSizeReceived, this, + boost::asio::placeholders::error, + boost::asio::placeholders::bytes_transferred)); +} + +void TestServerSession::HandleRequestSizeReceived(const boost::system::error_code& error, size_t bytesTransferred) +{ + if (error || bytesTransferred != 4) + { + socket.close(); + + return; + } + + std::vector& newRequest = requests.back(); + impl::interop::InteropUnpooledMemory mem(4); + mem.Length(4); + + memcpy(mem.Data(), newRequest.data(), newRequest.size()); + int32_t size = impl::binary::BinaryUtils::ReadInt32(mem, 0); + + newRequest.resize(4 + size); + + async_read(socket, boost::asio::buffer(newRequest.data() + 4, size), + boost::bind(&TestServerSession::HandleRequestReceived, this, + boost::asio::placeholders::error, + boost::asio::placeholders::bytes_transferred)); +} + +void TestServerSession::HandleRequestReceived(const boost::system::error_code& error, size_t bytesTransferred) +{ + if (error || !bytesTransferred || requestsResponded == responses.size()) + { + socket.close(); + + return; + } + + const std::vector& response = 
responses.at(requestsResponded); + + async_write(socket, boost::asio::buffer(response.data(), response.size()), + boost::bind(&TestServerSession::HandleResponseSent, this, + boost::asio::placeholders::error, + boost::asio::placeholders::bytes_transferred)); + + ++requestsResponded; +} + +void TestServerSession::HandleResponseSent(const boost::system::error_code& error, size_t bytesTransferred) +{ + if (error || !bytesTransferred) + { + socket.close(); + + return; + } + + ReadNextRequest(); +} + + +TestServer::TestServer(uint16_t port) : + acceptor(service, boost::asio::ip::tcp::endpoint(boost::asio::ip::tcp::v4(), port)) +{ + // No-op. +} + +TestServer::~TestServer() +{ + Stop(); +} + +void TestServer::Start() +{ + if (!serverThread) + { + StartAccept(); + serverThread.reset(new boost::thread(boost::bind(&boost::asio::io_service::run, &service))); + } +} + +void TestServer::Stop() +{ + if (serverThread) + { + service.stop(); + serverThread->join(); + serverThread.reset(); + } +} + +void TestServer::StartAccept() +{ + using namespace boost::asio; + + boost::shared_ptr newSession; + newSession.reset(new TestServerSession(service, responses)); + + acceptor.async_accept(newSession->GetSocket(), + boost::bind(&TestServer::HandleAccept, this, newSession, placeholders::error)); +} + +void TestServer::HandleAccept(boost::shared_ptr session, const boost::system::error_code& error) +{ + if (!error) + { + session->Start(); + + sessions.push_back(session); + } + + StartAccept(); +} + +} // namespace ignite diff --git a/modules/platforms/cpp/odbc/src/connection.cpp b/modules/platforms/cpp/odbc/src/connection.cpp index 38f1bf3e4cc34..a5beb0ca224f8 100644 --- a/modules/platforms/cpp/odbc/src/connection.cpp +++ b/modules/platforms/cpp/odbc/src/connection.cpp @@ -210,7 +210,9 @@ namespace ignite { AddStatusRecord(SqlState::S08003_NOT_CONNECTED, "Connection is not open."); - return SqlResult::AI_ERROR; + // It is important to return SUCCESS_WITH_INFO and not ERROR here, as if we 
return an error, Windows + // Driver Manager may decide that connection is not valid anymore which results in memory leak. + return SqlResult::AI_SUCCESS_WITH_INFO; } Close(); @@ -680,7 +682,7 @@ namespace ignite if (!rsp.GetError().empty()) constructor << "Additional info: " << rsp.GetError() << " "; - constructor << "Current version of the protocol, used by the server node is " + constructor << "Current version of the protocol, used by the server node is " << rsp.GetCurrentVer().ToString() << ", " << "driver protocol version introduced in version " << protocolVersion.ToString() << "."; diff --git a/modules/platforms/cpp/thin-client/include/ignite/impl/thin/transactions/transactions_proxy.h b/modules/platforms/cpp/thin-client/include/ignite/impl/thin/transactions/transactions_proxy.h index ce0802cf56286..64ad200d3de88 100644 --- a/modules/platforms/cpp/thin-client/include/ignite/impl/thin/transactions/transactions_proxy.h +++ b/modules/platforms/cpp/thin-client/include/ignite/impl/thin/transactions/transactions_proxy.h @@ -65,7 +65,10 @@ namespace ignite /** * Destructor. */ - ~TransactionProxy() {}; + ~TransactionProxy() + { + // No-op. + }; /** * Commit the transaction. diff --git a/modules/platforms/cpp/thin-client/src/impl/transactions/transaction_impl.h b/modules/platforms/cpp/thin-client/src/impl/transactions/transaction_impl.h index dedf0bc60f6fe..b6f9aa13a00d6 100644 --- a/modules/platforms/cpp/thin-client/src/impl/transactions/transaction_impl.h +++ b/modules/platforms/cpp/thin-client/src/impl/transactions/transaction_impl.h @@ -117,12 +117,7 @@ namespace ignite /** * Sets close flag to tx. */ - void Closed(); - - /** - * @return Current transaction. - */ - static SP_TransactionImpl GetCurrent(); + void SetClosed(); /** * Starts transaction. @@ -143,10 +138,10 @@ namespace ignite ignite::common::concurrent::SharedPointer > label); protected: /** Checks current thread state. 
*/ - static void txThreadCheck(const TransactionImpl& tx); + void ThreadCheck(); /** Completes tc and clear state from storage. */ - static void txThreadEnd(TransactionImpl& tx); + void ThreadEnd(); private: /** Transactions implementation. */ @@ -155,9 +150,6 @@ namespace ignite /** Current transaction Id. */ int32_t txId; - /** Thread local instance of the transaction. */ - static ignite::common::concurrent::ThreadLocalInstance threadTx; - /** Concurrency. */ int concurrency; diff --git a/modules/platforms/cpp/thin-client/src/impl/transactions/transactions_impl.cpp b/modules/platforms/cpp/thin-client/src/impl/transactions/transactions_impl.cpp index 1cd52e33af572..d78593276fe99 100644 --- a/modules/platforms/cpp/thin-client/src/impl/transactions/transactions_impl.cpp +++ b/modules/platforms/cpp/thin-client/src/impl/transactions/transactions_impl.cpp @@ -32,11 +32,10 @@ namespace ignite { namespace transactions { - ThreadLocalInstance TransactionImpl::threadTx; - TransactionsImpl::TransactionsImpl(const SP_DataRouter& router) : router(router) { + // No-op. 
} template @@ -68,7 +67,7 @@ namespace ignite int32_t txSize, SharedPointer > label) { - SP_TransactionImpl tx = threadTx.Get(); + SP_TransactionImpl tx = txs.GetCurrent(); TransactionImpl* ptr = tx.Get(); @@ -87,42 +86,40 @@ namespace ignite tx = SP_TransactionImpl(new TransactionImpl(txs, curTxId, concurrency, isolation, timeout, txSize)); - threadTx.Set(tx); + txs.SetCurrent(tx); return tx; } - SP_TransactionImpl TransactionImpl::GetCurrent() + bool TransactionImpl::IsClosed() const + { + return closed; + } + + SP_TransactionImpl TransactionsImpl::GetCurrent() { SP_TransactionImpl tx = threadTx.Get(); TransactionImpl* ptr = tx.Get(); - if (ptr) + if (ptr && ptr->IsClosed()) { - if (ptr->IsClosed()) - { - tx = SP_TransactionImpl(); + threadTx.Remove(); - threadTx.Remove(); - } - } - else - { tx = SP_TransactionImpl(); } return tx; } - bool TransactionImpl::IsClosed() const + void TransactionsImpl::SetCurrent(const SP_TransactionImpl& impl) { - return closed; + threadTx.Set(impl); } - SP_TransactionImpl TransactionsImpl::GetCurrent() + void TransactionsImpl::ResetCurrent() { - return TransactionImpl::GetCurrent(); + threadTx.Remove(); } int32_t TransactionsImpl::TxCommit(int32_t txId) @@ -154,25 +151,25 @@ namespace ignite void TransactionImpl::Commit() { - txThreadCheck(*this); + ThreadCheck(); txs.TxCommit(txId); - txThreadEnd(*this); + ThreadEnd(); } void TransactionImpl::Rollback() { - txThreadCheck(*this); + ThreadCheck(); txs.TxRollback(txId); - txThreadEnd(*this); + ThreadEnd(); } void TransactionImpl::Close() { - txThreadCheck(*this); + ThreadCheck(); if (IsClosed()) { @@ -181,31 +178,31 @@ namespace ignite txs.TxClose(txId); - txThreadEnd(*this); + ThreadEnd(); } - void TransactionImpl::Closed() + void TransactionImpl::SetClosed() { closed = true; } - void TransactionImpl::txThreadEnd(TransactionImpl& tx) + void TransactionImpl::ThreadEnd() { - tx.Closed(); + this->SetClosed(); - threadTx.Set(0); + txs.ResetCurrent(); } - void 
TransactionImpl::txThreadCheck(const TransactionImpl& inTx) + void TransactionImpl::ThreadCheck() { - SP_TransactionImpl tx = threadTx.Get(); + SP_TransactionImpl tx = txs.GetCurrent(); TransactionImpl* ptr = tx.Get(); if (!ptr) throw IgniteError(IgniteError::IGNITE_ERR_TX_THIS_THREAD, TX_ALREADY_CLOSED); - if (ptr->TxId() != inTx.TxId()) + if (ptr->TxId() != this->TxId()) throw IgniteError(IgniteError::IGNITE_ERR_TX_THIS_THREAD, TX_DIFFERENT_THREAD); } } diff --git a/modules/platforms/cpp/thin-client/src/impl/transactions/transactions_impl.h b/modules/platforms/cpp/thin-client/src/impl/transactions/transactions_impl.h index e8d3df9559bfb..278545d3f4ed9 100644 --- a/modules/platforms/cpp/thin-client/src/impl/transactions/transactions_impl.h +++ b/modules/platforms/cpp/thin-client/src/impl/transactions/transactions_impl.h @@ -101,11 +101,23 @@ namespace ignite * Get active transaction for the current thread. * * @return Active transaction implementation for current thread - * or null pointer if there is no active transaction for - * the thread. + * or null pointer if there is no active transaction for the thread. */ SP_TransactionImpl GetCurrent(); + /** + * Set active transaction for the current thread. + * + * @param impl Active transaction implementation for current thread + * or null pointer if there is no active transaction for the thread. + */ + void SetCurrent(const SP_TransactionImpl& impl); + + /** + * Reset active transaction for the current thread. + */ + void ResetCurrent(); + /** * Synchronously send message and receive response. * @@ -115,10 +127,14 @@ namespace ignite */ template void SendTxMessage(const ReqT& req, RspT& rsp); + private: /** Data router. */ SP_DataRouter router; + /** Thread local instance of the transaction. 
*/ + ignite::common::concurrent::ThreadLocalInstance threadTx; + IGNITE_NO_COPY_ASSIGNMENT(TransactionsImpl); }; } From c30d1ed7b6f6abdade9eb12ca765b701a2280c44 Mon Sep 17 00:00:00 2001 From: Pavel Tupitsyn Date: Thu, 12 Nov 2020 11:20:12 +0300 Subject: [PATCH 040/110] IGNITE-13608 .NET: Add Partitions and UpdateBatchSize to SqlFieldsQuery * Add `Partitions` and `UpdateBatchSize` to `SqlFieldsQuery` and `QueryOptions` (LINQ) for thick and thin APIs (new feature flag added to thin client protocol) * Add `updateBatchSize` validation on the server side (invalid values were ignored previously) * Propagate `updateBatchSize` and `partitions` in Java thin client --- .../ignite/cache/query/SqlFieldsQuery.java | 2 + .../internal/client/thin/ClientUtils.java | 11 ++++ .../client/thin/ProtocolBitmaskFeature.java | 5 +- .../platform/cache/PlatformCache.java | 6 +- .../platform/client/ClientBitmaskFeature.java | 7 ++- .../ClientCacheSqlFieldsQueryRequest.java | 29 ++++++++++ .../ignite/client/FunctionalQueryTest.java | 27 +++++++++ .../ignite/cache/query/query_sql_fields.h | 3 + .../Cache/Query/CacheQueriesTest.cs | 56 ++++++++++++++----- .../Query/Linq/CacheLinqTest.Introspection.cs | 32 +++++++---- .../Client/Cache/SqlQueryTest.cs | 34 ++++++++++- .../Cache/Query/SqlFieldsQuery.cs | 39 ++++++++++--- .../Impl/Binary/BinaryHashCodeUtils.cs | 1 + .../Impl/Client/Cache/CacheClient.cs | 18 +++++- .../Impl/Client/ClientBitmaskFeature.cs | 8 ++- .../Apache.Ignite.Linq.csproj | 2 +- .../Impl/CacheFieldsQueryExecutor.cs | 8 ++- .../dotnet/Apache.Ignite.Linq/QueryOptions.cs | 24 +++++++- 18 files changed, 266 insertions(+), 46 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/cache/query/SqlFieldsQuery.java b/modules/core/src/main/java/org/apache/ignite/cache/query/SqlFieldsQuery.java index 6d6e16733c8de..882fd1f7dea05 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/query/SqlFieldsQuery.java +++ 
b/modules/core/src/main/java/org/apache/ignite/cache/query/SqlFieldsQuery.java @@ -409,6 +409,8 @@ public int getUpdateBatchSize() { * @return {@code this} for chaining. */ public SqlFieldsQuery setUpdateBatchSize(int updateBatchSize) { + A.ensure(updateBatchSize >= 1, "updateBatchSize cannot be lower than 1"); + this.updateBatchSize = updateBatchSize; return this; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientUtils.java index 1e62a5fa83ce5..ad2b96dc554b2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientUtils.java @@ -524,6 +524,17 @@ void write(SqlFieldsQuery qry, BinaryOutputStream out) { out.writeBoolean(qry.isLazy()); out.writeLong(qry.getTimeout()); out.writeBoolean(true); // include column names + + if (qry.getPartitions() != null) { + out.writeInt(qry.getPartitions().length); + + for (int part : qry.getPartitions()) + out.writeInt(part); + } + else + out.writeInt(-1); + + out.writeInt(qry.getUpdateBatchSize()); } /** Write Ignite binary object to output stream. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ProtocolBitmaskFeature.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ProtocolBitmaskFeature.java index 3577a5cf31da0..0e90c1be97b55 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ProtocolBitmaskFeature.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ProtocolBitmaskFeature.java @@ -43,7 +43,10 @@ public enum ProtocolBitmaskFeature { SERVICE_INVOKE(5), /** Feature for use default query timeout if the qry timeout isn't set explicitly. 
*/ - DEFAULT_QRY_TIMEOUT(6); + DEFAULT_QRY_TIMEOUT(6), + + /** Additional SqlFieldsQuery properties: partitions, updateBatchSize */ + QRY_PARTITIONS_BATCH_SIZE(7); /** */ private static final EnumSet ALL_FEATURES_AS_ENUM_SET = diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/PlatformCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/PlatformCache.java index 2ae500cc57e55..d5e53e1bb4801 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/PlatformCache.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/PlatformCache.java @@ -1456,6 +1456,8 @@ private Query readFieldsQuery(BinaryRawReaderEx reader) { boolean replicated = reader.readBoolean(); boolean collocated = reader.readBoolean(); String schema = reader.readString(); + int[] partitions = reader.readIntArray(); + int updateBatchSize = reader.readInt(); SqlFieldsQuery qry = QueryUtils.withQueryTimeout(new SqlFieldsQuery(sql), timeout, TimeUnit.MILLISECONDS) .setPageSize(pageSize) @@ -1466,7 +1468,9 @@ private Query readFieldsQuery(BinaryRawReaderEx reader) { .setLazy(lazy) .setReplicatedOnly(replicated) .setCollocated(collocated) - .setSchema(schema); + .setSchema(schema) + .setPartitions(partitions) + .setUpdateBatchSize(updateBatchSize); return qry; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientBitmaskFeature.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientBitmaskFeature.java index f53c0a9b452e5..91a4fda44b961 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientBitmaskFeature.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientBitmaskFeature.java @@ -39,11 +39,14 @@ public enum ClientBitmaskFeature implements ThinProtocolFeature { /** Cluster groups. 
*/ CLUSTER_GROUPS(4), - /** Service invocation. */ + /** Service invocation. This flag is not necessary and exists for legacy reasons. */ SERVICE_INVOKE(5), /** Feature for use default query timeout if the qry timeout isn't set explicitly. */ - DEFAULT_QRY_TIMEOUT(6); + DEFAULT_QRY_TIMEOUT(6), + + /** Additional SqlFieldsQuery properties: partitions, updateBatchSize */ + QRY_PARTITIONS_BATCH_SIZE(7); /** */ private static final EnumSet ALL_FEATURES_AS_ENUM_SET = diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheSqlFieldsQueryRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheSqlFieldsQueryRequest.java index 81391c53b14ff..505499e015f1c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheSqlFieldsQueryRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheSqlFieldsQueryRequest.java @@ -48,6 +48,12 @@ public class ClientCacheSqlFieldsQueryRequest extends ClientCacheDataRequest imp /** Include field names flag. */ private final boolean includeFieldNames; + /** Partitions. */ + private final int[] partitions; + + /** Update batch size. */ + private final Integer updateBatchSize; + /** * Ctor. * @@ -95,10 +101,33 @@ public ClientCacheSqlFieldsQueryRequest(BinaryRawReaderEx reader, QueryUtils.withQueryTimeout(qry, timeout, TimeUnit.MILLISECONDS); this.qry = qry; + + if (protocolCtx.isFeatureSupported(ClientBitmaskFeature.QRY_PARTITIONS_BATCH_SIZE)) { + // Set qry values in process method so that validation errors are reported to the client. 
+ int partCnt = reader.readInt(); + + if (partCnt >= 0) { + partitions = new int[partCnt]; + + for (int i = 0; i < partCnt; i++) + partitions[i] = reader.readInt(); + } else + partitions = null; + + updateBatchSize = reader.readInt(); + } else { + partitions = null; + updateBatchSize = null; + } } /** {@inheritDoc} */ @Override public ClientResponse process(ClientConnectionContext ctx) { + qry.setPartitions(partitions); + + if (updateBatchSize != null) + qry.setUpdateBatchSize(updateBatchSize); + ctx.incrementCursors(); try { diff --git a/modules/indexing/src/test/java/org/apache/ignite/client/FunctionalQueryTest.java b/modules/indexing/src/test/java/org/apache/ignite/client/FunctionalQueryTest.java index f1b4e2ead2176..8614086f695f8 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/client/FunctionalQueryTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/client/FunctionalQueryTest.java @@ -18,6 +18,7 @@ package org.apache.ignite.client; import java.lang.invoke.SerializedLambda; +import java.lang.reflect.Field; import java.util.Arrays; import java.util.Collection; import java.util.List; @@ -270,6 +271,32 @@ public void testMixedQueryAndCacheApiOperations() throws Exception { } } + /** Tests {@link SqlFieldsQuery} parameter validation. */ + @Test + public void testSqlParameterValidation() throws Exception { + try (Ignite ignored = Ignition.start(Config.getServerConfiguration()); + IgniteClient client = Ignition.startClient(new ClientConfiguration().setAddresses(Config.SERVER)) + ) { + // Set fields with reflection to bypass client-side validation and verify server-side check. 
+ SqlFieldsQuery qry = new SqlFieldsQuery("SELECT * FROM Person"); + + Field updateBatchSize = SqlFieldsQuery.class.getDeclaredField("updateBatchSize"); + updateBatchSize.setAccessible(true); + updateBatchSize.setInt(qry, -1); + + GridTestUtils.assertThrowsAnyCause(null, () -> client.query(qry).getAll(), + ClientException.class, "updateBatchSize cannot be lower than 1"); + + Field parts = SqlFieldsQuery.class.getDeclaredField("parts"); + parts.setAccessible(true); + parts.set(qry, new int[] {-1}); + qry.setUpdateBatchSize(2); + + GridTestUtils.assertThrowsAnyCause(null, () -> client.query(qry).getAll(), + ClientException.class, "Illegal partition"); + } + } + /** */ private static ClientConfiguration getClientConfiguration() { return new ClientConfiguration().setAddresses(Config.SERVER) diff --git a/modules/platforms/cpp/core/include/ignite/cache/query/query_sql_fields.h b/modules/platforms/cpp/core/include/ignite/cache/query/query_sql_fields.h index 6ddd27c0b01ac..9c051a3dc8908 100644 --- a/modules/platforms/cpp/core/include/ignite/cache/query/query_sql_fields.h +++ b/modules/platforms/cpp/core/include/ignite/cache/query/query_sql_fields.h @@ -383,6 +383,9 @@ namespace ignite writer.WriteNull(); else writer.WriteString(schema); + + writer.WriteInt32Array(NULL, 0); // Partitions + writer.WriteInt32(1); // UpdateBatchSize } private: diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheQueriesTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheQueriesTest.cs index 54180350b6cee..2d1b157b4af5d 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheQueriesTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheQueriesTest.cs @@ -70,7 +70,7 @@ public void StartGrids() }); } } - + /// /// Gets the name mapper. 
/// @@ -89,7 +89,7 @@ public void StopGrids() } /// - /// + /// /// [SetUp] public void BeforeTest() @@ -98,7 +98,7 @@ public void BeforeTest() } /// - /// + /// /// [TearDown] public void AfterTest() @@ -127,7 +127,7 @@ private static IIgnite GetIgnite() } /// - /// + /// /// /// private static ICache Cache() @@ -348,7 +348,7 @@ private void CheckEnumeratorQuery(SqlQuery qry) /// Check SQL query. /// [Test] - public void TestSqlQuery([Values(true, false)] bool loc, [Values(true, false)] bool keepBinary, + public void TestSqlQuery([Values(true, false)] bool loc, [Values(true, false)] bool keepBinary, [Values(true, false)] bool distrJoin) { var cache = Cache(); @@ -377,7 +377,7 @@ public void TestSqlQuery([Values(true, false)] bool loc, [Values(true, false)] b /// Check SQL fields query. /// [Test] - public void TestSqlFieldsQuery([Values(true, false)] bool loc, [Values(true, false)] bool distrJoin, + public void TestSqlFieldsQuery([Values(true, false)] bool loc, [Values(true, false)] bool distrJoin, [Values(true, false)] bool enforceJoinOrder, [Values(true, false)] bool lazy) { int cnt = MaxItemCnt; @@ -609,7 +609,7 @@ private static void CheckScanQuery(bool loc, bool keepBinary) // Exception exp = PopulateCache(cache, loc, cnt, x => x < 50); qry = new ScanQuery(new ScanQueryFilter {ThrowErr = true}); - + var ex = Assert.Throws(() => ValidateQueryResults(cache, qry, exp, keepBinary)); Assert.AreEqual(ScanQueryFilter.ErrMessage, ex.Message); } @@ -658,7 +658,7 @@ private void CheckScanQueryPartitions(bool loc, bool keepBinary) ValidateQueryResults(cache, qry, exp0, keepBinary); } - + } /// @@ -791,7 +791,7 @@ public void TestCustomKeyValueFieldNames() cache[1] = new QueryPerson("John", 33); row = cache.Query(new SqlFieldsQuery("select * from QueryPerson")).GetAll()[0]; - + Assert.AreEqual(3, row.Count); Assert.AreEqual(33, row[0]); Assert.AreEqual(1, row[1]); @@ -861,7 +861,7 @@ public void TestFieldNames() var names = cur.FieldNames; Assert.AreEqual(new[] {"AGE", 
"NAME" }, names); - + cur.Dispose(); Assert.AreSame(names, cur.FieldNames); @@ -878,7 +878,7 @@ public void TestFieldNames() qry.Sql = "SELECT 1, AGE FROM QueryPerson"; cur = cache.Query(qry); cur.Dispose(); - + Assert.AreEqual(new[] { "1", "AGE" }, cur.FieldNames); } @@ -933,6 +933,36 @@ public void TestFieldsMetadata() ); } + /// + /// Tests argument propagation and validation. + /// + [Test] + public void TestPartitionsValidation() + { + var cache = Cache(); + var qry = new SqlFieldsQuery("SELECT * FROM QueryPerson") { Partitions = new int[0] }; + + var ex = Assert.Throws(() => cache.Query(qry).GetAll()); + StringAssert.EndsWith("Partitions must not be empty.", ex.Message); + + qry.Partitions = new[] {-1, -2}; + ex = Assert.Throws(() => cache.Query(qry).GetAll()); + StringAssert.EndsWith("Illegal partition", ex.Message); + } + + /// + /// Tests argument propagation and validation. + /// + [Test] + public void TestUpdateBatchSizeValidation() + { + var cache = Cache(); + var qry = new SqlFieldsQuery("SELECT * FROM QueryPerson") { UpdateBatchSize = -1 }; + + var ex = Assert.Throws(() => cache.Query(qry).GetAll()); + StringAssert.EndsWith("updateBatchSize cannot be lower than 1", ex.Message); + } + /// /// Validates fields metadata collection /// @@ -1045,7 +1075,7 @@ private static void ValidateQueryResults(ICache cache, QueryBa /// /// Asserts that all expected entries have been received. 
/// - private static void AssertMissingExpectedKeys(ICollection exp, ICache cache, + private static void AssertMissingExpectedKeys(ICollection exp, ICache cache, IList> all) { if (exp.Count == 0) @@ -1058,7 +1088,7 @@ private static void AssertMissingExpectedKeys(ICollection exp, ICache x.Key > 10).ToCacheQueryable(); Assert.AreEqual(cache.Name, query.CacheName); @@ -76,7 +78,6 @@ public void TestIntrospection() Assert.IsTrue(fq.Local); Assert.AreEqual(PersonCount - 11, cache.Query(fq).GetAll().Count); Assert.AreEqual(999, fq.PageSize); - Assert.IsFalse(fq.EnableDistributedJoins); Assert.IsTrue(fq.EnforceJoinOrder); #pragma warning disable 618 Assert.IsTrue(fq.ReplicatedOnly); @@ -84,22 +85,30 @@ public void TestIntrospection() Assert.IsTrue(fq.Colocated); Assert.AreEqual(TimeSpan.FromSeconds(2.5), fq.Timeout); Assert.IsTrue(fq.Lazy); + Assert.IsTrue(fq.EnableDistributedJoins); + Assert.AreEqual(12, fq.UpdateBatchSize); + Assert.IsNull(fq.Partitions); var str = query.ToString(); Assert.AreEqual(GetSqlEscapeAll() ? 
"CacheQueryable [CacheName=person_org, TableName=Person, Query=SqlFieldsQuery " + "[Sql=select _T0._KEY, _T0._VAL from PERSON_ORG_SCHEMA.\"Person\" as _T0 where " + "(_T0.\"_KEY\" > ?), Arguments=[10], " + - "Local=True, PageSize=999, EnableDistributedJoins=False, EnforceJoinOrder=True, " + - "Timeout=00:00:02.5000000, ReplicatedOnly=True, Colocated=True, Schema=, Lazy=True]]" + "Local=True, PageSize=999, EnableDistributedJoins=True, EnforceJoinOrder=True, " + + "Timeout=00:00:02.5000000, Partitions=[], UpdateBatchSize=12, " + + "Colocated=True, Schema=, Lazy=True]]" : "CacheQueryable [CacheName=person_org, TableName=Person, Query=SqlFieldsQuery " + "[Sql=select _T0._KEY, _T0._VAL from PERSON_ORG_SCHEMA.Person as _T0 where " + "(_T0._KEY > ?), Arguments=[10], " + - "Local=True, PageSize=999, EnableDistributedJoins=False, EnforceJoinOrder=True, " + - "Timeout=00:00:02.5000000, ReplicatedOnly=True, Colocated=True, Schema=, Lazy=True]]", str); + "Local=True, PageSize=999, EnableDistributedJoins=True, EnforceJoinOrder=True, " + + "Timeout=00:00:02.5000000, Partitions=[], UpdateBatchSize=12, " + + "Colocated=True, Schema=, Lazy=True]]", str); // Check fields query - var fieldsQuery = cache.AsCacheQueryable().Select(x => x.Value.Name).ToCacheQueryable(); + var fieldsQuery = cache + .AsCacheQueryable(new QueryOptions {Partitions = new[] {1, 2}}) + .Select(x => x.Value.Name) + .ToCacheQueryable(); Assert.AreEqual(cache.Name, fieldsQuery.CacheName); #pragma warning disable 618 // Type or member is obsolete @@ -117,17 +126,18 @@ public void TestIntrospection() Assert.IsFalse(fq.EnableDistributedJoins); Assert.IsFalse(fq.EnforceJoinOrder); Assert.IsFalse(fq.Lazy); + Assert.AreEqual(new[] {1, 2}, fq.Partitions); str = fieldsQuery.ToString(); Assert.AreEqual(GetSqlEscapeAll() ? 
"CacheQueryable [CacheName=person_org, TableName=Person, Query=SqlFieldsQuery " + "[Sql=select _T0.\"Name\" from PERSON_ORG_SCHEMA.\"Person\" as _T0, Arguments=[], Local=False, " + "PageSize=1024, EnableDistributedJoins=False, EnforceJoinOrder=False, " + - "Timeout=00:00:00, ReplicatedOnly=False, Colocated=False, Schema=, Lazy=False]]" + "Timeout=00:00:00, Partitions=[1, 2], UpdateBatchSize=1, Colocated=False, Schema=, Lazy=False]]" : "CacheQueryable [CacheName=person_org, TableName=Person, Query=SqlFieldsQuery " + "[Sql=select _T0.NAME from PERSON_ORG_SCHEMA.Person as _T0, Arguments=[], Local=False, " + "PageSize=1024, EnableDistributedJoins=False, EnforceJoinOrder=False, " + - "Timeout=00:00:00, ReplicatedOnly=False, Colocated=False, Schema=, Lazy=False]]", str); + "Timeout=00:00:00, Partitions=[1, 2], UpdateBatchSize=1, Colocated=False, Schema=, Lazy=False]]", str); // Check distributed joins flag propagation var distrQuery = cache.AsCacheQueryable(new QueryOptions { EnableDistributedJoins = true }) @@ -144,13 +154,13 @@ public void TestIntrospection() "(((_T0.\"_KEY\" > ?) and (_T0.\"age1\" > ?)) " + "and (_T0.\"Name\" like \'%\' || ? || \'%\') ), Arguments=[10, 20, x], Local=False, " + "PageSize=1024, EnableDistributedJoins=True, EnforceJoinOrder=False, " + - "Timeout=00:00:00, ReplicatedOnly=False, Colocated=False, Schema=, Lazy=False]]" + "Timeout=00:00:00, Partitions=[], UpdateBatchSize=1, Colocated=False, Schema=, Lazy=False]]" : "CacheQueryable [CacheName=person_org, TableName=Person, Query=SqlFieldsQuery " + "[Sql=select _T0._KEY, _T0._VAL from PERSON_ORG_SCHEMA.Person as _T0 where " + "(((_T0._KEY > ?) and (_T0.AGE1 > ?)) " + "and (_T0.NAME like \'%\' || ? 
|| \'%\') ), Arguments=[10, 20, x], Local=False, " + "PageSize=1024, EnableDistributedJoins=True, EnforceJoinOrder=False, " + - "Timeout=00:00:00, ReplicatedOnly=False, Colocated=False, Schema=, Lazy=False]]", str); + "Timeout=00:00:00, Partitions=[], UpdateBatchSize=1, Colocated=False, Schema=, Lazy=False]]", str); } } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/SqlQueryTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/SqlQueryTest.cs index 9584af82590d7..7bbf522fed75e 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/SqlQueryTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/SqlQueryTest.cs @@ -80,7 +80,7 @@ public void TestSqlQueryDistributedJoins() var qry = new SqlQuery(typeof(Person), string.Format("from \"{0}\".Person, \"{1}\".Person as p2 where Person.Id = 11 - p2.Id", CacheName, CacheName2)); - + Assert.Greater(Count, cache.Query(qry).Count()); // Distributed join fixes the problem. @@ -136,7 +136,7 @@ public void TestFieldsQueryDistributedJoins() // Non-distributed join returns incomplete results. var qry = new SqlFieldsQuery(string.Format( - "select p2.Name from \"{0}\".Person, \"{1}\".Person as p2 where Person.Id = 11 - p2.Id", + "select p2.Name from \"{0}\".Person, \"{1}\".Person as p2 where Person.Id = 11 - p2.Id", CacheName, CacheName2)); Assert.Greater(Count, cache.Query(qry).Count()); @@ -228,5 +228,35 @@ public void TestDml() Assert.AreEqual(1, res[0][0]); Assert.AreEqual("baz", cache[-10].Name); } + + /// + /// Tests argument propagation and validation. 
+ /// + [Test] + public void TestPartitionsValidation() + { + var cache = GetClientCache(); + var qry = new SqlFieldsQuery("SELECT * FROM Person") { Partitions = new int[0] }; + + var ex = Assert.Throws(() => cache.Query(qry).GetAll()); + StringAssert.EndsWith("Partitions must not be empty.", ex.Message); + + qry.Partitions = new[] {-1, -2}; + ex = Assert.Throws(() => cache.Query(qry).GetAll()); + StringAssert.EndsWith("Illegal partition", ex.Message); + } + + /// + /// Tests argument propagation and validation. + /// + [Test] + public void TestUpdateBatchSizeValidation() + { + var cache = GetClientCache(); + var qry = new SqlFieldsQuery("SELECT * FROM Person") { UpdateBatchSize = -1 }; + + var ex = Assert.Throws(() => cache.Query(qry).GetAll()); + StringAssert.EndsWith("updateBatchSize cannot be lower than 1", ex.Message); + } } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Query/SqlFieldsQuery.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Query/SqlFieldsQuery.cs index 07f7b1a9a42ad..40c1622e22c66 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Query/SqlFieldsQuery.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Query/SqlFieldsQuery.cs @@ -32,6 +32,9 @@ public class SqlFieldsQuery : IQueryBaseInternal /// Default page size. public const int DefaultPageSize = 1024; + /// Default value for . + public const int DefaultUpdateBatchSize = 1; + /// /// Constructor. /// @@ -55,6 +58,7 @@ public SqlFieldsQuery(string sql, bool loc, params object[] args) Arguments = args; PageSize = DefaultPageSize; + UpdateBatchSize = DefaultUpdateBatchSize; } /// @@ -152,6 +156,21 @@ public SqlFieldsQuery(string sql, bool loc, params object[] args) /// public bool Lazy { get; set; } + /// + /// Gets or sets partitions for the query. + /// + /// The query will be executed only on nodes which are primary for specified partitions. 
+ /// + [SuppressMessage("Microsoft.Performance", "CA1819:PropertiesShouldNotReturnArrays")] + public int[] Partitions { get; set; } + + /// + /// Gets or sets batch size for update queries. + /// + /// Default is 1 (. + /// + public int UpdateBatchSize { get; set; } + /// /// Returns a that represents this instance. /// @@ -160,15 +179,19 @@ public SqlFieldsQuery(string sql, bool loc, params object[] args) /// public override string ToString() { - var args = string.Join(", ", Arguments.Select(x => x == null ? "null" : x.ToString())); + var args = Arguments == null + ? "" + : string.Join(", ", Arguments.Select(x => x == null ? "null" : x.ToString())); + + var parts = Partitions == null + ? "" + : string.Join(", ", Partitions.Select(x => x.ToString())); return string.Format("SqlFieldsQuery [Sql={0}, Arguments=[{1}], Local={2}, PageSize={3}, " + - "EnableDistributedJoins={4}, EnforceJoinOrder={5}, Timeout={6}, ReplicatedOnly={7}" + - ", Colocated={8}, Schema={9}, Lazy={10}]", Sql, args, Local, -#pragma warning disable 618 - PageSize, EnableDistributedJoins, EnforceJoinOrder, Timeout, ReplicatedOnly, -#pragma warning restore 618 - Colocated, Schema, Lazy); + "EnableDistributedJoins={4}, EnforceJoinOrder={5}, Timeout={6}, Partitions=[{7}], " + + "UpdateBatchSize={8}, Colocated={9}, Schema={10}, Lazy={11}]", Sql, args, Local, + PageSize, EnableDistributedJoins, EnforceJoinOrder, Timeout, parts, + UpdateBatchSize, Colocated, Schema, Lazy); } /** */ @@ -197,6 +220,8 @@ internal void Write(BinaryWriter writer) #pragma warning restore 618 writer.WriteBoolean(Colocated); writer.WriteString(Schema); // Schema + writer.WriteIntArray(Partitions); + writer.WriteInt(UpdateBatchSize); } /** */ diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryHashCodeUtils.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryHashCodeUtils.cs index de09fdd6236c3..6c39425191f10 100644 --- 
a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryHashCodeUtils.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryHashCodeUtils.cs @@ -114,6 +114,7 @@ public static unsafe int GetHashCode(T val, Marshaller marsh, IDictionary(T val, Marshaller marsh, IDictionary affinityKeyFieldIds) { using (var stream = new BinaryHeapStream(128)) diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheClient.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheClient.cs index 4486f8209031a..bfb435ede7dfb 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheClient.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheClient.cs @@ -331,7 +331,7 @@ public CacheResult GetAndReplace(TK key, TV val) IgniteArgumentCheck.NotNull(val, "val"); _ignite.Transactions.StartTxIfNeeded(); - + return DoOutInOpAffinity(ClientOp.CacheGetAndReplace, key, val, UnmarshalCacheResult); } @@ -951,6 +951,22 @@ private static void WriteSqlFieldsQuery(IBinaryRawWriter writer, SqlFieldsQuery writer.WriteBoolean(qry.Lazy); writer.WriteTimeSpanAsLong(qry.Timeout); writer.WriteBoolean(includeColumns); + + if (qry.Partitions != null) + { + writer.WriteInt(qry.Partitions.Length); + + foreach (var part in qry.Partitions) + { + writer.WriteInt(part); + } + } + else + { + writer.WriteInt(-1); + } + + writer.WriteInt(qry.UpdateBatchSize); } /// diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/ClientBitmaskFeature.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/ClientBitmaskFeature.cs index 56303d59c9a27..5b47c20f26a17 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/ClientBitmaskFeature.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/ClientBitmaskFeature.cs @@ -19,6 +19,7 @@ namespace Apache.Ignite.Core.Impl.Client { /// /// Client feature ids. Values represent the index in the bit array. 
+ /// Unsupported flags must be commented out. /// internal enum ClientBitmaskFeature { @@ -26,6 +27,9 @@ internal enum ClientBitmaskFeature ExecuteTaskByName = 1, // ClusterStates = 2, ClusterGroupGetNodesEndpoints = 3, - ClusterGroups = 4 + ClusterGroups = 4, + ServiceInvoke = 5, // The flag is not necessary and exists for legacy reasons + // DefaultQueryTimeout = 6, // IGNITE-13692 + QueryPartitionsBatchSize = 7 } -} \ No newline at end of file +} diff --git a/modules/platforms/dotnet/Apache.Ignite.Linq/Apache.Ignite.Linq.csproj b/modules/platforms/dotnet/Apache.Ignite.Linq/Apache.Ignite.Linq.csproj index 60a2f24ea7cd0..d71d1f0dbd647 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Linq/Apache.Ignite.Linq.csproj +++ b/modules/platforms/dotnet/Apache.Ignite.Linq/Apache.Ignite.Linq.csproj @@ -15,7 +15,7 @@ true bin\Debug\ - DEBUG;TRACE + DEBUG;TRACE;CODE_ANALYSIS full AnyCPU prompt diff --git a/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheFieldsQueryExecutor.cs b/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheFieldsQueryExecutor.cs index 43d8e64c8454a..780dd4e6dd4b3 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheFieldsQueryExecutor.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheFieldsQueryExecutor.cs @@ -38,7 +38,7 @@ internal class CacheFieldsQueryExecutor : IQueryExecutor { /** */ private readonly ICacheInternal _cache; - + /** */ private readonly QueryOptions _options; @@ -210,7 +210,9 @@ internal SqlFieldsQuery GetFieldsQuery(string text, object[] args) Colocated = _options.Colocated, Local = _options.Local, Arguments = args, - Lazy = _options.Lazy + Lazy = _options.Lazy, + UpdateBatchSize = _options.UpdateBatchSize, + Partitions = _options.Partitions }; } @@ -297,4 +299,4 @@ private static Func GetCompiledCtor(ConstructorInfo }); } } -} \ No newline at end of file +} diff --git a/modules/platforms/dotnet/Apache.Ignite.Linq/QueryOptions.cs b/modules/platforms/dotnet/Apache.Ignite.Linq/QueryOptions.cs index 
994baf2648340..3249835df53b7 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Linq/QueryOptions.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Linq/QueryOptions.cs @@ -19,6 +19,7 @@ namespace Apache.Ignite.Linq { using System; using System.ComponentModel; + using System.Diagnostics.CodeAnalysis; using Apache.Ignite.Core.Cache.Configuration; using Apache.Ignite.Core.Cache.Query; @@ -30,16 +31,20 @@ public class QueryOptions /// Default page size. public const int DefaultPageSize = SqlFieldsQuery.DefaultPageSize; + /// Default value for . + public const int DefaultUpdateBatchSize = SqlFieldsQuery.DefaultUpdateBatchSize; + /// /// Initializes a new instance of the class. /// public QueryOptions() { PageSize = DefaultPageSize; + UpdateBatchSize = DefaultUpdateBatchSize; } /// - /// Local flag. When set query will be executed only on local node, so only local + /// Local flag. When set query will be executed only on local node, so only local /// entries will be returned as query result. /// /// Defaults to false. @@ -53,7 +58,7 @@ public QueryOptions() public int PageSize { get; set; } /// - /// Gets or sets the name of the table. + /// Gets or sets the name of the table. /// /// Table name is equal to short class name of a cache value. /// When a cache has only one type of values, or only one defined, @@ -124,5 +129,20 @@ public QueryOptions() /// consumption at the cost of moderate performance hit. /// public bool Lazy { get; set; } + + /// + /// Gets or sets partitions for the query. + /// + /// The query will be executed only on nodes which are primary for specified partitions. + /// + [SuppressMessage("Microsoft.Performance", "CA1819:PropertiesShouldNotReturnArrays")] + public int[] Partitions { get; set; } + + /// + /// Gets or sets batch size for update queries. + /// + /// Default is 1 (. 
+ /// + public int UpdateBatchSize { get; set; } } } From 431f7e76acf4b4a13f18b6f59167c68fc01e31a6 Mon Sep 17 00:00:00 2001 From: ibessonov Date: Thu, 12 Nov 2020 16:42:06 +0300 Subject: [PATCH 041/110] IGNITE-13684 Page memory refactoring and improvements needed for defragmentation (IEP-47) - Fixes #8435. Signed-off-by: Sergey Chugunov --- .../internal/pagemem/PageIdAllocator.java | 5 + .../pagemem/store/IgnitePageStoreManager.java | 29 +--- .../pagemem/store/PageStoreCollection.java | 41 +++++ .../processors/cache/CacheGroupContext.java | 2 +- .../cache/IgniteCacheOffheapManager.java | 7 + .../cache/IgniteCacheOffheapManagerImpl.java | 145 ++++++++++++----- .../cache/PartitionUpdateCounterMvccImpl.java | 15 ++ .../PartitionUpdateCounterTrackingImpl.java | 8 +- .../cache/mvcc/MvccProcessorImpl.java | 3 +- .../processors/cache/mvcc/txlog/TxLog.java | 2 +- .../cache/persistence/DataStructure.java | 22 ++- .../GridCacheDatabaseSharedManager.java | 32 ++-- .../persistence/GridCacheOffheapManager.java | 129 +++++++++++----- .../IgniteCacheDatabaseSharedManager.java | 28 +++- .../checkpoint/CheckpointManager.java | 2 +- .../LightweightCheckpointManager.java | 2 +- .../cache/persistence/file/FileIOFactory.java | 1 + .../cache/persistence/file/FilePageStore.java | 16 +- .../file/FilePageStoreFactory.java | 6 +- .../file/FilePageStoreManager.java | 144 ++++------------- .../persistence/file/FilePageStoreV2.java | 4 +- .../file/FileVersionCheckingFactory.java | 6 +- .../cache/persistence/freelist/PagesList.java | 3 +- .../persistence/pagemem/PageMemoryEx.java | 17 +- .../persistence/pagemem/PageMemoryImpl.java | 45 +++--- .../pagemem/PageReadWriteManager.java | 58 +++++++ .../pagemem/PageReadWriteManagerImpl.java | 146 ++++++++++++++++++ .../snapshot/IgniteSnapshotManager.java | 3 +- .../cache/persistence/tree/BPlusTree.java | 16 +- .../persistence/tree/io/PageIoResolver.java | 29 ++++ .../persistence/tree/util/InsertLast.java | 24 +++ 
.../persistence/tree/util/PageHandler.java | 24 +-- .../cache/tree/CacheDataRowStore.java | 19 +-- .../cache/WalModeChangeAdvancedSelfTest.java | 1 - ...itePdsRecoveryAfterFileCorruptionTest.java | 2 +- .../IgnitePdsTaskCancelingTest.java | 4 +- ...pointSimulationWithRealCpDisabledTest.java | 2 +- .../pagemem/BPlusTreePageMemoryImplTest.java | 1 + .../BPlusTreeReuseListPageMemoryImplTest.java | 1 + ...itePageMemReplaceDelayedWriteUnitTest.java | 2 +- .../IndexStoragePageMemoryImplTest.java | 1 + .../pagemem/NoOpPageStoreManager.java | 17 +- .../pagemem/PageMemoryImplNoLoadTest.java | 1 + .../pagemem/PageMemoryImplTest.java | 11 +- ...DurableBackgroundCleanupIndexTreeTask.java | 4 +- .../processors/query/h2/IgniteH2Indexing.java | 1 - .../processors/query/h2/database/H2Tree.java | 74 +++++++-- .../query/h2/database/H2TreeIndex.java | 57 +++++-- .../AbstractInlineIndexColumn.java | 2 +- .../ObjectHashInlineIndexColumn.java | 94 ++++++++++- .../database/io/AbstractH2ExtrasInnerIO.java | 5 + .../database/io/AbstractH2ExtrasLeafIO.java | 8 +- .../h2/database/io/AbstractH2InnerIO.java | 5 + .../h2/database/io/AbstractH2LeafIO.java | 7 +- .../query/h2/database/io/H2ExtrasLeafIO.java | 2 +- .../query/h2/database/io/H2LeafIO.java | 2 +- .../h2/database/io/H2MvccExtrasLeafIO.java | 4 +- .../query/h2/database/io/H2MvccLeafIO.java | 2 +- .../query/h2/database/io/H2RowLinkIO.java | 5 + .../processors/query/h2/opt/H2CacheRow.java | 14 ++ .../LongDestroyDurableBackgroundTaskTest.java | 7 +- ...ltipleParallelCacheDeleteDeadlockTest.java | 7 +- 62 files changed, 978 insertions(+), 398 deletions(-) create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/PageStoreCollection.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageReadWriteManager.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageReadWriteManagerImpl.java 
create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIoResolver.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/InsertLast.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdAllocator.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdAllocator.java index d91d31da32957..73956959d0004 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdAllocator.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdAllocator.java @@ -19,6 +19,8 @@ import org.apache.ignite.IgniteCheckedException; +import static org.apache.ignite.internal.pagemem.PageIdUtils.pageId; + /** * Allocates page ID's. */ @@ -41,6 +43,9 @@ public interface PageIdAllocator { /** Special partition reserved for metastore space. */ public static final int METASTORE_PARTITION = 0x1; + /** Cache group meta page id. */ + public static final long META_PAGE_ID = pageId(INDEX_PARTITION, FLAG_IDX, 0); + /** * Allocates a page from the space for the given partition ID and the given flags. 
* diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/IgnitePageStoreManager.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/IgnitePageStoreManager.java index 8216b96a707e1..50af2a445a57a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/IgnitePageStoreManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/IgnitePageStoreManager.java @@ -19,6 +19,7 @@ import java.nio.ByteBuffer; import java.util.Map; +import java.util.function.LongConsumer; import java.util.function.Predicate; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.configuration.CacheConfiguration; @@ -27,13 +28,13 @@ import org.apache.ignite.internal.processors.cache.CacheGroupDescriptor; import org.apache.ignite.internal.processors.cache.GridCacheSharedManager; import org.apache.ignite.internal.processors.cache.StoredCacheData; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageReadWriteManager; import org.apache.ignite.internal.processors.cluster.IgniteChangeGlobalStateSupport; -import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; /** * */ -public interface IgnitePageStoreManager extends GridCacheSharedManager, IgniteChangeGlobalStateSupport { +public interface IgnitePageStoreManager extends GridCacheSharedManager, IgniteChangeGlobalStateSupport, PageReadWriteManager { /** * Invoked before starting checkpoint recover. */ @@ -53,7 +54,7 @@ public interface IgnitePageStoreManager extends GridCacheSharedManager, IgniteCh * @param tracker Allocation tracker. * @throws IgniteCheckedException If failed. 
*/ - void initialize(int cacheId, int partitions, String workingDir, LongAdderMetric tracker) + void initialize(int cacheId, int partitions, String workingDir, LongConsumer tracker) throws IgniteCheckedException; /** @@ -101,16 +102,6 @@ public void initializeForCache(CacheGroupDescriptor grpDesc, StoredCacheData cac */ public void onPartitionDestroyed(int grpId, int partId, int tag) throws IgniteCheckedException; - /** - * Reads a page for the given cache ID. Cache ID may be {@code 0} if the page is a meta page. - * - * @param grpId Cache group ID. - * @param pageId PageID to read. - * @param pageBuf Page buffer to write to. - * @throws IgniteCheckedException If failed to read the page. - */ - public void read(int grpId, long pageId, ByteBuffer pageBuf) throws IgniteCheckedException; - /** * Checks if partition store exists. * @@ -139,7 +130,7 @@ public void initializeForCache(CacheGroupDescriptor grpDesc, StoredCacheData cac * @param pageBuf Page buffer to write. * @throws IgniteCheckedException If failed to write page. */ - public void write(int grpId, long pageId, ByteBuffer pageBuf, int tag) throws IgniteCheckedException; + @Override public PageStore write(int grpId, long pageId, ByteBuffer pageBuf, int tag, boolean calculateCrc) throws IgniteCheckedException; /** * Gets page offset within the page store file. @@ -176,7 +167,7 @@ public void initializeForCache(CacheGroupDescriptor grpDesc, StoredCacheData cac * @return Allocated page ID. * @throws IgniteCheckedException If IO exception occurred while allocating a page ID. */ - public long allocatePage(int grpId, int partId, byte flags) throws IgniteCheckedException; + @Override public long allocatePage(int grpId, int partId, byte flags) throws IgniteCheckedException; /** * Gets total number of allocated pages for the given space. 
@@ -188,14 +179,6 @@ public void initializeForCache(CacheGroupDescriptor grpDesc, StoredCacheData cac */ public int pages(int grpId, int partId) throws IgniteCheckedException; - /** - * Gets meta page ID for specified cache. - * - * @param grpId Cache group ID. - * @return Meta page ID. - */ - public long metaPageId(int grpId); - /** * @return Saved cache configurations. * @throws IgniteCheckedException If failed. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/PageStoreCollection.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/PageStoreCollection.java new file mode 100644 index 0000000000000..8caf0409fab3f --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/PageStoreCollection.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.pagemem.store; + +import java.util.Collection; +import org.apache.ignite.IgniteCheckedException; + +/** + * A collection that contains {@link PageStore} elements. + */ +public interface PageStoreCollection { + /** + * @param grpId Cache group ID. + * @param partId Partition ID. + * @return Page store for the corresponding parameters. 
+ * @throws IgniteCheckedException If cache or partition with the given ID was not created. + */ + public PageStore getStore(int grpId, int partId) throws IgniteCheckedException; + + /** + * @param grpId Cache group ID. + * @return Collection of related page stores. + * @throws IgniteCheckedException If failed. + */ + public Collection getStores(int grpId) throws IgniteCheckedException; +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java index 3621d0b4273b2..19944576c6615 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java @@ -200,7 +200,7 @@ public class CacheGroupContext { * @param persistenceEnabled Persistence enabled flag. * @param walEnabled Wal enabled flag. */ - CacheGroupContext( + public CacheGroupContext( GridCacheSharedContext ctx, int grpId, UUID rcvdFrom, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManager.java index d0fd64c4b67a1..ceebc72d190fa 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManager.java @@ -38,6 +38,7 @@ import org.apache.ignite.internal.processors.cache.persistence.partstate.GroupPartitionId; import org.apache.ignite.internal.processors.cache.persistence.partstorage.PartitionMetaStorage; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; +import org.apache.ignite.internal.processors.cache.tree.CacheDataTree; import 
org.apache.ignite.internal.processors.cache.tree.PendingEntriesTree; import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccUpdateResult; import org.apache.ignite.internal.processors.cache.tree.mvcc.search.MvccLinkAwareSearchRow; @@ -606,6 +607,12 @@ interface OffheapInvokeClosure extends IgniteTree.InvokeClosure { * */ interface CacheDataStore { + + /** + * @return Cache data tree object. + */ + public CacheDataTree tree(); + /** * Initialize data store if it exists. * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java index 048179af505c6..4743d895a5896 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java @@ -32,6 +32,7 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; import javax.cache.Cache; import javax.cache.processor.EntryProcessor; import org.apache.ignite.IgniteCheckedException; @@ -43,6 +44,8 @@ import org.apache.ignite.internal.NodeStoppingException; import org.apache.ignite.internal.metric.IoStatisticsHolder; import org.apache.ignite.internal.pagemem.FullPageId; +import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.pagemem.wal.record.delta.DataPageMvccMarkUpdatedRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.DataPageMvccUpdateNewTxStateHintRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.DataPageMvccUpdateTxStateHintRecord; @@ -152,10 +155,6 @@ public class IgniteCacheOffheapManagerImpl implements 
IgniteCacheOffheapManager /** The maximum number of entries that can be preloaded under checkpoint read lock. */ public static final int PRELOAD_SIZE_UNDER_CHECKPOINT_LOCK = 100; - /** */ - private final boolean failNodeOnPartitionInconsistency = Boolean.getBoolean( - IgniteSystemProperties.IGNITE_FAIL_NODE_ON_UNRECOVERABLE_PARTITION_INCONSISTENCY); - /** Batch size for cache removals during destroy. */ private static final int BATCH_SIZE = 1000; @@ -183,9 +182,6 @@ public class IgniteCacheOffheapManagerImpl implements IgniteCacheOffheapManager /** */ protected final GridSpinBusyLock busyLock = new GridSpinBusyLock(); - /** */ - private int updateValSizeThreshold; - /** */ protected GridStripedLock partStoreLock = new GridStripedLock(Runtime.getRuntime().availableProcessors()); @@ -200,8 +196,6 @@ public class IgniteCacheOffheapManagerImpl implements IgniteCacheOffheapManager this.grp = grp; this.log = ctx.logger(getClass()); - updateValSizeThreshold = ctx.database().pageSize() / 2; - if (grp.affinityNode()) { ctx.database().checkpointReadLock(); @@ -1300,7 +1294,7 @@ protected CacheDataStore createCacheDataStore0(int p) throws IgniteCheckedExcept lsnr ); - return new CacheDataStoreImpl(p, rowStore, dataTree); + return new CacheDataStoreImpl(p, rowStore, dataTree, () -> pendingEntries, grp, busyLock, log); } /** {@inheritDoc} */ @@ -1447,7 +1441,7 @@ private int expireInternal( /** * */ - protected class CacheDataStoreImpl implements CacheDataStore { + public static class CacheDataStoreImpl implements CacheDataStore { /** */ private final int partId; @@ -1457,6 +1451,15 @@ protected class CacheDataStoreImpl implements CacheDataStore { /** */ private final CacheDataTree dataTree; + /** */ + private final Supplier pendingEntries; + + /** */ + private final CacheGroupContext grp; + + /** */ + private final GridSpinBusyLock busyLock; + /** Update counter. 
*/ protected final PartitionUpdateCounter pCntr; @@ -1467,13 +1470,24 @@ protected class CacheDataStoreImpl implements CacheDataStore { private final IntMap cacheSizes = new IntRWHashMap(); /** Mvcc remove handler. */ - private final PageHandler mvccUpdateMarker = new MvccMarkUpdatedHandler(); + private final PageHandler mvccUpdateMarker; /** Mvcc update tx state hint handler. */ - private final PageHandler mvccUpdateTxStateHint = new MvccUpdateTxStateHintHandler(); + private final PageHandler mvccUpdateTxStateHint; /** */ - private final PageHandler mvccApplyChanges = new MvccApplyChangesHandler(); + private final PageHandler mvccApplyChanges; + + /** */ + private final IgniteLogger log; + + /** */ + private final Boolean failNodeOnPartitionInconsistency = Boolean.getBoolean( + IgniteSystemProperties.IGNITE_FAIL_NODE_ON_UNRECOVERABLE_PARTITION_INCONSISTENCY + ); + + /** */ + private final int updateValSizeThreshold; /** * @param partId Partition number. @@ -1483,18 +1497,37 @@ protected class CacheDataStoreImpl implements CacheDataStore { public CacheDataStoreImpl( int partId, CacheDataRowStore rowStore, - CacheDataTree dataTree + CacheDataTree dataTree, + Supplier pendingEntries, + CacheGroupContext grp, + GridSpinBusyLock busyLock, + IgniteLogger log ) { this.partId = partId; this.rowStore = rowStore; this.dataTree = dataTree; + this.pendingEntries = pendingEntries; + this.grp = grp; + this.busyLock = busyLock; + this.log = log; PartitionUpdateCounter delegate = grp.mvccEnabled() ? new PartitionUpdateCounterMvccImpl(grp) : !grp.persistenceEnabled() || grp.hasAtomicCaches() ? new PartitionUpdateCounterVolatileImpl(grp) : new PartitionUpdateCounterTrackingImpl(grp); - pCntr = ctx.logger(PartitionUpdateCounterDebugWrapper.class).isDebugEnabled() ? + pCntr = grp.shared().logger(PartitionUpdateCounterDebugWrapper.class).isDebugEnabled() ? 
new PartitionUpdateCounterDebugWrapper(partId, delegate) : delegate; + + updateValSizeThreshold = grp.shared().database().pageSize() / 2; + + mvccUpdateMarker = new MvccMarkUpdatedHandler(grp); + mvccUpdateTxStateHint = new MvccUpdateTxStateHintHandler(grp); + mvccApplyChanges = new MvccApplyChangesHandler(grp); + } + + /** {@inheritDoc} */ + @Override public CacheDataTree tree() { + return dataTree; } /** @@ -1640,7 +1673,7 @@ void decrementSize(int cacheId) { grp.cacheOrGroupName() + ", partId=" + partId + ']', e); if (failNodeOnPartitionInconsistency) - ctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); + grp.shared().kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); } } @@ -2985,11 +3018,11 @@ private void afterRowFound(@Nullable CacheDataRow row, KeyCacheObject key) throw while (cur.next()) { if (++rmv == BATCH_SIZE) { - ctx.database().checkpointReadUnlock(); + grp.shared().database().checkpointReadUnlock(); rmv = 0; - ctx.database().checkpointReadLock(); + grp.shared().database().checkpointReadLock(); } CacheDataRow row = cur.get(); @@ -3019,9 +3052,9 @@ private void afterRowFound(@Nullable CacheDataRow row, KeyCacheObject key) throw throw new IgniteCheckedException("Fail destroy store", ex); // Allow checkpointer to progress if a partition contains less than BATCH_SIZE keys. - ctx.database().checkpointReadUnlock(); + grp.shared().database().checkpointReadUnlock(); - ctx.database().checkpointReadLock(); + grp.shared().database().checkpointReadLock(); } /** {@inheritDoc} */ @@ -3053,7 +3086,7 @@ public void restoreState(long size, long updCntr, @Nullable Map c /** {@inheritDoc} */ @Override public PendingEntriesTree pendingTree() { - return pendingEntries; + return pendingEntries.get(); } /** {@inheritDoc} */ @@ -3251,16 +3284,26 @@ public boolean found() { /** * Mvcc remove handler. 
*/ - private final class MvccMarkUpdatedHandler extends PageHandler { + private static final class MvccMarkUpdatedHandler extends PageHandler { + /** */ + private final CacheGroupContext grp; + + private MvccMarkUpdatedHandler(CacheGroupContext grp) { + this.grp = grp; + } + /** {@inheritDoc} */ @Override public Boolean run(int cacheId, long pageId, long page, long pageAddr, PageIO io, Boolean walPlc, MvccUpdateDataRow updateDataRow, int itemId, IoStatisticsHolder statHolder) throws IgniteCheckedException { assert grp.mvccEnabled(); + PageMemory pageMem = grp.dataRegion().pageMemory(); + IgniteWriteAheadLogManager wal = grp.shared().wal(); + DataPageIO iox = (DataPageIO)io; int off = iox.getPayloadOffset(pageAddr, itemId, - grp.dataRegion().pageMemory().realPageSize(grp.groupId()), MVCC_INFO_SIZE); + pageMem.realPageSize(grp.groupId()), MVCC_INFO_SIZE); long newCrd = iox.newMvccCoordinator(pageAddr, off); long newCntr = iox.newMvccCounter(pageAddr, off); @@ -3273,8 +3316,8 @@ private final class MvccMarkUpdatedHandler extends PageHandler { + private static final class MvccUpdateTxStateHintHandler extends PageHandler { + /** */ + private final CacheGroupContext grp; + + /** */ + private MvccUpdateTxStateHintHandler(CacheGroupContext grp) { + this.grp = grp; + } + /** {@inheritDoc} */ @Override public Boolean run(int cacheId, long pageId, long page, long pageAddr, PageIO io, Boolean walPlc, Void ignore, int itemId, IoStatisticsHolder statHolder) throws IgniteCheckedException { DataPageIO iox = (DataPageIO)io; + PageMemory pageMem = grp.dataRegion().pageMemory(); + IgniteWriteAheadLogManager wal = grp.shared().wal(); + int off = iox.getPayloadOffset(pageAddr, itemId, - grp.dataRegion().pageMemory().realPageSize(grp.groupId()), MVCC_INFO_SIZE); + pageMem.realPageSize(grp.groupId()), MVCC_INFO_SIZE); long crd = iox.mvccCoordinator(pageAddr, off); long cntr = iox.mvccCounter(pageAddr, off); @@ -3305,8 +3359,8 @@ private final class MvccUpdateTxStateHintHandler extends 
PageHandler { + private static final class MvccApplyChangesHandler extends PageHandler { + /** */ + private final CacheGroupContext grp; + + /** */ + private MvccApplyChangesHandler(CacheGroupContext grp) { + this.grp = grp; + } + /** {@inheritDoc} */ @Override public Boolean run(int cacheId, long pageId, long page, long pageAddr, PageIO io, Boolean walPlc, MvccDataRow newRow, int itemId, IoStatisticsHolder statHolder) throws IgniteCheckedException { @@ -3345,8 +3407,11 @@ private final class MvccApplyChangesHandler extends PageHandler(queue); + copy.initCntr = initCntr; + copy.reserveCntr.set(reserveCntr.get()); + + return copy; + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/PartitionUpdateCounterTrackingImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/PartitionUpdateCounterTrackingImpl.java index 7671396ba8dc3..0f9d72c5b84bf 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/PartitionUpdateCounterTrackingImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/PartitionUpdateCounterTrackingImpl.java @@ -68,16 +68,16 @@ public class PartitionUpdateCounterTrackingImpl implements PartitionUpdateCounte private static final byte VERSION = 1; /** Queue of applied out of order counter updates. */ - private NavigableMap queue = new TreeMap<>(); + protected NavigableMap queue = new TreeMap<>(); /** LWM. */ - private final AtomicLong cntr = new AtomicLong(); + protected final AtomicLong cntr = new AtomicLong(); /** HWM. */ protected final AtomicLong reserveCntr = new AtomicLong(); /** */ - private boolean first = true; + protected boolean first = true; /** */ protected final CacheGroupContext grp; @@ -86,7 +86,7 @@ public class PartitionUpdateCounterTrackingImpl implements PartitionUpdateCounte * Initial counter points to last sequential update after WAL recovery. 
* @deprecated TODO FIXME https://issues.apache.org/jira/browse/IGNITE-11794 */ - @Deprecated private volatile long initCntr; + @Deprecated protected volatile long initCntr; /** * @param grp Group. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccProcessorImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccProcessorImpl.java index 472c6815fdba9..9fbf83c35473a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccProcessorImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccProcessorImpl.java @@ -34,7 +34,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import java.util.stream.Stream; - import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; @@ -405,7 +404,7 @@ private void txLogPageStoreInit(IgniteCacheDatabaseSharedManager mgr) throws Ign //noinspection ConstantConditions ctx.cache().context().pageStore().initialize(TX_LOG_CACHE_ID, 0, - TX_LOG_CACHE_NAME, mgr.dataRegion(TX_LOG_CACHE_NAME).memoryMetrics().totalAllocatedPages()); + TX_LOG_CACHE_NAME, mgr.dataRegion(TX_LOG_CACHE_NAME).memoryMetrics().totalAllocatedPages()::add); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLog.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLog.java index 9b8c73bf26ebf..0e98b8d0d206c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLog.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLog.java @@ -115,7 +115,7 @@ private void init(GridKernalContext ctx) throws IgniteCheckedException { IgniteWriteAheadLogManager wal = ctx.cache().context().wal(); PageMemoryEx pageMemory = 
(PageMemoryEx)txLogDataRegion.pageMemory(); - long metaId = pageMemory.metaPageId(TX_LOG_CACHE_ID); + long metaId = PageMemory.META_PAGE_ID; long metaPage = pageMemory.acquirePage(TX_LOG_CACHE_ID, metaId); long treeRoot, reuseListRoot; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStructure.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStructure.java index 300e98ac6237c..68fd48e48da8d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStructure.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStructure.java @@ -29,6 +29,7 @@ import org.apache.ignite.internal.pagemem.wal.record.delta.RecycleRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.RotatedIdPartRecord; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseBag; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler; @@ -66,18 +67,24 @@ public abstract class DataStructure { /** */ protected ReuseList reuseList; + /** */ + protected final PageIoResolver pageIoRslvr; + /** * @param cacheGrpId Cache group ID. * @param grpName Cache group name. * @param pageMem Page memory. * @param wal Write ahead log manager. + * @param lockLsnr Page lock listener. + * @param pageIoRslvr Page IO resolver. 
*/ public DataStructure( int cacheGrpId, String grpName, PageMemory pageMem, IgniteWriteAheadLogManager wal, - PageLockListener lockLsnr + PageLockListener lockLsnr, + PageIoResolver pageIoRslvr ) { assert pageMem != null; @@ -86,6 +93,7 @@ public DataStructure( this.pageMem = pageMem; this.wal = wal; this.lockLsnr = lockLsnr == null ? NOOP_LSNR : lockLsnr; + this.pageIoRslvr = pageIoRslvr; } /** @@ -251,7 +259,7 @@ protected final R write( R lockFailed, IoStatisticsHolder statHolder) throws IgniteCheckedException { return PageHandler.writePage(pageMem, grpId, pageId, lockLsnr, h, - null, null, null, null, intArg, lockFailed, statHolder); + null, null, null, null, intArg, lockFailed, statHolder, pageIoRslvr); } /** @@ -272,7 +280,7 @@ protected final R write( R lockFailed, IoStatisticsHolder statHolder) throws IgniteCheckedException { return PageHandler.writePage(pageMem, grpId, pageId, lockLsnr, h, - null, null, null, arg, intArg, lockFailed, statHolder); + null, null, null, arg, intArg, lockFailed, statHolder, pageIoRslvr); } /** @@ -295,7 +303,7 @@ protected final R write( R lockFailed, IoStatisticsHolder statHolder) throws IgniteCheckedException { return PageHandler.writePage(pageMem, grpId, pageId, page, lockLsnr, h, - null, null, null, arg, intArg, lockFailed, statHolder); + null, null, null, arg, intArg, lockFailed, statHolder, pageIoRslvr); } /** @@ -318,7 +326,7 @@ protected final R write( R lockFailed, IoStatisticsHolder statHolder) throws IgniteCheckedException { return PageHandler.writePage(pageMem, grpId, pageId, lockLsnr, h, - init, wal, null, arg, intArg, lockFailed, statHolder); + init, wal, null, arg, intArg, lockFailed, statHolder, pageIoRslvr); } /** @@ -339,7 +347,7 @@ protected final R read( R lockFailed, IoStatisticsHolder statHolder) throws IgniteCheckedException { return PageHandler.readPage(pageMem, grpId, pageId, lockLsnr, - h, arg, intArg, lockFailed, statHolder); + h, arg, intArg, lockFailed, statHolder, pageIoRslvr); } /** @@ -362,7 
+370,7 @@ protected final R read( R lockFailed, IoStatisticsHolder statHolder) throws IgniteCheckedException { return PageHandler.readPage(pageMem, grpId, pageId, page, lockLsnr, h, - arg, intArg, lockFailed, statHolder); + arg, intArg, lockFailed, statHolder, pageIoRslvr); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java index 6a0ecb233e7f3..8f6f6835342ff 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java @@ -126,6 +126,7 @@ import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetastorageLifecycleListener; import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryImpl; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageReadWriteManager; import org.apache.ignite.internal.processors.cache.persistence.partstate.GroupPartitionId; import org.apache.ignite.internal.processors.cache.persistence.snapshot.IgniteCacheSnapshotManager; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; @@ -790,7 +791,7 @@ private void finishRecovery() throws IgniteCheckedException { walTail = CheckpointStatus.NULL_PTR.equals(status.endPtr) ? 
null : status.endPtr; } - cctx.wal().resumeLogging(walTail); + resumeWalLogging(); walTail = null; @@ -947,10 +948,11 @@ private long[] calculateFragmentSizes(int concLvl, long cacheSize, long chpBufSi DataStorageConfiguration memCfg, DataRegionConfiguration plcCfg, DataRegionMetricsImpl memMetrics, - final boolean trackable + final boolean trackable, + PageReadWriteManager pmPageMgr ) { if (!plcCfg.isPersistenceEnabled()) - return super.createPageMemory(memProvider, memCfg, plcCfg, memMetrics, trackable); + return super.createPageMemory(memProvider, memCfg, plcCfg, memMetrics, trackable, pmPageMgr); memMetrics.persistenceEnabled(true); @@ -991,6 +993,7 @@ private long[] calculateFragmentSizes(int concLvl, long cacheSize, long chpBufSi chpBufSize ), cctx, + pmPageMgr, memCfg.getPageSize(), (fullId, pageBuf, tag) -> { memMetrics.onPageWritten(); @@ -999,7 +1002,7 @@ private long[] calculateFragmentSizes(int concLvl, long cacheSize, long chpBufSi snapshotMgr.beforePageWrite(fullId); // Write page to disk. - storeMgr.write(fullId.groupId(), fullId.pageId(), pageBuf, tag); + pmPageMgr.write(fullId.groupId(), fullId.pageId(), pageBuf, tag, true); getCheckpointer().currentProgress().updateEvictedPages(1); }, @@ -1320,6 +1323,7 @@ private String cacheInfo(GridCacheContext cacheCtx) { /** * Gets the checkpoint read lock. While this lock is held, checkpoint thread will not acquireSnapshotWorker memory * state. + * * @throws IgniteException If failed. */ @Override public void checkpointReadLock() { @@ -1630,6 +1634,8 @@ private CheckpointStatus readCheckpointStatus() throws IgniteCheckedException { checkpointReadLock(); + RestoreLogicalState logicalState; + try { // Preform early regions startup before restoring state. 
initAndStartRegions(kctx.config().getDataStorageConfiguration()); @@ -1652,7 +1658,7 @@ private CheckpointStatus readCheckpointStatus() throws IgniteCheckedException { CheckpointStatus status = readCheckpointStatus(); - RestoreLogicalState logicalState = applyLogicalUpdates( + logicalState = applyLogicalUpdates( status, groupsWithEnabledWal(), logicalRecords(), @@ -1666,10 +1672,6 @@ private CheckpointStatus readCheckpointStatus() throws IgniteCheckedException { } startTimer.finishGlobalStage("Restore logical state"); - - walTail = tailPointer(logicalState); - - cctx.wal().onDeActivate(kctx); } catch (IgniteCheckedException e) { releaseFileLock(); @@ -1679,6 +1681,15 @@ private CheckpointStatus readCheckpointStatus() throws IgniteCheckedException { finally { checkpointReadUnlock(); } + + walTail = tailPointer(logicalState); + + cctx.wal().onDeActivate(kctx); + } + + /** */ + public void resumeWalLogging() throws IgniteCheckedException { + cctx.wal().resumeLogging(walTail); } /** @@ -3041,7 +3052,8 @@ else if (key.startsWith(WAL_GLOBAL_KEY_PREFIX)) * @param log Logger. * @throws IgniteCheckedException If failed. 
*/ - private static void dumpPartitionsInfo(GridCacheSharedContext cctx, IgniteLogger log) throws IgniteCheckedException { + private static void dumpPartitionsInfo(GridCacheSharedContext cctx, + IgniteLogger log) throws IgniteCheckedException { for (CacheGroupContext grp : cctx.cache().cacheGroups()) { if (grp.isLocal() || !grp.persistenceEnabled()) continue; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java index 4bfda53b0389b..cb7b6dc7e2011 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java @@ -68,6 +68,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.GridCacheEntryEx; import org.apache.ignite.internal.processors.cache.GridCacheMvccEntryInfo; +import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.GridCacheTtlManager; import org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManagerImpl; import org.apache.ignite.internal.processors.cache.KeyCacheObject; @@ -91,6 +92,7 @@ import org.apache.ignite.internal.processors.cache.persistence.partstate.PartitionAllocationMap; import org.apache.ignite.internal.processors.cache.persistence.partstorage.PartitionMetaStorage; import org.apache.ignite.internal.processors.cache.persistence.partstorage.PartitionMetaStorageImpl; +import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageMetaIO; import 
org.apache.ignite.internal.processors.cache.persistence.tree.io.PageMetaIOV2; @@ -110,6 +112,7 @@ import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; import org.apache.ignite.internal.processors.query.GridQueryRowCacheCleaner; import org.apache.ignite.internal.util.GridLongList; +import org.apache.ignite.internal.util.GridSpinBusyLock; import org.apache.ignite.internal.util.lang.GridCursor; import org.apache.ignite.internal.util.lang.IgniteInClosure2X; import org.apache.ignite.internal.util.lang.IgnitePredicateX; @@ -243,7 +246,7 @@ public IndexStorage getIndexStorage() { boolean exists = ctx.pageStore() != null && ctx.pageStore().exists(grp.groupId(), p); - return new GridCacheDataStore(p, exists); + return new GridCacheDataStore(grp, p, exists, busyLock, log); } /** {@inheritDoc} */ @@ -766,7 +769,7 @@ private GridDhtLocalPartition getPartition(CacheDataStore store) { * return null if counter page does not exist. * @throws IgniteCheckedException If page memory operation failed. */ - @Nullable private static Map readSharedGroupCacheSizes(PageSupport pageMem, int grpId, + @Nullable public static Map readSharedGroupCacheSizes(PageSupport pageMem, int grpId, long cntrsPageId) throws IgniteCheckedException { if (cntrsPageId == 0L) @@ -817,7 +820,7 @@ private GridDhtLocalPartition getPartition(CacheDataStore store) { * @return new counter page Id. Same as {@code cntrsPageId} or new value if cache size pages were initialized. * @throws IgniteCheckedException if page memory operation failed. 
*/ - private static long writeSharedGroupCacheSizes(PageMemory pageMem, int grpId, + public static long writeSharedGroupCacheSizes(PageMemory pageMem, int grpId, long cntrsPageId, int partId, Map sizes) throws IgniteCheckedException { byte[] data = PagePartitionCountersIO.VERSIONS.latest().serializeCacheSizes(sizes); @@ -880,7 +883,7 @@ private void addPartitions(Context ctx) throws IgniteCheckedException { int grpId = grp.groupId(); PageMemoryEx pageMem = (PageMemoryEx)grp.dataRegion().pageMemory(); - long metaPageId = pageMem.metaPageId(grpId); + long metaPageId = PageMemory.META_PAGE_ID; long metaPage = pageMem.acquirePage(grpId, metaPageId); try { @@ -1048,7 +1051,7 @@ private Metas getOrAllocateCacheMetas() throws IgniteCheckedException { IgniteWriteAheadLogManager wal = ctx.wal(); int grpId = grp.groupId(); - long metaId = pageMem.metaPageId(grpId); + long metaId = PageMemory.META_PAGE_ID; long metaPage = pageMem.acquirePage(grpId, metaId); try { @@ -1191,7 +1194,7 @@ private Metas getOrAllocateCacheMetas() throws IgniteCheckedException { int cleared = 0; for (CacheDataStore store : cacheDataStores()) { - cleared += ((GridCacheDataStore)store).purgeExpired(cctx, c, amount - cleared); + cleared += ((GridCacheDataStore)store).purgeExpired(cctx, c, unwindThrottlingTimeout, amount - cleared); if (amount != -1 && cleared >= amount) return true; @@ -1240,7 +1243,7 @@ long freeSpace() { for (CacheDataStore store : partDataStores.values()) { assert store instanceof GridCacheDataStore; - AbstractFreeList freeList = ((GridCacheDataStore)store).freeList; + AbstractFreeList freeList = ((GridCacheDataStore)store).getCacheStoreFreeList(); if (freeList == null) continue; @@ -1262,7 +1265,7 @@ long emptyDataPages() { for (CacheDataStore store : partDataStores.values()) { assert store instanceof GridCacheDataStore; - AbstractFreeList freeList = ((GridCacheDataStore)store).freeList; + AbstractFreeList freeList = ((GridCacheDataStore)store).getCacheStoreFreeList(); if (freeList 
== null) continue; @@ -1315,7 +1318,7 @@ private void saveIndexReencryptionStatus(int grpId) throws IgniteCheckedExceptio PageMemoryEx pageMem = (PageMemoryEx)grp.dataRegion().pageMemory(); - long metaPageId = pageMem.metaPageId(grpId); + long metaPageId = PageIdAllocator.META_PAGE_ID; long metaPage = pageMem.acquirePage(grpId, metaPageId); try { @@ -1352,6 +1355,11 @@ private void saveIndexReencryptionStatus(int grpId) throws IgniteCheckedExceptio } } + /** */ + public GridSpinBusyLock busyLock() { + return busyLock; + } + /** * */ @@ -1763,22 +1771,22 @@ private DataEntryRow(DataEntry entry) { /** * */ - private static class Metas { + static class Metas { /** */ @GridToStringInclude - private final RootPage reuseListRoot; + public final RootPage reuseListRoot; /** */ @GridToStringInclude - private final RootPage treeRoot; + public final RootPage treeRoot; /** */ @GridToStringInclude - private final RootPage pendingTreeRoot; + public final RootPage pendingTreeRoot; /** */ @GridToStringInclude - private final RootPage partMetastoreReuseListRoot; + public final RootPage partMetastoreReuseListRoot; /** * @param treeRoot Metadata storage root. @@ -1800,10 +1808,13 @@ private static class Metas { /** * */ - public class GridCacheDataStore implements CacheDataStore { + public static class GridCacheDataStore implements CacheDataStore { /** */ private final int partId; + /** */ + private final CacheGroupContext grp; + /** */ private volatile AbstractFreeList freeList; @@ -1831,19 +1842,38 @@ public class GridCacheDataStore implements CacheDataStore { /** */ private final boolean exists; + /** */ + private final GridSpinBusyLock busyLock; + + /** */ + private final IgniteLogger log; + /** */ private final AtomicBoolean init = new AtomicBoolean(); /** */ private final CountDownLatch latch = new CountDownLatch(1); + /** */ + private CacheDataTree dataTree; + /** * @param partId Partition. * @param exists {@code True} if store exists. 
*/ - private GridCacheDataStore(int partId, boolean exists) { + public GridCacheDataStore(CacheGroupContext grp, int partId, boolean exists, + GridSpinBusyLock busyLock, + IgniteLogger log) { + this.grp = grp; this.partId = partId; this.exists = exists; + this.busyLock = busyLock; + this.log = log; + } + + /** */ + public AbstractFreeList getCacheStoreFreeList() { + return freeList; } /** @@ -1864,7 +1894,7 @@ private String partitionMetaStoreName() { * @return Name of data tree. */ private String dataTreeName() { - return grp.cacheOrGroupName() + "-" + treeName(partId); + return grp.cacheOrGroupName() + "-" + BPlusTree.treeName("p-" + partId, "CacheData"); } /** @@ -1892,6 +1922,10 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException return null; } + final GridCacheSharedContext ctx = grp.shared(); + + AtomicLong pageListCacheLimit = ((GridCacheDatabaseSharedManager) ctx.database()).pageListCacheLimitHolder(grp.dataRegion()); + if (init.compareAndSet(false, true)) { IgniteCacheDatabaseSharedManager dbMgr = ctx.database(); @@ -1904,7 +1938,7 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException PageIdUtils.partId(metas.treeRoot.pageId().pageId()) != partId || PageIdUtils.partId(metas.pendingTreeRoot.pageId().pageId()) != partId || PageIdUtils.partId(metas.partMetastoreReuseListRoot.pageId().pageId()) != partId - ) { + ) { throw new IgniteCheckedException("Invalid meta root allocated [" + "cacheOrGroupName=" + grp.cacheOrGroupName() + ", partId=" + partId + @@ -1929,7 +1963,7 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException ) { /** {@inheritDoc} */ @Override protected long allocatePageNoReuse() throws IgniteCheckedException { - assert grp.shared().database().checkpointLockIsHeldByThread(); + assert ctx.database().checkpointLockIsHeldByThread(); return pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_DATA); } @@ -1954,7 +1988,7 @@ private CacheDataStore init0(boolean 
checkExists) throws IgniteCheckedException ) { /** {@inheritDoc} */ @Override protected long allocatePageNoReuse() throws IgniteCheckedException { - assert grp.shared().database().checkpointLockIsHeldByThread(); + assert ctx.database().checkpointLockIsHeldByThread(); return pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_DATA); } @@ -1966,7 +2000,7 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException RootPage treeRoot = metas.treeRoot; - CacheDataTree dataTree = new CacheDataTree( + dataTree = new CacheDataTree( grp, dataTreeName, freeList, @@ -1977,7 +2011,7 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException ) { /** {@inheritDoc} */ @Override protected long allocatePageNoReuse() throws IgniteCheckedException { - assert grp.shared().database().checkpointLockIsHeldByThread(); + assert ctx.database().checkpointLockIsHeldByThread(); return pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_DATA); } @@ -1998,15 +2032,24 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException ) { /** {@inheritDoc} */ @Override protected long allocatePageNoReuse() throws IgniteCheckedException { - assert grp.shared().database().checkpointLockIsHeldByThread(); + assert ctx.database().checkpointLockIsHeldByThread(); return pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_DATA); } }; - PageMemoryEx pageMem = (PageMemoryEx)grp.dataRegion().pageMemory(); + PageMemoryEx pageMem = (PageMemoryEx) grp.dataRegion().pageMemory(); + + int grpId = grp.groupId(); - delegate0 = new CacheDataStoreImpl(partId, rowStore, dataTree) { + delegate0 = new CacheDataStoreImpl(partId, + rowStore, + dataTree, + () -> pendingTree0, + grp, + busyLock, + log + ) { /** {@inheritDoc} */ @Override public PendingEntriesTree pendingTree() { return pendingTree0; @@ -2019,20 +2062,20 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException if (pageStoreMgr == null) return; - final int 
pages = pageStoreMgr.pages(grp.groupId(), partId); + final int pages = pageStoreMgr.pages(grpId, partId); - long pageId = pageMem.partitionMetaPageId(grp.groupId(), partId); + long pageId = pageMem.partitionMetaPageId(grpId, partId); // For each page sequentially pin/unpin. for (int pageNo = 0; pageNo < pages; pageId++, pageNo++) { long pagePointer = -1; try { - pagePointer = pageMem.acquirePage(grp.groupId(), pageId); + pagePointer = pageMem.acquirePage(grpId, pageId); } finally { if (pagePointer != -1) - pageMem.releasePage(grp.groupId(), pageId, pagePointer); + pageMem.releasePage(grpId, pageId, pagePointer); } } } @@ -2043,7 +2086,6 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException if (!pendingTree0.isEmpty()) grp.caches().forEach(cctx -> cctx.ttl().hasPendingEntries(true)); - int grpId = grp.groupId(); long partMetaId = pageMem.partitionMetaPageId(grpId, partId); long partMetaPage = pageMem.acquirePage(grpId, partMetaId); @@ -2072,7 +2114,7 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException grp, partId, io.getEncryptedPageIndex(pageAddr), encrPageCnt); } - globalRemoveId().setIfGreater(io.getGlobalRemoveId(pageAddr)); + grp.offheap().globalRemoveId().setIfGreater(io.getGlobalRemoveId(pageAddr)); } } finally { @@ -2116,7 +2158,7 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException */ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { PageMemoryEx pageMem = (PageMemoryEx)grp.dataRegion().pageMemory(); - IgniteWriteAheadLogManager wal = ctx.wal(); + IgniteWriteAheadLogManager wal = grp.shared().wal(); int grpId = grp.groupId(); long partMetaId = pageMem.partitionMetaPageId(grpId, partId); @@ -2177,7 +2219,7 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { assert pageVer == 1 || pageVer == 2; if (log.isDebugEnabled()) - log.info("Upgrade partition meta page version: [part=" + partId + + log.debug("Upgrade partition 
meta page version: [part=" + partId + ", grpId=" + grpId + ", oldVer=" + pageVer + ", newVer=" + io.getVersion() ); @@ -2250,6 +2292,11 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { } } + /** {@inheritDoc} */ + @Override public CacheDataTree tree() { + return dataTree; + } + /** {@inheritDoc} */ @Override public boolean init() { try { @@ -2495,7 +2542,7 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { long expireTime, @Nullable CacheDataRow oldRow ) throws IgniteCheckedException { - assert ctx.database().checkpointLockIsHeldByThread(); + assert grp.shared().database().checkpointLockIsHeldByThread(); CacheDataStore delegate = init0(false); @@ -2625,7 +2672,7 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { GridCacheVersion ver, long expireTime, @Nullable CacheDataRow oldRow) throws IgniteCheckedException { - assert ctx.database().checkpointLockIsHeldByThread(); + assert grp.shared().database().checkpointLockIsHeldByThread(); CacheDataStore delegate = init0(false); @@ -2658,7 +2705,7 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { /** {@inheritDoc} */ @Override public void invoke(GridCacheContext cctx, KeyCacheObject key, OffheapInvokeClosure c) throws IgniteCheckedException { - assert ctx.database().checkpointLockIsHeldByThread(); + assert grp.shared().database().checkpointLockIsHeldByThread(); CacheDataStore delegate = init0(false); @@ -2668,7 +2715,7 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { /** {@inheritDoc} */ @Override public void remove(GridCacheContext cctx, KeyCacheObject key, int partId) throws IgniteCheckedException { - assert ctx.database().checkpointLockIsHeldByThread(); + assert grp.shared().database().checkpointLockIsHeldByThread(); CacheDataStore delegate = init0(false); @@ -2828,7 +2875,7 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { /** {@inheritDoc} */ 
@Override public void clear(int cacheId) throws IgniteCheckedException { - assert ctx.database().checkpointLockIsHeldByThread(); + assert grp.shared().database().checkpointLockIsHeldByThread(); CacheDataStore delegate0 = init0(true); @@ -2879,6 +2926,7 @@ public long expiredSize() throws IgniteCheckedException { public int purgeExpired( GridCacheContext cctx, IgniteInClosure2X c, + long throttlingTimeout, int amount ) throws IgniteCheckedException { CacheDataStore delegate0 = init0(true); @@ -2896,7 +2944,7 @@ public int purgeExpired( if (cleared < amount) { lastThrottledCacheId = cctx.cacheId(); - nextStoreCleanTimeNanos = nowNanos + U.millisToNanos(unwindThrottlingTimeout); + nextStoreCleanTimeNanos = nowNanos + U.millisToNanos(throttlingTimeout); } return cleared; @@ -3038,7 +3086,8 @@ private int purgeExpiredInternal( } } - @Override public PartitionMetaStorage partStorage() { + /** */ + @Override public PartitionMetaStorage partStorage() { return partStorage; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java index 42ad2a9374a19..9eb02fbd994a9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java @@ -75,6 +75,7 @@ import org.apache.ignite.internal.processors.cache.persistence.freelist.FreeList; import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetaStorage; import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetastorageLifecycleListener; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageReadWriteManager; import 
org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageLockListener; import org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer; @@ -417,6 +418,21 @@ public DataRegion addDataRegion( DataStorageConfiguration dataStorageCfg, DataRegionConfiguration dataRegionCfg, boolean trackable + ) throws IgniteCheckedException { + return addDataRegion(dataStorageCfg, dataRegionCfg, trackable, cctx.pageStore()); + } + + /** + * @param dataStorageCfg Database config. + * @param dataRegionCfg Data region config. + * @param pmPageMgr Page manager. + * @throws IgniteCheckedException If failed to initialize swap path. + */ + protected DataRegion addDataRegion( + DataStorageConfiguration dataStorageCfg, + DataRegionConfiguration dataRegionCfg, + boolean trackable, + PageReadWriteManager pmPageMgr ) throws IgniteCheckedException { String dataRegionName = dataRegionCfg.getName(); @@ -430,7 +446,7 @@ public DataRegion addDataRegion( cctx.kernalContext().metric(), dataRegionMetricsProvider(dataRegionCfg)); - DataRegion region = initMemory(dataStorageCfg, dataRegionCfg, memMetrics, trackable); + DataRegion region = initMemory(dataStorageCfg, dataRegionCfg, memMetrics, trackable, pmPageMgr); dataRegionMap.put(dataRegionName, region); @@ -1205,6 +1221,7 @@ public void ensureFreeSpace(DataRegion memPlc) throws IgniteCheckedException { * @param memCfg memory configuration with common parameters. * @param plcCfg data region with PageMemory specific parameters. * @param memMetrics {@link DataRegionMetrics} object to collect memory usage metrics. + * @param pmPageMgr Page manager. * @return data region instance. * * @throws IgniteCheckedException If failed to initialize swap path. 
@@ -1213,9 +1230,10 @@ private DataRegion initMemory( DataStorageConfiguration memCfg, DataRegionConfiguration plcCfg, DataRegionMetricsImpl memMetrics, - boolean trackable + boolean trackable, + PageReadWriteManager pmPageMgr ) throws IgniteCheckedException { - PageMemory pageMem = createPageMemory(createOrReuseMemoryProvider(plcCfg), memCfg, plcCfg, memMetrics, trackable); + PageMemory pageMem = createPageMemory(createOrReuseMemoryProvider(plcCfg), memCfg, plcCfg, memMetrics, trackable, pmPageMgr); return new DataRegion(pageMem, plcCfg, memMetrics, createPageEvictionTracker(plcCfg, pageMem)); } @@ -1314,6 +1332,7 @@ protected PageEvictionTracker createPageEvictionTracker(DataRegionConfiguration * @param memCfg Memory configuartion. * @param memPlcCfg data region configuration. * @param memMetrics DataRegionMetrics to collect memory usage metrics. + * @param pmPageMgr Page manager. * @return PageMemory instance. */ protected PageMemory createPageMemory( @@ -1321,7 +1340,8 @@ protected PageMemory createPageMemory( DataStorageConfiguration memCfg, DataRegionConfiguration memPlcCfg, DataRegionMetricsImpl memMetrics, - boolean trackable + boolean trackable, + PageReadWriteManager pmPageMgr ) { memMetrics.persistenceEnabled(false); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointManager.java index 2beac7b1c3f4b..7fe7002db5f04 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointManager.java @@ -170,7 +170,7 @@ public CheckpointManager( checkpointPagesWriterFactory = new CheckpointPagesWriterFactory( logger, snapshotMgr, - (pageMemEx, fullPage, buf, tag) -> 
pageStoreManager.writeInternal(fullPage.groupId(), fullPage.pageId(), buf, tag, true), + (pageMemEx, fullPage, buf, tag) -> pageStoreManager.write(fullPage.groupId(), fullPage.pageId(), buf, tag, true), persStoreMetrics, throttlingPolicy, threadBuf, pageMemoryGroupResolver diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/LightweightCheckpointManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/LightweightCheckpointManager.java index 7a329f7d3e3a7..9e7b3dccf283c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/LightweightCheckpointManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/LightweightCheckpointManager.java @@ -139,7 +139,7 @@ public LightweightCheckpointManager( logger, snapshotMgr, (pageMemEx, fullPage, buf, tag) -> - pageStoreManager.writeInternal(fullPage.groupId(), fullPage.pageId(), buf, tag, true), + pageStoreManager.write(fullPage.groupId(), fullPage.pageId(), buf, tag, true), persStoreMetrics, throttlingPolicy, threadBuf, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileIOFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileIOFactory.java index 29f7e6f891b87..4deb047569194 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileIOFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileIOFactory.java @@ -29,6 +29,7 @@ /** * {@link FileIO} factory definition. */ +@FunctionalInterface public interface FileIOFactory extends Serializable { /** * Creates I/O interface for file with default I/O mode. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java index adb49a35f228c..6b63d315b4259 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java @@ -30,6 +30,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.LongConsumer; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteSystemProperties; @@ -42,7 +43,6 @@ import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.processors.cache.persistence.wal.crc.FastCrc; import org.apache.ignite.internal.processors.cache.persistence.wal.crc.IgniteDataIntegrityViolationException; -import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteOutClosure; @@ -101,7 +101,7 @@ public class FilePageStore implements PageStore { private final AtomicLong allocated; /** Region metrics updater. */ - private final LongAdderMetric allocatedTracker; + private final LongConsumer allocatedTracker; /** List of listeners for current page store to handle. 
*/ private final List lsnrs = new CopyOnWriteArrayList<>(); @@ -130,7 +130,7 @@ public FilePageStore( IgniteOutClosure pathProvider, FileIOFactory factory, DataStorageConfiguration cfg, - LongAdderMetric allocatedTracker + LongConsumer allocatedTracker ) { this.type = type; this.pathProvider = pathProvider; @@ -357,7 +357,7 @@ private void stop0(boolean delete) throws IOException { } } finally { - allocatedTracker.add(-1L * allocated.getAndSet(0) / pageSize); + allocatedTracker.accept(-1L * allocated.getAndSet(0) / pageSize); inited = false; @@ -406,7 +406,7 @@ private void stop0(boolean delete) throws IOException { throw new StorageException("Failed to truncate partition file [file=" + filePath.toAbsolutePath() + "]", e); } finally { - allocatedTracker.add(-1L * allocated.getAndSet(0) / pageSize); + allocatedTracker.accept(-1L * allocated.getAndSet(0) / pageSize); inited = false; @@ -443,7 +443,7 @@ private void stop0(boolean delete) throws IOException { assert delta % pageSize == 0 : delta; - allocatedTracker.add(delta / pageSize); + allocatedTracker.accept(delta / pageSize); } recover = false; @@ -592,7 +592,7 @@ public void init() throws StorageException { // Order is important, update of total allocated pages must be called after allocated update // and setting inited to true, because it affects pages() returned value. 
- allocatedTracker.add(pages()); + allocatedTracker.accept(pages()); } catch (IOException e) { err = new StorageException( @@ -839,7 +839,7 @@ private long allocPage() { off = allocated.get(); if (allocated.compareAndSet(off, off + pageSize)) { - allocatedTracker.increment(); + allocatedTracker.accept(1); break; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreFactory.java index 6607ebe12002a..ae923592f4af3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreFactory.java @@ -19,10 +19,10 @@ import java.io.File; import java.nio.file.Path; +import java.util.function.LongConsumer; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.store.PageStore; -import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; import org.apache.ignite.lang.IgniteOutClosure; /** @@ -38,7 +38,7 @@ public interface FilePageStoreFactory { * @return page store * @throws IgniteCheckedException if failed. 
*/ - default PageStore createPageStore(byte type, File file, LongAdderMetric allocatedTracker) + default PageStore createPageStore(byte type, File file, LongConsumer allocatedTracker) throws IgniteCheckedException { return createPageStore(type, file::toPath, allocatedTracker); } @@ -52,6 +52,6 @@ default PageStore createPageStore(byte type, File file, LongAdderMetric allocate * @return page store * @throws IgniteCheckedException if failed */ - PageStore createPageStore(byte type, IgniteOutClosure pathProvider, LongAdderMetric allocatedTracker) + PageStore createPageStore(byte type, IgniteOutClosure pathProvider, LongConsumer allocatedTracker) throws IgniteCheckedException; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java index 024f52c85687d..7152b1e870c46 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java @@ -49,9 +49,9 @@ import java.util.function.BiConsumer; import java.util.function.BiFunction; import java.util.function.Function; +import java.util.function.LongConsumer; import java.util.function.Predicate; import java.util.stream.Collectors; - import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; @@ -67,9 +67,9 @@ import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager; import org.apache.ignite.internal.pagemem.store.PageStore; +import org.apache.ignite.internal.pagemem.store.PageStoreCollection; import org.apache.ignite.internal.processors.cache.CacheGroupContext; import 
org.apache.ignite.internal.processors.cache.CacheGroupDescriptor; -import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.GridCacheSharedManagerAdapter; import org.apache.ignite.internal.processors.cache.StoredCacheData; import org.apache.ignite.internal.processors.cache.persistence.DataRegion; @@ -78,8 +78,9 @@ import org.apache.ignite.internal.processors.cache.persistence.StorageException; import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFolderSettings; import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetaStorage; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageReadWriteManager; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageReadWriteManagerImpl; import org.apache.ignite.internal.processors.cache.persistence.snapshot.IgniteCacheSnapshotManager; -import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; import org.apache.ignite.internal.util.GridStripedReadWriteLock; import org.apache.ignite.internal.util.typedef.X; @@ -104,7 +105,8 @@ /** * File page store manager. */ -public class FilePageStoreManager extends GridCacheSharedManagerAdapter implements IgnitePageStoreManager { +public class FilePageStoreManager extends GridCacheSharedManagerAdapter implements IgnitePageStoreManager, + PageStoreCollection { /** File suffix. 
*/ public static final String FILE_SUFFIX = ".bin"; @@ -118,7 +120,10 @@ public class FilePageStoreManager extends GridCacheSharedManagerAdapter implemen public static final String PART_FILE_PREFIX = "part-"; /** */ - public static final String INDEX_FILE_NAME = "index" + FILE_SUFFIX; + public static final String INDEX_FILE_PREFIX = "index"; + + /** */ + public static final String INDEX_FILE_NAME = INDEX_FILE_PREFIX + FILE_SUFFIX; /** */ public static final String PART_FILE_TEMPLATE = PART_FILE_PREFIX + "%d" + FILE_SUFFIX; @@ -157,6 +162,9 @@ public class FilePageStoreManager extends GridCacheSharedManagerAdapter implemen /** Marshaller. */ private final Marshaller marshaller; + /** Page manager. */ + private final PageReadWriteManager pmPageMgr; + /** * Executor to disallow running code that modifies data in idxCacheStores concurrently with cleanup of file page * store. @@ -187,9 +195,6 @@ public class FilePageStoreManager extends GridCacheSharedManagerAdapter implemen /** Absolute directory for file page store. Includes consistent id based folder. 
*/ private File storeWorkDir; - /** */ - private final long metaPageId = PageIdUtils.pageId(-1, PageMemory.FLAG_IDX, 0); - /** */ private final Set grpsWithoutIdx = Collections.newSetFromMap(new ConcurrentHashMap()); @@ -217,6 +222,8 @@ public FilePageStoreManager(GridKernalContext ctx) { pageStoreV1FileIoFactory = pageStoreFileIoFactory = dsCfg.getFileIOFactory(); marshaller = MarshallerUtils.jdkMarshaller(ctx.igniteInstanceName()); + + pmPageMgr = new PageReadWriteManagerImpl(ctx, this, FilePageStoreManager.class.getSimpleName()); } /** {@inheritDoc} */ @@ -446,7 +453,7 @@ private List checkCachesWithDisabledWal() { } /** {@inheritDoc} */ - @Override public void initialize(int cacheId, int partitions, String workingDir, LongAdderMetric tracker) + @Override public void initialize(int cacheId, int partitions, String workingDir, LongConsumer tracker) throws IgniteCheckedException { assert storeWorkDir != null; @@ -493,7 +500,7 @@ private List checkCachesWithDisabledWal() { new File(storeWorkDir, META_STORAGE_NAME), grpId, PageIdAllocator.METASTORE_PARTITION + 1, - dataRegion.memoryMetrics().totalAllocatedPages(), + dataRegion.memoryMetrics().totalAllocatedPages()::add, false); CacheStoreHolder old = idxCacheStores.put(grpId, holder); @@ -596,34 +603,8 @@ public void removeConfigurationChangeListener(BiConsumer lsnr) { } /** {@inheritDoc} */ - @Override public void read(int grpId, long pageId, ByteBuffer pageBuf) throws IgniteCheckedException { - read(grpId, pageId, pageBuf, false); - } - - /** - * Will preserve crc in buffer if keepCrc is true. - * - * @param grpId Group ID. - * @param pageId Page ID. - * @param pageBuf Page buffer. - * @param keepCrc Keep CRC flag. - * @throws IgniteCheckedException If failed. 
- */ - public void read(int grpId, long pageId, ByteBuffer pageBuf, boolean keepCrc) throws IgniteCheckedException { - PageStore store = getStore(grpId, PageIdUtils.partId(pageId)); - - try { - store.read(pageId, pageBuf, keepCrc); - - assert keepCrc || PageIO.getCrc(pageBuf) == 0 : store.size() - store.pageOffset(pageId); - - cctx.kernalContext().compress().decompressPage(pageBuf, store.getPageSize()); - } - catch (StorageException e) { - cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); - - throw e; - } + @Override public void read(int grpId, long pageId, ByteBuffer pageBuf, boolean keepCrc) throws IgniteCheckedException { + pmPageMgr.read(grpId, pageId, pageBuf, keepCrc); } /** {@inheritDoc} */ @@ -648,8 +629,8 @@ public void read(int grpId, long pageId, ByteBuffer pageBuf, boolean keepCrc) th } /** {@inheritDoc} */ - @Override public void write(int grpId, long pageId, ByteBuffer pageBuf, int tag) throws IgniteCheckedException { - writeInternal(grpId, pageId, pageBuf, tag, true); + @Override public PageStore write(int grpId, long pageId, ByteBuffer pageBuf, int tag, boolean calculateCrc) throws IgniteCheckedException { + return pmPageMgr.write(grpId, pageId, pageBuf, tag, calculateCrc); } /** {@inheritDoc} */ @@ -659,59 +640,6 @@ public void read(int grpId, long pageId, ByteBuffer pageBuf, boolean keepCrc) th return store.pageOffset(pageId); } - /** - * @param cacheId Cache ID to write. - * @param pageId Page ID. - * @param pageBuf Page buffer. - * @param tag Partition tag (growing 1-based partition file version). Used to validate page is not outdated - * @param calculateCrc if {@code False} crc calculation will be forcibly skipped. - * @return PageStore to which the page has been written. - * @throws IgniteCheckedException If IO error occurred. 
- */ - public PageStore writeInternal(int cacheId, long pageId, ByteBuffer pageBuf, int tag, boolean calculateCrc) - throws IgniteCheckedException { - int partId = PageIdUtils.partId(pageId); - - PageStore store = getStore(cacheId, partId); - - try { - int pageSize = store.getPageSize(); - int compressedPageSize = pageSize; - - GridCacheContext cctx0 = cctx.cacheContext(cacheId); - - if (cctx0 != null) { - assert pageBuf.position() == 0 && pageBuf.limit() == pageSize : pageBuf; - - ByteBuffer compressedPageBuf = cctx0.compress().compressPage(pageBuf, store); - - if (compressedPageBuf != pageBuf) { - compressedPageSize = PageIO.getCompressedSize(compressedPageBuf); - - if (!calculateCrc) { - calculateCrc = true; - PageIO.setCrc(compressedPageBuf, 0); // It will be recalculated over compressed data further. - } - - PageIO.setCrc(pageBuf, 0); // It is expected to be reset to 0 after each write. - pageBuf = compressedPageBuf; - } - } - - store.write(pageId, pageBuf, tag, calculateCrc); - - if (pageSize > compressedPageSize) - store.punchHole(pageId, compressedPageSize); // TODO maybe add async punch mode? 
- } - catch (StorageException e) { - cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); - - throw e; - } - - return store; - } - /** * */ @@ -741,7 +669,7 @@ private CacheStoreHolder initForCache(CacheGroupDescriptor grpDesc, CacheConfigu cacheWorkDir, grpDesc.groupId(), grpDesc.config().getAffinity().partitions(), - allocatedTracker, + allocatedTracker::add, ccfg.isEncryptionEnabled() ); } @@ -799,7 +727,7 @@ public FilePageStoreFactory getPageStoreFactory(int grpId, boolean encrypted) { private CacheStoreHolder initDir(File cacheWorkDir, int grpId, int partitions, - LongAdderMetric allocatedTracker, + LongConsumer allocatedTracker, boolean encrypted) throws IgniteCheckedException { try { boolean dirExisted = checkAndInitCacheWorkDir(cacheWorkDir); @@ -829,7 +757,7 @@ private CacheStoreHolder initDir(File cacheWorkDir, allocatedTracker); partStores[partId] = partStore; - } + } return new CacheStoreHolder(idxStore, partStores); } @@ -983,25 +911,7 @@ else if (lockF.exists()) { /** {@inheritDoc} */ @Override public long allocatePage(int grpId, int partId, byte flags) throws IgniteCheckedException { - assert partId <= MAX_PARTITION_ID || partId == INDEX_PARTITION; - - PageStore store = getStore(grpId, partId); - - try { - long pageIdx = store.allocatePage(); - - return PageIdUtils.pageId(partId, flags, (int)pageIdx); - } - catch (StorageException e) { - cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); - - throw e; - } - } - - /** {@inheritDoc} */ - @Override public long metaPageId(final int grpId) { - return metaPageId; + return pmPageMgr.allocatePage(grpId, partId, flags); } /** {@inheritDoc} */ @@ -1341,7 +1251,7 @@ private CacheStoreHolder getHolder(int grpId) throws IgniteCheckedException { * @return Collection of related page stores. * @throws IgniteCheckedException If failed. 
*/ - public Collection getStores(int grpId) throws IgniteCheckedException { + @Override public Collection getStores(int grpId) throws IgniteCheckedException { return getHolder(grpId); } @@ -1353,7 +1263,7 @@ public Collection getStores(int grpId) throws IgniteCheckedException * * Note: visible for testing. */ - public PageStore getStore(int grpId, int partId) throws IgniteCheckedException { + @Override public PageStore getStore(int grpId, int partId) throws IgniteCheckedException { CacheStoreHolder holder = getHolder(grpId); if (holder == null) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreV2.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreV2.java index 8109dbbfa8c33..84609c9cb09c8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreV2.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreV2.java @@ -17,8 +17,8 @@ package org.apache.ignite.internal.processors.cache.persistence.file; import java.nio.file.Path; +import java.util.function.LongConsumer; import org.apache.ignite.configuration.DataStorageConfiguration; -import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; import org.apache.ignite.lang.IgniteOutClosure; /** @@ -45,7 +45,7 @@ public FilePageStoreV2( IgniteOutClosure pathProvider, FileIOFactory factory, DataStorageConfiguration cfg, - LongAdderMetric allocatedTracker) { + LongConsumer allocatedTracker) { super(type, pathProvider, factory, cfg, allocatedTracker); hdrSize = cfg.getPageSize(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileVersionCheckingFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileVersionCheckingFactory.java index e54712247ab85..e6502426f82f0 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileVersionCheckingFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileVersionCheckingFactory.java @@ -22,10 +22,10 @@ import java.nio.ByteOrder; import java.nio.file.Files; import java.nio.file.Path; +import java.util.function.LongConsumer; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.internal.pagemem.store.PageStore; -import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; import org.apache.ignite.lang.IgniteOutClosure; /** @@ -69,7 +69,7 @@ public FileVersionCheckingFactory( @Override public PageStore createPageStore( byte type, IgniteOutClosure pathProvider, - LongAdderMetric allocatedTracker) throws IgniteCheckedException { + LongConsumer allocatedTracker) throws IgniteCheckedException { Path filePath = pathProvider.apply(); if (!Files.exists(filePath)) @@ -124,7 +124,7 @@ private FilePageStore createPageStore( byte type, IgniteOutClosure pathProvider, int ver, - LongAdderMetric allocatedTracker) { + LongConsumer allocatedTracker) { switch (ver) { case FilePageStore.VERSION: diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/PagesList.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/PagesList.java index e944b1ec0310b..530690eac0aaf 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/PagesList.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/PagesList.java @@ -67,6 +67,7 @@ import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_IDX; import static org.apache.ignite.internal.pagemem.PageIdUtils.MAX_ITEMID_NUM; import static 
org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.getPageId; +import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver.DEFAULT_PAGE_IO_RESOLVER; /** * Striped doubly-linked list of page IDs optionally organized in buckets. @@ -211,7 +212,7 @@ protected PagesList( PageLockListener lockLsnr, GridKernalContext ctx ) { - super(cacheId, null, pageMem, wal, lockLsnr); + super(cacheId, null, pageMem, wal, lockLsnr, DEFAULT_PAGE_IO_RESOLVER); this.name = name; this.buckets = buckets; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryEx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryEx.java index 8bba4e1cf5506..4ddcb94131899 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryEx.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryEx.java @@ -66,23 +66,13 @@ void writeUnlock(int grpId, long pageId, long page, Boolean walPlc, boolean dirtyFlag, boolean restore); /** - * Gets or allocates metadata page for specified grpId. - * - * @param grpId Group ID. - * @return Meta page for grpId. - * @throws IgniteCheckedException If failed. - */ - public long metaPageId(int grpId) throws IgniteCheckedException; - - /** - * Gets or allocates partition metadata page for specified grpId and partId. + * Gets partition metadata page for specified grpId and partId. * * @param grpId Group ID. * @param partId Partition ID. * @return Meta page for grpId and partId. - * @throws IgniteCheckedException If failed. 
*/ - public long partitionMetaPageId(int grpId, int partId) throws IgniteCheckedException; + public long partitionMetaPageId(int grpId, int partId); /** * @see #acquirePage(int, long) @@ -154,6 +144,9 @@ public void checkpointWritePage( CheckpointMetricsTracker tracker ) throws IgniteCheckedException; + /** */ + public PageReadWriteManager pageManager(); + /** * Marks partition as invalid / outdated. * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java index f349cad46a11b..4872c6d8eeca9 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java @@ -57,7 +57,6 @@ import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.PageUtils; -import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.pagemem.wal.WALIterator; import org.apache.ignite.internal.pagemem.wal.record.CheckpointRecord; @@ -196,8 +195,8 @@ public class PageMemoryImpl implements PageMemoryEx { /** */ private final ExecutorService asyncRunner; - /** Page store manager. */ - private IgnitePageStoreManager storeMgr; + /** Page manager. */ + private final PageReadWriteManager pmPageMgr; /** */ private IgniteWriteAheadLogManager walMgr; @@ -276,6 +275,7 @@ public class PageMemoryImpl implements PageMemoryEx { * @param directMemoryProvider Memory allocator to use. * @param sizes segments sizes, last is checkpoint pool size. * @param ctx Cache shared context. + * @param pmPageMgr Page store manager. * @param pageSize Page size. 
* @param flushDirtyPage write callback invoked when a dirty page is removed for replacement. * @param changeTracker Callback invoked to track changes in pages. @@ -288,6 +288,7 @@ public PageMemoryImpl( DirectMemoryProvider directMemoryProvider, long[] sizes, GridCacheSharedContext ctx, + PageReadWriteManager pmPageMgr, int pageSize, PageStoreWriter flushDirtyPage, @Nullable GridInClosure3X changeTracker, @@ -315,12 +316,12 @@ public PageMemoryImpl( this.throttlingPlc = throttlingPlc != null ? throttlingPlc : ThrottlingPolicy.CHECKPOINT_BUFFER_ONLY; this.cpProgressProvider = cpProgressProvider; - storeMgr = ctx.pageStore(); + this.pmPageMgr = pmPageMgr; walMgr = ctx.wal(); encMgr = ctx.kernalContext().encryption(); encryptionDisabled = ctx.gridConfig().getEncryptionSpi() instanceof NoopEncryptionSpi; - assert storeMgr != null; + assert pmPageMgr != null; assert walMgr != null; assert encMgr != null; @@ -523,7 +524,7 @@ else if (throttlingPlc == ThrottlingPolicy.CHECKPOINT_BUFFER_ONLY) if (isThrottlingEnabled()) writeThrottle.onMarkDirty(false); - long pageId = storeMgr.allocatePage(grpId, partId, flags); + long pageId = pmPageMgr.allocatePage(grpId, partId, flags); assert PageIdUtils.pageIndex(pageId) > 0; //it's crucial for tracking pages (zero page is super one) @@ -672,13 +673,6 @@ private DataRegionConfiguration getDataRegionConfiguration() { return false; } - /** {@inheritDoc} */ - @Override public long metaPageId(int grpId) { - assert started; - - return storeMgr.metaPageId(grpId); - } - /** {@inheritDoc} */ @Override public long partitionMetaPageId(int grpId, int partId) { assert started; @@ -881,7 +875,7 @@ else if (relPtr == OUTDATED_REL_PTR) { long actualPageId = 0; try { - storeMgr.read(grpId, pageId, buf); + pmPageMgr.read(grpId, pageId, buf, false); statHolder.trackPhysicalAndLogicalRead(pageAddr); @@ -1187,6 +1181,11 @@ private boolean isThrottlingEnabled() { writeThrottle.onFinishCheckpoint(); } + /** {@inheritDoc} */ + @Override public 
PageReadWriteManager pageManager() { + return pmPageMgr; + } + /** {@inheritDoc} */ @Override public void checkpointWritePage( FullPageId fullId, @@ -2148,7 +2147,7 @@ private boolean preparePageRemoval(FullPageId fullPageId, long absPtr, PageStore assert writeLock().isHeldByCurrentThread(); // Do not evict cache meta pages. - if (fullPageId.pageId() == storeMgr.metaPageId(fullPageId.groupId())) + if (fullPageId.pageId() == META_PAGE_ID) return false; if (PageHeader.isAcquired(absPtr)) @@ -2161,7 +2160,7 @@ private boolean preparePageRemoval(FullPageId fullPageId, long absPtr, PageStore // Can evict a dirty page only if should be written by a checkpoint. // These pages does not have tmp buffer. if (checkpointPages != null && checkpointPages.allowToSave(fullPageId)) { - assert storeMgr != null; + assert pmPageMgr != null; memMetrics.updatePageReplaceRate(U.currentTimeMillis() - PageHeader.readTimestamp(absPtr)); @@ -2334,7 +2333,7 @@ private long removePageForReplacement(PageStoreWriter saveDirtyPage) throws Igni CheckpointPages checkpointPages = this.checkpointPages; if (relRmvAddr == rndAddr || pinned || skip || - fullId.pageId() == storeMgr.metaPageId(fullId.groupId()) || + fullId.pageId() == META_PAGE_ID || (dirty && (checkpointPages == null || !checkpointPages.contains(fullId))) ) { i--; @@ -2487,10 +2486,14 @@ private long tryToFindSequentially(int cap, PageStoreWriter saveDirtyPage) throw ", pinnedInSegment=" + pinnedCnt + ", failedToPrepare=" + failToPrepare + ']' + U.nl() + "Out of memory in data region [" + - "name=" + dataRegionCfg.getName() + - ", initSize=" + U.readableSize(dataRegionCfg.getInitialSize(), false) + - ", maxSize=" + U.readableSize(dataRegionCfg.getMaxSize(), false) + - ", persistenceEnabled=" + dataRegionCfg.isPersistenceEnabled() + "] Try the following:" + U.nl() + + (dataRegionCfg == null ? 
"NULL" : ( + "name=" + dataRegionCfg.getName() + + ", initSize=" + U.readableSize(dataRegionCfg.getInitialSize(), false) + + ", maxSize=" + U.readableSize(dataRegionCfg.getMaxSize(), false) + + ", persistenceEnabled=" + dataRegionCfg.isPersistenceEnabled() + )) + + "]" + + " Try the following:" + U.nl() + " ^-- Increase maximum off-heap memory size (DataRegionConfiguration.maxSize)" + U.nl() + " ^-- Enable Ignite persistence (DataRegionConfiguration.persistenceEnabled)" + U.nl() + " ^-- Enable eviction or expiration policies" diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageReadWriteManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageReadWriteManager.java new file mode 100644 index 0000000000000..531da8bbef817 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageReadWriteManager.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.pagemem; + +import java.nio.ByteBuffer; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.pagemem.store.PageStore; + +/** */ +public interface PageReadWriteManager { + /** + * Reads a page for the given cache ID. Cache ID may be {@code 0} if the page is a meta page. + * + * @param grpId Cache group ID. + * @param pageId PageID to read. + * @param pageBuf Page buffer to write to. + * @param keepCrc Keep CRC flag. + * @throws IgniteCheckedException If failed to read the page. + */ + public void read(int grpId, long pageId, ByteBuffer pageBuf, boolean keepCrc) throws IgniteCheckedException; + + /** + * Writes the page for the given cache ID. Cache ID may be {@code 0} if the page is a meta page. + * + * @param grpId Cache group ID. + * @param pageId Page ID. + * @param pageBuf Page buffer to write. + * @throws IgniteCheckedException If failed to write page. + */ + public PageStore write(int grpId, long pageId, ByteBuffer pageBuf, int tag, boolean calculateCrc) throws IgniteCheckedException; + + /** + * Allocates a page for the given page space. + * + * @param grpId Cache group ID. + * @param partId Partition ID. Used only if {@code flags} is equal to {@link PageMemory#FLAG_DATA}. + * @param flags Page allocation flags. + * @return Allocated page ID. + * @throws IgniteCheckedException If IO exception occurred while allocating a page ID. 
+ */ + public long allocatePage(int grpId, int partId, byte flags) throws IgniteCheckedException; +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageReadWriteManagerImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageReadWriteManagerImpl.java new file mode 100644 index 0000000000000..5fbd7c15354c8 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageReadWriteManagerImpl.java @@ -0,0 +1,146 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.pagemem; + +import java.nio.ByteBuffer; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.failure.FailureContext; +import org.apache.ignite.failure.FailureType; +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.pagemem.PageIdUtils; +import org.apache.ignite.internal.pagemem.store.PageStore; +import org.apache.ignite.internal.pagemem.store.PageStoreCollection; +import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.processors.cache.persistence.StorageException; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; +import org.apache.ignite.internal.util.tostring.GridToStringExclude; +import org.apache.ignite.internal.util.typedef.internal.S; + +import static org.apache.ignite.internal.pagemem.PageIdAllocator.INDEX_PARTITION; +import static org.apache.ignite.internal.pagemem.PageIdAllocator.MAX_PARTITION_ID; + +/** */ +public class PageReadWriteManagerImpl implements PageReadWriteManager { + /** */ + private final GridKernalContext ctx; + + /** */ + @GridToStringExclude + protected final PageStoreCollection pageStores; + + /** */ + @SuppressWarnings("unused") + private final String name; + + /** + * @param ctx Kernal context. + * @param pageStores Page stores. 
+ */ + public PageReadWriteManagerImpl( + GridKernalContext ctx, + PageStoreCollection pageStores, + String name + ) { + this.ctx = ctx; + this.pageStores = pageStores; + this.name = name; + } + + /** {@inheritDoc} */ + @Override public void read(int grpId, long pageId, ByteBuffer pageBuf, boolean keepCrc) throws IgniteCheckedException { + PageStore store = pageStores.getStore(grpId, PageIdUtils.partId(pageId)); + + try { + store.read(pageId, pageBuf, keepCrc); + + ctx.compress().decompressPage(pageBuf, store.getPageSize()); + } + catch (StorageException e) { + ctx.failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); + + throw e; + } + } + + /** {@inheritDoc} */ + @Override public PageStore write(int grpId, long pageId, ByteBuffer pageBuf, int tag, boolean calculateCrc) throws IgniteCheckedException { + int partId = PageIdUtils.partId(pageId); + + PageStore store = pageStores.getStore(grpId, partId); + + try { + int pageSize = store.getPageSize(); + int compressedPageSize = pageSize; + + GridCacheContext cctx0 = ctx.cache().context().cacheContext(grpId); + + if (cctx0 != null) { + assert pageBuf.position() == 0 && pageBuf.limit() == pageSize : pageBuf; + + ByteBuffer compressedPageBuf = cctx0.compress().compressPage(pageBuf, store); + + if (compressedPageBuf != pageBuf) { + compressedPageSize = PageIO.getCompressedSize(compressedPageBuf); + + if (!calculateCrc) { + calculateCrc = true; + PageIO.setCrc(compressedPageBuf, 0); // It will be recalculated over compressed data further. + } + + PageIO.setCrc(pageBuf, 0); // It is expected to be reset to 0 after each write. + pageBuf = compressedPageBuf; + } + } + + store.write(pageId, pageBuf, tag, calculateCrc); + + if (pageSize > compressedPageSize) + store.punchHole(pageId, compressedPageSize); // TODO maybe add async punch mode? 
+ } + catch (StorageException e) { + ctx.failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); + + throw e; + } + + return store; + } + + /** {@inheritDoc} */ + @Override public long allocatePage(int grpId, int partId, byte flags) throws IgniteCheckedException { + assert partId <= MAX_PARTITION_ID || partId == INDEX_PARTITION; + + PageStore store = pageStores.getStore(grpId, partId); + + try { + long pageIdx = store.allocatePage(); + + return PageIdUtils.pageId(partId, flags, (int)pageIdx); + } + catch (StorageException e) { + ctx.failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); + + throw e; + } + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(PageReadWriteManagerImpl.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/snapshot/IgniteSnapshotManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/snapshot/IgniteSnapshotManager.java index 03e7e1c336687..681485de1f54e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/snapshot/IgniteSnapshotManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/snapshot/IgniteSnapshotManager.java @@ -87,7 +87,6 @@ import org.apache.ignite.internal.processors.cluster.DiscoveryDataClusterState; import org.apache.ignite.internal.processors.marshaller.MappedName; import org.apache.ignite.internal.processors.metric.MetricRegistry; -import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; import org.apache.ignite.internal.processors.task.GridInternal; import org.apache.ignite.internal.util.GridBusyLock; import org.apache.ignite.internal.util.distributed.DistributedProcess; @@ -1209,7 +1208,7 @@ public LocalSnapshotSender(String snpName) { .apply(pair.getGroupId(), false) .createPageStore(getFlagByPartId(pair.getPartitionId()), snpPart::toPath, - new 
LongAdderMetric("NO_OP", null)) + val -> {}) ) { ByteBuffer pageBuf = ByteBuffer.allocate(pageSize) .order(ByteOrder.nativeOrder()); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java index a46b3a8a7dcf3..5e67b7c591423 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java @@ -59,9 +59,11 @@ import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusMetaIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.LongListReuseBag; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseBag; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; +import org.apache.ignite.internal.processors.cache.persistence.tree.util.InsertLast; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandlerWrapper; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageLockListener; @@ -90,6 +92,7 @@ import static org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree.Result.NOT_FOUND; import static org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree.Result.RETRY; import static org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree.Result.RETRY_ROOT; +import static 
org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver.DEFAULT_PAGE_IO_RESOLVER; /** * Abstract B+Tree. @@ -778,7 +781,8 @@ protected BPlusTree( metaPageId, reuseList, failureProcessor, - lockLsnr + lockLsnr, + DEFAULT_PAGE_IO_RESOLVER ); setIos(innerIos, leafIos); @@ -806,9 +810,10 @@ protected BPlusTree( long metaPageId, ReuseList reuseList, @Nullable FailureProcessor failureProcessor, - @Nullable PageLockListener lsnr - ) throws IgniteCheckedException { - super(cacheGrpId, grpName, pageMem, wal, lsnr); + @Nullable PageLockListener lsnr, + PageIoResolver pageIoRslvr + ) { + super(cacheGrpId, grpName, pageMem, wal, lsnr, pageIoRslvr); assert !F.isEmpty(name); @@ -5289,6 +5294,9 @@ private int findInsertionPoint(int lvl, BPlusIO io, long buf, int low, int cn throws IgniteCheckedException { assert row != null; + if (row instanceof InsertLast) + return -cnt - 1; + int high = cnt - 1; while (low <= high) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIoResolver.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIoResolver.java new file mode 100644 index 0000000000000..2354c09bc65b6 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIoResolver.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.tree.io; + +import org.apache.ignite.IgniteCheckedException; + +/** */ +public interface PageIoResolver { + /** */ + public static final PageIoResolver DEFAULT_PAGE_IO_RESOLVER = PageIO::getPageIO; + + /** */ + PageIO resolve(long pageAddr) throws IgniteCheckedException; +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/InsertLast.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/InsertLast.java new file mode 100644 index 0000000000000..e7cfd411f3755 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/InsertLast.java @@ -0,0 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.tree.util; + +/** + * Rows with this marker interface will always be inserted in the very end of the tree. + */ +public interface InsertLast { +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/PageHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/PageHandler.java index 4d05e5e7ec1da..9461cca81e60d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/PageHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/PageHandler.java @@ -25,11 +25,13 @@ import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.pagemem.wal.record.delta.InitNewPageRecord; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver; import org.apache.ignite.internal.util.GridUnsafe; import org.jetbrains.annotations.Nullable; import static java.lang.Boolean.FALSE; import static java.lang.Boolean.TRUE; +import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver.DEFAULT_PAGE_IO_RESOLVER; /** * Page handler. 
@@ -112,7 +114,8 @@ public static R readPage( X arg, int intArg, R lockFailed, - IoStatisticsHolder statHolder + IoStatisticsHolder statHolder, + PageIoResolver pageIoRslvr ) throws IgniteCheckedException { long page = pageMem.acquirePage(cacheId, pageId, statHolder); try { @@ -121,7 +124,7 @@ public static R readPage( if (pageAddr == 0L) return lockFailed; try { - PageIO io = PageIO.getPageIO(pageAddr); + PageIO io = pageIoRslvr.resolve(pageAddr); return h.run(cacheId, pageId, page, pageAddr, io, null, arg, intArg, statHolder); } finally { @@ -157,7 +160,8 @@ public static R readPage( X arg, int intArg, R lockFailed, - IoStatisticsHolder statHolder + IoStatisticsHolder statHolder, + PageIoResolver pageIoRslvr ) throws IgniteCheckedException { long pageAddr = 0L; @@ -165,7 +169,7 @@ public static R readPage( if ((pageAddr = readLock(pageMem, cacheId, pageId, page, lsnr)) == 0L) return lockFailed; - PageIO io = PageIO.getPageIO(pageAddr); + PageIO io = pageIoRslvr.resolve(pageAddr); return h.run(cacheId, pageId, page, pageAddr, io, null, arg, intArg, statHolder); } finally { @@ -236,7 +240,7 @@ public static void initPage( PageLockListener lsnr, IoStatisticsHolder statHolder ) throws IgniteCheckedException { - Boolean res = writePage(pageMem, grpId, pageId, lsnr, PageHandler.NO_OP, init, wal, null, null, 0, FALSE, statHolder); + Boolean res = writePage(pageMem, grpId, pageId, lsnr, PageHandler.NO_OP, init, wal, null, null, 0, FALSE, statHolder, DEFAULT_PAGE_IO_RESOLVER); assert res != FALSE; } @@ -269,7 +273,8 @@ public static R writePage( X arg, int intArg, R lockFailed, - IoStatisticsHolder statHolder + IoStatisticsHolder statHolder, + PageIoResolver pageIoRslvr ) throws IgniteCheckedException { boolean releaseAfterWrite = true; long page = pageMem.acquirePage(grpId, pageId, statHolder); @@ -288,7 +293,7 @@ public static R writePage( walPlc = FALSE; } else - init = PageIO.getPageIO(pageAddr); + init = pageIoRslvr.resolve(pageAddr); R res = h.run(grpId, pageId, 
page, pageAddr, init, walPlc, arg, intArg, statHolder); @@ -339,7 +344,8 @@ public static R writePage( X arg, int intArg, R lockFailed, - IoStatisticsHolder statHolder + IoStatisticsHolder statHolder, + PageIoResolver pageIoRslvr ) throws IgniteCheckedException { long pageAddr = writeLock(pageMem, grpId, pageId, page, lsnr, false); @@ -355,7 +361,7 @@ public static R writePage( walPlc = FALSE; } else - init = PageIO.getPageIO(pageAddr); + init = pageIoRslvr.resolve(pageAddr); R res = h.run(grpId, pageId, page, pageAddr, init, walPlc, arg, intArg, statHolder); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataRowStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataRowStore.java index 103280131729d..f06677135b0f0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataRowStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataRowStore.java @@ -31,7 +31,7 @@ */ public class CacheDataRowStore extends RowStore { /** Whether version should be skipped. */ - private static ThreadLocal SKIP_VER = ThreadLocal.withInitial(() -> false); + private static final ThreadLocal SKIP_VER = ThreadLocal.withInitial(() -> false); /** * @return Skip version flag. @@ -74,17 +74,8 @@ public int getPartitionId() { * @param link Link. * @return Search row. */ - CacheSearchRow keySearchRow(int cacheId, int hash, long link) { - DataRow dataRow = new DataRow( - grp, - hash, - link, - partId, - CacheDataRowAdapter.RowData.KEY_ONLY, - SKIP_VER.get() - ); - - return initDataRow(dataRow, cacheId); + protected CacheSearchRow keySearchRow(int cacheId, int hash, long link) { + return dataRow(cacheId, hash, link, CacheDataRowAdapter.RowData.KEY_ONLY); } /** @@ -97,7 +88,7 @@ CacheSearchRow keySearchRow(int cacheId, int hash, long link) { * @param opCntr Mvcc operation counter. * @return Search row. 
*/ - MvccDataRow mvccRow(int cacheId, int hash, long link, CacheDataRowAdapter.RowData rowData, long crdVer, long mvccCntr, int opCntr) { + protected MvccDataRow mvccRow(int cacheId, int hash, long link, CacheDataRowAdapter.RowData rowData, long crdVer, long mvccCntr, int opCntr) { MvccDataRow row = new MvccDataRow( grp, hash, @@ -120,7 +111,7 @@ MvccDataRow mvccRow(int cacheId, int hash, long link, CacheDataRowAdapter.RowDat * @param rowData Required row data. * @return Data row. */ - CacheDataRow dataRow(int cacheId, int hash, long link, CacheDataRowAdapter.RowData rowData) { + protected CacheDataRow dataRow(int cacheId, int hash, long link, CacheDataRowAdapter.RowData rowData) { DataRow dataRow = new DataRow( grp, hash, diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/WalModeChangeAdvancedSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/WalModeChangeAdvancedSelfTest.java index 9eb3b7cea8921..10e1db155abb5 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/WalModeChangeAdvancedSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/WalModeChangeAdvancedSelfTest.java @@ -24,7 +24,6 @@ import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.IgniteCheckedException; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsRecoveryAfterFileCorruptionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsRecoveryAfterFileCorruptionTest.java index 12774b464e13a..82dfb71fc7e64 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsRecoveryAfterFileCorruptionTest.java +++ 
b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsRecoveryAfterFileCorruptionTest.java @@ -339,7 +339,7 @@ private void generateWal( long writeStart = System.nanoTime(); - storeMgr.write(cacheId, pageId, buf, tag); + storeMgr.write(cacheId, pageId, buf, tag, true); long writeEnd = System.nanoTime(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsTaskCancelingTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsTaskCancelingTest.java index 95e24ccb9d6e2..300ce4e3d1a3e 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsTaskCancelingTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsTaskCancelingTest.java @@ -44,7 +44,6 @@ import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStore; import org.apache.ignite.internal.processors.cache.persistence.file.RandomAccessFileIOFactory; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; -import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; import org.apache.ignite.internal.util.GridUnsafe; import org.apache.ignite.internal.util.lang.GridAbsPredicate; import org.apache.ignite.internal.util.typedef.internal.U; @@ -199,8 +198,7 @@ public void testFilePageStoreInterruptThreads() throws Exception { DataStorageConfiguration dbCfg = getDataStorageConfiguration(); - FilePageStore pageStore = new FilePageStore(PageMemory.FLAG_DATA, () -> file.toPath(), factory, dbCfg, - new LongAdderMetric("NO_OP", null)); + FilePageStore pageStore = new FilePageStore(PageMemory.FLAG_DATA, file::toPath, factory, dbCfg, val -> {}); int pageSize = dbCfg.getPageSize(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsCheckpointSimulationWithRealCpDisabledTest.java 
b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsCheckpointSimulationWithRealCpDisabledTest.java index 04a0cf1cc3861..f829b90d69c8d 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsCheckpointSimulationWithRealCpDisabledTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsCheckpointSimulationWithRealCpDisabledTest.java @@ -1016,7 +1016,7 @@ private IgniteBiTuple, WALPointer> runCheckpointing( long writeStart = System.nanoTime(); - storeMgr.write(cacheId, fullId.pageId(), tmpBuf, tag); + storeMgr.write(cacheId, fullId.pageId(), tmpBuf, tag, true); long writeEnd = System.nanoTime(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreePageMemoryImplTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreePageMemoryImplTest.java index 8f985cd3bdb05..3e7328150caa6 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreePageMemoryImplTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreePageMemoryImplTest.java @@ -108,6 +108,7 @@ public class BPlusTreePageMemoryImplTest extends BPlusTreeSelfTest { PageMemory mem = new PageMemoryImpl( provider, sizes, sharedCtx, + sharedCtx.pageStore(), PAGE_SIZE, (fullPageId, byteBuf, tag) -> { assert false : "No page replacement should happen during the test"; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreeReuseListPageMemoryImplTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreeReuseListPageMemoryImplTest.java index 1e67469a1f0a8..456c9d6016074 100644 --- 
a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreeReuseListPageMemoryImplTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreeReuseListPageMemoryImplTest.java @@ -107,6 +107,7 @@ public class BPlusTreeReuseListPageMemoryImplTest extends BPlusTreeReuseSelfTest PageMemory mem = new PageMemoryImpl( provider, sizes, sharedCtx, + sharedCtx.pageStore(), PAGE_SIZE, (fullPageId, byteBuf, tag) -> { assert false : "No page replacement (rotation with disk) should happen during the test"; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IgnitePageMemReplaceDelayedWriteUnitTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IgnitePageMemReplaceDelayedWriteUnitTest.java index a8f9ee2d002f9..1f6d5ce0526be 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IgnitePageMemReplaceDelayedWriteUnitTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IgnitePageMemReplaceDelayedWriteUnitTest.java @@ -275,7 +275,7 @@ private PageMemoryImpl createPageMemory(IgniteConfiguration cfg, PageStoreWriter } }; - PageMemoryImpl memory = new PageMemoryImpl(provider, sizes, sctx, pageSize, + PageMemoryImpl memory = new PageMemoryImpl(provider, sizes, sctx, sctx.pageStore(), pageSize, pageWriter, null, () -> true, memMetrics, PageMemoryImpl.ThrottlingPolicy.DISABLED, clo); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IndexStoragePageMemoryImplTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IndexStoragePageMemoryImplTest.java index 67aa12b58e9c4..19e67b47c9b8f 100644 --- 
a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IndexStoragePageMemoryImplTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IndexStoragePageMemoryImplTest.java @@ -123,6 +123,7 @@ public class IndexStoragePageMemoryImplTest extends IndexStorageSelfTest { return new PageMemoryImpl( provider, sizes, sharedCtx, + sharedCtx.pageStore(), PAGE_SIZE, (fullPageId, byteBuf, tag) -> { assert false : "No page replacement (rotation with disk) should happen during the test"; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpPageStoreManager.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpPageStoreManager.java index 3c7a2ede4a7f0..c5cc3728a9b94 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpPageStoreManager.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpPageStoreManager.java @@ -23,6 +23,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.LongConsumer; import java.util.function.Predicate; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.configuration.CacheConfiguration; @@ -30,11 +31,11 @@ import org.apache.ignite.internal.pagemem.FullPageId; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager; +import org.apache.ignite.internal.pagemem.store.PageStore; import org.apache.ignite.internal.processors.cache.CacheGroupContext; import org.apache.ignite.internal.processors.cache.CacheGroupDescriptor; import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.StoredCacheData; 
-import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.lang.IgniteFuture; @@ -57,7 +58,7 @@ public class NoOpPageStoreManager implements IgnitePageStoreManager { /** {@inheritDoc} */ @Override public void initialize(int cacheId, int partitions, String workingDir, - LongAdderMetric tracker) throws IgniteCheckedException { + LongConsumer tracker) throws IgniteCheckedException { // No-op. } @@ -88,8 +89,8 @@ public class NoOpPageStoreManager implements IgnitePageStoreManager { } /** {@inheritDoc} */ - @Override public void read(int grpId, long pageId, ByteBuffer pageBuf) throws IgniteCheckedException { - + @Override public void read(int grpId, long pageId, ByteBuffer pageBuf, boolean keepCrc) throws IgniteCheckedException { + // No-op. } /** {@inheritDoc} */ @@ -103,8 +104,9 @@ public class NoOpPageStoreManager implements IgnitePageStoreManager { } /** {@inheritDoc} */ - @Override public void write(int grpId, long pageId, ByteBuffer pageBuf, int tag) throws IgniteCheckedException { + @Override public PageStore write(int grpId, long pageId, ByteBuffer pageBuf, int tag, boolean calculateCrc) throws IgniteCheckedException { // No-op. + return null; } /** {@inheritDoc} */ @@ -150,11 +152,6 @@ public class NoOpPageStoreManager implements IgnitePageStoreManager { return allocator.get(); } - /** {@inheritDoc} */ - @Override public long metaPageId(int grpId) { - return 1; - } - /** {@inheritDoc} */ @Override public void start(GridCacheSharedContext cctx) throws IgniteCheckedException { // No-op. 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplNoLoadTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplNoLoadTest.java index 9f74ec498b2eb..51e29fff39ec0 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplNoLoadTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplNoLoadTest.java @@ -114,6 +114,7 @@ public class PageMemoryImplNoLoadTest extends PageMemoryNoLoadSelfTest { provider, sizes, sharedCtx, + sharedCtx.pageStore(), PAGE_SIZE, (fullPageId, byteBuf, tag) -> { assert false : "No page replacement (rotation with disk) should happen during the test"; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplTest.java index 9ea27f9965376..1632e733db8ec 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplTest.java @@ -43,6 +43,7 @@ import org.apache.ignite.internal.pagemem.FullPageId; import org.apache.ignite.internal.pagemem.PageUtils; import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager; +import org.apache.ignite.internal.pagemem.store.PageStore; import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.persistence.CheckpointLockStateChecker; import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; @@ -327,7 +328,7 @@ private void doCheckpoint( PageStoreWriter pageStoreWriter = (fullPageId, buf, tag) -> { assertNotNull(tag); - 
pageStoreMgr.write(fullPageId.groupId(), fullPageId.pageId(), buf, 1); + pageStoreMgr.write(fullPageId.groupId(), fullPageId.pageId(), buf, 1, false); }; for (FullPageId cpPage : cpPages) { @@ -642,6 +643,7 @@ private PageMemoryImpl createPageMemory( provider, sizes, sharedCtx, + sharedCtx.pageStore(), PAGE_SIZE, replaceWriter, new GridInClosure3X() { @@ -661,6 +663,7 @@ private PageMemoryImpl createPageMemory( provider, sizes, sharedCtx, + sharedCtx.pageStore(), PAGE_SIZE, replaceWriter, new GridInClosure3X() { @@ -701,7 +704,7 @@ private static class TestPageStoreManager extends NoOpPageStoreManager implement private Map storedPages = new HashMap<>(); /** {@inheritDoc} */ - @Override public void read(int grpId, long pageId, ByteBuffer pageBuf) throws IgniteCheckedException { + @Override public void read(int grpId, long pageId, ByteBuffer pageBuf, boolean keepCrc) throws IgniteCheckedException { FullPageId fullPageId = new FullPageId(pageId, grpId); byte[] bytes = storedPages.get(fullPageId); @@ -713,12 +716,14 @@ private static class TestPageStoreManager extends NoOpPageStoreManager implement } /** {@inheritDoc} */ - @Override public void write(int grpId, long pageId, ByteBuffer pageBuf, int tag) throws IgniteCheckedException { + @Override public PageStore write(int grpId, long pageId, ByteBuffer pageBuf, int tag, boolean calculateCrc) throws IgniteCheckedException { byte[] data = new byte[PAGE_SIZE]; pageBuf.get(data); storedPages.put(new FullPageId(pageId, grpId), data); + + return null; } /** {@inheritDoc} */ diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DurableBackgroundCleanupIndexTreeTask.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DurableBackgroundCleanupIndexTreeTask.java index 23e9af7d2ee8c..4d9166059041f 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DurableBackgroundCleanupIndexTreeTask.java +++ 
b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DurableBackgroundCleanupIndexTreeTask.java @@ -28,6 +28,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.persistence.metastorage.pendingtask.DurableBackgroundTask; import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver; import org.apache.ignite.internal.processors.query.h2.database.H2Tree; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.S; @@ -141,7 +142,8 @@ public DurableBackgroundCleanupIndexTreeTask( null, stats, null, - 0 + 0, + PageIoResolver.DEFAULT_PAGE_IO_RESOLVER ); trees0.add(tree); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index de64068f43825..7a31059a1d82f 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -479,7 +479,6 @@ GridH2IndexBase createSortedIndex(String name, GridH2Table tbl, boolean pk, bool wrappedCols, inlineSize, segments, - qryCtxRegistry, log ); } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java index 66badc0a8ffe9..9c6ca13236d0b 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java @@ -41,6 +41,7 @@ import 
org.apache.ignite.internal.processors.cache.persistence.tree.CorruptedTreeException; import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusMetaIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccDataRow; import org.apache.ignite.internal.processors.failure.FailureProcessor; @@ -208,7 +209,8 @@ public H2Tree( IgniteLogger log, IoStatisticsHolder stats, InlineIndexColumnFactory factory, - int configuredInlineSize + int configuredInlineSize, + PageIoResolver pageIoRslvr ) throws IgniteCheckedException { super( name, @@ -220,7 +222,8 @@ public H2Tree( metaPageId, reuseList, failureProcessor, - null + null, + pageIoRslvr ); this.cctx = cctx; @@ -296,7 +299,7 @@ public H2Tree( * * @return Indexed columns. */ - IndexColumn[] cols() { + public IndexColumn[] cols() { return cols; } @@ -308,11 +311,15 @@ IndexColumn[] cols() { * @throws IgniteCheckedException if failed. */ public H2Row createRow(long link) throws IgniteCheckedException { + return createRow(link, true); + } + + public H2Row createRow(long link, boolean follow) throws IgniteCheckedException { if (rowCache != null) { H2CacheRow row = rowCache.get(link); if (row == null) { - row = createRow0(link); + row = createRow0(link, follow); rowCache.put(row); } @@ -320,7 +327,7 @@ public H2Row createRow(long link) throws IgniteCheckedException { return row; } else - return createRow0(link); + return createRow0(link, follow); } /** @@ -332,14 +339,16 @@ public H2Row createRow(long link) throws IgniteCheckedException { * @return Row. * @throws IgniteCheckedException If failed. 
*/ - private H2CacheRow createRow0(long link) throws IgniteCheckedException { + private H2CacheRow createRow0(long link, boolean follow) throws IgniteCheckedException { CacheDataRowAdapter row = new CacheDataRowAdapter(link); - row.initFromLink( - cctx.group(), - CacheDataRowAdapter.RowData.FULL, - true - ); + if (follow) { + row.initFromLink( + cctx.group(), + CacheDataRowAdapter.RowData.FULL, + true + ); + } return table.rowDescriptor().createRow(row); } @@ -352,12 +361,35 @@ private H2CacheRow createRow0(long link) throws IgniteCheckedException { * @return Row. * @throws IgniteCheckedException if failed. */ - public H2Row createMvccRow(long link, long mvccCrdVer, long mvccCntr, int mvccOpCntr) throws IgniteCheckedException { + public H2Row createMvccRow( + long link, + long mvccCrdVer, + long mvccCntr, + int mvccOpCntr + ) throws IgniteCheckedException { + return createMvccRow(link, mvccCrdVer, mvccCntr, mvccOpCntr, null); + } + + /** + * Create row from link. + * + * @param link Link. + * @param mvccOpCntr MVCC operation counter. + * @return Row. + * @throws IgniteCheckedException if failed. 
+ */ + public H2Row createMvccRow( + long link, + long mvccCrdVer, + long mvccCntr, + int mvccOpCntr, + CacheDataRowAdapter.RowData rowData + ) throws IgniteCheckedException { if (rowCache != null) { H2CacheRow row = rowCache.get(link); if (row == null) { - row = createMvccRow0(link, mvccCrdVer, mvccCntr, mvccOpCntr); + row = createMvccRow0(link, mvccCrdVer, mvccCntr, mvccOpCntr, rowData); rowCache.put(row); } @@ -365,7 +397,15 @@ public H2Row createMvccRow(long link, long mvccCrdVer, long mvccCntr, int mvccOp return row; } else - return createMvccRow0(link, mvccCrdVer, mvccCntr, mvccOpCntr); + return createMvccRow0(link, mvccCrdVer, mvccCntr, mvccOpCntr, rowData); + } + + public boolean getPk() { + return pk; + } + + public boolean getAffinityKey() { + return affinityKey; } /** @@ -375,8 +415,8 @@ public H2Row createMvccRow(long link, long mvccCrdVer, long mvccCntr, int mvccOp * @param mvccOpCntr Mvcc operation counter. * @return Row. */ - private H2CacheRow createMvccRow0(long link, long mvccCrdVer, long mvccCntr, int mvccOpCntr) - throws IgniteCheckedException { + private H2CacheRow createMvccRow0(long link, long mvccCrdVer, long mvccCntr, int mvccOpCntr, CacheDataRowAdapter.RowData rowData) + throws IgniteCheckedException { int partId = PageIdUtils.partId(PageIdUtils.pageId(link)); MvccDataRow row = new MvccDataRow( @@ -384,7 +424,7 @@ private H2CacheRow createMvccRow0(long link, long mvccCrdVer, long mvccCntr, int 0, link, partId, - null, + rowData, mvccCrdVer, mvccCntr, mvccOpCntr, diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java index 339ee7009a21f..1b7ffe7046815 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java 
@@ -41,11 +41,13 @@ import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManager; import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot; import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager; import org.apache.ignite.internal.processors.cache.persistence.RootPage; import org.apache.ignite.internal.processors.cache.persistence.metastorage.pendingtask.DurableBackgroundTask; import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.failure.FailureProcessor; import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor; @@ -240,7 +242,6 @@ private H2TreeIndex( * @param wrappedCols Index columns as is. * @param inlineSize Inline size. * @param segmentsCnt Count of tree segments. - * @param qryCtxRegistry Query context registry. * @throws IgniteCheckedException If failed. 
*/ public static H2TreeIndex createIndex( @@ -254,7 +255,31 @@ public static H2TreeIndex createIndex( List wrappedCols, int inlineSize, int segmentsCnt, - QueryContextRegistry qryCtxRegistry, + IgniteLogger log + ) throws IgniteCheckedException { + return createIndex(cctx, rowCache, tbl, idxName, pk, affinityKey, unwrappedCols, wrappedCols, inlineSize, + segmentsCnt, cctx.dataRegion().pageMemory(), + cctx.offheap(), + PageIoResolver.DEFAULT_PAGE_IO_RESOLVER, + log + ); + } + + /** */ + public static H2TreeIndex createIndex( + GridCacheContext cctx, + @Nullable H2RowCache rowCache, + GridH2Table tbl, + String idxName, + boolean pk, + boolean affinityKey, + List unwrappedCols, + List wrappedCols, + int inlineSize, + int segmentsCnt, + PageMemory pageMemory, + IgniteCacheOffheapManager offheap, + PageIoResolver pageIoRslvr, IgniteLogger log ) throws IgniteCheckedException { assert segmentsCnt > 0 : segmentsCnt; @@ -274,10 +299,10 @@ public static H2TreeIndex createIndex( AtomicInteger maxCalculatedInlineSize = new AtomicInteger(); IoStatisticsHolderIndex stats = new IoStatisticsHolderIndex( - SORTED_INDEX, - cctx.name(), - idxName, - cctx.kernalContext().metric() + SORTED_INDEX, + cctx.name(), + idxName, + cctx.kernalContext().metric() ); InlineIndexColumnFactory idxHelperFactory = new InlineIndexColumnFactory(tbl.getCompareMode()); @@ -286,7 +311,7 @@ public static H2TreeIndex createIndex( db.checkpointReadLock(); try { - RootPage page = getMetaPage(cctx, treeName, i); + RootPage page = getMetaPage(offheap, cctx, treeName, i); segments[i] = h2TreeFactory.create( cctx, @@ -295,12 +320,12 @@ public static H2TreeIndex createIndex( idxName, tbl.getName(), tbl.cacheName(), - cctx.offheap().reuseListForIndex(treeName), + offheap.reuseListForIndex(treeName), cctx.groupId(), cctx.group().name(), - cctx.dataRegion().pageMemory(), + pageMemory, cctx.shared().wal(), - cctx.offheap().globalRemoveId(), + offheap.globalRemoveId(), page.pageId().pageId(), page.isAllocated(), 
unwrappedCols, @@ -314,7 +339,8 @@ public static H2TreeIndex createIndex( log, stats, idxHelperFactory, - inlineSize + inlineSize, + pageIoRslvr ); } finally { @@ -593,7 +619,7 @@ private static boolean isExpired(@NotNull H2Row row) { * @param segment Segment Id. * @return Snapshot for requested segment if there is one. */ - private H2Tree treeForRead(int segment) { + public H2Tree treeForRead(int segment) { return segments[segment]; } @@ -627,9 +653,9 @@ private BPlusTree.TreeRowClosure filter(QueryContext qctx) { * @return RootPage for meta page. * @throws IgniteCheckedException If failed. */ - private static RootPage getMetaPage(GridCacheContext cctx, String treeName, int segIdx) + private static RootPage getMetaPage(IgniteCacheOffheapManager offheap, GridCacheContext cctx, String treeName, int segIdx) throws IgniteCheckedException { - return cctx.offheap().rootPageForIndex(cctx.cacheId(), treeName, segIdx); + return offheap.rootPageForIndex(cctx.cacheId(), treeName, segIdx); } /** @@ -1018,7 +1044,8 @@ public H2Tree create( IgniteLogger log, IoStatisticsHolder stats, InlineIndexColumnFactory factory, - int configuredInlineSize + int configuredInlineSize, + PageIoResolver pageIoRslvr ) throws IgniteCheckedException; } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/inlinecolumn/AbstractInlineIndexColumn.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/inlinecolumn/AbstractInlineIndexColumn.java index 313c5421c5ce5..7380c53265804 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/inlinecolumn/AbstractInlineIndexColumn.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/inlinecolumn/AbstractInlineIndexColumn.java @@ -100,7 +100,7 @@ protected AbstractInlineIndexColumn(Column col, int type, short size) { * * @return Restored value or {@code null} if value can't be restored. 
*/ - @Nullable Value get(long pageAddr, int off, int maxSize) { + @Nullable public Value get(long pageAddr, int off, int maxSize) { if (size > 0 && size + 1 > maxSize) return null; diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/inlinecolumn/ObjectHashInlineIndexColumn.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/inlinecolumn/ObjectHashInlineIndexColumn.java index 7226a5a50eeed..b1464365fd4af 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/inlinecolumn/ObjectHashInlineIndexColumn.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/inlinecolumn/ObjectHashInlineIndexColumn.java @@ -17,8 +17,11 @@ package org.apache.ignite.internal.processors.query.h2.database.inlinecolumn; +import java.sql.PreparedStatement; +import java.sql.SQLException; import org.apache.ignite.internal.pagemem.PageUtils; import org.h2.table.Column; +import org.h2.value.CompareMode; import org.h2.value.Value; import org.h2.value.ValueInt; @@ -60,7 +63,9 @@ public ObjectHashInlineIndexColumn(Column col) { /** {@inheritDoc} */ @Override protected Value get0(long pageAddr, int off) { - return null; + int hashCode = PageUtils.getInt(pageAddr, off + 1); + + return new ValueObjectHashCode(hashCode); } /** @@ -80,4 +85,91 @@ ValueInt inlinedValue(long pageAddr, int off) { return size() + 1; } + + /** + * Value for object with hashcode. + */ + private static class ValueObjectHashCode extends Value { + /** + * The precision in digits. + */ + public static final int PRECISION = 10; + + /** + * The maximum display size of an int. + * Example: -2147483648 + */ + public static final int DISPLAY_SIZE = 11; + + /** + * Hashcode of object. 
+ */ + private final int value; + + public ValueObjectHashCode(int value) { + this.value = value; + } + + /** {@inheritDoc} */ + @Override public String getSQL() { + return getString(); + } + + /** {@inheritDoc} */ + @Override public int getType() { + return Value.JAVA_OBJECT; + } + + /** {@inheritDoc} */ + @Override public int getInt() { + return value; + } + + /** {@inheritDoc} */ + @Override public long getLong() { + return value; + } + + /** {@inheritDoc} */ + @Override protected int compareSecure(Value o, CompareMode mode) { + ValueObjectHashCode v = (ValueObjectHashCode) o; + return Integer.compare(value, v.value); + } + + /** {@inheritDoc} */ + @Override public String getString() { + return String.valueOf(value); + } + + /** {@inheritDoc} */ + @Override public long getPrecision() { + return PRECISION; + } + + /** {@inheritDoc} */ + @Override public int hashCode() { + return value; + } + + /** {@inheritDoc} */ + @Override public Object getObject() { + return value; + } + + /** {@inheritDoc} */ + @Override public void set(PreparedStatement prep, int parameterIndex) + throws SQLException { + prep.setInt(parameterIndex, value); + } + + /** {@inheritDoc} */ + @Override public int getDisplaySize() { + return DISPLAY_SIZE; + } + + /** {@inheritDoc} */ + @Override public boolean equals(Object other) { + return other instanceof ValueObjectHashCode && value == ((ValueObjectHashCode) other).value; + } + } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2ExtrasInnerIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2ExtrasInnerIO.java index f62952f489131..efb942c29312b 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2ExtrasInnerIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2ExtrasInnerIO.java @@ -161,4 +161,9 @@ private 
static IOVersions getVersions(short t @Override public final long getLink(long pageAddr, int idx) { return PageUtils.getLong(pageAddr, offset(idx) + payloadSize); } + + /** {@inheritDoc} */ + @Override public int getPayloadSize() { + return payloadSize; + } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2ExtrasLeafIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2ExtrasLeafIO.java index 4bddaeec20dbb..6e4236c23fd26 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2ExtrasLeafIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2ExtrasLeafIO.java @@ -64,7 +64,6 @@ private static void register(boolean mvcc) { * @param mvccEnabled Mvcc flag. * @return IOVersions for given payload. */ - @SuppressWarnings("unchecked") public static IOVersions> getVersions(int payload, boolean mvccEnabled) { assert payload >= 0 && payload <= PageIO.MAX_PAYLOAD_SIZE; @@ -140,7 +139,7 @@ private static IOVersions getVersions(short ty } /** {@inheritDoc} */ - @Override public final H2Row getLookupRow(BPlusTree tree, long pageAddr, int idx) + @Override public H2Row getLookupRow(BPlusTree tree, long pageAddr, int idx) throws IgniteCheckedException { long link = getLink(pageAddr, idx); @@ -159,4 +158,9 @@ private static IOVersions getVersions(short ty @Override public final long getLink(long pageAddr, int idx) { return PageUtils.getLong(pageAddr, offset(idx) + payloadSize); } + + /** {@inheritDoc} */ + @Override public int getPayloadSize() { + return payloadSize; + } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2InnerIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2InnerIO.java index a782ffb9ad83a..76cccf31f4364 100644 
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2InnerIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2InnerIO.java @@ -71,4 +71,9 @@ public abstract class AbstractH2InnerIO extends BPlusInnerIO implements H @Override public long getLink(long pageAddr, int idx) { return PageUtils.getLong(pageAddr, offset(idx)); } + + /** {@inheritDoc} */ + @Override public int getPayloadSize() { + return 0; + } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2LeafIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2LeafIO.java index ccacb4ea0dc0c..058b5bc92e1c9 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2LeafIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2LeafIO.java @@ -54,7 +54,7 @@ public abstract class AbstractH2LeafIO extends BPlusLeafIO implements H2R } /** {@inheritDoc} */ - @Override public final H2Row getLookupRow(BPlusTree tree, long pageAddr, int idx) + @Override public H2Row getLookupRow(BPlusTree tree, long pageAddr, int idx) throws IgniteCheckedException { long link = getLink(pageAddr, idx); @@ -73,4 +73,9 @@ public abstract class AbstractH2LeafIO extends BPlusLeafIO implements H2R @Override public long getLink(long pageAddr, int idx) { return PageUtils.getLong(pageAddr, offset(idx)); } + + /** {@inheritDoc} */ + @Override public int getPayloadSize() { + return 0; + } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasLeafIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasLeafIO.java index 085f98bd9817e..3c79df79fc6f9 100644 --- 
a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasLeafIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasLeafIO.java @@ -26,7 +26,7 @@ public class H2ExtrasLeafIO extends AbstractH2ExtrasLeafIO { * @param ver Page format version. * @param payloadSize Payload size. */ - H2ExtrasLeafIO(short type, int ver, int payloadSize) { + public H2ExtrasLeafIO(short type, int ver, int payloadSize) { super(type, ver, 8, payloadSize); } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2LeafIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2LeafIO.java index 8954de08e3175..466cd1c2886d9 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2LeafIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2LeafIO.java @@ -31,7 +31,7 @@ public class H2LeafIO extends AbstractH2LeafIO { /** * @param ver Page format version. */ - private H2LeafIO(int ver) { + public H2LeafIO(int ver) { super(T_H2_REF_LEAF, ver, 8); } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccExtrasLeafIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccExtrasLeafIO.java index 60a15989b9379..75854711087f6 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccExtrasLeafIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccExtrasLeafIO.java @@ -22,13 +22,13 @@ /** * Leaf page for H2 row references. */ -class H2MvccExtrasLeafIO extends AbstractH2ExtrasLeafIO { +public class H2MvccExtrasLeafIO extends AbstractH2ExtrasLeafIO { /** * @param type Page type. * @param ver Page format version. 
* @param payloadSize Payload size. */ - H2MvccExtrasLeafIO(short type, int ver, int payloadSize) { + protected H2MvccExtrasLeafIO(short type, int ver, int payloadSize) { super(type, ver, 28, payloadSize); } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccLeafIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccLeafIO.java index c7cd99823852c..5575806d22971 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccLeafIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccLeafIO.java @@ -32,7 +32,7 @@ public class H2MvccLeafIO extends AbstractH2LeafIO { /** * @param ver Page format version. */ - private H2MvccLeafIO(int ver) { + protected H2MvccLeafIO(int ver) { super(T_H2_MVCC_REF_LEAF, ver, 28); } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2RowLinkIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2RowLinkIO.java index 1942069ab2408..55a36d6172dd3 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2RowLinkIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2RowLinkIO.java @@ -61,4 +61,9 @@ public default int getMvccOperationCounter(long pageAddr, int idx) { public default boolean storeMvccInfo() { return false; } + + /** + * @return Size of reserved data array for data inlining. 
+ */ + public int getPayloadSize(); } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/H2CacheRow.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/H2CacheRow.java index 527a33ec81409..86df146cdc29c 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/H2CacheRow.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/H2CacheRow.java @@ -182,6 +182,13 @@ private Value wrap(Object val, int type) { } } + /** + * @return Cache data row. + */ + public CacheDataRow getRow() { + return row; + } + /** * @return {@code True} if this is removed row (doesn't have value). */ @@ -345,4 +352,11 @@ private boolean removedRow() { return sb.toString(); } + + /** + * @return H2 row descriptor. + */ + public GridH2RowDescriptor getDesc() { + return desc; + } } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/LongDestroyDurableBackgroundTaskTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/LongDestroyDurableBackgroundTaskTest.java index ca96684056527..1f03ef4d4cc4e 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/LongDestroyDurableBackgroundTaskTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/LongDestroyDurableBackgroundTaskTest.java @@ -60,6 +60,7 @@ import org.apache.ignite.internal.processors.cache.persistence.metastorage.ReadOnlyMetastorage; import org.apache.ignite.internal.processors.cache.persistence.metastorage.ReadWriteMetastorage; import org.apache.ignite.internal.processors.cache.persistence.metastorage.pendingtask.DurableBackgroundTask; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver; import 
org.apache.ignite.internal.processors.cache.persistence.tree.reuse.LongListReuseBag; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.failure.FailureProcessor; @@ -758,7 +759,8 @@ public H2TreeTest( IgniteLogger log, IoStatisticsHolder stats, InlineIndexColumnFactory factory, - int configuredInlineSize + int configuredInlineSize, + PageIoResolver pageIoRslvr ) throws IgniteCheckedException { super( cctx, @@ -786,7 +788,8 @@ public H2TreeTest( log, stats, factory, - configuredInlineSize + configuredInlineSize, + pageIoRslvr ); } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/MultipleParallelCacheDeleteDeadlockTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/MultipleParallelCacheDeleteDeadlockTest.java index 9e0f93a1a6995..ed90b66507c41 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/MultipleParallelCacheDeleteDeadlockTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/MultipleParallelCacheDeleteDeadlockTest.java @@ -36,6 +36,7 @@ import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.LongListReuseBag; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.failure.FailureProcessor; @@ -266,7 +267,8 @@ public H2TreeTest( IgniteLogger log, IoStatisticsHolder stats, InlineIndexColumnFactory factory, - int configuredInlineSize + int configuredInlineSize, + PageIoResolver pageIoRslvr ) throws 
IgniteCheckedException { super( cctx, @@ -294,7 +296,8 @@ public H2TreeTest( log, stats, factory, - configuredInlineSize + configuredInlineSize, + pageIoRslvr ); } From 2d582586e1402fb9174f2fdad8ed4d677b3856d2 Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Thu, 12 Nov 2020 20:37:23 +0300 Subject: [PATCH 042/110] IGNITE-13637: Implemented column-wise binding for result rows This closes #8403 --- .../platforms/cpp/odbc-test/CMakeLists.txt | 1 + .../cpp/odbc-test/include/odbc_test_suite.h | 179 ++++++++++- .../odbc-test/project/vs/odbc-test.vcxproj | 3 +- .../project/vs/odbc-test.vcxproj.filters | 5 +- .../cpp/odbc-test/src/api_robustness_test.cpp | 2 +- .../cpp/odbc-test/src/cursor_binding_test.cpp | 288 ++++++++++++++++++ .../cpp/odbc-test/src/odbc_test_suite.cpp | 214 +++++++++++-- .../cpp/odbc-test/src/queries_test.cpp | 8 +- .../cpp/odbc-test/src/streaming_test.cpp | 4 +- .../cpp/odbc/include/ignite/odbc/statement.h | 25 +- .../odbc/src/app/application_data_buffer.cpp | 22 +- modules/platforms/cpp/odbc/src/statement.cpp | 111 +++++-- 12 files changed, 781 insertions(+), 81 deletions(-) create mode 100644 modules/platforms/cpp/odbc-test/src/cursor_binding_test.cpp diff --git a/modules/platforms/cpp/odbc-test/CMakeLists.txt b/modules/platforms/cpp/odbc-test/CMakeLists.txt index d9cefb68efe9d..3a08f42035dc4 100644 --- a/modules/platforms/cpp/odbc-test/CMakeLists.txt +++ b/modules/platforms/cpp/odbc-test/CMakeLists.txt @@ -65,6 +65,7 @@ set(SOURCES src/teamcity/teamcity_boost.cpp src/authentication_test.cpp src/sql_parsing_test.cpp src/streaming_test.cpp + src/cursor_binding_test.cpp src/test_server.cpp ../odbc/src/log.cpp ../odbc/src/cursor.cpp diff --git a/modules/platforms/cpp/odbc-test/include/odbc_test_suite.h b/modules/platforms/cpp/odbc-test/include/odbc_test_suite.h index 89133a98f1b83..1bdd3b077436d 100644 --- a/modules/platforms/cpp/odbc-test/include/odbc_test_suite.h +++ b/modules/platforms/cpp/odbc-test/include/odbc_test_suite.h @@ -25,6 +25,16 @@ 
#include #include +#include + +#ifndef BOOST_TEST_CONTEXT +# define BOOST_TEST_CONTEXT(...) +#endif + +#ifndef BOOST_TEST_INFO +# define BOOST_TEST_INFO(...) +#endif + #include #include "ignite/ignite.h" @@ -97,7 +107,7 @@ namespace ignite /** * Insert requested number of TestType values with all defaults except - * for the strFields, which are generated using getTestString(). + * for the strFields, which are generated using GetTestString(). * * @param recordsNum Number of records to insert. * @param merge Set to true to use merge instead. @@ -130,13 +140,176 @@ namespace ignite */ void InsertNonFullBatchSelect(int recordsNum, int splitAt); + /** + * Get test i8Field. + * + * @param idx Index. + * @return Corresponding i8Field value. + */ + static int8_t GetTestI8Field(int64_t idx); + + /** + * Check i8Field test value. + * @param idx Index. + * @param value Value to test. + */ + static void CheckTestI8Value(int idx, int8_t value); + + /** + * Get test i16Field. + * + * @param idx Index. + * @return Corresponding i16Field value. + */ + static int16_t GetTestI16Field(int64_t idx); + + /** + * Check i16Field test value. + * @param idx Index. + * @param value Value to test. + */ + static void CheckTestI16Value(int idx, int16_t value); + + /** + * Get test i32Field. + * + * @param idx Index. + * @return Corresponding i32Field value. + */ + static int32_t GetTestI32Field(int64_t idx); + + /** + * Check i32Field test value. + * @param idx Index. + * @param value Value to test. + */ + static void CheckTestI32Value(int idx, int32_t value); + /** * Get test string. * - * @param ind Index. + * @param idx Index. * @return Corresponding test string. */ - static std::string getTestString(int64_t ind); + static std::string GetTestString(int64_t idx); + + /** + * Check strField test value. + * @param idx Index. + * @param value Value to test. + */ + static void CheckTestStringValue(int idx, const std::string& value); + + /** + * Get test floatField. + * + * @param idx Index. 
+ * @return Corresponding floatField value. + */ + static float GetTestFloatField(int64_t idx); + + /** + * Check floatField test value. + * @param idx Index. + * @param value Value to test. + */ + static void CheckTestFloatValue(int idx, float value); + + /** + * Get test doubleField. + * + * @param idx Index. + * @return Corresponding doubleField value. + */ + static double GetTestDoubleField(int64_t idx); + + /** + * Check doubleField test value. + * @param idx Index. + * @param value Value to test. + */ + static void CheckTestDoubleValue(int idx, double value); + + /** + * Get test boolField. + * + * @param idx Index. + * @return Corresponding boolField value. + */ + static bool GetTestBoolField(int64_t idx); + + /** + * Check boolField test value. + * @param idx Index. + * @param value Value to test. + */ + static void CheckTestBoolValue(int idx, bool value); + + /** + * Get test dateField. + * + * @param idx Index. + * @param val Output value. + */ + static void GetTestDateField(int64_t idx, SQL_DATE_STRUCT& val); + + /** + * Check dateField test value. + * + * @param idx Index. + * @param val Value to test. + */ + static void CheckTestDateValue(int idx, const SQL_DATE_STRUCT& val); + + /** + * Get test timeField. + * + * @param idx Index. + * @param val Output value. + */ + static void GetTestTimeField(int64_t idx, SQL_TIME_STRUCT& val); + + /** + * Check timeField test value. + * + * @param idx Index. + * @param val Value to test. + */ + static void CheckTestTimeValue(int idx, const SQL_TIME_STRUCT& val); + + /** + * Get test timestampField. + * + * @param idx Index. + * @param val Output value. + */ + static void GetTestTimestampField(int64_t idx, SQL_TIMESTAMP_STRUCT& val); + + /** + * Check timestampField test value. + * + * @param idx Index. + * @param val Value to test. + */ + static void CheckTestTimestampValue(int idx, const SQL_TIMESTAMP_STRUCT& val); + + /** + * Get test i8ArrayField. + * + * @param idx Index. + * @param val Output value. 
+ * @param valLen Value length. + */ + static void GetTestI8ArrayField(int64_t idx, int8_t* val, size_t valLen); + + /** + * Check i8ArrayField test value. + * + * @param idx Index. + * @param val Value to test. + * @param valLen Value length. + */ + static void CheckTestI8ArrayValue(int idx, const int8_t* val, size_t valLen); /** * Check that SQL error has expected SQL state. diff --git a/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj b/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj index 90abdc82a7404..cfc5ac1687d13 100644 --- a/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj +++ b/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj @@ -185,6 +185,7 @@ + @@ -252,4 +253,4 @@ - \ No newline at end of file + diff --git a/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj.filters b/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj.filters index 68c78cdcfc92f..5d6e787836902 100644 --- a/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj.filters +++ b/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj.filters @@ -37,6 +37,9 @@ Code + + Code + Code @@ -246,4 +249,4 @@ Configs - \ No newline at end of file + diff --git a/modules/platforms/cpp/odbc-test/src/api_robustness_test.cpp b/modules/platforms/cpp/odbc-test/src/api_robustness_test.cpp index 9148ba94fde14..8fcb9659abd6c 100644 --- a/modules/platforms/cpp/odbc-test/src/api_robustness_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/api_robustness_test.cpp @@ -113,7 +113,7 @@ struct ApiRobustnessTestSuiteFixture : public odbc::OdbcTestSuite // Operation is not supported. However, there should be no crash. 
BOOST_CHECK(ret == SQL_ERROR); - CheckSQLStatementDiagnosticError("HY106"); + CheckSQLStatementDiagnosticError("HYC00"); } /** diff --git a/modules/platforms/cpp/odbc-test/src/cursor_binding_test.cpp b/modules/platforms/cpp/odbc-test/src/cursor_binding_test.cpp new file mode 100644 index 0000000000000..5d095014a9572 --- /dev/null +++ b/modules/platforms/cpp/odbc-test/src/cursor_binding_test.cpp @@ -0,0 +1,288 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifdef _WIN32 +# include +#endif + +#include +#include + +#include + +#include +#include + +#include + +#include "ignite/ignite.h" +#include "ignite/ignition.h" +#include "ignite/impl/binary/binary_utils.h" + +#include "test_type.h" +#include "test_utils.h" +#include "odbc_test_suite.h" + +using namespace ignite; +using namespace ignite::cache; +using namespace ignite::cache::query; +using namespace ignite::common; +using namespace ignite_test; + +using namespace boost::unit_test; + +using ignite::impl::binary::BinaryUtils; + +/** + * Test setup fixture. 
+ */ +struct CursorBindingTestSuiteFixture : public odbc::OdbcTestSuite +{ + static Ignite StartAdditionalNode(const char* name) + { + return StartPlatformNode("queries-test.xml", name); + } + + /** + * Constructor. + */ + CursorBindingTestSuiteFixture() : + testCache(0) + { + grid = StartAdditionalNode("NodeMain"); + + testCache = grid.GetCache("cache"); + } + + /** + * Destructor. + */ + virtual ~CursorBindingTestSuiteFixture() + { + // No-op. + } + + /** Node started during the test. */ + Ignite grid; + + /** Test cache instance. */ + Cache testCache; +}; + +BOOST_FIXTURE_TEST_SUITE(CursorBindingTestSuite, CursorBindingTestSuiteFixture) + + +#define CHECK_TEST_VALUES(idx, testIdx) \ + do { \ + BOOST_TEST_CONTEXT("Test idx: " << testIdx) \ + { \ + BOOST_CHECK(RowStatus[idx] == SQL_ROW_SUCCESS || RowStatus[idx] == SQL_ROW_SUCCESS_WITH_INFO); \ + \ + BOOST_CHECK(i8FieldsInd[idx] != SQL_NULL_DATA); \ + BOOST_CHECK(i16FieldsInd[idx] != SQL_NULL_DATA); \ + BOOST_CHECK(i32FieldsInd[idx] != SQL_NULL_DATA); \ + BOOST_CHECK(strFieldsLen[idx] != SQL_NULL_DATA); \ + BOOST_CHECK(floatFields[idx] != SQL_NULL_DATA); \ + BOOST_CHECK(doubleFieldsInd[idx] != SQL_NULL_DATA); \ + BOOST_CHECK(boolFieldsInd[idx] != SQL_NULL_DATA); \ + BOOST_CHECK(dateFieldsInd[idx] != SQL_NULL_DATA); \ + BOOST_CHECK(timeFieldsInd[idx] != SQL_NULL_DATA); \ + BOOST_CHECK(timestampFieldsInd[idx] != SQL_NULL_DATA); \ + BOOST_CHECK(i8ArrayFieldsLen[idx] != SQL_NULL_DATA); \ + \ + int8_t i8Field = static_cast(i8Fields[idx]); \ + int16_t i16Field = static_cast(i16Fields[idx]); \ + int32_t i32Field = static_cast(i32Fields[idx]); \ + std::string strField(reinterpret_cast(&strFields[idx][0]), \ + static_cast(strFieldsLen[idx])); \ + float floatField = static_cast(floatFields[idx]); \ + double doubleField = static_cast(doubleFields[idx]); \ + bool boolField = boolFields[idx] != 0; \ + \ + CheckTestI8Value(testIdx, i8Field); \ + CheckTestI16Value(testIdx, i16Field); \ + CheckTestI32Value(testIdx, i32Field); \ + 
CheckTestStringValue(testIdx, strField); \ + CheckTestFloatValue(testIdx, floatField); \ + CheckTestDoubleValue(testIdx, doubleField); \ + CheckTestBoolValue(testIdx, boolField); \ + CheckTestDateValue(testIdx, dateFields[idx]); \ + CheckTestTimeValue(testIdx, timeFields[idx]); \ + CheckTestTimestampValue(testIdx, timestampFields[idx]); \ + CheckTestI8ArrayValue(testIdx, reinterpret_cast(i8ArrayFields[idx]), \ + static_cast(i8ArrayFieldsLen[idx])); \ + } \ + } while (false) + +BOOST_AUTO_TEST_CASE(TestCursorBindingColumnWise) +{ + enum { ROWS_COUNT = 15 }; + enum { ROW_ARRAY_SIZE = 10 }; + enum { BUFFER_SIZE = 1024 }; + + StartAdditionalNode("Node2"); + + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache;PAGE_SIZE=8"); + + // Preloading data. + + InsertTestBatch(0, ROWS_COUNT, ROWS_COUNT); + + // Setting attributes. + + SQLUSMALLINT RowStatus[ROW_ARRAY_SIZE]; + SQLUINTEGER NumRowsFetched; + + SQLRETURN ret; + + ret = SQLSetStmtAttr(stmt, SQL_ATTR_ROW_BIND_TYPE, SQL_BIND_BY_COLUMN, 0); + ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLSetStmtAttr(stmt, SQL_ATTR_ROW_ARRAY_SIZE, reinterpret_cast(ROW_ARRAY_SIZE), 0); + ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLSetStmtAttr(stmt, SQL_ATTR_ROW_STATUS_PTR, RowStatus, 0); + ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLSetStmtAttr(stmt, SQL_ATTR_ROWS_FETCHED_PTR, &NumRowsFetched, 0); + ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + // Binding collumns. 
+ + SQLSCHAR i8Fields[ROW_ARRAY_SIZE] = {0}; + SQLLEN i8FieldsInd[ROW_ARRAY_SIZE]; + + SQLSMALLINT i16Fields[ROW_ARRAY_SIZE] = {0}; + SQLLEN i16FieldsInd[ROW_ARRAY_SIZE]; + + SQLINTEGER i32Fields[ROW_ARRAY_SIZE] = {0}; + SQLLEN i32FieldsInd[ROW_ARRAY_SIZE]; + + SQLCHAR strFields[ROW_ARRAY_SIZE][BUFFER_SIZE]; + SQLLEN strFieldsLen[ROW_ARRAY_SIZE]; + + SQLREAL floatFields[ROW_ARRAY_SIZE]; + SQLLEN floatFieldsInd[ROW_ARRAY_SIZE]; + + SQLDOUBLE doubleFields[ROW_ARRAY_SIZE]; + SQLLEN doubleFieldsInd[ROW_ARRAY_SIZE]; + + SQLCHAR boolFields[ROW_ARRAY_SIZE]; + SQLLEN boolFieldsInd[ROW_ARRAY_SIZE]; + + SQL_DATE_STRUCT dateFields[ROW_ARRAY_SIZE]; + SQLLEN dateFieldsInd[ROW_ARRAY_SIZE]; + + SQL_TIME_STRUCT timeFields[ROW_ARRAY_SIZE]; + SQLLEN timeFieldsInd[ROW_ARRAY_SIZE]; + + SQL_TIMESTAMP_STRUCT timestampFields[ROW_ARRAY_SIZE]; + SQLLEN timestampFieldsInd[ROW_ARRAY_SIZE]; + + SQLCHAR i8ArrayFields[ROW_ARRAY_SIZE][BUFFER_SIZE]; + SQLLEN i8ArrayFieldsLen[ROW_ARRAY_SIZE]; + + ret = SQLBindCol(stmt, 1, SQL_C_STINYINT, i8Fields, 0, i8FieldsInd); + ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLBindCol(stmt, 2, SQL_C_SSHORT, i16Fields, 0, i16FieldsInd); + ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLBindCol(stmt, 3, SQL_C_LONG, i32Fields, 0, i32FieldsInd); + ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLBindCol(stmt, 4, SQL_C_CHAR, strFields, BUFFER_SIZE, strFieldsLen); + ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLBindCol(stmt, 5, SQL_C_FLOAT, floatFields, 0, floatFieldsInd); + ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLBindCol(stmt, 6, SQL_C_DOUBLE, doubleFields, 0, doubleFieldsInd); + ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLBindCol(stmt, 7, SQL_C_BIT, boolFields, 0, boolFieldsInd); + ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLBindCol(stmt, 8, SQL_C_DATE, dateFields, 0, dateFieldsInd); + ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = 
SQLBindCol(stmt, 9, SQL_C_TIME, timeFields, 0, timeFieldsInd); + ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLBindCol(stmt, 10, SQL_C_TIMESTAMP, timestampFields, 0, timestampFieldsInd); + ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLBindCol(stmt, 11, SQL_C_BINARY, i8ArrayFields, BUFFER_SIZE, i8ArrayFieldsLen); + ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + SQLCHAR sql[] = "SELECT " + "i8Field, i16Field, i32Field, strField, floatField, doubleField, " + "boolField, dateField, timeField, timestampField, i8ArrayField " + "FROM TestType " + "ORDER BY _key"; + + // Execute a statement to retrieve rows from the Orders table. + ret = SQLExecDirect(stmt, sql, SQL_NTS); + ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLFetchScroll(stmt, SQL_FETCH_NEXT, 0); + ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + BOOST_CHECK_EQUAL(NumRowsFetched, (SQLUINTEGER)ROW_ARRAY_SIZE); + + for (int64_t i = 0; i < NumRowsFetched; i++) + { + CHECK_TEST_VALUES(i, static_cast(i)); + } + + ret = SQLFetch(stmt); + ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + BOOST_CHECK_EQUAL(NumRowsFetched, ROWS_COUNT - ROW_ARRAY_SIZE); + + for (int64_t i = 0; i < NumRowsFetched; i++) + { + int64_t testIdx = i + ROW_ARRAY_SIZE; + CHECK_TEST_VALUES(i, static_cast(testIdx)); + } + + for (int64_t i = NumRowsFetched; i < ROW_ARRAY_SIZE; i++) + { + BOOST_TEST_INFO("Checking row status for row: " << i); + BOOST_CHECK(RowStatus[i] == SQL_ROW_NOROW); + } + + ret = SQLFetchScroll(stmt, SQL_FETCH_NEXT, 0); + BOOST_CHECK_EQUAL(ret, SQL_NO_DATA); + + // Close the cursor. 
+ ret = SQLCloseCursor(stmt); + ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); +} + +BOOST_AUTO_TEST_CASE(TestCursorBindingRowWise) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache;PAGE_SIZE=8"); + + SQLRETURN ret = SQLSetStmtAttr(stmt, SQL_ATTR_ROW_BIND_TYPE, reinterpret_cast(42), 0); + + BOOST_CHECK_EQUAL(ret, SQL_ERROR); + + CheckSQLStatementDiagnosticError("HYC00"); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/modules/platforms/cpp/odbc-test/src/odbc_test_suite.cpp b/modules/platforms/cpp/odbc-test/src/odbc_test_suite.cpp index 4ebec9f55598f..28affb7b97d2b 100644 --- a/modules/platforms/cpp/odbc-test/src/odbc_test_suite.cpp +++ b/modules/platforms/cpp/odbc-test/src/odbc_test_suite.cpp @@ -95,9 +95,7 @@ namespace ignite outstr, sizeof(outstr), &outstrlen, SQL_DRIVER_COMPLETE); if (!SQL_SUCCEEDED(ret)) - { BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_DBC, dbc)); - } // Allocate a statement handle SQLAllocHandle(SQL_HANDLE_STMT, dbc, &stmt); @@ -184,15 +182,182 @@ namespace ignite Ignition::StopAll(true); } - std::string OdbcTestSuite::getTestString(int64_t ind) + int8_t OdbcTestSuite::GetTestI8Field(int64_t idx) + { + return static_cast(idx * 8); + } + + void OdbcTestSuite::CheckTestI8Value(int idx, int8_t value) + { + BOOST_TEST_INFO("Test index: " << idx); + BOOST_CHECK_EQUAL(value, GetTestI8Field(idx)); + } + + int16_t OdbcTestSuite::GetTestI16Field(int64_t idx) + { + return static_cast(idx * 16); + } + + void OdbcTestSuite::CheckTestI16Value(int idx, int16_t value) + { + BOOST_TEST_INFO("Test index: " << idx); + BOOST_CHECK_EQUAL(value, GetTestI16Field(idx)); + } + + int32_t OdbcTestSuite::GetTestI32Field(int64_t idx) + { + return static_cast(idx * 32); + } + + void OdbcTestSuite::CheckTestI32Value(int idx, int32_t value) + { + BOOST_TEST_INFO("Test index: " << idx); + BOOST_CHECK_EQUAL(value, GetTestI32Field(idx)); + } + + std::string OdbcTestSuite::GetTestString(int64_t idx) { std::stringstream builder; - builder << "String#" << 
ind; + builder << "String#" << idx; return builder.str(); } + void OdbcTestSuite::CheckTestStringValue(int idx, const std::string &value) + { + BOOST_TEST_INFO("Test index: " << idx); + BOOST_CHECK_EQUAL(value, GetTestString(idx)); + } + + float OdbcTestSuite::GetTestFloatField(int64_t idx) + { + return static_cast(idx * 0.5f); + } + + void OdbcTestSuite::CheckTestFloatValue(int idx, float value) + { + BOOST_TEST_INFO("Test index: " << idx); + BOOST_CHECK_EQUAL(value, GetTestFloatField(idx)); + } + + double OdbcTestSuite::GetTestDoubleField(int64_t idx) + { + return static_cast(idx * 0.25f); + } + + void OdbcTestSuite::CheckTestDoubleValue(int idx, double value) + { + BOOST_TEST_INFO("Test index: " << idx); + BOOST_CHECK_EQUAL(value, GetTestDoubleField(idx)); + } + + bool OdbcTestSuite::GetTestBoolField(int64_t idx) + { + return static_cast(idx % 2 == 0); + } + + void OdbcTestSuite::CheckTestBoolValue(int idx, bool value) + { + BOOST_TEST_INFO("Test index: " << idx); + BOOST_CHECK_EQUAL(value, GetTestBoolField(idx)); + } + + void OdbcTestSuite::GetTestDateField(int64_t idx, SQL_DATE_STRUCT& val) + { + val.year = static_cast(2017 + idx / 365); + val.month = static_cast(((idx / 28) % 12) + 1); + val.day = static_cast((idx % 28) + 1); + } + + void OdbcTestSuite::CheckTestDateValue(int idx, const SQL_DATE_STRUCT& val) + { + BOOST_TEST_CONTEXT("Test index: " << idx) + { + SQL_DATE_STRUCT expected; + GetTestDateField(idx, expected); + + BOOST_CHECK_EQUAL(val.year, expected.year); + BOOST_CHECK_EQUAL(val.month, expected.month); + BOOST_CHECK_EQUAL(val.day, expected.day); + } + } + + void OdbcTestSuite::GetTestTimeField(int64_t idx, SQL_TIME_STRUCT& val) + { + val.hour = (idx / 3600) % 24; + val.minute = (idx / 60) % 60; + val.second = idx % 60; + } + + void OdbcTestSuite::CheckTestTimeValue(int idx, const SQL_TIME_STRUCT& val) + { + BOOST_TEST_CONTEXT("Test index: " << idx) + { + SQL_TIME_STRUCT expected; + GetTestTimeField(idx, expected); + + BOOST_CHECK_EQUAL(val.hour, 
expected.hour); + BOOST_CHECK_EQUAL(val.minute, expected.minute); + BOOST_CHECK_EQUAL(val.second, expected.second); + } + } + + void OdbcTestSuite::GetTestTimestampField(int64_t idx, SQL_TIMESTAMP_STRUCT& val) + { + SQL_DATE_STRUCT date; + GetTestDateField(idx, date); + + SQL_TIME_STRUCT time; + GetTestTimeField(idx, time); + + val.year = date.year; + val.month = date.month; + val.day = date.day; + val.hour = time.hour; + val.minute = time.minute; + val.second = time.second; + val.fraction = static_cast(std::abs(idx * 914873)) % 1000000000; + } + + void OdbcTestSuite::CheckTestTimestampValue(int idx, const SQL_TIMESTAMP_STRUCT& val) + { + BOOST_TEST_CONTEXT("Test index: " << idx) + { + SQL_TIMESTAMP_STRUCT expected; + GetTestTimestampField(idx, expected); + + BOOST_CHECK_EQUAL(val.year, expected.year); + BOOST_CHECK_EQUAL(val.month, expected.month); + BOOST_CHECK_EQUAL(val.day, expected.day); + BOOST_CHECK_EQUAL(val.hour, expected.hour); + BOOST_CHECK_EQUAL(val.minute, expected.minute); + BOOST_CHECK_EQUAL(val.second, expected.second); + BOOST_CHECK_EQUAL(val.fraction, expected.fraction); + } + } + + void OdbcTestSuite::GetTestI8ArrayField(int64_t idx, int8_t* val, size_t valLen) + { + for (size_t j = 0; j < valLen; ++j) + val[j] = static_cast(idx * valLen + j); + } + + void OdbcTestSuite::CheckTestI8ArrayValue(int idx, const int8_t* val, size_t valLen) + { + BOOST_TEST_CONTEXT("Test index: " << idx) + { + common::FixedSizeArray expected(static_cast(valLen)); + GetTestI8ArrayField(idx, expected.GetData(), expected.GetSize()); + + for (size_t j = 0; j < valLen; ++j) + { + BOOST_TEST_INFO("Byte index: " << j); + BOOST_CHECK_EQUAL(val[j], expected[(int32_t)j]); + } + } + } + void OdbcTestSuite::CheckSQLDiagnosticError(int16_t handleType, SQLHANDLE handle, const std::string& expectSqlState) { SQLCHAR state[ODBC_BUFFER_SIZE]; @@ -267,7 +432,7 @@ namespace ignite for (SQLSMALLINT i = 0; i < recordsNum; ++i) { key = i + 1; - std::string val = getTestString(i); + 
std::string val = GetTestString(i); strncpy(strField, val.c_str(), sizeof(strField)); strFieldLen = SQL_NTS; @@ -342,36 +507,23 @@ namespace ignite int seed = from + i; keys[i] = seed; - i8Fields[i] = seed * 8; - i16Fields[i] = seed * 16; - i32Fields[i] = seed * 32; + i8Fields[i] = GetTestI8Field(seed); + i16Fields[i] = GetTestI16Field(seed); + i32Fields[i] = GetTestI32Field(seed); - std::string val = getTestString(seed); + std::string val = GetTestString(seed); strncpy(strFields.GetData() + 1024 * i, val.c_str(), 1023); strFieldsLen[i] = val.size(); - floatFields[i] = seed * 0.5f; - doubleFields[i] = seed * 0.25f; - boolFields[i] = seed % 2 == 0; - - dateFields[i].year = 2017 + seed / 365; - dateFields[i].month = ((seed / 28) % 12) + 1; - dateFields[i].day = (seed % 28) + 1; - - timeFields[i].hour = (seed / 3600) % 24; - timeFields[i].minute = (seed / 60) % 60; - timeFields[i].second = seed % 60; + floatFields[i] = GetTestFloatField(seed); + doubleFields[i] = GetTestDoubleField(seed); + boolFields[i] = GetTestBoolField(seed); - timestampFields[i].year = dateFields[i].year; - timestampFields[i].month = dateFields[i].month; - timestampFields[i].day = dateFields[i].day; - timestampFields[i].hour = timeFields[i].hour; - timestampFields[i].minute = timeFields[i].minute; - timestampFields[i].second = timeFields[i].second; - timestampFields[i].fraction = static_cast(std::abs(seed * 914873)) % 1000000000; + GetTestDateField(seed, dateFields[i]); + GetTestTimeField(seed, timeFields[i]); + GetTestTimestampField(seed, timestampFields[i]); - for (int j = 0; j < 42; ++j) - i8ArrayFields[i * 42 + j] = seed * 42 + j; + GetTestI8ArrayField(seed, &i8ArrayFields[i*42], 42); i8ArrayFieldsLen[i] = 42; } @@ -553,7 +705,7 @@ namespace ignite if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); - std::string expectedStr = getTestString(selectedRecordsNum); + std::string expectedStr = GetTestString(selectedRecordsNum); int64_t expectedKey = selectedRecordsNum; 
BOOST_CHECK_EQUAL(key, expectedKey); @@ -634,7 +786,7 @@ namespace ignite if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); - std::string expectedStr = getTestString(selectedRecordsNum); + std::string expectedStr = GetTestString(selectedRecordsNum); int64_t expectedKey = selectedRecordsNum; BOOST_CHECK_EQUAL(key, expectedKey); diff --git a/modules/platforms/cpp/odbc-test/src/queries_test.cpp b/modules/platforms/cpp/odbc-test/src/queries_test.cpp index 46fed8a75ae2e..a22c33647db49 100644 --- a/modules/platforms/cpp/odbc-test/src/queries_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/queries_test.cpp @@ -922,7 +922,7 @@ BOOST_AUTO_TEST_CASE(TestInsertSelect) if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); - std::string expectedStr = getTestString(selectedRecordsNum); + std::string expectedStr = GetTestString(selectedRecordsNum); int64_t expectedKey = selectedRecordsNum + 1; BOOST_CHECK_EQUAL(key, expectedKey); @@ -999,7 +999,7 @@ BOOST_AUTO_TEST_CASE(TestInsertUpdateSelect) if (expectedKey == 42) expectedStr = "Updated value"; else - expectedStr = getTestString(selectedRecordsNum); + expectedStr = GetTestString(selectedRecordsNum); BOOST_CHECK_EQUAL(std::string(strField, strFieldLen), expectedStr); @@ -1066,7 +1066,7 @@ BOOST_AUTO_TEST_CASE(TestInsertDeleteSelect) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); int64_t expectedKey = (selectedRecordsNum + 1) * 2; - std::string expectedStr = getTestString(expectedKey - 1); + std::string expectedStr = GetTestString(expectedKey - 1); BOOST_CHECK_EQUAL(key, expectedKey); BOOST_CHECK_EQUAL(std::string(strField, strFieldLen), expectedStr); @@ -1127,7 +1127,7 @@ BOOST_AUTO_TEST_CASE(TestInsertMergeSelect) if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); - std::string expectedStr = getTestString(selectedRecordsNum); + std::string expectedStr = GetTestString(selectedRecordsNum); int64_t expectedKey = selectedRecordsNum + 
1; BOOST_CHECK_EQUAL(key, expectedKey); diff --git a/modules/platforms/cpp/odbc-test/src/streaming_test.cpp b/modules/platforms/cpp/odbc-test/src/streaming_test.cpp index 1d74338a8ef94..d863e3763d864 100644 --- a/modules/platforms/cpp/odbc-test/src/streaming_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/streaming_test.cpp @@ -115,7 +115,7 @@ struct StreamingTestSuiteFixture : odbc::OdbcTestSuite for (int32_t i = begin; i < end; ++i) { key = i; - std::string val = getTestString(i); + std::string val = GetTestString(i); strncpy(strField, val.c_str(), sizeof(strField)); strFieldLen = SQL_NTS; @@ -240,7 +240,7 @@ struct StreamingTestSuiteFixture : odbc::OdbcTestSuite BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); BOOST_CHECK_EQUAL(i, keyVal); - BOOST_CHECK_EQUAL(getTestString(i), std::string(strField, static_cast(strFieldLen))); + BOOST_CHECK_EQUAL(GetTestString(i), std::string(strField, static_cast(strFieldLen))); } // Resetting parameters. diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/statement.h b/modules/platforms/cpp/odbc/include/ignite/odbc/statement.h index 19d29a93c8b3a..37c91b470e4cf 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/statement.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/statement.h @@ -296,28 +296,28 @@ namespace ignite * * @param ptr Rows fetched buffer pointer. */ - void SetRowsFetchedPtr(size_t* ptr); + void SetRowsFetchedPtr(SQLINTEGER* ptr); /** * Get rows fetched buffer pointer. * * @return Rows fetched buffer pointer. */ - size_t* GetRowsFetchedPtr(); + SQLINTEGER* GetRowsFetchedPtr(); /** * Set row statuses array pointer. * * @param ptr Row statuses array pointer. */ - void SetRowStatusesPtr(uint16_t* ptr); + void SetRowStatusesPtr(SQLUSMALLINT* ptr); /** * Get row statuses array pointer. * * @return Row statuses array pointer. */ - uint16_t* GetRowStatusesPtr(); + SQLUSMALLINT* GetRowStatusesPtr(); /** * Select next parameter data for which is required. 
@@ -670,6 +670,13 @@ namespace ignite */ SqlResult::Type UpdateParamsMeta(); + /** + * Convert SQLRESULT to SQL_ROW_RESULT. + * + * @return Operation result. + */ + uint16_t SqlResultToRowResult(SqlResult::Type value); + /** * Constructor. * Called by friend classes. @@ -687,18 +694,18 @@ namespace ignite /** Underlying query. */ std::auto_ptr currentQuery; - /** Row bind type. */ - SqlUlen rowBindType; - /** Buffer to store number of rows fetched by the last fetch. */ - size_t* rowsFetched; + SQLINTEGER* rowsFetched; /** Array to store statuses of rows fetched by the last fetch. */ - uint16_t* rowStatuses; + SQLUSMALLINT* rowStatuses; /** Offset added to pointers to change binding of column data. */ int* columnBindOffset; + /** Row array size. */ + SqlUlen rowArraySize; + /** Parameters. */ app::ParameterSet parameters; diff --git a/modules/platforms/cpp/odbc/src/app/application_data_buffer.cpp b/modules/platforms/cpp/odbc/src/app/application_data_buffer.cpp index 2f1fbcb217fe6..35af8cfe7d052 100644 --- a/modules/platforms/cpp/odbc/src/app/application_data_buffer.cpp +++ b/modules/platforms/cpp/odbc/src/app/application_data_buffer.cpp @@ -1707,28 +1707,36 @@ namespace ignite return buflen; case OdbcNativeType::AI_SIGNED_SHORT: + return static_cast(sizeof(SQLSMALLINT)); + case OdbcNativeType::AI_UNSIGNED_SHORT: - return static_cast(sizeof(short)); + return static_cast(sizeof(SQLUSMALLINT)); case OdbcNativeType::AI_SIGNED_LONG: + return static_cast(sizeof(SQLUINTEGER)); + case OdbcNativeType::AI_UNSIGNED_LONG: - return static_cast(sizeof(long)); + return static_cast(sizeof(SQLINTEGER)); case OdbcNativeType::AI_FLOAT: - return static_cast(sizeof(float)); + return static_cast(sizeof(SQLREAL)); case OdbcNativeType::AI_DOUBLE: - return static_cast(sizeof(double)); + return static_cast(sizeof(SQLDOUBLE)); - case OdbcNativeType::AI_BIT: case OdbcNativeType::AI_SIGNED_TINYINT: + return static_cast(sizeof(SQLSCHAR)); + + case OdbcNativeType::AI_BIT: case 
OdbcNativeType::AI_UNSIGNED_TINYINT: - return static_cast(sizeof(char)); + return static_cast(sizeof(SQLCHAR)); case OdbcNativeType::AI_SIGNED_BIGINT: - case OdbcNativeType::AI_UNSIGNED_BIGINT: return static_cast(sizeof(SQLBIGINT)); + case OdbcNativeType::AI_UNSIGNED_BIGINT: + return static_cast(sizeof(SQLUBIGINT)); + case OdbcNativeType::AI_TDATE: return static_cast(sizeof(SQL_DATE_STRUCT)); diff --git a/modules/platforms/cpp/odbc/src/statement.cpp b/modules/platforms/cpp/odbc/src/statement.cpp index d88d44907e389..9253030f839a8 100644 --- a/modules/platforms/cpp/odbc/src/statement.cpp +++ b/modules/platforms/cpp/odbc/src/statement.cpp @@ -46,10 +46,10 @@ namespace ignite connection(parent), columnBindings(), currentQuery(), - rowBindType(SQL_BIND_BY_COLUMN), rowsFetched(0), rowStatuses(0), columnBindOffset(0), + rowArraySize(1), parameters(), timeout(0) { @@ -205,7 +205,7 @@ namespace ignite if (!buffer && !resLen) { AddStatusRecord(SqlState::SHY009_INVALID_USE_OF_NULL_POINTER, - "ParameterValuePtr and StrLen_or_IndPtr are both null pointers."); + "ParameterValuePtr and StrLen_or_IndPtr are both null pointers"); return SqlResult::AI_ERROR; } @@ -234,34 +234,59 @@ namespace ignite LOG_MSG("SQL_ATTR_ROW_ARRAY_SIZE: " << val); - if (val != 1) + if (val < 1) { - AddStatusRecord(SqlState::SHYC00_OPTIONAL_FEATURE_NOT_IMPLEMENTED, - "Fetching of more than one row by call is not supported."); + AddStatusRecord(SqlState::SHY092_OPTION_TYPE_OUT_OF_RANGE, + "Array size value can not be less than 1"); return SqlResult::AI_ERROR; } + rowArraySize = val; + break; } case SQL_ATTR_ROW_BIND_TYPE: { - rowBindType = reinterpret_cast(value); + SqlUlen rowBindType = reinterpret_cast(value); + + if (rowBindType != SQL_BIND_BY_COLUMN) + { + AddStatusRecord(SqlState::SHYC00_OPTIONAL_FEATURE_NOT_IMPLEMENTED, + "Only binding by column is currently supported"); + + return SqlResult::AI_ERROR; + } break; } case SQL_ATTR_ROWS_FETCHED_PTR: { - SetRowsFetchedPtr(reinterpret_cast(value)); + 
SetRowsFetchedPtr(reinterpret_cast(value)); break; } case SQL_ATTR_ROW_STATUS_PTR: { - SetRowStatusesPtr(reinterpret_cast(value)); + SetRowStatusesPtr(reinterpret_cast(value)); + + break; + } + + case SQL_ATTR_PARAM_BIND_TYPE: + { + SqlUlen paramBindType = reinterpret_cast(value); + + if (paramBindType != SQL_PARAM_BIND_BY_COLUMN) + { + AddStatusRecord(SqlState::SHYC00_OPTIONAL_FEATURE_NOT_IMPLEMENTED, + "Only binding by column is currently supported"); + + return SqlResult::AI_ERROR; + } break; } @@ -380,7 +405,7 @@ namespace ignite { SqlUlen* val = reinterpret_cast(buf); - *val = rowBindType; + *val = SQL_BIND_BY_COLUMN; break; } @@ -421,6 +446,15 @@ namespace ignite break; } + case SQL_ATTR_PARAM_BIND_TYPE: + { + SqlUlen* val = reinterpret_cast(buf); + + *val = SQL_PARAM_BIND_BY_COLUMN; + + break; + } + case SQL_ATTR_PARAM_BIND_OFFSET_PTR: { SQLULEN** val = reinterpret_cast(buf); @@ -956,7 +990,9 @@ namespace ignite if (orientation != SQL_FETCH_NEXT) { - AddStatusRecord(SqlState::SHY106_FETCH_TYPE_OUT_OF_RANGE, "The value specified for the argument FetchOrientation was not SQL_FETCH_NEXT."); + AddStatusRecord(SqlState::SHYC00_OPTIONAL_FEATURE_NOT_IMPLEMENTED, + "Only SQL_FETCH_NEXT FetchOrientation type is supported"); + return SqlResult::AI_ERROR; } @@ -975,8 +1011,7 @@ namespace ignite if (!currentQuery.get()) { - AddStatusRecord(SqlState::S24000_INVALID_CURSOR_STATE, - "Cursor is not in the open state."); + AddStatusRecord(SqlState::S24000_INVALID_CURSOR_STATE, "Cursor is not in the open state"); return SqlResult::AI_ERROR; } @@ -987,18 +1022,32 @@ namespace ignite it->second.SetByteOffset(*columnBindOffset); } - SqlResult::Type res = currentQuery->FetchNextRow(columnBindings); + SQLINTEGER fetched = 0; + SQLINTEGER errors = 0; - if (res == SqlResult::AI_SUCCESS) + for (SqlUlen i = 0; i < rowArraySize; ++i) { - if (rowsFetched) - *rowsFetched = 1; + for (app::ColumnBindingMap::iterator it = columnBindings.begin(); it != columnBindings.end(); ++it) + 
it->second.SetElementOffset(i); + + SqlResult::Type res = currentQuery->FetchNextRow(columnBindings); + + if (res == SqlResult::AI_SUCCESS || res == SqlResult::AI_SUCCESS_WITH_INFO) + ++fetched; + else if (res != SqlResult::AI_NO_DATA) + ++errors; if (rowStatuses) - rowStatuses[0] = SQL_ROW_SUCCESS; + rowStatuses[i] = SqlResultToRowResult(res); } - return res; + if (rowsFetched) + *rowsFetched = fetched < 0 ? static_cast(rowArraySize) : fetched; + + if (fetched > 0) + return errors == 0 ? SqlResult::AI_SUCCESS : SqlResult::AI_SUCCESS_WITH_INFO; + + return errors == 0 ? SqlResult::AI_NO_DATA : SqlResult::AI_ERROR; } const meta::ColumnMetaVector* Statement::GetMeta() @@ -1114,22 +1163,22 @@ namespace ignite return SqlResult::AI_SUCCESS; } - void Statement::SetRowsFetchedPtr(size_t* ptr) + void Statement::SetRowsFetchedPtr(SQLINTEGER* ptr) { rowsFetched = ptr; } - size_t* Statement::GetRowsFetchedPtr() + SQLINTEGER* Statement::GetRowsFetchedPtr() { return rowsFetched; } - void Statement::SetRowStatusesPtr(uint16_t* ptr) + void Statement::SetRowStatusesPtr(SQLUSMALLINT* ptr) { rowStatuses = ptr; } - uint16_t * Statement::GetRowStatusesPtr() + SQLUSMALLINT * Statement::GetRowStatusesPtr() { return rowStatuses; } @@ -1327,6 +1376,24 @@ namespace ignite return SqlResult::AI_SUCCESS; } + + uint16_t Statement::SqlResultToRowResult(SqlResult::Type value) + { + switch (value) + { + case SqlResult::AI_NO_DATA: + return SQL_ROW_NOROW; + + case SqlResult::AI_SUCCESS: + return SQL_ROW_SUCCESS; + + case SqlResult::AI_SUCCESS_WITH_INFO: + return SQL_ROW_SUCCESS_WITH_INFO; + + default: + return SQL_ROW_ERROR; + } + } } } From 50a62d8c26d8ec5a2d930688a77927ba539886e0 Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Fri, 13 Nov 2020 10:32:59 +0300 Subject: [PATCH 043/110] IGNITE-13636: Fix ODBC Date metadata This closes #8453 --- .../processors/odbc/odbc/OdbcColumnMeta.java | 15 ++- .../cpp/odbc-test/src/cursor_binding_test.cpp | 6 +- .../cpp/odbc-test/src/meta_queries_test.cpp | 85 
++++++++++++ .../cpp/odbc-test/src/odbc_test_suite.cpp | 6 +- .../cpp/odbc-test/src/queries_test.cpp | 6 +- .../odbc-test/src/sql_test_suite_fixture.cpp | 12 +- .../cpp/odbc-test/src/sql_types_test.cpp | 124 ++++++++++++++++++ .../platforms/cpp/odbc/src/type_traits.cpp | 3 + 8 files changed, 241 insertions(+), 16 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcColumnMeta.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcColumnMeta.java index 61e7370391a95..a8f749e548c61 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcColumnMeta.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcColumnMeta.java @@ -19,6 +19,7 @@ import org.apache.ignite.binary.BinaryRawWriter; import org.apache.ignite.internal.binary.BinaryUtils; +import org.apache.ignite.internal.binary.GridBinaryMarshaller; import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion; import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata; import org.apache.ignite.internal.util.typedef.internal.S; @@ -128,7 +129,7 @@ public void write(BinaryRawWriter writer) { writer.writeString(tableName); writer.writeString(columnName); - byte typeId = BinaryUtils.typeByClass(dataType); + byte typeId = getTypeId(dataType); writer.writeByte(typeId); @@ -138,6 +139,18 @@ public void write(BinaryRawWriter writer) { } } + /** + * Get ODBC type ID for the type. + * @param dataType Data type class. + * @return Type ID. 
+ */ + private static byte getTypeId(Class dataType) { + if (dataType.equals(java.sql.Date.class)) + return GridBinaryMarshaller.DATE; + + return BinaryUtils.typeByClass(dataType); + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(OdbcColumnMeta.class, this); diff --git a/modules/platforms/cpp/odbc-test/src/cursor_binding_test.cpp b/modules/platforms/cpp/odbc-test/src/cursor_binding_test.cpp index 5d095014a9572..d145e5231d042 100644 --- a/modules/platforms/cpp/odbc-test/src/cursor_binding_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/cursor_binding_test.cpp @@ -217,13 +217,13 @@ BOOST_AUTO_TEST_CASE(TestCursorBindingColumnWise) ret = SQLBindCol(stmt, 7, SQL_C_BIT, boolFields, 0, boolFieldsInd); ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); - ret = SQLBindCol(stmt, 8, SQL_C_DATE, dateFields, 0, dateFieldsInd); + ret = SQLBindCol(stmt, 8, SQL_C_TYPE_DATE, dateFields, 0, dateFieldsInd); ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); - ret = SQLBindCol(stmt, 9, SQL_C_TIME, timeFields, 0, timeFieldsInd); + ret = SQLBindCol(stmt, 9, SQL_C_TYPE_TIME, timeFields, 0, timeFieldsInd); ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); - ret = SQLBindCol(stmt, 10, SQL_C_TIMESTAMP, timestampFields, 0, timestampFieldsInd); + ret = SQLBindCol(stmt, 10, SQL_C_TYPE_TIMESTAMP, timestampFields, 0, timestampFieldsInd); ODBC_THROW_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); ret = SQLBindCol(stmt, 11, SQL_C_BINARY, i8ArrayFields, BUFFER_SIZE, i8ArrayFieldsLen); diff --git a/modules/platforms/cpp/odbc-test/src/meta_queries_test.cpp b/modules/platforms/cpp/odbc-test/src/meta_queries_test.cpp index 733bf078d9ed6..04f76928b5082 100644 --- a/modules/platforms/cpp/odbc-test/src/meta_queries_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/meta_queries_test.cpp @@ -161,6 +161,91 @@ BOOST_AUTO_TEST_CASE(TestGetTypeInfoAllTypes) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); } +BOOST_AUTO_TEST_CASE(TestDateTypeColumnAttributeCurdate) +{ + 
Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + SQLCHAR req[] = "select CURDATE()"; + SQLExecDirect(stmt, req, SQL_NTS); + + SQLLEN intVal = 0; + + SQLRETURN ret = SQLColAttribute(stmt, 1, SQL_DESC_TYPE, 0, 0, 0, &intVal); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(intVal, SQL_TYPE_DATE); +} + +BOOST_AUTO_TEST_CASE(TestDateTypeColumnAttributeLiteral) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + SQLCHAR req[] = "select DATE '2020-10-25'"; + SQLExecDirect(stmt, req, SQL_NTS); + + SQLLEN intVal = 0; + + SQLRETURN ret = SQLColAttribute(stmt, 1, SQL_DESC_TYPE, 0, 0, 0, &intVal); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(intVal, SQL_TYPE_DATE); +} + +BOOST_AUTO_TEST_CASE(TestDateTypeColumnAttributeField) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + SQLCHAR req[] = "select CAST (dateField as DATE) from TestType"; + SQLExecDirect(stmt, req, SQL_NTS); + + SQLLEN intVal = 0; + + SQLRETURN ret = SQLColAttribute(stmt, 1, SQL_DESC_TYPE, 0, 0, 0, &intVal); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(intVal, SQL_TYPE_DATE); +} + +BOOST_AUTO_TEST_CASE(TestTimeTypeColumnAttributeLiteral) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + SQLCHAR req[] = "select TIME '12:42:13'"; + SQLExecDirect(stmt, req, SQL_NTS); + + SQLLEN intVal = 0; + + SQLRETURN ret = SQLColAttribute(stmt, 1, SQL_DESC_TYPE, 0, 0, 0, &intVal); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(intVal, SQL_TYPE_TIME); +} + +BOOST_AUTO_TEST_CASE(TestTimeTypeColumnAttributeField) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + SQLCHAR req[] = "select timeField from TestType"; + SQLExecDirect(stmt, req, 
SQL_NTS); + + SQLLEN intVal = 0; + + SQLRETURN ret = SQLColAttribute(stmt, 1, SQL_DESC_TYPE, 0, 0, 0, &intVal); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(intVal, SQL_TYPE_TIME); +} + BOOST_AUTO_TEST_CASE(TestColAttributesColumnLength) { Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); diff --git a/modules/platforms/cpp/odbc-test/src/odbc_test_suite.cpp b/modules/platforms/cpp/odbc-test/src/odbc_test_suite.cpp index 28affb7b97d2b..4a08a184678a6 100644 --- a/modules/platforms/cpp/odbc-test/src/odbc_test_suite.cpp +++ b/modules/platforms/cpp/odbc-test/src/odbc_test_suite.cpp @@ -584,19 +584,19 @@ namespace ignite BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); BOOST_TEST_CHECKPOINT("Binding dateFields"); - ret = SQLBindParameter(stmt, 9, SQL_PARAM_INPUT, SQL_C_DATE, SQL_DATE, 0, 0, dateFields.GetData(), 0, 0); + ret = SQLBindParameter(stmt, 9, SQL_PARAM_INPUT, SQL_C_TYPE_DATE, SQL_TYPE_DATE, 0, 0, dateFields.GetData(), 0, 0); if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); BOOST_TEST_CHECKPOINT("Binding timeFields"); - ret = SQLBindParameter(stmt, 10, SQL_PARAM_INPUT, SQL_C_TIME, SQL_TIME, 0, 0, timeFields.GetData(), 0, 0); + ret = SQLBindParameter(stmt, 10, SQL_PARAM_INPUT, SQL_C_TYPE_TIME, SQL_TYPE_TIME, 0, 0, timeFields.GetData(), 0, 0); if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); BOOST_TEST_CHECKPOINT("Binding timestampFields"); - ret = SQLBindParameter(stmt, 11, SQL_PARAM_INPUT, SQL_C_TIMESTAMP, SQL_TIMESTAMP, 0, 0, timestampFields.GetData(), 0, 0); + ret = SQLBindParameter(stmt, 11, SQL_PARAM_INPUT, SQL_C_TYPE_TIMESTAMP, SQL_TYPE_TIMESTAMP, 0, 0, timestampFields.GetData(), 0, 0); if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); diff --git a/modules/platforms/cpp/odbc-test/src/queries_test.cpp b/modules/platforms/cpp/odbc-test/src/queries_test.cpp index 
a22c33647db49..6cded84775148 100644 --- a/modules/platforms/cpp/odbc-test/src/queries_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/queries_test.cpp @@ -748,15 +748,15 @@ BOOST_AUTO_TEST_CASE(TestNullFields) if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); - ret = SQLBindCol(stmt, 9, SQL_C_DATE, &dateColumn, 0, &columnLens[8]); + ret = SQLBindCol(stmt, 9, SQL_C_TYPE_DATE, &dateColumn, 0, &columnLens[8]); if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); - ret = SQLBindCol(stmt, 10, SQL_C_TIME, &timeColumn, 0, &columnLens[9]); + ret = SQLBindCol(stmt, 10, SQL_C_TYPE_TIME, &timeColumn, 0, &columnLens[9]); if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); - ret = SQLBindCol(stmt, 11, SQL_C_TIMESTAMP, ×tampColumn, 0, &columnLens[10]); + ret = SQLBindCol(stmt, 11, SQL_C_TYPE_TIMESTAMP, ×tampColumn, 0, &columnLens[10]); if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); diff --git a/modules/platforms/cpp/odbc-test/src/sql_test_suite_fixture.cpp b/modules/platforms/cpp/odbc-test/src/sql_test_suite_fixture.cpp index 9d2c2ab1b67c0..af46057159fc4 100644 --- a/modules/platforms/cpp/odbc-test/src/sql_test_suite_fixture.cpp +++ b/modules/platforms/cpp/odbc-test/src/sql_test_suite_fixture.cpp @@ -253,7 +253,7 @@ namespace ignite { SQL_DATE_STRUCT res; - CheckSingleResult0(request, SQL_C_DATE, &res, 0, 0); + CheckSingleResult0(request, SQL_C_TYPE_DATE, &res, 0, 0); } template<> @@ -261,7 +261,7 @@ namespace ignite { SQL_TIMESTAMP_STRUCT res; - CheckSingleResult0(request, SQL_C_TIMESTAMP, &res, 0, 0); + CheckSingleResult0(request, SQL_C_TYPE_TIMESTAMP, &res, 0, 0); } template<> @@ -269,7 +269,7 @@ namespace ignite { SQL_TIME_STRUCT res; - CheckSingleResult0(request, SQL_C_TIME, &res, 0, 0); + CheckSingleResult0(request, SQL_C_TYPE_TIME, &res, 0, 0); } template<> @@ -305,7 +305,7 @@ namespace ignite { SQL_DATE_STRUCT res; - CheckSingleResult0(request, 
SQL_C_DATE, &res, 0, 0); + CheckSingleResult0(request, SQL_C_TYPE_DATE, &res, 0, 0); using ignite::impl::binary::BinaryUtils; Date actual = common::MakeDateGmt(res.year, res.month, res.day); @@ -317,7 +317,7 @@ namespace ignite { SQL_TIMESTAMP_STRUCT res; - CheckSingleResult0(request, SQL_C_TIMESTAMP, &res, 0, 0); + CheckSingleResult0(request, SQL_C_TYPE_TIMESTAMP, &res, 0, 0); using ignite::impl::binary::BinaryUtils; Timestamp actual = common::MakeTimestampGmt(res.year, res.month, res.day, res.hour, res.minute, res.second, res.fraction); @@ -331,7 +331,7 @@ namespace ignite { SQL_TIME_STRUCT res; - CheckSingleResult0(request, SQL_C_TIME, &res, 0, 0); + CheckSingleResult0(request, SQL_C_TYPE_TIME, &res, 0, 0); using ignite::impl::binary::BinaryUtils; Time actual = common::MakeTimeGmt(res.hour, res.minute, res.second); diff --git a/modules/platforms/cpp/odbc-test/src/sql_types_test.cpp b/modules/platforms/cpp/odbc-test/src/sql_types_test.cpp index e2fbdf6a58754..65feef06a6da9 100644 --- a/modules/platforms/cpp/odbc-test/src/sql_types_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/sql_types_test.cpp @@ -304,4 +304,128 @@ BOOST_AUTO_TEST_CASE(TestTimeInsert) BOOST_REQUIRE_EQUAL(out.timeField.GetSeconds(), expected.GetSeconds()); } +void FetchAndCheckDate(SQLHSTMT stmt, const std::string& req, SQLSMALLINT dataType) +{ + std::vector req0(req.begin(), req.end()); + req0.push_back(0); + + SQLExecDirect(stmt, &req0[0], SQL_NTS); + + SQL_DATE_STRUCT res; + + memset(&res, 0, sizeof(res)); + + SQLLEN resLen = 0; + SQLRETURN ret = SQLBindCol(stmt, 1, dataType, &res, 0, &resLen); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + ret = SQLFetch(stmt); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(res.day, 25); + BOOST_CHECK_EQUAL(res.month, 10); + BOOST_CHECK_EQUAL(res.year, 2020); +} + +BOOST_AUTO_TEST_CASE(TestFetchLiteralDate) +{ + FetchAndCheckDate(stmt, "select DATE 
'2020-10-25'", SQL_C_TYPE_DATE); +} + +BOOST_AUTO_TEST_CASE(TestFetchLiteralDateLegacy) +{ + FetchAndCheckDate(stmt, "select DATE '2020-10-25'", SQL_C_DATE); +} + +BOOST_AUTO_TEST_CASE(TestFetchFieldDateAsDate) +{ + TestType val1; + val1.dateField = common::MakeDateGmt(2020, 10, 25); + + testCache.Put(1, val1); + + FetchAndCheckDate(stmt, "select CAST (dateField as DATE) from TestType", SQL_C_TYPE_DATE); +} + +BOOST_AUTO_TEST_CASE(TestFetchFieldDateAsDateLegacy) +{ + TestType val1; + val1.dateField = common::MakeDateGmt(2020, 10, 25); + + testCache.Put(1, val1); + + FetchAndCheckDate(stmt, "select CAST (dateField as DATE) from TestType", SQL_C_DATE); +} + +BOOST_AUTO_TEST_CASE(TestFetchFieldDateAsIs) +{ + TestType val1; + val1.dateField = common::MakeDateGmt(2020, 10, 25); + + testCache.Put(1, val1); + + FetchAndCheckDate(stmt, "select dateField from TestType", SQL_C_TYPE_DATE); +} + +void FetchAndCheckTime(SQLHSTMT stmt, const std::string& req, SQLSMALLINT dataType) +{ + std::vector req0(req.begin(), req.end()); + req0.push_back(0); + + SQLExecDirect(stmt, &req0[0], SQL_NTS); + + SQL_TIME_STRUCT res; + + memset(&res, 0, sizeof(res)); + + SQLLEN resLen = 0; + SQLRETURN ret = SQLBindCol(stmt, 1, dataType, &res, 0, &resLen); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + ret = SQLFetch(stmt); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(res.hour, 12); + BOOST_CHECK_EQUAL(res.minute, 42); + BOOST_CHECK_EQUAL(res.second, 13); +} + +BOOST_AUTO_TEST_CASE(TestFetchLiteralTime) +{ + FetchAndCheckTime(stmt, "select TIME '12:42:13'", SQL_C_TYPE_TIME); +} + +BOOST_AUTO_TEST_CASE(TestFetchLiteralTimeLegacy) +{ + FetchAndCheckTime(stmt, "select TIME '12:42:13'", SQL_C_TYPE_TIME); +} + +BOOST_AUTO_TEST_CASE(TestFetchFieldTimeAsIs) +{ + TestType val1; + val1.timeField = common::MakeTimeGmt(12, 42, 13); + + testCache.Put(1, val1); + + FetchAndCheckTime(stmt, "select 
timeField from TestType", SQL_C_TYPE_TIME); +} + +BOOST_AUTO_TEST_CASE(TestFetchFieldTimeAsIsLegacy) +{ + TestType val1; + val1.timeField = common::MakeTimeGmt(12, 42, 13); + + testCache.Put(1, val1); + + FetchAndCheckTime(stmt, "select timeField from TestType", SQL_C_TIME); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/modules/platforms/cpp/odbc/src/type_traits.cpp b/modules/platforms/cpp/odbc/src/type_traits.cpp index 25eff51d309c1..1310c67bca9b1 100644 --- a/modules/platforms/cpp/odbc/src/type_traits.cpp +++ b/modules/platforms/cpp/odbc/src/type_traits.cpp @@ -336,12 +336,15 @@ namespace ignite case SQL_C_BINARY: return OdbcNativeType::AI_BINARY; + case SQL_C_DATE: case SQL_C_TYPE_DATE: return OdbcNativeType::AI_TDATE; + case SQL_C_TIME: case SQL_C_TYPE_TIME: return OdbcNativeType::AI_TTIME; + case SQL_C_TIMESTAMP: case SQL_C_TYPE_TIMESTAMP: return OdbcNativeType::AI_TTIMESTAMP; From d75bfd0f35d43ab29f9b280d31e9a0c3132da59d Mon Sep 17 00:00:00 2001 From: akorensh Date: Fri, 13 Nov 2020 13:51:28 +0300 Subject: [PATCH 044/110] IGNITE-13655 Implement readiness probe REST endpoint - Fixes #8417. 
Signed-off-by: Ilya Kasnacheev --- .../client/rest/GridProbeCommandTest.java | 224 ++++++++++++++++++ .../client/suite/IgniteClientTestSuite.java | 4 + .../apache/ignite/internal/IgnitionEx.java | 17 ++ .../processors/rest/GridRestCommand.java | 5 +- .../processors/rest/GridRestProcessor.java | 2 + .../processors/rest/GridRestResponse.java | 3 + .../probe/GridProbeCommandHandler.java | 70 ++++++ .../http/jetty/GridJettyRestHandler.java | 5 +- 8 files changed, 327 insertions(+), 3 deletions(-) create mode 100644 modules/clients/src/test/java/org/apache/ignite/internal/client/rest/GridProbeCommandTest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/rest/handlers/probe/GridProbeCommandHandler.java diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/client/rest/GridProbeCommandTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/client/rest/GridProbeCommandTest.java new file mode 100644 index 0000000000000..af09544db0d57 --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/internal/client/rest/GridProbeCommandTest.java @@ -0,0 +1,224 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.client.rest; + +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.HttpURLConnection; +import java.net.URL; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.ignite.configuration.ConnectorConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.processors.rest.GridRestCommand; +import org.apache.ignite.internal.processors.rest.GridRestResponse; +import org.apache.ignite.internal.processors.rest.handlers.GridRestCommandHandler; +import org.apache.ignite.internal.processors.rest.handlers.probe.GridProbeCommandHandler; +import org.apache.ignite.internal.processors.rest.request.GridRestCacheRequest; +import org.apache.ignite.plugin.AbstractTestPluginProvider; +import org.apache.ignite.plugin.PluginProvider; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; + +/** + * Test whether REST probe command works correctly when kernal has started and vice versa. + */ +public class GridProbeCommandTest extends GridCommonAbstractTest { + /** */ + private static final int JETTY_PORT = 8080; + + /** */ + private CountDownLatch triggerRestCmdLatch = new CountDownLatch(1); + + /** */ + private CountDownLatch triggerPluginStartLatch = new CountDownLatch(1); + + /** */ + public static Map executeProbeRestRequest() throws IOException { + HttpURLConnection conn = (HttpURLConnection)(new URL("http://localhost:" + JETTY_PORT + "/ignite?cmd=probe").openConnection()); + conn.connect(); + + boolean isHTTP_OK = conn.getResponseCode() == HttpURLConnection.HTTP_OK; + + Map restResponse = null; + + try (InputStreamReader streamReader = new InputStreamReader(isHTTP_OK ? 
conn.getInputStream() : conn.getErrorStream())) { + + ObjectMapper objMapper = new ObjectMapper(); + restResponse = objMapper.readValue(streamReader, + new TypeReference>() { + }); + + log.info("probe command response is: " + restResponse); + + } + catch (Exception e) { + log.error("error executing probe rest command", e); + } + return restResponse; + + } + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + cfg.setConnectorConfiguration(new ConnectorConfiguration()); + + if (igniteInstanceName.equals("regular")) + return cfg; + else if (igniteInstanceName.equals("delayedStart")) { + PluginProvider delayedStartPluginProvider = new DelayedStartPluginProvider(triggerPluginStartLatch, triggerRestCmdLatch); + + cfg.setPluginProviders(new PluginProvider[] {delayedStartPluginProvider}); + } + + return cfg; + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + super.afterTest(); + + stopAllGrids(false); + } + + /** + * Test for the REST probe command + * + * @throws Exception If failed. + */ + @Test + public void testRestProbeCommand() throws Exception { + startGrid("regular"); + + GridRestCommandHandler hnd = new GridProbeCommandHandler((grid("regular")).context()); + + GridRestCacheRequest req = new GridRestCacheRequest(); + req.command(GridRestCommand.PROBE); + + IgniteInternalFuture resp = hnd.handleAsync(req); + resp.get(); + + assertEquals(GridRestResponse.STATUS_SUCCESS, resp.result().getSuccessStatus()); + assertEquals("grid has started", resp.result().getResponse()); + + } + + /** + *

Test rest cmd=probe command given a non fully started kernal.

+ *

1. start the grid on a seperate thread w/a plugin that will keep it waiting, at a point after rest http + * processor is ready, until signaled to proceed.

+ *

2. when the grid.start() has reached the plugin init method(rest http processor has started now), issue a + * rest command against the non-fully started kernal.

+ *

3. validate that the probe cmd has returned the appropriate erroneous code and message.

+ *

4. stop the grid.

+ * + * @throws Exception If failed. + */ + @Test + public void testRestProbeCommandGridNotStarted() throws Exception { + new Thread(new Runnable() { + @Override public void run() { + try { + startGrid("delayedStart"); + } + catch (Exception e) { + log.error("error when starting delatedStart grid", e); + } + } + }).start(); + + Map probeRestCommandResponse; + + log.info("awaiting plugin handler latch"); + triggerPluginStartLatch.await(); + log.info("starting rest command url call"); + try { + probeRestCommandResponse = executeProbeRestRequest(); + log.info("finished rest command url call"); + } + finally { + triggerRestCmdLatch.countDown(); //make sure the grid shuts down + } + + assertTrue(probeRestCommandResponse.get("error").equals("grid has not started")); + assertEquals(GridRestResponse.SERVICE_UNAVAILABLE, probeRestCommandResponse.get("successStatus")); + } + + /** + *

Start a regular grid, issue a cmd=probe rest command, and validate restponse + * + * @throws Exception If failed. + */ + @Test + public void testRestProbeCommandGridStarted() throws Exception { + startGrid("regular"); + + Map probeRestCommandResponse; + + probeRestCommandResponse = executeProbeRestRequest(); + + assertTrue(probeRestCommandResponse.get("response").equals("grid has started")); + assertEquals(0, probeRestCommandResponse.get("successStatus")); + } + + /** + * This plugin awaits until it is given the signal to process -- thereby allowing an http request against a non + * fully started kernal. + */ + public static class DelayedStartPluginProvider extends AbstractTestPluginProvider { + /** */ + private CountDownLatch triggerRestCmd; + + /** */ + private CountDownLatch triggerPluginStart; + + /** */ + public DelayedStartPluginProvider(CountDownLatch triggerPluginStartLatch, + CountDownLatch triggerRestCmdLatch) { + this.triggerPluginStart = triggerPluginStartLatch; + this.triggerRestCmd = triggerRestCmdLatch; + } + + /** {@inheritDoc} */ + @Override public String name() { + return "DelayedStartPlugin"; + } + + /** {@inheritDoc} */ + @Override public void onIgniteStart() { + super.onIgniteStart(); + + triggerPluginStart.countDown(); + + log.info("awaiting rest command latch ..."); + + try { + triggerRestCmd.await(); + } + catch (InterruptedException e) { + log.error("error in custom plugin", e); + } + + log.info("finished awaiting rest command latch."); + } + } +} diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/client/suite/IgniteClientTestSuite.java b/modules/clients/src/test/java/org/apache/ignite/internal/client/suite/IgniteClientTestSuite.java index 8a3936b17abd1..7408f4e3e40e1 100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/client/suite/IgniteClientTestSuite.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/client/suite/IgniteClientTestSuite.java @@ -42,6 +42,7 @@ import 
org.apache.ignite.internal.client.integration.ClientTcpSslMultiNodeSelfTest; import org.apache.ignite.internal.client.integration.ClientTcpSslSelfTest; import org.apache.ignite.internal.client.integration.ClientTcpUnreachableMultiNodeSelfTest; +import org.apache.ignite.internal.client.rest.GridProbeCommandTest; import org.apache.ignite.internal.client.router.ClientFailedInitSelfTest; import org.apache.ignite.internal.client.router.RouterFactorySelfTest; import org.apache.ignite.internal.client.router.TcpRouterMultiNodeSelfTest; @@ -134,6 +135,9 @@ ClientTcpUnreachableMultiNodeSelfTest.class, ClientPreferDirectSelfTest.class, + //Test REST probe cmd + GridProbeCommandTest.class, + // Test client with many nodes and in multithreaded scenarios ClientTcpMultiThreadedSelfTest.class, ClientTcpSslMultiThreadedSelfTest.class, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java index dfdae46fdc0eb..310426919c068 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java @@ -1476,6 +1476,16 @@ public static DependencyResolver dependencyResolver() { return dependencyResolver.get(); } + /** + * @param name Grid name (possibly {@code null} for default grid). + * @return true when all managers, processors, and plugins have started and ignite kernal start method has fully + * completed. + */ + public static boolean hasKernalStarted(String name) { + IgniteNamedInstance grid = name != null ? grids.get(name) : dfltGrid; + return grid != null && grid.hasStartLatchCompleted(); + } + /** * Start context encapsulates all starting parameters. */ @@ -3215,6 +3225,13 @@ public void setCounter(int cnt) { this.cnt = cnt; } } + + /** + * @return whether the startLatch has been counted down, thereby indicating that the kernal has full started. 
+ */ + public boolean hasStartLatchCompleted() { + return startLatch.getCount() == 0; + } } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestCommand.java index c97c26a4223fb..16dc5f0397201 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestCommand.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestCommand.java @@ -223,7 +223,10 @@ public enum GridRestCommand { NODE_STATE_BEFORE_START("nodestatebeforestart"), /** Warm-up. */ - WARM_UP("warmup"); + WARM_UP("warmup"), + + /** probe. */ + PROBE("probe"); /** Enum values. */ private static final GridRestCommand[] VALS = values(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestProcessor.java index 358f75b23e5f3..21c5eb35505ea 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestProcessor.java @@ -56,6 +56,7 @@ import org.apache.ignite.internal.processors.rest.handlers.datastructures.DataStructuresCommandHandler; import org.apache.ignite.internal.processors.rest.handlers.log.GridLogCommandHandler; import org.apache.ignite.internal.processors.rest.handlers.memory.MemoryMetricsCommandHandler; +import org.apache.ignite.internal.processors.rest.handlers.probe.GridProbeCommandHandler; import org.apache.ignite.internal.processors.rest.handlers.query.QueryCommandHandler; import org.apache.ignite.internal.processors.rest.handlers.task.GridTaskCommandHandler; import org.apache.ignite.internal.processors.rest.handlers.top.GridTopologyCommandHandler; @@ -557,6 +558,7 @@ public GridRestProcessor(GridKernalContext ctx) { addHandler(new 
GridBaselineCommandHandler(ctx)); addHandler(new MemoryMetricsCommandHandler(ctx)); addHandler(new NodeStateBeforeStartCommandHandler(ctx)); + addHandler(new GridProbeCommandHandler(ctx)); // Start protocols. startTcpProtocol(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestResponse.java index 0c3ac0499c954..adefd9e641b32 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestResponse.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestResponse.java @@ -51,6 +51,9 @@ public class GridRestResponse implements Externalizable { /** Success status. */ private int successStatus = STATUS_SUCCESS; + /** HTTP REQUEST not allowed */ + public static final int SERVICE_UNAVAILABLE = 503; + /** Session token. */ private byte[] sesTokBytes; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/handlers/probe/GridProbeCommandHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/handlers/probe/GridProbeCommandHandler.java new file mode 100644 index 0000000000000..844dd5b9ffa30 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/handlers/probe/GridProbeCommandHandler.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.rest.handlers.probe; + +import java.util.Collection; +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.IgnitionEx; +import org.apache.ignite.internal.processors.rest.GridRestCommand; +import org.apache.ignite.internal.processors.rest.GridRestResponse; +import org.apache.ignite.internal.processors.rest.handlers.GridRestCommandHandlerAdapter; +import org.apache.ignite.internal.processors.rest.request.GridRestRequest; +import org.apache.ignite.internal.util.future.GridFinishedFuture; +import org.apache.ignite.internal.util.typedef.internal.U; + +import static org.apache.ignite.internal.processors.rest.GridRestCommand.PROBE; + +/** + * Handler for {@link GridRestCommand#PROBE}. + */ +public class GridProbeCommandHandler extends GridRestCommandHandlerAdapter { + /** + * @param ctx Context. + */ + public GridProbeCommandHandler(GridKernalContext ctx) { + super(ctx); + } + + /** Supported commands. 
*/ + private static final Collection SUPPORTED_COMMANDS = U.sealList(PROBE); + + /** {@inheritDoc} */ + @Override public Collection supportedCommands() { + return SUPPORTED_COMMANDS; + } + + /** {@inheritDoc} */ + @Override public IgniteInternalFuture handleAsync(GridRestRequest req) { + assert req != null; + + assert SUPPORTED_COMMANDS.contains(req.command()); + + switch (req.command()) { + case PROBE: { + if (log.isDebugEnabled()) + log.debug("probe command handler invoked."); + + return new GridFinishedFuture<>(IgnitionEx.hasKernalStarted(ctx.igniteInstanceName()) ? new GridRestResponse("grid has started") : new GridRestResponse(GridRestResponse.SERVICE_UNAVAILABLE, "grid has not started")); + + } + } + + return new GridFinishedFuture<>(); + } +} diff --git a/modules/rest-http/src/main/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/GridJettyRestHandler.java b/modules/rest-http/src/main/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/GridJettyRestHandler.java index 42b8a305eab2c..421bf782c43ae 100644 --- a/modules/rest-http/src/main/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/GridJettyRestHandler.java +++ b/modules/rest-http/src/main/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/GridJettyRestHandler.java @@ -464,7 +464,7 @@ private void processRequest(String act, HttpServletRequest req, HttpServletRespo if (sesTok != null) cmdRes.setSessionToken(U.byteArray2HexString(sesTok)); - res.setStatus(HttpServletResponse.SC_OK); + res.setStatus(cmdRes.getSuccessStatus() == GridRestResponse.SERVICE_UNAVAILABLE ? 
HttpServletResponse.SC_SERVICE_UNAVAILABLE : HttpServletResponse.SC_OK); } catch (Throwable e) { res.setStatus(HttpServletResponse.SC_OK); @@ -720,7 +720,8 @@ private void processRequest(String act, HttpServletRequest req, HttpServletRespo case DATA_REGION_METRICS: case DATA_STORAGE_METRICS: case NAME: - case VERSION: { + case VERSION: + case PROBE: { restReq = new GridRestRequest(); break; From 37e2e450afbe94fe04fc2e6c520d1b79f2d14e5e Mon Sep 17 00:00:00 2001 From: Vladsz83 Date: Fri, 13 Nov 2020 22:14:38 +0300 Subject: [PATCH 045/110] Variant 2. (#8452) --- docs/_docs/clustering/network-configuration.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/_docs/clustering/network-configuration.adoc b/docs/_docs/clustering/network-configuration.adoc index 9d9c904a1a9c9..c2c462e2c3246 100644 --- a/docs/_docs/clustering/network-configuration.adoc +++ b/docs/_docs/clustering/network-configuration.adoc @@ -56,7 +56,7 @@ You can find the complete list of properties in the javadoc:org.apache.ignite.sp | `localPort` | The port that the node binds to. If set to a non-default value, other cluster nodes must know this port to be able to discover the node. | `47500` | `localPortRange`| If the `localPort` is busy, the node attempts to bind to the next port (incremented by 1) and continues this process until it finds a free port. The `localPortRange` property defines the number of ports the node will try (starting from `localPort`). | `100` -| `soLinger`| Setting linger-on-close can help with socket deadlocks of SSL issues like JDK-8219658. But costs longer detection of node failure. | `0` +| `soLinger`| Specifies a linger-on-close timeout. This option levers awaiting on close() of a TCP Socket. Set positive value to avoid potential link:https://bugs.openjdk.java.net/browse/JDK-8219658[deadlocks with SSL connections]. Alternatively, update your JRE version to the latest one. Enabling this option can prolong detection of node failure. 
| `0` | `reconnectCount` | The number of times the node tries to (re)establish connection to another node. |`10` | `networkTimeout` | The maximum network timeout in milliseconds for network operations. |`5000` | `socketTimeout` | The socket operations timeout. This timeout is used to limit connection time and write-to-socket time. |`5000` From 6900fc4ff89be2bc2dc02e5035870dfd888e6674 Mon Sep 17 00:00:00 2001 From: Denis Magda Date: Fri, 13 Nov 2020 11:47:44 -0800 Subject: [PATCH 046/110] ignite docs: updated the TcpDiscovery.soLinger documentation --- docs/_docs/clustering/network-configuration.adoc | 6 +++++- docs/_docs/security/ssl-tls.adoc | 8 +++++--- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/docs/_docs/clustering/network-configuration.adoc b/docs/_docs/clustering/network-configuration.adoc index c2c462e2c3246..8c0e0f8f0f9b8 100644 --- a/docs/_docs/clustering/network-configuration.adoc +++ b/docs/_docs/clustering/network-configuration.adoc @@ -56,7 +56,11 @@ You can find the complete list of properties in the javadoc:org.apache.ignite.sp | `localPort` | The port that the node binds to. If set to a non-default value, other cluster nodes must know this port to be able to discover the node. | `47500` | `localPortRange`| If the `localPort` is busy, the node attempts to bind to the next port (incremented by 1) and continues this process until it finds a free port. The `localPortRange` property defines the number of ports the node will try (starting from `localPort`). | `100` -| `soLinger`| Specifies a linger-on-close timeout. This option levers awaiting on close() of a TCP Socket. Set positive value to avoid potential link:https://bugs.openjdk.java.net/browse/JDK-8219658[deadlocks with SSL connections]. Alternatively, update your JRE version to the latest one. Enabling this option can prolong detection of node failure. | `0` +| `soLinger`| Specifies a linger-on-close timeout of TCP sockets used by Discovery SPI. 
See Java `Socket.setSoLinger` API +for details on how to adjust this setting. In Ignite, the timeout defaults to a non-negative value to prevent +link:https://bugs.openjdk.java.net/browse/JDK-8219658[potential deadlocks with SSL connections, window=_blank] but, +as a side effect, this can prolong the detection of cluster node failures. Alternatively, update your JRE version to the +one with the SSL issue fixed and adjust this setting accordingly. | `0` | `reconnectCount` | The number of times the node tries to (re)establish connection to another node. |`10` | `networkTimeout` | The maximum network timeout in milliseconds for network operations. |`5000` | `socketTimeout` | The socket operations timeout. This timeout is used to limit connection time and write-to-socket time. |`5000` diff --git a/docs/_docs/security/ssl-tls.adoc b/docs/_docs/security/ssl-tls.adoc index a5edc0bf22034..b56b2094d586c 100644 --- a/docs/_docs/security/ssl-tls.adoc +++ b/docs/_docs/security/ssl-tls.adoc @@ -32,10 +32,12 @@ To enable SSL/TLS for cluster nodes, configure an `SSLContext` factory in the no You can use the `org.apache.ignite.ssl.SslContextFactory`, which is the default factory that uses a configurable keystore to initialize the SSL context. //You can also implement your own `SSLContext` factory. -[NOTE] +[CAUTION] ==== -There are known socket deadlock issue on various JRE when SSL enabled. Example: JDK-8219658. It is recommended to use -the latest versions of your JRE. Or you can enable linger options like `TcpDiscoverySpi.soLinger`. +Ensure that your version of the JVM addresses +link:https://bugs.openjdk.java.net/browse/JDK-8219658[the following issue, window=_blank] that can cause deadlocks +in SSL connections. If your JVM is affected but can't be updated, then set +the link:clustering/network-configuration[`TcpDiscoverySpi.soLinger`] parameter to a non-negative value. 
==== Below is an example of `SslContextFactory` configuration: From b8aca2a812cddf094677b9a68ae202da2926dbcf Mon Sep 17 00:00:00 2001 From: samaitra Date: Mon, 16 Nov 2020 20:23:21 -0600 Subject: [PATCH 047/110] IGNITE-13539 Remove references for migrated Ignite Extensions modules from the assembly and osgi-karaf - Fixes #8460. Signed-off-by: samaitra --- assembly/dependencies-apache-ignite-slim.xml | 4 --- .../src/main/resources/features.xml | 30 +++++++++---------- 2 files changed, 15 insertions(+), 19 deletions(-) diff --git a/assembly/dependencies-apache-ignite-slim.xml b/assembly/dependencies-apache-ignite-slim.xml index 96b28f8b2e89d..e98695c8a0c85 100644 --- a/assembly/dependencies-apache-ignite-slim.xml +++ b/assembly/dependencies-apache-ignite-slim.xml @@ -145,20 +145,17 @@ org.apache.ignite:ignite-aop org.apache.ignite:ignite-aws - org.apache.ignite:ignite-camel org.apache.ignite:ignite-cassandra-serializers org.apache.ignite:ignite-cassandra-store org.apache.ignite:ignite-cloud org.apache.ignite:ignite-direct-io org.apache.ignite:ignite-gce org.apache.ignite:ignite-jcl - org.apache.ignite:ignite-jms11 org.apache.ignite:ignite-mesos org.apache.ignite:ignite-ml org.apache.ignite:ignite-ml-h2o-model-parser org.apache.ignite:ignite-ml-spark-model-parser org.apache.ignite:ignite-ml-xgboost-model-parser - org.apache.ignite:ignite-mqtt org.apache.ignite:ignite-osgi org.apache.ignite:ignite-osgi-karaf org.apache.ignite:ignite-osgi-paxlogging @@ -167,7 +164,6 @@ org.apache.ignite:ignite-spark org.apache.ignite:ignite-spark-2.4 org.apache.ignite:ignite-ssh - org.apache.ignite:ignite-storm org.apache.ignite:ignite-web org.apache.ignite:ignite-yarn org.apache.ignite:ignite-zookeeper diff --git a/modules/osgi-karaf/src/main/resources/features.xml b/modules/osgi-karaf/src/main/resources/features.xml index 0ff71e4bd057d..207941a4f291d 100644 --- a/modules/osgi-karaf/src/main/resources/features.xml +++ b/modules/osgi-karaf/src/main/resources/features.xml @@ -32,14 
+32,14 @@ ignite-core ignite-aop ignite-aws - ignite-camel - ignite-flume + ignite-camel-ext + ignite-flume-ext ignite-indexing ignite-jcl - ignite-jms11 + ignite-jms11-ext ignite-jta ignite-kafka-ext - ignite-mqtt + ignite-mqtt-ext ignite-rest-http @@ -48,7 +48,7 @@ ignite-slf4j ignite-spring ignite-ssh - ignite-twitter + ignite-twitter-ext ignite-urideploy ignite-web ignite-zookeeper @@ -82,7 +82,7 @@ mvn:org.apache.ignite/ignite-aws/${project.version} - +

camel-core - mvn:org.apache.ignite/ignite-camel/${project.version} + mvn:org.apache.ignite/ignite-camel-ext/${ignite-camel-ext.version} - +
wrap wrap:mvn:org.apache.flume/flume-ng-core/${flume.ng.version}$Bundle-SymbolicName=flume-ng-core&Bundle-Version=${flume.ng.version} - mvn:org.apache.ignite/ignite-flume/${project.version} + mvn:org.apache.ignite/ignite-flume-ext/${ignite-flume-ext.version}
@@ -139,12 +139,12 @@ mvn:org.apache.ignite/ignite-jcl/${project.version} - +
mvn:org.apache.geronimo.specs/geronimo-jms_1.1_spec/${jms.spec.version} - mvn:org.apache.ignite/ignite-jms11/${project.version} + mvn:org.apache.ignite/ignite-jms11-ext/${ignite-jms11-ext.version}
@@ -185,7 +185,7 @@ mvn:org.apache.ignite/ignite-log4j/${project.version} - +
@@ -193,7 +193,7 @@ mvn:com.google.guava/guava/${guava.version} mvn:org.eclipse.paho/org.eclipse.paho.client.mqttv3/${paho.version} wrap:mvn:com.github.rholder/guava-retrying/${guava.retrying.version}$Bundle-SymbolicName=guava-retrying&Bundle-SymbolicName=guava-retrying&Bundle-Version=${guava.retrying.version} - mvn:org.apache.ignite/ignite-mqtt/${project.version} + mvn:org.apache.ignite/ignite-mqtt-ext/${ignite-mqtt-ext.version}
@@ -277,7 +277,7 @@ mvn:org.apache.ignite/ignite-ssh/${project.version} - +
@@ -285,7 +285,7 @@ mvn:com.google.guava/guava/${guava14.version} wrap:mvn:com.twitter/hbc-core/${twitter.hbc.version}$Bundle-SymbolicName=Hosebird Client Core&Bundle-Version=${twitter.hbc.version} wrap:mvn:com.twitter/hbc-twitter4j/${twitter.hbc.version}$Bundle-SymbolicName=Hosebird Client Twitter4J&Bundle-Version=${twitter.hbc.version} - mvn:org.apache.ignite/ignite-twitter/${project.version} + mvn:org.apache.ignite/ignite-twitter-ext/${ignite-twitter-ext.version}
From 965be312a340fb6fb9f958bc4fcabda0a5e4bc16 Mon Sep 17 00:00:00 2001 From: Nikita Tolstunov Date: Tue, 17 Nov 2020 13:30:41 +0300 Subject: [PATCH 048/110] IGNITE-12489 Fix persistence corruption caused by invalid tag check (flags were not rotated for some pages) - Fixes #8358. Signed-off-by: Alexey Goncharuk --- .../jmh/tree/BPlusTreeBenchmark.java | 6 + .../internal/pagemem/PageIdAllocator.java | 18 +- .../ignite/internal/pagemem/PageIdUtils.java | 2 +- .../pagemem/store/IgnitePageStoreManager.java | 2 +- .../internal/pagemem/store/PageStore.java | 6 + .../cache/IgniteCacheOffheapManagerImpl.java | 6 +- .../processors/cache/mvcc/txlog/TxLog.java | 3 +- .../cache/mvcc/txlog/TxLogTree.java | 2 + .../cache/persistence/DataStructure.java | 32 +++- .../persistence/GridCacheOffheapManager.java | 63 ++++--- .../IgniteCacheDatabaseSharedManager.java | 4 +- .../cache/persistence/IndexStorageImpl.java | 2 + .../file/FilePageStoreFactory.java | 5 +- .../file/FilePageStoreManager.java | 5 +- .../freelist/AbstractFreeList.java | 34 ++-- .../persistence/freelist/CacheFreeList.java | 7 +- .../cache/persistence/freelist/PagesList.java | 124 +++++++++++--- .../persistence/metastorage/MetaStorage.java | 21 ++- .../metastorage/MetastorageTree.java | 5 +- .../UpgradePendingTreeToPerPartitionTask.java | 4 +- .../persistence/pagemem/PageMemoryImpl.java | 13 +- .../partstate/GroupPartitionId.java | 12 +- .../partstorage/PartitionMetaStorageImpl.java | 6 +- .../snapshot/IgniteSnapshotManager.java | 4 +- .../cache/persistence/tree/BPlusTree.java | 9 +- .../persistence/tree/io/TrackingPageIO.java | 7 +- .../persistence/tree/reuse/ReuseList.java | 18 ++ .../persistence/tree/reuse/ReuseListImpl.java | 13 +- .../processors/cache/tree/CacheDataTree.java | 5 +- .../cache/tree/PendingEntriesTree.java | 5 +- .../cache/verify/IdleVerifyUtility.java | 14 +- ...tisticsMetricsLocalMXBeanImplSelfTest.java | 2 +- .../pagemem/impl/PageIdUtilsSelfTest.java | 41 +++-- .../PendingTreeCorruptionTest.java 
| 159 ++++++++++++++++++ .../db/CheckpointBufferDeadlockTest.java | 12 ++ .../database/BPlusTreeFakeReuseSelfTest.java | 6 + .../database/BPlusTreeReuseSelfTest.java | 3 +- .../database/BPlusTreeSelfTest.java | 1 + .../database/CacheFreeListSelfTest.java | 3 +- .../testsuites/IgnitePdsTestSuite4.java | 3 + .../processors/query/h2/IgniteH2Indexing.java | 2 + .../processors/query/h2/database/H2Tree.java | 2 + 42 files changed, 559 insertions(+), 132 deletions(-) create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/PendingTreeCorruptionTest.java diff --git a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java index 7a35430b36ea5..af843cbbd5f74 100644 --- a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java +++ b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java @@ -103,6 +103,11 @@ private static class FakeReuseList implements ReuseList { return pageId == null ? 
0L : pageId; } + /** {@inheritDoc} */ + @Override public long initRecycledPage(long pageId, byte flag, PageIO initIO) throws IgniteCheckedException { + return pageId; + } + /** {@inheritDoc} */ @Override public long recycledPagesCount() throws IgniteCheckedException { return deque.size(); @@ -186,6 +191,7 @@ protected static class TestTree extends BPlusTree { reuseList, new IOVersions<>(new LongInnerIO()), new IOVersions<>(new LongLeafIO()), + PageIdAllocator.FLAG_IDX, null, null ); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdAllocator.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdAllocator.java index 73956959d0004..f97ada7aa794a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdAllocator.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdAllocator.java @@ -25,12 +25,26 @@ * Allocates page ID's. */ public interface PageIdAllocator { - /** */ + /** + * Flag for Data page. + * Also used by partition meta and tracking pages. + * This type doesn't use Page ID rotation mechanizm. + */ public static final byte FLAG_DATA = 1; - /** */ + /** + * Flag for index page. + * Also used by internal structure in inmemory caches. + * This type uses Page ID rotation mechanizm. + */ public static final byte FLAG_IDX = 2; + /** + * Flag for internal structure page. + * This type uses Page ID rotation mechanizm. + */ + public static final byte FLAG_AUX = 4; + /** Max partition ID that can be used by affinity. 
*/ public static final int MAX_PARTITION_ID = 65500; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdUtils.java index c48f4a899f790..395586c8c508b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdUtils.java @@ -108,7 +108,7 @@ public static int pageIndex(long pageId) { * @return Page ID. */ public static long pageId(long link) { - return flag(link) == PageIdAllocator.FLAG_IDX ? link : link & PAGE_ID_MASK; + return flag(link) == PageIdAllocator.FLAG_DATA ? link & PAGE_ID_MASK : link; } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/IgnitePageStoreManager.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/IgnitePageStoreManager.java index 50af2a445a57a..da606a6809065 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/IgnitePageStoreManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/IgnitePageStoreManager.java @@ -162,7 +162,7 @@ public void initializeForCache(CacheGroupDescriptor grpDesc, StoredCacheData cac * Allocates a page for the given page space. * * @param grpId Cache group ID. - * @param partId Partition ID. Used only if {@code flags} is equal to {@link PageMemory#FLAG_DATA}. + * @param partId Partition ID. Used only if {@code flags} is not equal to {@link PageMemory#FLAG_IDX}. * @param flags Page allocation flags. * @return Allocated page ID. * @throws IgniteCheckedException If IO exception occurred while allocating a page ID. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/PageStore.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/PageStore.java index 1d9e5014b9e56..528c682e69b47 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/PageStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/PageStore.java @@ -26,6 +26,12 @@ * Persistent store of pages. */ public interface PageStore extends Closeable { + /** Type for regular affinity partitions. */ + public static byte TYPE_DATA = 1; + + /** Type for index partition. */ + public static byte TYPE_IDX = 2; + /** * @param lsnr Page write listener to set. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java index 4743d895a5896..024287f49f447 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java @@ -237,7 +237,8 @@ protected void initPendingTree(GridCacheContext cctx) throws IgniteCheckedExcept rootPage, grp.reuseList(), true, - lsnr + lsnr, + FLAG_IDX ); } } @@ -1291,7 +1292,8 @@ protected CacheDataStore createCacheDataStore0(int p) throws IgniteCheckedExcept rowStore, rootPage, true, - lsnr + lsnr, + FLAG_IDX ); return new CacheDataStoreImpl(p, rowStore, dataTree, () -> pendingEntries, grp, busyLock, log); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLog.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLog.java index 0e98b8d0d206c..8cf61a8ccec82 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLog.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLog.java @@ -184,7 +184,8 @@ private void init(GridKernalContext ctx) throws IgniteCheckedException { isNew, txLogReuseListLockLsnr, ctx, - null + null, + FLAG_IDX ); tree = new TxLogTree( diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLogTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLogTree.java index c8509283976f0..a4df8bd0e888f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLogTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLogTree.java @@ -19,6 +19,7 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree; @@ -61,6 +62,7 @@ public TxLogTree( reuseList, TxLogInnerIO.VERSIONS, TxLogLeafIO.VERSIONS, + PageIdAllocator.FLAG_IDX, failureProcessor, lockLsnr ); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStructure.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStructure.java index 68fd48e48da8d..8814d181f009e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStructure.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStructure.java @@ -70,6 +70,9 @@ public abstract class DataStructure { /** */ protected final PageIoResolver pageIoRslvr; + /** */ + protected final byte pageFlag; + /** * @param cacheGrpId Cache group ID. * @param grpName Cache group name. 
@@ -77,6 +80,7 @@ public abstract class DataStructure { * @param wal Write ahead log manager. * @param lockLsnr Page lock listener. * @param pageIoRslvr Page IO resolver. + * @param pageFlag Default flag value for allocated pages. */ public DataStructure( int cacheGrpId, @@ -84,7 +88,8 @@ public DataStructure( PageMemory pageMem, IgniteWriteAheadLogManager wal, PageLockListener lockLsnr, - PageIoResolver pageIoRslvr + PageIoResolver pageIoRslvr, + byte pageFlag ) { assert pageMem != null; @@ -94,6 +99,7 @@ public DataStructure( this.wal = wal; this.lockLsnr = lockLsnr == null ? NOOP_LSNR : lockLsnr; this.pageIoRslvr = pageIoRslvr; + this.pageFlag = pageFlag; } /** @@ -131,16 +137,30 @@ protected final long allocatePage(ReuseBag bag) throws IgniteCheckedException { * @throws IgniteCheckedException If failed. */ protected final long allocatePage(ReuseBag bag, boolean useRecycled) throws IgniteCheckedException { - long pageId = bag != null ? bag.pollFreePage() : 0; + long pageId = 0; + + if (useRecycled && reuseList != null) { + pageId = bag != null ? bag.pollFreePage() : 0; + + if (pageId == 0) + pageId = reuseList.takeRecycledPage(); - if (pageId == 0 && useRecycled && reuseList != null) - pageId = reuseList.takeRecycledPage(); + // Recycled. "pollFreePage" result should be reinitialized to move rotatedId to itemId. 
+ if (pageId != 0) + pageId = reuseList.initRecycledPage(pageId, pageFlag, null); + } if (pageId == 0) pageId = allocatePageNoReuse(); assert pageId != 0; + assert PageIdUtils.flag(pageId) == FLAG_IDX && PageIdUtils.partId(pageId) == INDEX_PARTITION || + PageIdUtils.flag(pageId) != FLAG_IDX && PageIdUtils.partId(pageId) <= MAX_PARTITION_ID : + PageIdUtils.toDetailString(pageId); + + assert PageIdUtils.flag(pageId) != FLAG_DATA || PageIdUtils.itemId(pageId) == 0 : PageIdUtils.toDetailString(pageId); + return pageId; } @@ -160,7 +180,7 @@ protected long allocatePageNoReuse() throws IgniteCheckedException { */ protected final long acquirePage(long pageId, IoStatisticsHolder statHolder) throws IgniteCheckedException { assert PageIdUtils.flag(pageId) == FLAG_IDX && PageIdUtils.partId(pageId) == INDEX_PARTITION || - PageIdUtils.flag(pageId) == FLAG_DATA && PageIdUtils.partId(pageId) <= MAX_PARTITION_ID : + PageIdUtils.flag(pageId) != FLAG_IDX && PageIdUtils.partId(pageId) <= MAX_PARTITION_ID : U.hexLong(pageId) + " flag=" + PageIdUtils.flag(pageId) + " part=" + PageIdUtils.partId(pageId); return pageMem.acquirePage(grpId, pageId, statHolder); @@ -403,7 +423,7 @@ protected final long recyclePage( int rotatedIdPart = PageIO.getRotatedIdPart(pageAddr); if (rotatedIdPart != 0) { - recycled = PageIdUtils.link(pageId, rotatedIdPart > MAX_ITEMID_NUM ? 
1 : rotatedIdPart); + recycled = PageIdUtils.link(pageId, rotatedIdPart); PageIO.setRotatedIdPart(pageAddr, 0); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java index cb7b6dc7e2011..ad062a83d6a8e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java @@ -206,7 +206,8 @@ public class GridCacheOffheapManager extends IgniteCacheOffheapManagerImpl imple reuseListRoot.isAllocated(), diagnosticMgr.pageLockTracker().createPageLockTracker(reuseListName), ctx.kernalContext(), - pageListCacheLimit + pageListCacheLimit, + PageIdAllocator.FLAG_IDX ); RootPage metastoreRoot = metas.treeRoot; @@ -828,7 +829,7 @@ public static long writeSharedGroupCacheSizes(PageMemory pageMem, int grpId, boolean init = cntrsPageId == 0; if (init && !sizes.isEmpty()) - cntrsPageId = pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_DATA); + cntrsPageId = pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_AUX); long nextId = cntrsPageId; int written = 0; @@ -859,7 +860,7 @@ public static long writeSharedGroupCacheSizes(PageMemory pageMem, int grpId, if (written != items && (init = nextId == 0)) { //allocate new counters page - nextId = pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_DATA); + nextId = pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_AUX); partCntrIo.setNextCountersPageId(curAddr, nextId); } } @@ -1959,13 +1960,14 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException reuseRoot.isAllocated(), ctx.diagnostic().pageLockTracker().createPageLockTracker(freeListName), ctx.kernalContext(), - pageListCacheLimit + pageListCacheLimit, + PageIdAllocator.FLAG_AUX ) 
{ /** {@inheritDoc} */ @Override protected long allocatePageNoReuse() throws IgniteCheckedException { assert ctx.database().checkpointLockIsHeldByThread(); - return pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_DATA); + return pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_AUX); } }; @@ -1984,13 +1986,14 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException partMetastoreReuseListRoot.isAllocated(), ctx.diagnostic().pageLockTracker().createPageLockTracker(partitionMetaStoreName), ctx.kernalContext(), - pageListCacheLimit + pageListCacheLimit, + PageIdAllocator.FLAG_AUX ) { /** {@inheritDoc} */ @Override protected long allocatePageNoReuse() throws IgniteCheckedException { assert ctx.database().checkpointLockIsHeldByThread(); - return pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_DATA); + return pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_AUX); } }; @@ -2007,13 +2010,14 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException rowStore, treeRoot.pageId().pageId(), treeRoot.isAllocated(), - ctx.diagnostic().pageLockTracker().createPageLockTracker(dataTreeName) + ctx.diagnostic().pageLockTracker().createPageLockTracker(dataTreeName), + PageIdAllocator.FLAG_AUX ) { /** {@inheritDoc} */ @Override protected long allocatePageNoReuse() throws IgniteCheckedException { assert ctx.database().checkpointLockIsHeldByThread(); - return pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_DATA); + return pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_AUX); } }; @@ -2028,13 +2032,14 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException pendingTreeRoot.pageId().pageId(), freeList, pendingTreeRoot.isAllocated(), - ctx.diagnostic().pageLockTracker().createPageLockTracker(pendingEntriesTreeName) + ctx.diagnostic().pageLockTracker().createPageLockTracker(pendingEntriesTreeName), + PageIdAllocator.FLAG_AUX ) { /** {@inheritDoc} */ @Override protected 
long allocatePageNoReuse() throws IgniteCheckedException { assert ctx.database().checkpointLockIsHeldByThread(); - return pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_DATA); + return pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_AUX); } }; @@ -2185,15 +2190,15 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { io.initNewPage(pageAddr, partMetaId, pageMem.realPageSize(grpId)); - treeRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_DATA); - reuseListRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_DATA); - pendingTreeRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_DATA); - partMetaStoreReuseListRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_DATA); + treeRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_AUX); + reuseListRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_AUX); + pendingTreeRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_AUX); + partMetaStoreReuseListRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_AUX); - assert PageIdUtils.flag(treeRoot) == PageMemory.FLAG_DATA; - assert PageIdUtils.flag(reuseListRoot) == PageMemory.FLAG_DATA; - assert PageIdUtils.flag(pendingTreeRoot) == PageMemory.FLAG_DATA; - assert PageIdUtils.flag(partMetaStoreReuseListRoot) == PageMemory.FLAG_DATA; + assert PageIdUtils.flag(treeRoot) == PageMemory.FLAG_AUX; + assert PageIdUtils.flag(reuseListRoot) == PageMemory.FLAG_AUX; + assert PageIdUtils.flag(pendingTreeRoot) == PageMemory.FLAG_AUX; + assert PageIdUtils.flag(partMetaStoreReuseListRoot) == PageMemory.FLAG_AUX; io.setTreeRoot(pageAddr, treeRoot); io.setReuseListRoot(pageAddr, reuseListRoot); @@ -2228,8 +2233,8 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { ((PagePartitionMetaIOV3)io).upgradePage(pageAddr); - pendingTreeRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_DATA); - partMetaStoreReuseListRoot = pageMem.allocatePage(grpId, partId, 
PageMemory.FLAG_DATA); + pendingTreeRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_AUX); + partMetaStoreReuseListRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_AUX); io.setPendingTreeRoot(pageAddr, pendingTreeRoot); io.setPartitionMetaStoreReuseListRoot(pageAddr, partMetaStoreReuseListRoot); @@ -2247,7 +2252,7 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { partMetaStoreReuseListRoot = io.getPartitionMetaStoreReuseListRoot(pageAddr); if (partMetaStoreReuseListRoot == 0) { - partMetaStoreReuseListRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_DATA); + partMetaStoreReuseListRoot = pageMem.allocatePage(grpId, partId, PageMemory.FLAG_AUX); if (PageHandler.isWalDeltaRecordNeeded(pageMem, grpId, partMetaId, partMetaPage, wal, null)) { @@ -2259,19 +2264,23 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { } } - if (PageIdUtils.flag(treeRoot) != PageMemory.FLAG_DATA) + if (PageIdUtils.flag(treeRoot) != PageMemory.FLAG_AUX + && PageIdUtils.flag(treeRoot) != PageMemory.FLAG_DATA) throw new StorageException("Wrong tree root page id flag: treeRoot=" + U.hexLong(treeRoot) + ", part=" + partId + ", grpId=" + grpId); - if (PageIdUtils.flag(reuseListRoot) != PageMemory.FLAG_DATA) + if (PageIdUtils.flag(reuseListRoot) != PageMemory.FLAG_AUX + && PageIdUtils.flag(reuseListRoot) != PageMemory.FLAG_DATA) throw new StorageException("Wrong reuse list root page id flag: reuseListRoot=" + U.hexLong(reuseListRoot) + ", part=" + partId + ", grpId=" + grpId); - if (PageIdUtils.flag(pendingTreeRoot) != PageMemory.FLAG_DATA) + if (PageIdUtils.flag(pendingTreeRoot) != PageMemory.FLAG_AUX + && PageIdUtils.flag(pendingTreeRoot) != PageMemory.FLAG_DATA) throw new StorageException("Wrong pending tree root page id flag: reuseListRoot=" - + U.hexLong(reuseListRoot) + ", part=" + partId + ", grpId=" + grpId); + + U.hexLong(pendingTreeRoot) + ", part=" + partId + ", grpId=" + grpId); - if 
(PageIdUtils.flag(partMetaStoreReuseListRoot) != PageMemory.FLAG_DATA) + if (PageIdUtils.flag(partMetaStoreReuseListRoot) != PageMemory.FLAG_AUX + && PageIdUtils.flag(partMetaStoreReuseListRoot) != PageMemory.FLAG_DATA) throw new StorageException("Wrong partition meta store list root page id flag: partMetaStoreReuseListRoot=" + U.hexLong(partMetaStoreReuseListRoot) + ", part=" + partId + ", grpId=" + grpId); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java index 9eb02fbd994a9..bfadeb22d0863 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java @@ -55,6 +55,7 @@ import org.apache.ignite.internal.mem.IgniteOutOfMemoryException; import org.apache.ignite.internal.mem.file.MappedFileMemoryProvider; import org.apache.ignite.internal.mem.unsafe.UnsafeMemoryProvider; +import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.impl.PageMemoryNoStoreImpl; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; @@ -315,7 +316,8 @@ protected void initPageMemoryDataStructures(DataStorageConfiguration dbCfg) thro true, lsnr, cctx.kernalContext(), - null + null, + PageIdAllocator.FLAG_IDX ); freeListMap.put(memPlcCfg.getName(), freeList); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IndexStorageImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IndexStorageImpl.java index 1be0b973ccc65..94f7feb208015 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IndexStorageImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IndexStorageImpl.java @@ -24,6 +24,7 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.internal.pagemem.FullPageId; +import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.PageUtils; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; @@ -267,6 +268,7 @@ private MetaTree( reuseList, innerIos, leafIos, + PageIdAllocator.FLAG_IDX, failureProcessor, lockLsnr ); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreFactory.java index ae923592f4af3..53e9fe9c71197 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreFactory.java @@ -21,7 +21,6 @@ import java.nio.file.Path; import java.util.function.LongConsumer; import org.apache.ignite.IgniteCheckedException; -import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.store.PageStore; import org.apache.ignite.lang.IgniteOutClosure; @@ -32,7 +31,7 @@ public interface FilePageStoreFactory { /** * Creates instance of PageStore based on given file. * - * @param type Data type, can be {@link PageIdAllocator#FLAG_IDX} or {@link PageIdAllocator#FLAG_DATA}. + * @param type Data type, can be {@link PageStore#TYPE_IDX} or {@link PageStore#TYPE_DATA}. * @param file File Page store file. * @param allocatedTracker metrics updater. 
* @return page store @@ -46,7 +45,7 @@ default PageStore createPageStore(byte type, File file, LongConsumer allocatedTr /** * Creates instance of PageStore based on file path provider. * - * @param type Data type, can be {@link PageIdAllocator#FLAG_IDX} or {@link PageIdAllocator#FLAG_DATA} + * @param type Data type, can be {@link PageStore#TYPE_IDX} or {@link PageStore#TYPE_DATA} * @param pathProvider File Page store path provider. * @param allocatedTracker metrics updater * @return page store diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java index 7152b1e870c46..3f5f8a9359676 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java @@ -64,7 +64,6 @@ import org.apache.ignite.internal.client.util.GridConcurrentHashSet; import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageIdUtils; -import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager; import org.apache.ignite.internal.pagemem.store.PageStore; import org.apache.ignite.internal.pagemem.store.PageStoreCollection; @@ -741,7 +740,7 @@ private CacheStoreHolder initDir(File cacheWorkDir, PageStore idxStore = pageStoreFactory.createPageStore( - PageMemory.FLAG_IDX, + PageStore.TYPE_IDX, idxFile, allocatedTracker); @@ -752,7 +751,7 @@ private CacheStoreHolder initDir(File cacheWorkDir, PageStore partStore = pageStoreFactory.createPageStore( - PageMemory.FLAG_DATA, + PageStore.TYPE_DATA, () -> getPartitionFilePath(cacheWorkDir, p), allocatedTracker); diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java index b46417bb0ab48..7ccaf378874ec 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java @@ -50,6 +50,8 @@ import org.apache.ignite.internal.util.lang.GridCursor; import org.apache.ignite.internal.util.typedef.internal.U; +import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_DATA; + /** */ public abstract class AbstractFreeList extends PagesList implements FreeList, ReuseList { @@ -425,6 +427,7 @@ else if (putIsNeeded) * @param wal Write ahead log manager. * @param metaPageId Metadata page ID. * @param initNew {@code True} if new metadata should be initialized. + * @param pageFlag Default flag value for allocated pages. * @throws IgniteCheckedException If failed. 
*/ public AbstractFreeList( @@ -438,9 +441,10 @@ public AbstractFreeList( boolean initNew, PageLockListener lockLsnr, GridKernalContext ctx, - AtomicLong pageListCacheLimit + AtomicLong pageListCacheLimit, + byte pageFlag ) throws IgniteCheckedException { - super(cacheId, name, memPlc.pageMemory(), BUCKETS, wal, metaPageId, lockLsnr, ctx); + super(cacheId, name, memPlc.pageMemory(), BUCKETS, wal, metaPageId, lockLsnr, ctx, pageFlag); rmvRow = new RemoveRowHandler(cacheId == 0); @@ -565,9 +569,8 @@ private int bucket(int freeSpace, boolean allowReuse) { */ private long allocateDataPage(int part) throws IgniteCheckedException { assert part <= PageIdAllocator.MAX_PARTITION_ID; - assert part != PageIdAllocator.INDEX_PARTITION; - return pageMem.allocatePage(grpId, part, PageIdAllocator.FLAG_DATA); + return pageMem.allocatePage(grpId, part, FLAG_DATA); } /** {@inheritDoc} */ @@ -719,17 +722,23 @@ private long takePage(int size, T row, IoStatisticsHolder statHolder) throws Ign } if (pageId == 0L) { // Handle reuse bucket. - pageId = reuseList == this ? - takeEmptyPage(REUSE_BUCKET, row.ioVersions(), statHolder) : reuseList.takeRecycledPage(); + if (reuseList == this) + pageId = takeEmptyPage(REUSE_BUCKET, row.ioVersions(), statHolder); + else { + pageId = reuseList.takeRecycledPage(); + + if (pageId != 0) + pageId = reuseList.initRecycledPage(pageId, FLAG_DATA, row.ioVersions().latest()); + } } if (pageId == 0L) return 0; - if (PageIdUtils.tag(pageId) != PageIdAllocator.FLAG_DATA) // Page is taken from reuse bucket. - return initReusedPage(row, pageId, statHolder); - else // Page is taken from free space bucket. For in-memory mode partition must be changed. 
- return PageIdUtils.changePartitionId(pageId, row.partition()); + assert PageIdUtils.flag(pageId) == FLAG_DATA + : "rowVersions=" + row.ioVersions() + ", pageId=" + PageIdUtils.toDetailString(pageId); + + return PageIdUtils.changePartitionId(pageId, row.partition()); } /** @@ -911,6 +920,11 @@ public int emptyDataPages() { } } + /** {@inheritDoc} */ + @Override public long initRecycledPage(long pageId, byte flag, PageIO initIO) throws IgniteCheckedException { + return initRecycledPage0(pageId, flag, initIO); + } + /** {@inheritDoc} */ @Override public long recycledPagesCount() throws IgniteCheckedException { assert reuseList == this : "not allowed to be a reuse list"; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeList.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeList.java index a4a4363597391..fdf50c9a028c7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeList.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeList.java @@ -41,6 +41,7 @@ public class CacheFreeList extends AbstractFreeList { * @param wal Wal. * @param metaPageId Meta page id. * @param initNew Initialize new. + * @param pageFlag Default flag value for allocated pages. 
*/ public CacheFreeList( int cacheId, @@ -52,7 +53,8 @@ public CacheFreeList( boolean initNew, PageLockListener lockLsnr, GridKernalContext ctx, - AtomicLong pageListCacheLimit + AtomicLong pageListCacheLimit, + byte pageFlag ) throws IgniteCheckedException { super( cacheId, @@ -65,7 +67,8 @@ public CacheFreeList( initNew, lockLsnr, ctx, - pageListCacheLimit + pageListCacheLimit, + pageFlag ); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/PagesList.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/PagesList.java index 530690eac0aaf..bf6650066d578 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/PagesList.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/PagesList.java @@ -32,6 +32,7 @@ import org.apache.ignite.internal.managers.communication.GridIoPolicy; import org.apache.ignite.internal.metric.IoStatisticsHolder; import org.apache.ignite.internal.metric.IoStatisticsHolderNoOp; +import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; @@ -43,6 +44,7 @@ import org.apache.ignite.internal.pagemem.wal.record.delta.PagesListRemovePageRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.PagesListSetNextRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.PagesListSetPreviousRecord; +import org.apache.ignite.internal.pagemem.wal.record.delta.RecycleRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.RotatedIdPartRecord; import org.apache.ignite.internal.processors.cache.persistence.DataStructure; import org.apache.ignite.internal.processors.cache.persistence.freelist.io.PagesListMetaIO; @@ -65,7 +67,12 @@ import static 
java.lang.Boolean.TRUE; import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_DATA; import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_IDX; +import static org.apache.ignite.internal.pagemem.PageIdAllocator.INDEX_PARTITION; import static org.apache.ignite.internal.pagemem.PageIdUtils.MAX_ITEMID_NUM; +import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.T_DATA; +import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.T_DATA_METASTORAGE; +import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.T_DATA_PART; +import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.T_META; import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.getPageId; import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver.DEFAULT_PAGE_IO_RESOLVER; @@ -201,6 +208,7 @@ private final class PutBucket extends PageHandler { * @param buckets Number of buckets. * @param wal Write ahead log manager. * @param metaPageId Metadata page ID. + * @param pageFlag Default flag value for allocated pages. 
*/ protected PagesList( int cacheId, @@ -210,9 +218,10 @@ protected PagesList( IgniteWriteAheadLogManager wal, long metaPageId, PageLockListener lockLsnr, - GridKernalContext ctx + GridKernalContext ctx, + byte pageFlag ) { - super(cacheId, null, pageMem, wal, lockLsnr, DEFAULT_PAGE_IO_RESOLVER); + super(cacheId, null, pageMem, wal, lockLsnr, DEFAULT_PAGE_IO_RESOLVER, pageFlag); this.name = name; this.buckets = buckets; @@ -1305,6 +1314,8 @@ protected long takeEmptyPage(int bucket, @Nullable IOVersions initIoVers, ", pageId=" + pageId + ']'); } + assert !isReuseBucket(bucket) : "reuse bucket detected"; + return pageId; } @@ -1372,7 +1383,15 @@ protected long takeEmptyPage(int bucket, @Nullable IOVersions initIoVers, PageIdUtils.itemId(pageId) > 0 && PageIdUtils.itemId(pageId) <= MAX_ITEMID_NUM : "Incorrectly recycled pageId in reuse bucket: " + U.hexLong(pageId); - dataPageId = pageId; + if (isReuseBucket(bucket)) { + byte flag = getFlag(initIoVers); + + PageIO initIO = initIoVers == null ? null : initIoVers.latest(); + + dataPageId = initRecycledPage0(pageId, flag, initIO); + } + else + dataPageId = pageId; if (io.isEmpty(tailAddr)) { long prevId = io.getPreviousId(tailAddr); @@ -1410,12 +1429,11 @@ protected long takeEmptyPage(int bucket, @Nullable IOVersions initIoVers, decrementBucketSize(bucket); - if (initIoVers != null) { - int partId = PageIdUtils.partId(tailId); + byte flag = getFlag(initIoVers); - dataPageId = initReusedPage(tailId, tailPage, tailAddr, partId, FLAG_DATA, initIoVers.latest()); - } else - dataPageId = recyclePage(tailId, tailPage, tailAddr, null); + PageIO pageIO = initIoVers != null ? 
initIoVers.latest() : null; + + dataPageId = initReusedPage(tailId, tailPage, tailAddr, PageIdUtils.partId(tailId), flag, pageIO); dirty = true; } @@ -1450,7 +1468,56 @@ protected long takeEmptyPage(int bucket, @Nullable IOVersions initIoVers, } /** - * Reused page must obtain correctly assembled page id, then initialized by proper {@link PageIO} instance and + * @param initIoVers Optional IO versions list that will be used later to init the page. + * @return {@link PageIdAllocator#FLAG_DATA} for cache group metas and data pages, + * {@link #pageFlag} otherwise. + */ + private byte getFlag(IOVersions initIoVers) { + if (initIoVers != null) { + PageIO pageIO = initIoVers.latest(); + + switch (pageIO.getType()) { + case T_META: + case T_DATA: + case T_DATA_PART: + case T_DATA_METASTORAGE: + return FLAG_DATA; + } + } + + return pageFlag; + } + + /** + * Create new page id and update page content accordingly if it's necessary. + * + * @param pageId Id of the recycled page from reuse bucket. + * @param flag New flag for the page. + * @return New page id. + * @throws IgniteCheckedException If failed. + * + * @see PagesList#initReusedPage(long, long, long, int, byte, PageIO) + */ + protected long initRecycledPage0(long pageId, byte flag, PageIO initIO) throws IgniteCheckedException { + long page = pageMem.acquirePage(grpId, pageId); + + try { + long pageAddr = pageMem.writeLock(grpId, pageId, page); + + try { + return initReusedPage(pageId, page, pageAddr, PageIdUtils.partId(pageId), flag, initIO); + } + finally { + pageMem.writeUnlock(grpId, pageId, page, null, true); + } + } + finally { + pageMem.releasePage(grpId, pageId, page); + } + } + + /** + * Reused page must obtain correctly assembled page id, then initialized by proper {@link PageIO} instance and + * non-zero {@code itemId} of reused page id must be saved into special place. + * + * @param reusedPageId Reused page id. 
@@ -1464,30 +1531,47 @@ protected long takeEmptyPage(int bucket, @Nullable IOVersions initIoVers, */ protected final long initReusedPage(long reusedPageId, long reusedPage, long reusedPageAddr, int partId, byte flag, PageIO initIo) throws IgniteCheckedException { + if (flag == FLAG_IDX) + partId = INDEX_PARTITION; long newPageId = PageIdUtils.pageId(partId, flag, PageIdUtils.pageIndex(reusedPageId)); - initIo.initNewPage(reusedPageAddr, newPageId, pageSize()); - boolean needWalDeltaRecord = needWalDeltaRecord(reusedPageId, reusedPage, null); - if (needWalDeltaRecord) { - assert PageIdUtils.partId(reusedPageId) == PageIdUtils.partId(newPageId) : - "Partition consistency failure: " + - "newPageId=" + Long.toHexString(newPageId) + " (newPartId: " + PageIdUtils.partId(newPageId) + ") " + - "reusedPageId=" + Long.toHexString(reusedPageId) + " (partId: " + PageIdUtils.partId(reusedPageId) + ")"; + if (initIo != null) { + initIo.initNewPage(reusedPageAddr, newPageId, pageSize()); + + if (needWalDeltaRecord) { + assert PageIdUtils.partId(reusedPageId) == PageIdUtils.partId(newPageId) : + "Partition consistency failure: " + + "newPageId=" + Long.toHexString(newPageId) + " (newPartId: " + PageIdUtils.partId(newPageId) + ") " + + "reusedPageId=" + Long.toHexString(reusedPageId) + " (partId: " + PageIdUtils.partId(reusedPageId) + ")"; - wal.log(new InitNewPageRecord(grpId, reusedPageId, initIo.getType(), - initIo.getVersion(), newPageId)); + wal.log(new InitNewPageRecord(grpId, reusedPageId, initIo.getType(), + initIo.getVersion(), newPageId)); + } } int itemId = PageIdUtils.itemId(reusedPageId); if (itemId != 0) { - PageIO.setRotatedIdPart(reusedPageAddr, itemId); + if (flag == FLAG_DATA) { + PageIO.setRotatedIdPart(reusedPageAddr, itemId); + + if (needWalDeltaRecord) + wal.log(new RotatedIdPartRecord(grpId, newPageId, itemId)); + } + else + newPageId = PageIdUtils.link(newPageId, itemId); + } + + long storedPageId = getPageId(reusedPageAddr); + + if (storedPageId != 
newPageId) { + PageIO.setPageId(reusedPageAddr, newPageId); if (needWalDeltaRecord) - wal.log(new RotatedIdPartRecord(grpId, newPageId, itemId)); + wal.log(new RecycleRecord(grpId, storedPageId, newPageId)); } return newPageId; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java index b88cad88f6a21..2c20a02648733 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java @@ -72,7 +72,7 @@ import org.apache.ignite.marshaller.jdk.JdkMarshaller; import org.jetbrains.annotations.NotNull; -import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_DATA; +import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_AUX; import static org.apache.ignite.internal.pagemem.PageIdAllocator.OLD_METASTORE_PARTITION; /** @@ -266,10 +266,11 @@ else if (!readOnly || getOrAllocateMetas(partId = PageIdAllocator.OLD_METASTORE_ reuseListRoot.isAllocated(), diagnosticMgr.pageLockTracker().createPageLockTracker(freeListName), cctx.kernalContext(), - null + null, + FLAG_AUX ) { @Override protected long allocatePageNoReuse() throws IgniteCheckedException { - return pageMem.allocatePage(grpId, partId, FLAG_DATA); + return pageMem.allocatePage(grpId, partId, FLAG_AUX); } }; @@ -487,11 +488,13 @@ public void removeData(String key) throws IgniteCheckedException { /** */ private void checkRootsPageIdFlag(long treeRoot, long reuseListRoot) throws StorageException { - if (PageIdUtils.flag(treeRoot) != PageMemory.FLAG_DATA) + if (PageIdUtils.flag(treeRoot) != PageMemory.FLAG_AUX && + PageIdUtils.flag(treeRoot) != PageMemory.FLAG_DATA) throw new StorageException("Wrong tree root page id flag: treeRoot=" + 
U.hexLong(treeRoot) + ", METASTORAGE_CACHE_ID=" + METASTORAGE_CACHE_ID); - if (PageIdUtils.flag(reuseListRoot) != PageMemory.FLAG_DATA) + if (PageIdUtils.flag(reuseListRoot) != PageMemory.FLAG_AUX && + PageIdUtils.flag(reuseListRoot) != PageMemory.FLAG_DATA) throw new StorageException("Wrong reuse list root page id flag: reuseListRoot=" + U.hexLong(reuseListRoot) + ", METASTORAGE_CACHE_ID=" + METASTORAGE_CACHE_ID); } @@ -550,11 +553,11 @@ private boolean getOrAllocateMetas(int partId) throws IgniteCheckedException { //MetaStorage never encrypted so realPageSize == pageSize. io.initNewPage(pageAddr, partMetaId, pageMem.pageSize()); - treeRoot = pageMem.allocatePage(METASTORAGE_CACHE_ID, partId, PageMemory.FLAG_DATA); - reuseListRoot = pageMem.allocatePage(METASTORAGE_CACHE_ID, partId, PageMemory.FLAG_DATA); + treeRoot = pageMem.allocatePage(METASTORAGE_CACHE_ID, partId, PageMemory.FLAG_AUX); + reuseListRoot = pageMem.allocatePage(METASTORAGE_CACHE_ID, partId, PageMemory.FLAG_AUX); - assert PageIdUtils.flag(treeRoot) == PageMemory.FLAG_DATA; - assert PageIdUtils.flag(reuseListRoot) == PageMemory.FLAG_DATA; + assert PageIdUtils.flag(treeRoot) == PageMemory.FLAG_AUX; + assert PageIdUtils.flag(reuseListRoot) == PageMemory.FLAG_AUX; io.setTreeRoot(pageAddr, treeRoot); io.setReuseListRoot(pageAddr, reuseListRoot); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetastorageTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetastorageTree.java index afce4e21507a8..7b1bb6e88b676 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetastorageTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetastorageTree.java @@ -28,7 +28,7 @@ import org.apache.ignite.internal.processors.failure.FailureProcessor; import org.jetbrains.annotations.Nullable; -import static 
org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_DATA; +import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_AUX; /** * @@ -79,6 +79,7 @@ public MetastorageTree( reuseList, MetastorageBPlusIO.INNER_IO_VERSIONS, MetastorageBPlusIO.LEAF_IO_VERSIONS, + FLAG_AUX, failureProcessor, lockLsnr ); @@ -115,6 +116,6 @@ public MetastorageRowStore rowStore() { /** {@inheritDoc} */ @Override protected long allocatePageNoReuse() throws IgniteCheckedException { - return pageMem.allocatePage(grpId, partId, FLAG_DATA); + return pageMem.allocatePage(grpId, partId, FLAG_AUX); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/migration/UpgradePendingTreeToPerPartitionTask.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/migration/UpgradePendingTreeToPerPartitionTask.java index 4499980a38461..6d06673a34067 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/migration/UpgradePendingTreeToPerPartitionTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/migration/UpgradePendingTreeToPerPartitionTask.java @@ -22,6 +22,7 @@ import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.processors.cache.CacheGroupContext; @@ -150,7 +151,8 @@ private void processCacheGroup(CacheGroupContext grp) throws IgniteCheckedExcept pendingRootPage.pageId().pageId(), ((GridCacheOffheapManager)grp.offheap()).reuseListForIndex(null), false, - null + null, + PageIdAllocator.FLAG_IDX ); } finally { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java index 4872c6d8eeca9..c6d4d87f4c795 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java @@ -514,7 +514,7 @@ else if (throttlingPlc == ThrottlingPolicy.CHECKPOINT_BUFFER_ONLY) /** {@inheritDoc} */ @Override public long allocatePage(int grpId, int partId, byte flags) throws IgniteCheckedException { - assert flags == PageIdAllocator.FLAG_DATA && partId <= PageIdAllocator.MAX_PARTITION_ID || + assert flags != PageIdAllocator.FLAG_IDX && partId <= PageIdAllocator.MAX_PARTITION_ID || flags == PageIdAllocator.FLAG_IDX && partId == PageIdAllocator.INDEX_PARTITION : "flags = " + flags + ", partId = " + partId; @@ -536,12 +536,15 @@ else if (throttlingPlc == ThrottlingPolicy.CHECKPOINT_BUFFER_ONLY) DelayedDirtyPageStoreWrite delayedWriter = delayedPageReplacementTracker != null ? 
delayedPageReplacementTracker.delayedPageWrite() : null; - FullPageId fullId = new FullPageId(pageId, grpId); - seg.writeLock().lock(); - boolean isTrackingPage = - changeTracker != null && trackingIO.trackingPageFor(pageId, realPageSize(grpId)) == pageId; + boolean isTrackingPage = changeTracker != null && + PageIdUtils.pageIndex(trackingIO.trackingPageFor(pageId, realPageSize(grpId))) == PageIdUtils.pageIndex(pageId); + + if (isTrackingPage && PageIdUtils.flag(pageId) == PageIdAllocator.FLAG_AUX) + pageId = PageIdUtils.pageId(PageIdUtils.partId(pageId), PageIdAllocator.FLAG_DATA, PageIdUtils.pageIndex(pageId)); + + FullPageId fullId = new FullPageId(pageId, grpId); try { long relPtr = seg.loadedPages.get( diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/partstate/GroupPartitionId.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/partstate/GroupPartitionId.java index c236827634966..275fb551d6e02 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/partstate/GroupPartitionId.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/partstate/GroupPartitionId.java @@ -20,7 +20,7 @@ import org.apache.ignite.internal.pagemem.FullPageId; import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageIdUtils; -import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.pagemem.store.PageStore; import org.apache.ignite.internal.util.typedef.internal.S; import org.jetbrains.annotations.NotNull; @@ -53,7 +53,15 @@ public GroupPartitionId(final int grpId, final int partId) { * @return flag to be used for partition */ public static byte getFlagByPartId(final int partId) { - return partId == PageIdAllocator.INDEX_PARTITION ? PageMemory.FLAG_IDX : PageMemory.FLAG_DATA; + return partId == PageIdAllocator.INDEX_PARTITION ? 
PageIdAllocator.FLAG_IDX : PageIdAllocator.FLAG_DATA; + } + + /** + * @param partId Partition ID. + * @return page store type to be used for partition + */ + public static byte getTypeByPartId(final int partId) { + return partId == PageIdAllocator.INDEX_PARTITION ? PageStore.TYPE_IDX : PageStore.TYPE_DATA; } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/partstorage/PartitionMetaStorageImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/partstorage/PartitionMetaStorageImpl.java index 0e9062a09b895..acf83342212b4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/partstorage/PartitionMetaStorageImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/partstorage/PartitionMetaStorageImpl.java @@ -49,6 +49,7 @@ public class PartitionMetaStorageImpl extends AbstractFreeLi * @param wal Wal. * @param metaPageId Meta page id. * @param initNew Initialize new. + * @param pageFlag Default flag value for allocated pages. 
*/ public PartitionMetaStorageImpl( int cacheId, String name, @@ -60,9 +61,10 @@ public PartitionMetaStorageImpl( boolean initNew, PageLockListener lsnr, GridKernalContext ctx, - AtomicLong pageListCacheLimit + AtomicLong pageListCacheLimit, + byte pageFlag ) throws IgniteCheckedException { - super(cacheId, name, memMetrics, memPlc, reuseList, wal, metaPageId, initNew, lsnr, ctx, pageListCacheLimit); + super(cacheId, name, memMetrics, memPlc, reuseList, wal, metaPageId, initNew, lsnr, ctx, pageListCacheLimit, pageFlag); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/snapshot/IgniteSnapshotManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/snapshot/IgniteSnapshotManager.java index 681485de1f54e..1d9f385fec536 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/snapshot/IgniteSnapshotManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/snapshot/IgniteSnapshotManager.java @@ -132,7 +132,7 @@ import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.PART_FILE_TEMPLATE; import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.getPartitionFile; import static org.apache.ignite.internal.processors.cache.persistence.filename.PdsConsistentIdProcessor.DB_DEFAULT_FOLDER; -import static org.apache.ignite.internal.processors.cache.persistence.partstate.GroupPartitionId.getFlagByPartId; +import static org.apache.ignite.internal.processors.cache.persistence.partstate.GroupPartitionId.getTypeByPartId; import static org.apache.ignite.internal.util.IgniteUtils.isLocalNodeCoordinator; import static org.apache.ignite.internal.util.distributed.DistributedProcess.DistributedProcessType.END_SNAPSHOT; import static org.apache.ignite.internal.util.distributed.DistributedProcess.DistributedProcessType.START_SNAPSHOT; @@ 
-1206,7 +1206,7 @@ public LocalSnapshotSender(String snpName) { try (FileIO fileIo = ioFactory.create(delta, READ); FilePageStore pageStore = (FilePageStore)storeFactory .apply(pair.getGroupId(), false) - .createPageStore(getFlagByPartId(pair.getPartitionId()), + .createPageStore(getTypeByPartId(pair.getPartitionId()), snpPart::toPath, val -> {}) ) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java index 5e67b7c591423..ca92a71c0800b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java @@ -754,6 +754,7 @@ private class InitRoot extends PageHandler { * @param reuseList Reuse list. * @param innerIos Inner IO versions. * @param leafIos Leaf IO versions. + * @param pageFlag Default flag value for allocated pages. * @param failureProcessor if the tree is corrupted. * @throws IgniteCheckedException If failed. */ @@ -768,6 +769,7 @@ protected BPlusTree( ReuseList reuseList, IOVersions> innerIos, IOVersions> leafIos, + byte pageFlag, @Nullable FailureProcessor failureProcessor, @Nullable PageLockListener lockLsnr ) throws IgniteCheckedException { @@ -780,6 +782,7 @@ protected BPlusTree( globalRmvId, metaPageId, reuseList, + pageFlag, failureProcessor, lockLsnr, DEFAULT_PAGE_IO_RESOLVER @@ -797,6 +800,7 @@ protected BPlusTree( * @param globalRmvId Remove ID. * @param metaPageId Meta page ID. * @param reuseList Reuse list. + * @param pageFlag Default flag value for allocated pages. * @param failureProcessor if the tree is corrupted. * @throws IgniteCheckedException If failed. 
*/ @@ -809,11 +813,12 @@ protected BPlusTree( AtomicLong globalRmvId, long metaPageId, ReuseList reuseList, + byte pageFlag, @Nullable FailureProcessor failureProcessor, @Nullable PageLockListener lsnr, PageIoResolver pageIoRslvr - ) { - super(cacheGrpId, grpName, pageMem, wal, lsnr, pageIoRslvr); + ) throws IgniteCheckedException { + super(cacheGrpId, grpName, pageMem, wal, lsnr, pageIoRslvr, pageFlag); assert !F.isEmpty(name); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/TrackingPageIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/TrackingPageIO.java index b2f52a5860a2b..5fa1cddd7b752 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/TrackingPageIO.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/TrackingPageIO.java @@ -19,6 +19,7 @@ import java.nio.ByteBuffer; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.processors.cache.persistence.snapshot.TrackingPageIsCorruptedException; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler; @@ -356,7 +357,11 @@ public long trackingPageFor(long pageId, int pageSize) { int pageIdx = ((PageIdUtils.pageIndex(pageId) - COUNT_OF_EXTRA_PAGE) / countOfPageToTrack(pageSize)) * countOfPageToTrack(pageSize) + COUNT_OF_EXTRA_PAGE; - long trackingPageId = PageIdUtils.pageId(PageIdUtils.partId(pageId), PageIdUtils.flag(pageId), pageIdx); + byte flag = PageIdUtils.partId(pageId) == PageIdAllocator.INDEX_PARTITION ? 
+ PageIdAllocator.FLAG_IDX : + PageIdAllocator.FLAG_DATA; + + long trackingPageId = PageIdUtils.pageId(PageIdUtils.partId(pageId), flag, pageIdx); assert PageIdUtils.pageIndex(trackingPageId) <= PageIdUtils.pageIndex(pageId); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/reuse/ReuseList.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/reuse/ReuseList.java index aaab186618bc2..d2a1ba057b614 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/reuse/ReuseList.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/reuse/ReuseList.java @@ -18,6 +18,10 @@ package org.apache.ignite.internal.processors.cache.persistence.tree.reuse; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.pagemem.FullPageId; +import org.apache.ignite.internal.pagemem.PageIdAllocator; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; +import org.jetbrains.annotations.Nullable; /** * Reuse list. @@ -40,4 +44,18 @@ public interface ReuseList { * @throws IgniteCheckedException If failed. */ public long recycledPagesCount() throws IgniteCheckedException; + + /** + * Converts recycled page id back to a usable id. Might modify page content as well if flag is changing. + * + * @param pageId Id of the recycled page. + * @param flag Flag value for the page. One of {@link PageIdAllocator#FLAG_DATA}, {@link PageIdAllocator#FLAG_IDX} + * or {@link PageIdAllocator#FLAG_AUX}. + * @param initIO Page IO to reinit reused page. + * @return Updated page id. + * @throws IgniteCheckedException If failed. 
+ * + * @see FullPageId + */ + long initRecycledPage(long pageId, byte flag, @Nullable PageIO initIO) throws IgniteCheckedException; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/reuse/ReuseListImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/reuse/ReuseListImpl.java index cf3897f470336..5d2789be305c3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/reuse/ReuseListImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/reuse/ReuseListImpl.java @@ -25,6 +25,7 @@ import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.processors.cache.persistence.freelist.PagesList; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageLockListener; /** @@ -48,6 +49,7 @@ public class ReuseListImpl extends PagesList implements ReuseList { * @param wal Write ahead log manager. * @param metaPageId Metadata page ID. * @param initNew {@code True} if new metadata should be initialized. + * @param pageFlag Default flag value for allocated pages. * @throws IgniteCheckedException If failed. 
*/ public ReuseListImpl( @@ -59,7 +61,8 @@ public ReuseListImpl( boolean initNew, PageLockListener lockLsnr, GridKernalContext ctx, - AtomicLong pageListCacheLimit + AtomicLong pageListCacheLimit, + byte pageFlag ) throws IgniteCheckedException { super( cacheId, @@ -69,7 +72,8 @@ public ReuseListImpl( wal, metaPageId, lockLsnr, - ctx + ctx, + pageFlag ); bucketCache = new PagesCache(pageListCacheLimit); @@ -96,6 +100,11 @@ public ReuseListImpl( return takeEmptyPage(0, null, IoStatisticsHolderNoOp.INSTANCE); } + /** {@inheritDoc} */ + @Override public long initRecycledPage(long pageId, byte flag, PageIO initIO) throws IgniteCheckedException { + return initRecycledPage0(pageId, flag, initIO); + } + /** {@inheritDoc} */ @Override public long recycledPagesCount() throws IgniteCheckedException { return storedPagesCount(0); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java index e9d652c6b1be1..e9a88bab0eb63 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java @@ -84,6 +84,7 @@ public class CacheDataTree extends BPlusTree { * @param rowStore Row store. * @param metaPageId Meta page ID. * @param initNew Initialize new index. + * @param pageFlag Default flag value for allocated pages. * @throws IgniteCheckedException If failed. 
*/ public CacheDataTree( @@ -93,7 +94,8 @@ public CacheDataTree( CacheDataRowStore rowStore, long metaPageId, boolean initNew, - PageLockListener lockLsnr + PageLockListener lockLsnr, + byte pageFlag ) throws IgniteCheckedException { super( name, @@ -106,6 +108,7 @@ public CacheDataTree( reuseList, innerIO(grp), leafIO(grp), + pageFlag, grp.shared().kernalContext().failure(), lockLsnr ); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingEntriesTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingEntriesTree.java index 9cfb2c6c5032d..6070aca145b4e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingEntriesTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingEntriesTree.java @@ -43,6 +43,7 @@ public class PendingEntriesTree extends BPlusTree { * @param metaPageId Meta page ID. * @param reuseList Reuse list. * @param initNew Initialize new index. + * @param pageFlag Default flag value for allocated pages. * @throws IgniteCheckedException If failed. */ public PendingEntriesTree( @@ -52,7 +53,8 @@ public PendingEntriesTree( long metaPageId, ReuseList reuseList, boolean initNew, - PageLockListener lockLsnr + PageLockListener lockLsnr, + byte pageFlag ) throws IgniteCheckedException { super( name, @@ -65,6 +67,7 @@ public PendingEntriesTree( reuseList, grp.sharedGroup() ? CacheIdAwarePendingEntryInnerIO.VERSIONS : PendingEntryInnerIO.VERSIONS, grp.sharedGroup() ? 
CacheIdAwarePendingEntryLeafIO.VERSIONS : PendingEntryLeafIO.VERSIONS, + pageFlag, grp.shared().kernalContext().failure(), lockLsnr ); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/verify/IdleVerifyUtility.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/verify/IdleVerifyUtility.java index b93c12273fc61..c2ba3b166e476 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/verify/IdleVerifyUtility.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/verify/IdleVerifyUtility.java @@ -25,7 +25,6 @@ import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.pagemem.PageIdAllocator; @@ -43,6 +42,10 @@ import org.apache.ignite.lang.IgniteInClosure; import org.jetbrains.annotations.Nullable; +import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_AUX; +import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_DATA; +import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_IDX; + /** * Utility class for idle verify command. */ @@ -75,7 +78,8 @@ public static void checkPartitionsPageCrcSum( * @param pageStore Page store. * @param grpCtx Passed cache group context. * @param partId Partition id. - * @param pageType Page type. Possible types {@link PageIdAllocator#FLAG_DATA}, {@link PageIdAllocator#FLAG_IDX}. + * @param pageType Page type. Possible types {@link PageIdAllocator#FLAG_DATA}, {@link PageIdAllocator#FLAG_IDX} + * and {@link PageIdAllocator#FLAG_AUX}. * @throws IgniteCheckedException If reading page failed. * @throws GridNotIdleException If cluster not idle. 
*/ @@ -83,11 +87,11 @@ public static void checkPartitionsPageCrcSum( FilePageStore pageStore, CacheGroupContext grpCtx, int partId, - byte pageType + @Deprecated byte pageType ) throws IgniteCheckedException, GridNotIdleException { - assert pageType == PageIdAllocator.FLAG_DATA || pageType == PageIdAllocator.FLAG_IDX : pageType; + assert pageType == FLAG_DATA || pageType == FLAG_IDX || pageType == FLAG_AUX : pageType; - long pageId = PageIdUtils.pageId(partId, pageType, 0); + long pageId = PageIdUtils.pageId(partId, (byte)0, 0); ByteBuffer buf = ByteBuffer.allocateDirect(grpCtx.dataRegion().pageMemory().pageSize()); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/metric/IoStatisticsMetricsLocalMXBeanImplSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/metric/IoStatisticsMetricsLocalMXBeanImplSelfTest.java index 9f8a32408c9cf..38a743a4b8a28 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/metric/IoStatisticsMetricsLocalMXBeanImplSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/metric/IoStatisticsMetricsLocalMXBeanImplSelfTest.java @@ -123,7 +123,7 @@ public void testCacheBasic() throws Exception { long cacheLogicalReadsCnt = mreg.findMetric(LOGICAL_READS).value(); - assertEquals(cnt, cacheLogicalReadsCnt); + assertEquals(cnt - 1, cacheLogicalReadsCnt); // 1 is for reuse bucket stripe. 
long cachePhysicalReadsCnt = mreg.findMetric(PHYSICAL_READS).value(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/pagemem/impl/PageIdUtilsSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/pagemem/impl/PageIdUtilsSelfTest.java index 825c8678aa825..087b20cd0d957 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/pagemem/impl/PageIdUtilsSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/pagemem/impl/PageIdUtilsSelfTest.java @@ -18,7 +18,7 @@ package org.apache.ignite.internal.pagemem.impl; import java.util.Random; - +import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; @@ -99,13 +99,15 @@ public void testOffsetExtraction() throws Exception { @Test public void testPageIdFromLink() throws Exception { assertEquals(0x00FFFFFFFFFFFFFFL, PageIdUtils.pageId(0x00FFFFFFFFFFFFFFL)); - assertEquals(0x00FFFFFFFFFFFFFFL, PageIdUtils.pageId(0x10FFFFFFFFFFFFFFL)); - assertEquals(0x00FFFFFFFFFFFFFFL, PageIdUtils.pageId(0x01FFFFFFFFFFFFFFL)); - assertEquals(0x00FFFFFFFFFFFFFFL, PageIdUtils.pageId(0x11FFFFFFFFFFFFFFL)); - assertEquals(0x00FFFFFFFFFFFFFFL, PageIdUtils.pageId(0x80FFFFFFFFFFFFFFL)); - assertEquals(0x00FFFFFFFFFFFFFFL, PageIdUtils.pageId(0x88FFFFFFFFFFFFFFL)); - assertEquals(0x00FFFFFFFFFFFFFFL, PageIdUtils.pageId(0x08FFFFFFFFFFFFFFL)); - assertEquals(0x00FFFFFFFFFFFFFFL, PageIdUtils.pageId(0xFFFFFFFFFFFFFFFFL)); + + assertEquals(0x0001FFFFFFFFFFFFL, PageIdUtils.pageId(0x0001FFFFFFFFFFFFL)); + assertEquals(0x0001FFFFFFFFFFFFL, PageIdUtils.pageId(0x1001FFFFFFFFFFFFL)); + assertEquals(0x0001FFFFFFFFFFFFL, PageIdUtils.pageId(0x0101FFFFFFFFFFFFL)); + assertEquals(0x0001FFFFFFFFFFFFL, PageIdUtils.pageId(0x1101FFFFFFFFFFFFL)); + assertEquals(0x0001FFFFFFFFFFFFL, PageIdUtils.pageId(0x8001FFFFFFFFFFFFL)); + 
assertEquals(0x0001FFFFFFFFFFFFL, PageIdUtils.pageId(0x8801FFFFFFFFFFFFL)); + assertEquals(0x0001FFFFFFFFFFFFL, PageIdUtils.pageId(0x0801FFFFFFFFFFFFL)); + assertEquals(0x0001FFFFFFFFFFFFL, PageIdUtils.pageId(0xFF01FFFFFFFFFFFFL)); assertEquals(0x0002FFFFFFFFFFFFL, PageIdUtils.pageId(0x0002FFFFFFFFFFFFL)); assertEquals(0x1002FFFFFFFFFFFFL, PageIdUtils.pageId(0x1002FFFFFFFFFFFFL)); @@ -116,12 +118,21 @@ public void testPageIdFromLink() throws Exception { assertEquals(0x0802FFFFFFFFFFFFL, PageIdUtils.pageId(0x0802FFFFFFFFFFFFL)); assertEquals(0xFF02FFFFFFFFFFFFL, PageIdUtils.pageId(0xFF02FFFFFFFFFFFFL)); - assertEquals(0L, PageIdUtils.pageId(0x0000000000000000L)); - assertEquals(0L, PageIdUtils.pageId(0x1000000000000000L)); - assertEquals(0L, PageIdUtils.pageId(0x0100000000000000L)); - assertEquals(0L, PageIdUtils.pageId(0x8000000000000000L)); - assertEquals(0L, PageIdUtils.pageId(0x0800000000000000L)); - assertEquals(0L, PageIdUtils.pageId(0xFF00000000000000L)); + assertEquals(0x0004FFFFFFFFFFFFL, PageIdUtils.pageId(0x0004FFFFFFFFFFFFL)); + assertEquals(0x1004FFFFFFFFFFFFL, PageIdUtils.pageId(0x1004FFFFFFFFFFFFL)); + assertEquals(0x0104FFFFFFFFFFFFL, PageIdUtils.pageId(0x0104FFFFFFFFFFFFL)); + assertEquals(0x1104FFFFFFFFFFFFL, PageIdUtils.pageId(0x1104FFFFFFFFFFFFL)); + assertEquals(0x8004FFFFFFFFFFFFL, PageIdUtils.pageId(0x8004FFFFFFFFFFFFL)); + assertEquals(0x8804FFFFFFFFFFFFL, PageIdUtils.pageId(0x8804FFFFFFFFFFFFL)); + assertEquals(0x0804FFFFFFFFFFFFL, PageIdUtils.pageId(0x0804FFFFFFFFFFFFL)); + assertEquals(0xFF04FFFFFFFFFFFFL, PageIdUtils.pageId(0xFF04FFFFFFFFFFFFL)); + + assertEquals(0x0000000000000000L, PageIdUtils.pageId(0x0000000000000000L)); + assertEquals(0x1000000000000000L, PageIdUtils.pageId(0x1000000000000000L)); + assertEquals(0x0100000000000000L, PageIdUtils.pageId(0x0100000000000000L)); + assertEquals(0x8000000000000000L, PageIdUtils.pageId(0x8000000000000000L)); + assertEquals(0x0800000000000000L, PageIdUtils.pageId(0x0800000000000000L)); + 
assertEquals(0xFF00000000000000L, PageIdUtils.pageId(0xFF00000000000000L)); } /** @@ -136,7 +147,7 @@ public void testRandomIds() throws Exception { int partId = rnd.nextInt(PageIdUtils.MAX_PART_ID + 1); int pageNum = rnd.nextInt(); - long pageId = PageIdUtils.pageId(partId, (byte) 0, pageNum); + long pageId = PageIdUtils.pageId(partId, PageIdAllocator.FLAG_DATA, pageNum); String msg = "For values [offset=" + U.hexLong(off) + ", fileId=" + U.hexLong(partId) + ", pageNum=" + U.hexLong(pageNum) + ']'; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/PendingTreeCorruptionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/PendingTreeCorruptionTest.java new file mode 100644 index 0000000000000..7a748e3346b4e --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/PendingTreeCorruptionTest.java @@ -0,0 +1,159 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence; + +import java.util.concurrent.TimeUnit; +import javax.cache.expiry.AccessedExpiryPolicy; +import javax.cache.expiry.Duration; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cluster.ClusterState; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManager; +import org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManagerImpl; +import org.apache.ignite.internal.processors.cache.tree.PendingEntriesTree; +import org.apache.ignite.internal.processors.cache.tree.PendingRow; +import org.apache.ignite.internal.util.lang.GridCursor; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import static java.util.concurrent.TimeUnit.MINUTES; + +/** */ +public class PendingTreeCorruptionTest extends GridCommonAbstractTest { + /** */ + @Before + public void before() throws Exception { + stopAllGrids(); + + cleanPersistenceDir(); + } + + /** */ + @After + public void after() throws Exception { + stopAllGrids(); + + cleanPersistenceDir(); + } + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + cfg.setConsistentId(igniteInstanceName); + + cfg.setDataStorageConfiguration(new DataStorageConfiguration() + .setDefaultDataRegionConfiguration(new DataRegionConfiguration() + 
.setPersistenceEnabled(true) + ) + .setWalSegments(3) + .setWalSegmentSize(512 * 1024) + ); + + return cfg; + } + + /** */ + @Test + public void testCorruptionWhileLoadingData() throws Exception { + IgniteEx ig = startGrid(0); + + ig.cluster().state(ClusterState.ACTIVE); + + String expireCacheName = "cacheWithExpire"; + String regularCacheName = "cacheWithoutExpire"; + String grpName = "cacheGroup"; + + IgniteCache expireCache = ig.getOrCreateCache( + new CacheConfiguration<>(expireCacheName) + .setExpiryPolicyFactory(AccessedExpiryPolicy.factoryOf(new Duration(MINUTES, 10))) + .setGroupName(grpName) + ); + + IgniteCache regularCache = ig.getOrCreateCache( + new CacheConfiguration<>(regularCacheName) + .setGroupName(grpName) + ); + + // This will initialize partition and cache structures. + expireCache.put(0, 0); + expireCache.remove(0); + + int expireCacheId = CU.cacheGroupId(expireCacheName, grpName); + + CacheGroupContext grp = ig.context().cache().cacheGroup(CU.cacheId(grpName)); + IgniteCacheOffheapManager.CacheDataStore store = ((IgniteCacheOffheapManagerImpl)grp.offheap()).dataStore(0); + + // Get pending tree of expire cache. + PendingEntriesTree pendingTree = store.pendingTree(); + + long year = TimeUnit.DAYS.toMillis(365); + long expiration = System.currentTimeMillis() + year; + + ig.context().cache().context().database().checkpointReadLock(); + + try { + // Carefully calculated number. Just enough for the first split to happen, but not more. + for (int i = 0; i < 202; i++) + pendingTree.putx(new PendingRow(expireCacheId, expiration, expiration + i)); // link != 0 + + // Open cursor, it'll cache first leaf of the tree. + GridCursor cur = pendingTree.find( + null, + new PendingRow(expireCacheId, expiration + year, 0), + PendingEntriesTree.WITHOUT_KEY + ); + + // Required for "do" loop to work. + assertTrue(cur.next()); + + int cnt = 0; + + // Emulate real expiry loop but with a more precise control. 
+ do { + PendingRow row = cur.get(); + + pendingTree.removex(row); + + // Another carefully calculated moment. Here the page cache is exhausted AND the real page is merged + // with its sibling, meaning that cached "nextPageId" points to empty page from reuse list. + if (row.link - row.expireTime == 100) { + // Put into another cache will take a page from reuse list first. This means that cached + // "nextPageId" points to a data page. + regularCache.put(0, 0); + } + + cnt++; + } + while (cur.next()); + + assertEquals(202, cnt); + } + finally { + ig.context().cache().context().database().checkpointReadUnlock(); + } + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/CheckpointBufferDeadlockTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/CheckpointBufferDeadlockTest.java index 660c7ffb365ba..ca808ad3b04ed 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/CheckpointBufferDeadlockTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/CheckpointBufferDeadlockTest.java @@ -51,6 +51,7 @@ import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; import org.apache.ignite.internal.processors.cache.persistence.file.RandomAccessFileIOFactory; import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryImpl; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.testframework.ListeningTestLogger; @@ -236,6 +237,17 @@ private void runDeadlockScenario() throws Exception { long pageId = PageIdUtils.pageId(0, PageIdAllocator.FLAG_DATA, pageIdx); + long page = pageMem.acquirePage(CU.cacheId(cacheName), pageId); + + try { + // We do not know correct flag(FLAG_DATA or 
FLAG_AUX). Skip page if no luck. + if (pageId != PageIO.getPageId(page + PageMemoryImpl.PAGE_OVERHEAD)) + continue; + } + finally { + pageMem.releasePage(CU.cacheId(cacheName), pageId, page); + } + pickedPagesSet.add(new FullPageId(pageId, CU.cacheId(cacheName))); } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeFakeReuseSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeFakeReuseSelfTest.java index cab54a45319ba..db00de358a6e8 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeFakeReuseSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeFakeReuseSelfTest.java @@ -20,6 +20,7 @@ import java.util.concurrent.ConcurrentLinkedDeque; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseBag; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; @@ -59,5 +60,10 @@ private static class FakeReuseList implements ReuseList { @Override public long recycledPagesCount() throws IgniteCheckedException { return deque.size(); } + + /** {@inheritDoc} */ + @Override public long initRecycledPage(long pageId, byte flag, PageIO initIO) throws IgniteCheckedException { + return pageId; + } } } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeReuseSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeReuseSelfTest.java index 8d50fadac472d..94b3db652e91a 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeReuseSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeReuseSelfTest.java @@ 
-21,6 +21,7 @@ import java.util.Set; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; @@ -83,7 +84,7 @@ public TestReuseList( boolean initNew, GridKernalContext ctx ) throws IgniteCheckedException { - super(cacheId, name, pageMem, wal, metaPageId, initNew, new TestPageLockListener(), ctx, null); + super(cacheId, name, pageMem, wal, metaPageId, initNew, new TestPageLockListener(), ctx, null, PageIdAllocator.FLAG_IDX); } /** diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeSelfTest.java index 82f11ade5fc6c..9465dc7ff91c5 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeSelfTest.java @@ -2773,6 +2773,7 @@ public TestTree( reuseList, new IOVersions<>(new LongInnerIO(canGetRow)), new IOVersions<>(new LongLeafIO()), + PageIdAllocator.FLAG_IDX, new FailureProcessor(new GridTestKernalContext(log)) { @Override public boolean process(FailureContext failureCtx) { lockTrackerManager.dumpLocksToLog(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListSelfTest.java index a6ce732d5b325..1c0f33dde647c 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListSelfTest.java @@ 
-530,7 +530,8 @@ protected FreeList createFreeList(int pageSize) throws Exception { true, null, new GridTestKernalContext(log), - null + null, + PageIdAllocator.FLAG_IDX ); } diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite4.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite4.java index dfca7461eabd6..be885e00704e3 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite4.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite4.java @@ -40,6 +40,7 @@ import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsRestartAfterFailedToWriteMetaPageTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsSpuriousRebalancingOnNodeJoinTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsTaskCancelingTest; +import org.apache.ignite.internal.processors.cache.persistence.PendingTreeCorruptionTest; import org.apache.ignite.internal.processors.cache.persistence.db.IgnitePdsCacheWalDisabledOnRebalancingTest; import org.apache.ignite.internal.processors.cache.persistence.db.IgnitePdsPageReplacementDuringPartitionClearTest; import org.apache.ignite.internal.processors.cache.persistence.db.IgnitePdsPartitionPreloadTest; @@ -121,6 +122,8 @@ public static List> suite(Collection ignoredTests) { GridTestUtils.addTestIfNeeded(suite, WarmUpSelfTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, LoadAllWarmUpStrategySelfTest.class, ignoredTests); + GridTestUtils.addTestIfNeeded(suite, PendingTreeCorruptionTest.class, ignoredTests); + return suite; } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index 7a31059a1d82f..0ded892fce376 100644 --- 
a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -60,6 +60,7 @@ import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener; import org.apache.ignite.internal.mxbean.SqlQueryMXBean; import org.apache.ignite.internal.mxbean.SqlQueryMXBeanImpl; +import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; @@ -2447,6 +2448,7 @@ private JavaObjectSerializer h2Serializer() { reuseList, H2ExtrasInnerIO.getVersions(inlineSize, mvccEnabled), H2ExtrasLeafIO.getVersions(inlineSize, mvccEnabled), + PageIdAllocator.FLAG_IDX, ctx.failure(), lockLsnr ) { diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java index 9c6ca13236d0b..99a0add610c4c 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java @@ -30,6 +30,7 @@ import org.apache.ignite.failure.FailureType; import org.apache.ignite.internal.metric.IoStatisticsHolder; import org.apache.ignite.internal.pagemem.FullPageId; +import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; @@ -221,6 +222,7 @@ public H2Tree( globalRmvId, metaPageId, reuseList, + PageIdAllocator.FLAG_IDX, failureProcessor, null, pageIoRslvr From 1b5d837c9fb5ec4320aca3a7e15ea7b56e34fdfa 
Mon Sep 17 00:00:00 2001 From: Ivan Daschinskiy Date: Tue, 17 Nov 2020 18:18:52 +0300 Subject: [PATCH 049/110] IGNITE-13699 Register ZookeeperDiscovery metrics to new metrics framework (#8458) --- .../discovery/GridDiscoveryManager.java | 4 ++ .../spi/discovery/tcp/TcpDiscoverySpi.java | 5 +-- .../tcp/TcpDiscoverySpiMBeanTest.java | 2 +- .../discovery/zk/ZookeeperDiscoverySpi.java | 14 +++++++ .../discovery/zk/internal/ZkRuntimeState.java | 2 +- .../zk/internal/ZookeeperDiscoveryImpl.java | 32 ++++++++++----- .../ZookeeperDiscoveryStatistics.java | 39 ++++++++++++++----- .../internal/ZookeeperDiscoveryMiscTest.java | 29 +++++++++++++- 8 files changed, 101 insertions(+), 26 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java index 0a8c8572fcdd6..65a331fcb0e04 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java @@ -169,6 +169,7 @@ import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_USER_NAME; import static org.apache.ignite.internal.IgniteVersionUtils.VER; import static org.apache.ignite.internal.events.DiscoveryCustomEvent.EVT_DISCOVERY_CUSTOM_EVT; +import static org.apache.ignite.internal.processors.metric.impl.MetricUtils.metricName; import static org.apache.ignite.internal.processors.security.SecurityUtils.isSecurityCompatibilityMode; import static org.apache.ignite.plugin.segmentation.SegmentationPolicy.NOOP; @@ -188,6 +189,9 @@ public class GridDiscoveryManager extends GridManagerAdapter { /** @see IgniteSystemProperties#IGNITE_DISCOVERY_HISTORY_SIZE */ public static final int DFLT_DISCOVERY_HISTORY_SIZE = 500; + /** Name of the discovery metrics registry. 
*/ + public static final String DISCO_METRICS = metricName("io", "discovery"); + /** Predicate filtering out daemon nodes. */ private static final IgnitePredicate FILTER_NOT_DAEMON = new P1() { @Override public boolean apply(ClusterNode n) { diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java index c18715ece79ce..8a2b4c6790444 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java @@ -120,7 +120,7 @@ import static org.apache.ignite.IgniteSystemProperties.IGNITE_CONSISTENT_ID_BY_HOST_WITHOUT_PORT; import static org.apache.ignite.IgniteSystemProperties.getBoolean; import static org.apache.ignite.failure.FailureType.CRITICAL_ERROR; -import static org.apache.ignite.internal.processors.metric.impl.MetricUtils.metricName; +import static org.apache.ignite.internal.managers.discovery.GridDiscoveryManager.DISCO_METRICS; /** * Discovery SPI implementation that uses TCP/IP for node discovery. @@ -307,9 +307,6 @@ public class TcpDiscoverySpi extends IgniteSpiAdapter implements IgniteDiscovery /** @see IgniteSystemProperties#IGNITE_DISCOVERY_METRICS_QNT_WARN */ public static final int DFLT_DISCOVERY_METRICS_QNT_WARN = 500; - /** Name of the discovery metrics registry. */ - public static final String DISCO_METRICS = metricName("io", "discovery"); - /** Ssl message pattern for StreamCorruptedException. 
*/ private static Pattern sslMsgPattern = Pattern.compile("invalid stream header: 150\\d0\\d00"); diff --git a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpiMBeanTest.java b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpiMBeanTest.java index dd935a96b7f35..be2f147527b05 100644 --- a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpiMBeanTest.java +++ b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpiMBeanTest.java @@ -39,7 +39,7 @@ import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.apache.ignite.events.EventType.EVT_NODE_SEGMENTED; -import static org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi.DISCO_METRICS; +import static org.apache.ignite.internal.managers.discovery.GridDiscoveryManager.DISCO_METRICS; import static org.apache.ignite.testframework.GridTestUtils.waitForCondition; /** diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpi.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpi.java index 3de8df6435b35..ec2dc805f6aa7 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpi.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpi.java @@ -31,6 +31,7 @@ import org.apache.ignite.internal.IgniteFeatures; import org.apache.ignite.internal.managers.discovery.IgniteDiscoverySpi; import org.apache.ignite.internal.managers.discovery.IgniteDiscoverySpiInternalListener; +import org.apache.ignite.internal.processors.metric.MetricRegistry; import org.apache.ignite.internal.util.tostring.GridToStringExclude; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.internal.A; @@ -41,6 +42,7 @@ import org.apache.ignite.resources.LoggerResource; import 
org.apache.ignite.spi.IgniteSpiAdapter; import org.apache.ignite.spi.IgniteSpiConfiguration; +import org.apache.ignite.spi.IgniteSpiContext; import org.apache.ignite.spi.IgniteSpiException; import org.apache.ignite.spi.IgniteSpiMBeanAdapter; import org.apache.ignite.spi.IgniteSpiMultipleInstancesSupport; @@ -62,6 +64,7 @@ import static org.apache.ignite.IgniteSystemProperties.IGNITE_CONSISTENT_ID_BY_HOST_WITHOUT_PORT; import static org.apache.ignite.IgniteSystemProperties.getBoolean; +import static org.apache.ignite.internal.managers.discovery.GridDiscoveryManager.DISCO_METRICS; /** * Zookeeper Discovery Spi. @@ -489,6 +492,17 @@ public DiscoverySpiNodeAuthenticator getAuthenticator() { } } + /** {@inheritDoc} */ + @Override protected void onContextInitialized0(IgniteSpiContext spiCtx) throws IgniteSpiException { + super.onContextInitialized0(spiCtx); + + MetricRegistry discoReg = (MetricRegistry)getSpiContext().getOrCreateMetricRegistry(DISCO_METRICS); + + stats.registerMetrics(discoReg); + + discoReg.register("Coordinator", () -> impl.getCoordinator(), UUID.class, "Coordinator ID"); + } + /** {@inheritDoc} */ @Override public void setInternalListener(IgniteDiscoverySpiInternalListener lsnr) { if (impl != null) diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkRuntimeState.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkRuntimeState.java index 0aac6db91e336..547c185642d3b 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkRuntimeState.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkRuntimeState.java @@ -39,7 +39,7 @@ class ZkRuntimeState { final boolean reconnect; /** */ - ZookeeperClient zkClient; + volatile ZookeeperClient zkClient; /** */ long internalOrder; diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryImpl.java 
b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryImpl.java index d9d56aef1aa69..bdd37d1a4c62c 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryImpl.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryImpl.java @@ -192,7 +192,7 @@ public class ZookeeperDiscoveryImpl { private IgniteThreadPoolExecutor utilityPool; /** */ - private ZkRuntimeState rtState; + private volatile ZkRuntimeState rtState; /** */ private volatile ConnectionState connState = ConnectionState.STARTED; @@ -297,7 +297,9 @@ public ClusterNode localNode() { @Nullable public ZookeeperClusterNode node(UUID nodeId) { assert nodeId != null; - return rtState.top.nodesById.get(nodeId); + ZkRuntimeState rtState0 = rtState; + + return rtState0 != null ? rtState0.top.nodesById.get(nodeId) : null; } /** @@ -307,7 +309,9 @@ public ClusterNode localNode() { @Nullable public ZookeeperClusterNode node(long nodeOrder) { assert nodeOrder > 0 : nodeOrder; - return rtState.top.nodesByOrder.get(nodeOrder); + ZkRuntimeState rtState0 = rtState; + + return rtState0 != null ? 
rtState.top.nodesByOrder.get(nodeOrder) : null; } /** @@ -2298,8 +2302,6 @@ private void addJoinedNode( joinCtx.addJoinedNode(nodeEvtData, commonData); rtState.evtsData.onNodeJoin(joinedNode); - - stats.onNodeJoined(); } /** @@ -2817,9 +2819,12 @@ private void processNewEvents(final ZkDiscoveryEventsData evtsData) throws Excep throw e; } - if (rtState.joined) + if (rtState.joined) { rtState.evtsData = evtsData; + stats.onTopologyChanged(rtState.evtsData.topVer); + } + if (rtState.crd) handleProcessedEvents("procEvt"); else @@ -3552,6 +3557,8 @@ private void notifyNodeJoin(ZkJoinedNodeEvtData joinedEvtData, ZkJoiningNodeData null ) ).get(); + + stats.onNodeJoined(); } /** @@ -4630,7 +4637,12 @@ enum ConnectionState { /** */ public UUID getCoordinator() { - return rtState.top.nodesByOrder.values().stream() + ZkRuntimeState rtState0 = rtState; + + if (rtState0 == null) + return null; + + return rtState0.top.nodesByOrder.values().stream() .filter(n -> !n.isClient() && !n.isDaemon()) .map(ZookeeperClusterNode::id) .findFirst() @@ -4639,12 +4651,14 @@ public UUID getCoordinator() { /** */ public String getSpiState() { - return rtState.zkClient.state(); + return connState.toString(); } /** */ public String getZkSessionId() { - if (rtState.zkClient != null && rtState.zkClient.zk() != null) + ZkRuntimeState rtState0 = rtState; + + if (rtState0 != null && rtState0.zkClient != null) return Long.toHexString(rtState.zkClient.zk().getSessionId()); else return null; diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryStatistics.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryStatistics.java index 21b62c49d43cb..faf00d60434da 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryStatistics.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryStatistics.java @@ -16,7 +16,9 
@@ */ package org.apache.ignite.spi.discovery.zk.internal; -import java.util.concurrent.atomic.LongAdder; +import org.apache.ignite.internal.processors.metric.MetricRegistry; +import org.apache.ignite.internal.processors.metric.impl.AtomicLongMetric; +import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; import org.apache.ignite.internal.util.typedef.internal.S; /** @@ -24,35 +26,49 @@ */ public class ZookeeperDiscoveryStatistics { /** */ - private final LongAdder joinedNodesCnt = new LongAdder(); + private final LongAdderMetric joinedNodesCnt = new LongAdderMetric("JoinedNodes", "Joined nodes count"); /** */ - private final LongAdder failedNodesCnt = new LongAdder(); + private final LongAdderMetric failedNodesCnt = new LongAdderMetric("FailedNodes", "Failed nodes count"); /** */ - private final LongAdder leftNodesCnt = new LongAdder(); + private final LongAdderMetric leftNodesCnt = new LongAdderMetric("LeftNodes", "Left nodes count"); /** Communication error count. */ - private final LongAdder commErrCnt = new LongAdder(); + private final LongAdderMetric commErrCnt = new LongAdderMetric("CommunicationErrors", "Communication errors count"); + + /** Current topology version */ + private final AtomicLongMetric topVer = new AtomicLongMetric("CurrentTopologyVersion", "Current topology version"); + + /** + * @param discoReg Discovery metric registry. 
+ */ + public void registerMetrics(MetricRegistry discoReg) { + discoReg.register(joinedNodesCnt); + discoReg.register(failedNodesCnt); + discoReg.register(leftNodesCnt); + discoReg.register(commErrCnt); + discoReg.register(topVer); + } /** */ public long joinedNodesCnt() { - return joinedNodesCnt.longValue(); + return joinedNodesCnt.value(); } /** */ public long failedNodesCnt() { - return failedNodesCnt.longValue(); + return failedNodesCnt.value(); } /** */ public long leftNodesCnt() { - return leftNodesCnt.longValue(); + return leftNodesCnt.value(); } /** */ public long commErrorCount() { - return commErrCnt.longValue(); + return commErrCnt.value(); } /** */ @@ -75,6 +91,11 @@ public void onCommunicationError() { commErrCnt.increment(); } + /** */ + public void onTopologyChanged(long topVer) { + this.topVer.value(topVer); + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(ZookeeperDiscoveryStatistics.class, this); diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryMiscTest.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryMiscTest.java index f271bad61d05f..57aae67fe445a 100644 --- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryMiscTest.java +++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryMiscTest.java @@ -33,6 +33,7 @@ import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.SecurityCredentialsAttrFilterPredicate; +import org.apache.ignite.internal.processors.metric.MetricRegistry; import org.apache.ignite.internal.processors.security.SecurityContext; import org.apache.ignite.internal.util.lang.gridfunc.PredicateMapView; import org.apache.ignite.internal.util.typedef.G; @@ -47,11 +48,14 @@ import 
org.apache.ignite.spi.discovery.DiscoverySpiNodeAuthenticator; import org.apache.ignite.spi.discovery.zk.ZookeeperDiscoverySpi; import org.apache.ignite.spi.discovery.zk.ZookeeperDiscoverySpiMBean; +import org.apache.ignite.spi.metric.LongMetric; +import org.apache.ignite.spi.metric.ObjectMetric; import org.apache.ignite.testframework.GridTestUtils; import org.junit.Test; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_IGNITE_INSTANCE_NAME; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_SECURITY_SUBJECT_V2; +import static org.apache.ignite.internal.managers.discovery.GridDiscoveryManager.DISCO_METRICS; /** * Tests for Zookeeper SPI discovery. @@ -217,23 +221,44 @@ public void testDefaultConsistentId() throws Exception { */ @Test public void testMbean() throws Exception { - startGrids(3); + int cnt = 3; + + startGrids(cnt); UUID crdNodeId = grid(0).localNode().id(); try { - for (int i = 0; i < 3; i++) { + for (int i = 0; i < cnt; i++) { IgniteEx grid = grid(i); ZookeeperDiscoverySpiMBean bean = getMxBean(grid.context().igniteInstanceName(), "SPIs", ZookeeperDiscoverySpi.class, ZookeeperDiscoverySpiMBean.class); + MetricRegistry discoReg = grid.context().metric().registry(DISCO_METRICS); + assertNotNull(bean); assertEquals(String.valueOf(grid.cluster().node(crdNodeId)), bean.getCoordinatorNodeFormatted()); assertEquals(String.valueOf(grid.cluster().localNode()), bean.getLocalNodeFormatted()); assertEquals(zkCluster.getConnectString(), bean.getZkConnectionString()); assertEquals((long)grid.configuration().getFailureDetectionTimeout(), bean.getZkSessionTimeout()); + + assertEquals(grid.cluster().topologyVersion(), + discoReg.findMetric("CurrentTopologyVersion").value()); + + assertEquals(grid.cluster().node(crdNodeId).id(), + discoReg.>findMetric("Coordinator").value()); + + assertEquals(cnt - i - 1, bean.getNodesJoined()); + assertEquals(cnt - i - 1, discoReg.findMetric("JoinedNodes").value()); + + Arrays.asList("LeftNodes", 
"FailedNodes", "CommunicationErrors").forEach(name -> { + assertEquals(0, discoReg.findMetric(name).value()); + }); + + assertEquals(0, bean.getNodesLeft()); + assertEquals(0, bean.getNodesFailed()); + assertEquals(0, bean.getCommErrorProcNum()); } } finally { From a0acbb4c327a983ed092551f0dd3ef20405a79f2 Mon Sep 17 00:00:00 2001 From: tledkov Date: Wed, 18 Nov 2020 12:11:54 +0300 Subject: [PATCH 050/110] IGNITE-13576 fix '--property help' command --- .../commandline/property/PropertyCommand.java | 4 ++-- .../subcommands/PropertyHelpCommand.java | 8 +++---- .../GridCommandHandlerPropertiesTest.java | 23 +++++++++++++++++++ ...mmandHandlerClusterByClassTest_help.output | 4 ++-- ...ndlerClusterByClassWithSSLTest_help.output | 4 ++-- 5 files changed, 33 insertions(+), 10 deletions(-) diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/PropertyCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/PropertyCommand.java index 1f2c694e69b30..9a7a63b4d434b 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/PropertyCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/PropertyCommand.java @@ -52,13 +52,13 @@ public class PropertyCommand extends AbstractCommand { LIST.toString() ); - usage(log, "Get the property value", + usage(log, "Get the property value:", PROPERTY, GET.toString(), PropertyArgs.NAME, ""); - usage(log, "Set the property value", + usage(log, "Set the property value:", PROPERTY, SET.toString(), PropertyArgs.NAME, diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/subcommands/PropertyHelpCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/subcommands/PropertyHelpCommand.java index 29de3ec0a24b3..dcf2e6f4f2a3f 100644 --- 
a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/subcommands/PropertyHelpCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/property/subcommands/PropertyHelpCommand.java @@ -19,12 +19,12 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClientConfiguration; -import org.apache.ignite.internal.commandline.AbstractCommand; -import org.apache.ignite.internal.commandline.meta.MetadataCommand; +import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.meta.MetadataSubCommandsList; +import org.apache.ignite.internal.commandline.property.PropertyCommand; /** */ -public class PropertyHelpCommand extends AbstractCommand { +public class PropertyHelpCommand implements Command { /** {@inheritDoc} */ @Override public void printUsage(Logger log) { throw new UnsupportedOperationException("printUsage"); @@ -32,7 +32,7 @@ public class PropertyHelpCommand extends AbstractCommand { /** {@inheritDoc} */ @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { - new MetadataCommand().printUsage(log); + new PropertyCommand().printUsage(log); return null; } diff --git a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerPropertiesTest.java b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerPropertiesTest.java index 111b5c0c0d846..2c800d7b9ea3a 100644 --- a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerPropertiesTest.java +++ b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerPropertiesTest.java @@ -47,6 +47,29 @@ public void init() { public void clear() { } + /** + * Check the command '--property help'. 
+ * Steps: + */ + @Test + public void testHelp() { + assertEquals(EXIT_CODE_OK, execute("--property", "help")); + + String out = testOut.toString(); + + assertContains(log, out, "Print property command help:"); + assertContains(log, out, "control.(sh|bat) --property help"); + + assertContains(log, out, "Print list of available properties:"); + assertContains(log, out, "control.(sh|bat) --property list"); + + assertContains(log, out, "Get the property value:"); + assertContains(log, out, "control.(sh|bat) --property get --name "); + + assertContains(log, out, "Set the property value:"); + assertContains(log, out, "control.(sh|bat) --property set --name --val "); + } + /** * Check the command '--property list'. * Steps: diff --git a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output index 60ef386e8f938..5950ed1f56943 100644 --- a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output +++ b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output @@ -177,10 +177,10 @@ If the file name isn't specified the output file name is: '.bin' Print list of available properties: control.(sh|bat) --property list - Get the property value + Get the property value: control.(sh|bat) --property get --name - Set the property value + Set the property value: control.(sh|bat) --property set --name --val Print system view content: diff --git a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output index 60ef386e8f938..5950ed1f56943 100644 --- a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output +++ 
b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output @@ -177,10 +177,10 @@ If the file name isn't specified the output file name is: '.bin' Print list of available properties: control.(sh|bat) --property list - Get the property value + Get the property value: control.(sh|bat) --property get --name - Set the property value + Set the property value: control.(sh|bat) --property set --name --val Print system view content: From 635dafb7742673494efa6e8e91e236820156d38f Mon Sep 17 00:00:00 2001 From: Andrey Gura Date: Wed, 6 May 2020 19:16:03 +0300 Subject: [PATCH 051/110] IGNITE-13701 Fixed content, order and amount of columns in INDEXES, TABLES and SCHEMAS views. --- .../jdbc/thin/JdbcThinMetadataSelfTest.java | 6 +- .../ignite/util/SystemViewCommandTest.java | 20 ++- .../cache/GridCacheContextInfo.java | 9 + .../query/h2/H2IndexesSystemViewTest.java | 35 ++-- .../systemview/walker/SqlIndexViewWalker.java | 47 +++--- .../walker/SqlSchemaViewWalker.java | 4 +- .../systemview/walker/SqlTableViewWalker.java | 46 +++--- .../internal/processors/query/h2/H2Utils.java | 1 - .../processors/query/h2/SchemaManager.java | 2 +- .../spi/systemview/view/SqlIndexView.java | 155 ++++++++++-------- .../spi/systemview/view/SqlSchemaView.java | 2 +- .../spi/systemview/view/SqlTableView.java | 92 +++++++++-- .../cache/metric/SqlViewExporterSpiTest.java | 24 +-- .../IgniteClusterSnapshotWithIndexesTest.java | 2 +- .../query/SqlSystemViewsSelfTest.java | 80 ++++----- 15 files changed, 313 insertions(+), 212 deletions(-) diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java index c9ee62a75acf1..5975eb0e4e766 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java 
@@ -720,6 +720,8 @@ public void testGetAllColumns() throws Exception { "SYS.CACHE_GROUPS.REBALANCE_DELAY.null.19", "SYS.CACHE_GROUPS.REBALANCE_ORDER.null.10", "SYS.CACHE_GROUPS.BACKUPS.null.10", + "SYS.INDEXES.CACHE_GROUP_ID.null.10", + "SYS.INDEXES.CACHE_GROUP_NAME.null.2147483647", "SYS.INDEXES.CACHE_ID.null.10", "SYS.INDEXES.CACHE_NAME.null.2147483647", "SYS.INDEXES.SCHEMA_NAME.null.2147483647", @@ -836,6 +838,8 @@ public void testGetAllColumns() throws Exception { "SYS.NODE_METRICS.RECEIVED_MESSAGES_COUNT.null.10", "SYS.NODE_METRICS.RECEIVED_BYTES_COUNT.null.19", "SYS.NODE_METRICS.OUTBOUND_MESSAGES_QUEUE.null.10", + "SYS.TABLES.CACHE_GROUP_ID.null.10", + "SYS.TABLES.CACHE_GROUP_NAME.null.2147483647", "SYS.TABLES.CACHE_ID.null.10", "SYS.TABLES.CACHE_NAME.null.2147483647", "SYS.TABLES.SCHEMA_NAME.null.2147483647", @@ -920,7 +924,7 @@ public void testGetAllColumns() throws Exception { "SYS.TRANSACTIONS.TOP_VER.null.2147483647", "SYS.TRANSACTIONS.KEYS_COUNT.null.10", "SYS.TRANSACTIONS.CACHE_IDS.null.2147483647", - "SYS.SCHEMAS.NAME.null.2147483647", + "SYS.SCHEMAS.SCHEMA_NAME.null.2147483647", "SYS.SCHEMAS.PREDEFINED.null.1", "SYS.VIEWS.NAME.null.2147483647", "SYS.VIEWS.DESCRIPTION.null.2147483647", diff --git a/modules/control-utility/src/test/java/org/apache/ignite/util/SystemViewCommandTest.java b/modules/control-utility/src/test/java/org/apache/ignite/util/SystemViewCommandTest.java index a67ca87eeed64..e23554930355b 100644 --- a/modules/control-utility/src/test/java/org/apache/ignite/util/SystemViewCommandTest.java +++ b/modules/control-utility/src/test/java/org/apache/ignite/util/SystemViewCommandTest.java @@ -469,15 +469,17 @@ public void testTable() { List row = sqlTablesView.get(0); - assertEquals("T1", row.get(0)); // TABLE_NAME - assertEquals(DFLT_SCHEMA, row.get(1)); // SCHEMA_NAME - assertEquals("SQL_PUBLIC_T1", row.get(2)); // CACHE_NAME - assertEquals(Integer.toString(cacheId("SQL_PUBLIC_T1")), row.get(3)); // CACHE_ID - assertEquals("null", 
row.get(4)); // AFFINITY_KEY_COLUMN - assertEquals("ID", row.get(5)); // KEY_ALIAS - assertEquals("null", row.get(6)); // VALUE_ALIAS - assertEquals("java.lang.Long", row.get(7)); // KEY_TYPE_NAME - assertFalse("null".equals(row.get(8))); // VALUE_TYPE_NAME + assertEquals(Integer.toString(cacheId("SQL_PUBLIC_T1")), row.get(0)); // CACHE_GROUP_ID + assertEquals("SQL_PUBLIC_T1", row.get(1)); // CACHE_GROUP_NAME + assertEquals(Integer.toString(cacheId("SQL_PUBLIC_T1")), row.get(2)); // CACHE_ID + assertEquals("SQL_PUBLIC_T1", row.get(3)); // CACHE_NAME + assertEquals(DFLT_SCHEMA, row.get(4)); // SCHEMA_NAME + assertEquals("T1", row.get(5)); // TABLE_NAME + assertEquals("null", row.get(6)); // AFFINITY_KEY_COLUMN + assertEquals("ID", row.get(7)); // KEY_ALIAS + assertEquals("null", row.get(8)); // VALUE_ALIAS + assertEquals("java.lang.Long", row.get(9)); // KEY_TYPE_NAME + assertFalse("null".equals(row.get(10))); // VALUE_TYPE_NAME executeSql(ignite0, "CREATE TABLE T2(ID LONG PRIMARY KEY, NAME VARCHAR)"); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContextInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContextInfo.java index d1b5506a8dd19..6a5f6c6aad1bf 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContextInfo.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContextInfo.java @@ -92,6 +92,15 @@ public String name() { return config.getName(); } + /** + * Returns name of cache group. + * + * @return Cache group name. + */ + public String groupName() { + return CacheGroupContext.cacheOrGroupName(config); + } + /** * @return Cache group id. 
*/ diff --git a/modules/geospatial/src/test/java/org/apache/ignite/internal/processors/query/h2/H2IndexesSystemViewTest.java b/modules/geospatial/src/test/java/org/apache/ignite/internal/processors/query/h2/H2IndexesSystemViewTest.java index b1dab6e82dcd8..35b5847f0fad8 100644 --- a/modules/geospatial/src/test/java/org/apache/ignite/internal/processors/query/h2/H2IndexesSystemViewTest.java +++ b/modules/geospatial/src/test/java/org/apache/ignite/internal/processors/query/h2/H2IndexesSystemViewTest.java @@ -53,16 +53,16 @@ public void testIndexesView() throws Exception { execSql("CREATE SPATIAL INDEX IDX_GEO_1 ON PUBLIC.AFF_CACHE(GEOM)"); String idxSql = "SELECT " + - " CACHE_ID," + - " CACHE_NAME," + - " SCHEMA_NAME," + - " TABLE_NAME," + - " INDEX_NAME," + - " INDEX_TYPE," + - " COLUMNS," + - " IS_PK," + - " IS_UNIQUE," + - " INLINE_SIZE" + + " CACHE_ID," + + " CACHE_NAME," + + " SCHEMA_NAME," + + " TABLE_NAME," + + " INDEX_NAME," + + " INDEX_TYPE," + + " COLUMNS," + + " IS_PK," + + " IS_UNIQUE," + + " INLINE_SIZE" + " FROM SYS.INDEXES ORDER BY TABLE_NAME, INDEX_NAME"; List> srvNodeIndexes = execSql(srv, idxSql); @@ -72,23 +72,22 @@ public void testIndexesView() throws Exception { for (List idx : clientNodeNodeIndexes) assertTrue(srvNodeIndexes.contains(idx)); - String[][] expectedResults = { - {"-825022849", "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "IDX_GEO_1", "SPATIAL", "\"GEOM\" ASC", "false", "false", "0"}, - {"-825022849", "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "_key_PK", "BTREE", "\"ID1\" ASC", "true", "true", "5"}, - {"-825022849", "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "_key_PK__SCAN_", "SCAN", "null", "false", "false", "0"}, - {"-825022849", "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "_key_PK_hash", "HASH", "\"ID1\" ASC", "true", "true", "0"}, - {"-825022849", "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "_key_PK_proxy", "BTREE", "\"ID1\" ASC", "false", "false", "0"} + Object[][] expectedResults = { + {-825022849, 
"SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "IDX_GEO_1", "SPATIAL", "\"GEOM\" ASC", false, false, null}, + {-825022849, "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "__SCAN_", "SCAN", null, false, false, null}, + {-825022849, "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "_key_PK", "BTREE", "\"ID1\" ASC", true, true, 5}, + {-825022849, "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "_key_PK_hash", "HASH", "\"ID1\" ASC", false, true, null} }; for (int i = 0; i < srvNodeIndexes.size(); i++) { List resRow = srvNodeIndexes.get(i); - String[] expRow = expectedResults[i]; + Object[] expRow = expectedResults[i]; assertEquals(expRow.length, resRow.size()); for (int j = 0; j < expRow.length; j++) - assertEquals(expRow[j], String.valueOf(resRow.get(j))); + assertEquals(expRow[j], resRow.get(j)); } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlIndexViewWalker.java b/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlIndexViewWalker.java index 4a2cdd5198419..6384c716fd202 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlIndexViewWalker.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlIndexViewWalker.java @@ -17,7 +17,6 @@ package org.apache.ignite.internal.managers.systemview.walker; -import org.apache.ignite.internal.processors.query.h2.database.H2IndexType; import org.apache.ignite.spi.systemview.view.SqlIndexView; import org.apache.ignite.spi.systemview.view.SystemViewRowAttributeWalker; @@ -30,34 +29,38 @@ public class SqlIndexViewWalker implements SystemViewRowAttributeWalker { /** {@inheritDoc} */ @Override public void visitAll(AttributeVisitor v) { - v.accept(0, "indexName", String.class); - v.accept(1, "indexType", H2IndexType.class); - v.accept(2, "columns", String.class); - v.accept(3, "schemaName", String.class); - v.accept(4, "tableName", String.class); - v.accept(5, 
"cacheName", String.class); - v.accept(6, "cacheId", int.class); - v.accept(7, "inlineSize", int.class); - v.accept(8, "isPk", boolean.class); - v.accept(9, "isUnique", boolean.class); + v.accept(0, "cacheGroupId", int.class); + v.accept(1, "cacheGroupName", String.class); + v.accept(2, "cacheId", int.class); + v.accept(3, "cacheName", String.class); + v.accept(4, "schemaName", String.class); + v.accept(5, "tableName", String.class); + v.accept(6, "indexName", String.class); + v.accept(7, "indexType", String.class); + v.accept(8, "columns", String.class); + v.accept(9, "isPk", boolean.class); + v.accept(10, "isUnique", boolean.class); + v.accept(11, "inlineSize", Integer.class); } /** {@inheritDoc} */ @Override public void visitAll(SqlIndexView row, AttributeWithValueVisitor v) { - v.accept(0, "indexName", String.class, row.indexName()); - v.accept(1, "indexType", H2IndexType.class, row.indexType()); - v.accept(2, "columns", String.class, row.columns()); - v.accept(3, "schemaName", String.class, row.schemaName()); - v.accept(4, "tableName", String.class, row.tableName()); - v.accept(5, "cacheName", String.class, row.cacheName()); - v.acceptInt(6, "cacheId", row.cacheId()); - v.acceptInt(7, "inlineSize", row.inlineSize()); - v.acceptBoolean(8, "isPk", row.isPk()); - v.acceptBoolean(9, "isUnique", row.isUnique()); + v.acceptInt(0, "cacheGroupId", row.cacheGroupId()); + v.accept(1, "cacheGroupName", String.class, row.cacheGroupName()); + v.acceptInt(2, "cacheId", row.cacheId()); + v.accept(3, "cacheName", String.class, row.cacheName()); + v.accept(4, "schemaName", String.class, row.schemaName()); + v.accept(5, "tableName", String.class, row.tableName()); + v.accept(6, "indexName", String.class, row.indexName()); + v.accept(7, "indexType", String.class, row.indexType()); + v.accept(8, "columns", String.class, row.columns()); + v.acceptBoolean(9, "isPk", row.isPk()); + v.acceptBoolean(10, "isUnique", row.isUnique()); + v.accept(11, "inlineSize", Integer.class, 
row.inlineSize()); } /** {@inheritDoc} */ @Override public int count() { - return 10; + return 12; } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlSchemaViewWalker.java b/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlSchemaViewWalker.java index d43a9031e4b05..ca440e89b6ba8 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlSchemaViewWalker.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlSchemaViewWalker.java @@ -29,13 +29,13 @@ public class SqlSchemaViewWalker implements SystemViewRowAttributeWalker { /** {@inheritDoc} */ @Override public void visitAll(AttributeVisitor v) { - v.accept(0, "name", String.class); + v.accept(0, "schemaName", String.class); v.accept(1, "predefined", boolean.class); } /** {@inheritDoc} */ @Override public void visitAll(SqlSchemaView row, AttributeWithValueVisitor v) { - v.accept(0, "name", String.class, row.name()); + v.accept(0, "schemaName", String.class, row.schemaName()); v.acceptBoolean(1, "predefined", row.predefined()); } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlTableViewWalker.java b/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlTableViewWalker.java index 8fd2cb2d6f08f..0826618376acb 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlTableViewWalker.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/managers/systemview/walker/SqlTableViewWalker.java @@ -29,34 +29,38 @@ public class SqlTableViewWalker implements SystemViewRowAttributeWalker { /** {@inheritDoc} */ @Override public void visitAll(AttributeVisitor v) { - v.accept(0, "tableName", String.class); - v.accept(1, "schemaName", String.class); - v.accept(2, "cacheName", String.class); - v.accept(3, "cacheId", 
int.class); - v.accept(4, "affinityKeyColumn", String.class); - v.accept(5, "keyAlias", String.class); - v.accept(6, "valueAlias", String.class); - v.accept(7, "keyTypeName", String.class); - v.accept(8, "valueTypeName", String.class); - v.accept(9, "isIndexRebuildInProgress", boolean.class); + v.accept(0, "cacheGroupId", int.class); + v.accept(1, "cacheGroupName", String.class); + v.accept(2, "cacheId", int.class); + v.accept(3, "cacheName", String.class); + v.accept(4, "schemaName", String.class); + v.accept(5, "tableName", String.class); + v.accept(6, "affinityKeyColumn", String.class); + v.accept(7, "keyAlias", String.class); + v.accept(8, "valueAlias", String.class); + v.accept(9, "keyTypeName", String.class); + v.accept(10, "valueTypeName", String.class); + v.accept(11, "isIndexRebuildInProgress", boolean.class); } /** {@inheritDoc} */ @Override public void visitAll(SqlTableView row, AttributeWithValueVisitor v) { - v.accept(0, "tableName", String.class, row.tableName()); - v.accept(1, "schemaName", String.class, row.schemaName()); - v.accept(2, "cacheName", String.class, row.cacheName()); - v.acceptInt(3, "cacheId", row.cacheId()); - v.accept(4, "affinityKeyColumn", String.class, row.affinityKeyColumn()); - v.accept(5, "keyAlias", String.class, row.keyAlias()); - v.accept(6, "valueAlias", String.class, row.valueAlias()); - v.accept(7, "keyTypeName", String.class, row.keyTypeName()); - v.accept(8, "valueTypeName", String.class, row.valueTypeName()); - v.acceptBoolean(9, "isIndexRebuildInProgress", row.isIndexRebuildInProgress()); + v.acceptInt(0, "cacheGroupId", row.cacheGroupId()); + v.accept(1, "cacheGroupName", String.class, row.cacheGroupName()); + v.acceptInt(2, "cacheId", row.cacheId()); + v.accept(3, "cacheName", String.class, row.cacheName()); + v.accept(4, "schemaName", String.class, row.schemaName()); + v.accept(5, "tableName", String.class, row.tableName()); + v.accept(6, "affinityKeyColumn", String.class, row.affinityKeyColumn()); + v.accept(7, 
"keyAlias", String.class, row.keyAlias()); + v.accept(8, "valueAlias", String.class, row.valueAlias()); + v.accept(9, "keyTypeName", String.class, row.keyTypeName()); + v.accept(10, "valueTypeName", String.class, row.valueTypeName()); + v.acceptBoolean(11, "isIndexRebuildInProgress", row.isIndexRebuildInProgress()); } /** {@inheritDoc} */ @Override public int count() { - return 10; + return 12; } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Utils.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Utils.java index 233b27215721a..ecd8169195485 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Utils.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Utils.java @@ -992,7 +992,6 @@ else if (cctx.config().getQueryParallelism() != expectedParallelism) { * * @return Array of key and affinity columns. Key's, if it possible, splitted into simple components. 
*/ - @SuppressWarnings("ZeroLengthArrayAllocation") @NotNull public static IndexColumn[] unwrapKeyColumns(GridH2Table tbl, IndexColumn[] idxCols) { ArrayList keyCols = new ArrayList<>(); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/SchemaManager.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/SchemaManager.java index 6091b4e91996c..ad09200f2fc42 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/SchemaManager.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/SchemaManager.java @@ -169,7 +169,7 @@ public SchemaManager(GridKernalContext ctx, ConnectionManager connMgr) { ctx.systemView().registerInnerCollectionView(SQL_IDXS_VIEW, SQL_IDXS_VIEW_DESC, new SqlIndexViewWalker(), dataTables.values(), - GridH2Table::getIndexes, + GridH2Table::indexesInformation, SqlIndexView::new); ctx.systemView().registerInnerArrayView(SQL_TBL_COLS_VIEW, SQL_TBL_COLS_VIEW_DESC, diff --git a/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlIndexView.java b/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlIndexView.java index 05eda7db3d119..664be994f2cb1 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlIndexView.java +++ b/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlIndexView.java @@ -18,14 +18,8 @@ package org.apache.ignite.spi.systemview.view; import org.apache.ignite.internal.managers.systemview.walker.Order; -import org.apache.ignite.internal.processors.query.h2.H2Utils; -import org.apache.ignite.internal.processors.query.h2.database.H2IndexType; -import org.apache.ignite.internal.processors.query.h2.database.H2PkHashIndex; -import org.apache.ignite.internal.processors.query.h2.database.H2TreeIndexBase; -import org.apache.ignite.internal.processors.query.h2.opt.GridH2ProxyIndex; +import 
org.apache.ignite.internal.processors.query.h2.database.IndexInformation; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table; -import org.apache.ignite.internal.processors.query.h2.opt.H2TableScanIndex; -import org.h2.index.Index; /** * Sql index representation for a {@link SystemView}. @@ -35,103 +29,130 @@ public class SqlIndexView { private final GridH2Table tbl; /** Index. */ - private final Index idx; - - /** Index type. */ - private final H2IndexType type; + private final IndexInformation idx; /** */ - public SqlIndexView(GridH2Table tbl, Index idx) { + public SqlIndexView(GridH2Table tbl, IndexInformation idx) { this.tbl = tbl; this.idx = idx; - this.type = type(idx); } - /** @return Cache id. */ + /** + * Returns cache group ID. + * + * @return Cache group ID. + */ + @Order() + public int cacheGroupId() { + return tbl.cacheInfo().groupId(); + } + + /** + * Returns Cache group name. + * + * @return Cache group name. + */ + @Order(1) + public String cacheGroupName() { + return tbl.cacheInfo().groupName(); + } + + /** + * Returns cache ID. + * @return Cache ID. + */ + @Order(2) public int cacheId() { return tbl.cacheId(); } - /** @return Cache name. */ - @Order(5) + /** + * Returns cache name. + * + * @return Cache name. + */ + @Order(3) public String cacheName() { return tbl.cacheName(); } - /** @return Schema name. */ - @Order(3) + /** + * Returns schema name. + * + * @return Schema name. + */ + @Order(4) public String schemaName() { return tbl.getSchema().getName(); } - /** @return Table name. */ - @Order(4) + /** + * Returns table name. + * + * @return Table name. + */ + @Order(5) public String tableName() { return tbl.identifier().table(); } - /** @return Index name. */ - @Order() + /** + * Returns index name. + * + * @return Index name. + */ + @Order(6) public String indexName() { - return idx.getName(); + return idx.name(); } - /** @return Index type. 
*/ - @Order(1) - public H2IndexType indexType() { - return type; + /** + * Returns index type. + * + * @return Index type. + */ + @Order(7) + public String indexType() { + return idx.type(); } - /** @return Indexed columns. */ - @Order(2) + /** + * Returns all columns on which index is built. + * + * @return Coma separated indexed columns. + */ + @Order(8) public String columns() { - switch (type) { - case HASH: - case BTREE: - return H2Utils.indexColumnsSql(H2Utils.unwrapKeyColumns(tbl, idx.getIndexColumns())); - - case SPATIAL: - return H2Utils.indexColumnsSql(idx.getIndexColumns()); - - case SCAN: - return null; - - default: - return "???"; - } + return idx.keySql(); } - /** @return {@code True} if primary key index, {@code false} otherwise. */ + /** + * Returns boolean value which indicates whether this index is for primary key or not. + * + * @return {@code True} if primary key index, {@code false} otherwise. + */ + @Order(9) public boolean isPk() { - return idx.getIndexType().isPrimaryKey(); + return idx.pk(); } - /** @return {@code True} if unique index, {@code false} otherwise. */ + /** + * Returns boolean value which indicates whether this index is unique or not. + * + * @return {@code True} if unique index, {@code false} otherwise. + */ + @Order(10) public boolean isUnique() { - return idx.getIndexType().isUnique(); - } - - /** @return Inline size. */ - public int inlineSize() { - return idx instanceof H2TreeIndexBase ? ((H2TreeIndexBase)idx).inlineSize() : 0; + return idx.unique(); } /** - * @param idx Inde. - * @return Index type. + * Returns inline size in bytes. + * + * @return Inline size. 
*/ - private static H2IndexType type(Index idx) { - if (idx instanceof H2TreeIndexBase) { - return H2IndexType.BTREE; - } else if (idx instanceof H2PkHashIndex) - return H2IndexType.HASH; - else if (idx instanceof H2TableScanIndex) - return H2IndexType.SCAN; - else if (idx instanceof GridH2ProxyIndex) - return type(((GridH2ProxyIndex)idx).underlyingIndex()); - else if (idx.getIndexType().isSpatial()) - return H2IndexType.SPATIAL; - - return null; + @Order(11) + public Integer inlineSize() { + return idx.inlineSize(); } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlSchemaView.java b/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlSchemaView.java index f09163266a318..7a1ecbd415f7e 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlSchemaView.java +++ b/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlSchemaView.java @@ -36,7 +36,7 @@ public SqlSchemaView(H2Schema schema) { /** @return Schema name. */ @Order - public String name() { + public String schemaName() { return schema.schemaName(); } diff --git a/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlTableView.java b/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlTableView.java index cc2a5111fdefb..c2b8a896a6922 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlTableView.java +++ b/modules/indexing/src/main/java/org/apache/ignite/spi/systemview/view/SqlTableView.java @@ -47,56 +47,112 @@ public SqlTableView(GridH2Table tbl) { } } - /** @return Cache id. */ - @Order(3) + /** + * Returns cache group ID. + * + * @return Cache group ID. + */ + @Order() + public int cacheGroupId() { + return tbl.cacheInfo().groupId(); + } + + /** + * Returns Cache group name. + * + * @return Cache group name. + */ + @Order(1) + public String cacheGroupName() { + return tbl.cacheInfo().groupName(); + } + + /** + * Returns cache ID. 
+ * + * @return Cache ID. + */ + @Order(2) public int cacheId() { return tbl.cacheId(); } - /** @return Cache name. */ - @Order(2) + /** + * Returns cache name. + * + * @return Cache name. + */ + @Order(3) public String cacheName() { return tbl.cacheName(); } - /** @return Schema name. */ - @Order(1) + /** + * Returns schema name. + * + * @return Schema name. + */ + @Order(4) public String schemaName() { return tbl.getSchema().getName(); } - /** @return Table name. */ - @Order + /** + * Returns table name. + * + * @return Table name. + */ + @Order(5) public String tableName() { return tbl.identifier().table(); } - /** @return Affinity key column. */ - @Order(4) + /** + * Returns name of affinity key column. + * + * @return Affinity key column name. + */ + @Order(6) public String affinityKeyColumn() { return affColName; } - /** @return Key alias. */ - @Order(5) + /** + * Returns alias for key column. + * + * @return Key alias. + */ + @Order(7) public String keyAlias() { return tbl.rowDescriptor().type().keyFieldAlias(); } - /** @return Value alias. */ - @Order(6) + /** + * Returns alias for value column. + * + * @return Value alias. + */ + @Order(8) public String valueAlias() { return tbl.rowDescriptor().type().valueFieldAlias(); } - /** @return Key type name. */ - @Order(7) + /** + * Returns name of key type. + * + * @return Key type name. + */ + @Order(9) public String keyTypeName() { return tbl.rowDescriptor().type().keyTypeName(); } - /** @return Value type name. */ - @Order(8) + /** + * Returns name of value type. + * + * @return Value type name. 
+ */ + @Order(10) public String valueTypeName() { return tbl.rowDescriptor().type().valueTypeName(); } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/metric/SqlViewExporterSpiTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/metric/SqlViewExporterSpiTest.java index b0ad058051535..eb87a1d563c93 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/metric/SqlViewExporterSpiTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/metric/SqlViewExporterSpiTest.java @@ -380,7 +380,7 @@ public void testSchemas() throws Exception { Set schemaFromSysView = new HashSet<>(); - schemasSysView.forEach(v -> schemaFromSysView.add(v.name())); + schemasSysView.forEach(v -> schemaFromSysView.add(v.schemaName())); HashSet expSchemas = new HashSet<>(asList("MY_SCHEMA", "ANOTHER_SCHEMA", "SYS", "PUBLIC")); @@ -453,20 +453,22 @@ public void testTable() throws Exception { assertEquals(1, res.size()); - List tbl = res.get(0); + List tbl = res.get(0); int cacheId = cacheId("SQL_PUBLIC_T1"); String cacheName = "SQL_PUBLIC_T1"; - assertEquals("T1", tbl.get(0)); // TABLE_NAME - assertEquals(DFLT_SCHEMA, tbl.get(1)); // SCHEMA_NAME - assertEquals(cacheName, tbl.get(2)); // CACHE_NAME - assertEquals(cacheId, tbl.get(3)); // CACHE_ID - assertNull(tbl.get(4)); // AFFINITY_KEY_COLUMN - assertEquals("ID", tbl.get(5)); // KEY_ALIAS - assertNull(tbl.get(6)); // VALUE_ALIAS - assertEquals("java.lang.Long", tbl.get(7)); // KEY_TYPE_NAME - assertNotNull(tbl.get(8)); // VALUE_TYPE_NAME + assertEquals(cacheId, tbl.get(0)); // CACHE_GROUP_ID + assertEquals(cacheName, tbl.get(1)); // CACHE_GROUP_NAME + assertEquals(cacheId, tbl.get(2)); // CACHE_ID + assertEquals(cacheName, tbl.get(3)); // CACHE_NAME + assertEquals(DFLT_SCHEMA, tbl.get(4)); // SCHEMA_NAME + assertEquals("T1", tbl.get(5)); // TABLE_NAME + assertNull(tbl.get(6)); // AFFINITY_KEY_COLUMN + 
assertEquals("ID", tbl.get(7)); // KEY_ALIAS + assertNull(tbl.get(8)); // VALUE_ALIAS + assertEquals("java.lang.Long", tbl.get(9)); // KEY_TYPE_NAME + assertNotNull(tbl.get(10)); // VALUE_TYPE_NAME execute(ignite0, "CREATE TABLE T2(ID LONG PRIMARY KEY, NAME VARCHAR)"); diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/snapshot/IgniteClusterSnapshotWithIndexesTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/snapshot/IgniteClusterSnapshotWithIndexesTest.java index 4f6663d4a67af..a263df877f85f 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/snapshot/IgniteClusterSnapshotWithIndexesTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/snapshot/IgniteClusterSnapshotWithIndexesTest.java @@ -154,7 +154,7 @@ public void testClusterSnapshotConsistentConfig() throws Exception { IgniteEx snp = startGridsFromSnapshot(grids, SNAPSHOT_NAME); List currIdxNames = executeSql(snp, "SELECT * FROM SYS.INDEXES").stream(). 
- map(l -> (String)l.get(0)) + map(l -> (String)l.get(6)) .collect(Collectors.toList()); assertTrue("Concurrently created indexes must not exist in the snapshot: " + currIdxNames, diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/SqlSystemViewsSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/SqlSystemViewsSelfTest.java index 5167651b66bd9..a037e19a04058 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/SqlSystemViewsSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/SqlSystemViewsSelfTest.java @@ -280,48 +280,46 @@ public void testIndexesView() throws Exception { //ToDo: As of now we can see duplicates columns within index due to https://issues.apache.org/jira/browse/IGNITE-11125 - String[][] expectedResults = { - {"-825022849", "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "AFFINITY_KEY", "BTREE", "\"ID2\" ASC, \"ID1\" ASC", "false", "false", "10"}, - {"-825022849", "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "_key_PK", "BTREE", "\"ID1\" ASC, \"ID2\" ASC", "true", "true", "10"}, - {"-825022849", "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "_key_PK__SCAN_", "SCAN", "null", "false", "false", "0"}, - {"-825022849", "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "_key_PK_hash", "HASH", "\"ID1\" ASC, \"ID2\" ASC, \"ID2\" ASC", "true", "true", "0"}, - - {"707660652", "SQL_PUBLIC_CACHE_SQL", "PUBLIC", "CACHE_SQL", "IDX_2", "BTREE", "\"ID\" DESC, \"ID\" ASC", "false", "false", "13"}, - {"707660652", "SQL_PUBLIC_CACHE_SQL", "PUBLIC", "CACHE_SQL", "IDX_2_proxy", "BTREE", "\"ID\" DESC, \"ID\" ASC", "false", "false", "0"}, - {"707660652", "SQL_PUBLIC_CACHE_SQL", "PUBLIC", "CACHE_SQL", "_key_PK", "BTREE", "\"ID\" ASC", "true", "true", "5"}, - {"707660652", "SQL_PUBLIC_CACHE_SQL", "PUBLIC", "CACHE_SQL", "_key_PK__SCAN_", "SCAN", "null", "false", "false", "0"}, - {"707660652", "SQL_PUBLIC_CACHE_SQL", "PUBLIC", 
"CACHE_SQL", "_key_PK_hash", "HASH", "\"ID\" ASC", "true", "true", "0"}, - {"707660652", "SQL_PUBLIC_CACHE_SQL", "PUBLIC", "CACHE_SQL", "_key_PK_proxy", "BTREE", "\"ID\" ASC", "false", "false", "0"}, - - {"1374144180", "SQL_PUBLIC_DFLT_AFF_CACHE", "PUBLIC", "DFLT_AFF_CACHE", "AFFINITY_KEY", "BTREE", "\"ID1\" ASC, \"ID2\" ASC", "false", "false", "10"}, - {"1374144180", "SQL_PUBLIC_DFLT_AFF_CACHE", "PUBLIC", "DFLT_AFF_CACHE", "IDX_AFF_1", "BTREE", "\"ID2\" DESC, \"ID1\" ASC, \"MY_VAL\" DESC", "false", "false", "10"}, - {"1374144180", "SQL_PUBLIC_DFLT_AFF_CACHE", "PUBLIC", "DFLT_AFF_CACHE", "_key_PK", "BTREE", "\"ID1\" ASC, \"ID2\" ASC", "true", "true", "10"}, - {"1374144180", "SQL_PUBLIC_DFLT_AFF_CACHE", "PUBLIC", "DFLT_AFF_CACHE", "_key_PK__SCAN_", "SCAN", "null", "false", "false", "0"}, - {"1374144180", "SQL_PUBLIC_DFLT_AFF_CACHE", "PUBLIC", "DFLT_AFF_CACHE", "_key_PK_hash", "HASH", "\"ID1\" ASC, \"ID2\" ASC, \"ID1\" ASC", "true", "true", "0"}, - - {"1102275506", "SQL_PUBLIC_DFLT_CACHE", "PUBLIC", "DFLT_CACHE", "IDX_1", "BTREE", "\"ID2\" DESC, \"ID1\" ASC, \"MY_VAL\" DESC, \"ID1\" ASC, \"ID2\" ASC", "false", "false", "10"}, - {"1102275506", "SQL_PUBLIC_DFLT_CACHE", "PUBLIC", "DFLT_CACHE", "IDX_3", "BTREE", "\"MY_VAL\" ASC, \"ID1\" ASC, \"ID2\" ASC, \"ID1\" ASC, \"ID2\" ASC", "false", "false", "10"}, - {"1102275506", "SQL_PUBLIC_DFLT_CACHE", "PUBLIC", "DFLT_CACHE", "_key_PK", "BTREE", "\"ID1\" ASC, \"ID2\" ASC", "true", "true", "10"}, - {"1102275506", "SQL_PUBLIC_DFLT_CACHE", "PUBLIC", "DFLT_CACHE", "_key_PK__SCAN_", "SCAN", "null", "false", "false", "0"}, - {"1102275506", "SQL_PUBLIC_DFLT_CACHE", "PUBLIC", "DFLT_CACHE", "_key_PK_hash", "HASH", "\"ID1\" ASC, \"ID2\" ASC", "true", "true", "0"}, - - {"2584860", "TST1", "TST1", "VALUECLASS", "TST1_INDEX", "BTREE", "\"KEY\" ASC, \"_KEY\" ASC", "false", "false", "10"}, - {"2584860", "TST1", "TST1", "VALUECLASS", "TST1_INDEX_proxy", "BTREE", "\"_KEY\" ASC, \"KEY\" ASC", "false", "false", "0"}, - {"2584860", "TST1", 
"TST1", "VALUECLASS", "_key_PK", "BTREE", "\"_KEY\" ASC", "true", "true", "5"}, - {"2584860", "TST1", "TST1", "VALUECLASS", "_key_PK__SCAN_", "SCAN", "null", "false", "false", "0"}, - {"2584860", "TST1", "TST1", "VALUECLASS", "_key_PK_hash", "HASH", "\"_KEY\" ASC", "true", "true", "0"}, - {"2584860", "TST1", "TST1", "VALUECLASS", "_key_PK_proxy", "BTREE", "\"KEY\" ASC", "false", "false", "0"} + Object[][] expectedResults = { + {-825022849, "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "AFFINITY_KEY", "BTREE", "\"ID2\" ASC, \"ID1\" ASC", false, false, 10}, + {-825022849, "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "__SCAN_", "SCAN", null, false, false, null}, + {-825022849, "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "_key_PK", "BTREE", "\"ID1\" ASC, \"ID2\" ASC", true, true, 10}, + {-825022849, "SQL_PUBLIC_AFF_CACHE", "PUBLIC", "AFF_CACHE", "_key_PK_hash", "HASH", "\"ID1\" ASC, \"ID2\" ASC, \"ID2\" ASC", false, true, null}, + + {707660652, "SQL_PUBLIC_CACHE_SQL", "PUBLIC", "CACHE_SQL", "IDX_2", "BTREE", "\"ID\" DESC, \"ID\" ASC", false, false, 13}, + {707660652, "SQL_PUBLIC_CACHE_SQL", "PUBLIC", "CACHE_SQL", "__SCAN_", "SCAN", null, false, false, null}, + {707660652, "SQL_PUBLIC_CACHE_SQL", "PUBLIC", "CACHE_SQL", "_key_PK", "BTREE", "\"ID\" ASC", true, true, 5}, + {707660652, "SQL_PUBLIC_CACHE_SQL", "PUBLIC", "CACHE_SQL", "_key_PK_hash", "HASH", "\"ID\" ASC", false, true, null}, + + {1374144180, "SQL_PUBLIC_DFLT_AFF_CACHE", "PUBLIC", "DFLT_AFF_CACHE", "AFFINITY_KEY", "BTREE", "\"ID1\" ASC, \"ID2\" ASC", false, false, 10}, + {1374144180, "SQL_PUBLIC_DFLT_AFF_CACHE", "PUBLIC", "DFLT_AFF_CACHE", "IDX_AFF_1", "BTREE", "\"ID2\" DESC, \"ID1\" ASC, \"MY_VAL\" DESC", false, false, 10}, + {1374144180, "SQL_PUBLIC_DFLT_AFF_CACHE", "PUBLIC", "DFLT_AFF_CACHE", "__SCAN_", "SCAN", null, false, false, null}, + {1374144180, "SQL_PUBLIC_DFLT_AFF_CACHE", "PUBLIC", "DFLT_AFF_CACHE", "_key_PK", "BTREE", "\"ID1\" ASC, \"ID2\" ASC", true, true, 10}, + {1374144180, 
"SQL_PUBLIC_DFLT_AFF_CACHE", "PUBLIC", "DFLT_AFF_CACHE", "_key_PK_hash", "HASH", "\"ID1\" ASC, \"ID2\" ASC, \"ID1\" ASC", false, true, null}, + + {1102275506, "SQL_PUBLIC_DFLT_CACHE", "PUBLIC", "DFLT_CACHE", "IDX_1", "BTREE", "\"ID2\" DESC, \"ID1\" ASC, \"MY_VAL\" DESC, \"ID1\" ASC, \"ID2\" ASC", false, false, 10}, + {1102275506, "SQL_PUBLIC_DFLT_CACHE", "PUBLIC", "DFLT_CACHE", "IDX_3", "BTREE", "\"MY_VAL\" ASC, \"ID1\" ASC, \"ID2\" ASC, \"ID1\" ASC, \"ID2\" ASC", false, false, 10}, + {1102275506, "SQL_PUBLIC_DFLT_CACHE", "PUBLIC", "DFLT_CACHE", "__SCAN_", "SCAN", null, false, false, null}, + {1102275506, "SQL_PUBLIC_DFLT_CACHE", "PUBLIC", "DFLT_CACHE", "_key_PK", "BTREE", "\"ID1\" ASC, \"ID2\" ASC", true, true, 10}, + {1102275506, "SQL_PUBLIC_DFLT_CACHE", "PUBLIC", "DFLT_CACHE", "_key_PK_hash", "HASH", "\"ID1\" ASC, \"ID2\" ASC", false, true, null}, + + {2584860, "TST1", "TST1", "VALUECLASS", "TST1_INDEX", "BTREE", "\"KEY\" ASC, \"_KEY\" ASC", false, false, 10}, + {2584860, "TST1", "TST1", "VALUECLASS", "__SCAN_", "SCAN", null, false, false, null}, + {2584860, "TST1", "TST1", "VALUECLASS", "_key_PK", "BTREE", "\"_KEY\" ASC", true, true, 5}, + {2584860, "TST1", "TST1", "VALUECLASS", "_key_PK_hash", "HASH", "\"_KEY\" ASC", false, true, null}, }; + assertEquals(expectedResults.length, srvNodeIndexes.size()); + for (int i = 0; i < srvNodeIndexes.size(); i++) { List resRow = srvNodeIndexes.get(i); - String[] expRow = expectedResults[i]; + Object[] expRow = expectedResults[i]; assertEquals(expRow.length, resRow.size()); for (int j = 0; j < expRow.length; j++) - assertEquals(Integer.toString(i), expRow[j], String.valueOf(resRow.get(j))); + assertEquals(expRow[j], resRow.get(j)); } } @@ -1067,10 +1065,12 @@ public void testTablesView() throws Exception { "TABLE_NAME = 'CACHE_SQL'"); List expRow = asList( - "CACHE_SQL", // TABLE_NAME - "PUBLIC", // SCHEMA_NAME - "cache_sql", // CACHE_NAME + cacheSqlId, // CACHE_GROUP_ID + "cache_sql", // CACHE_GROUP_NAME cacheSqlId, // 
CACHE_ID + "cache_sql", // CACHE_NAME + "PUBLIC", // SCHEMA_NAME + "CACHE_SQL", // TABLE_NAME null, // AFFINITY_KEY_COLUMN "ID", // KEY_ALIAS null, // VALUE_ALIAS @@ -1089,10 +1089,12 @@ public void testTablesView() throws Exception { List allExpRows = asList( expRow, asList( - "DFLT_CACHE", // TABLE_NAME - "PUBLIC", // SCHEMA_NAME - "SQL_PUBLIC_DFLT_CACHE", // CACHE_NAME + ddlTabId, // CACHE_GROUP_ID + "SQL_PUBLIC_DFLT_CACHE", // CACHE_GROUP_NAME ddlTabId, // CACHE_ID + "SQL_PUBLIC_DFLT_CACHE", // CACHE_NAME + "PUBLIC", // SCHEMA_NAME + "DFLT_CACHE", // TABLE_NAME "ID2", // AFFINITY_KEY_COLUMN null, // KEY_ALIAS "MY_VAL", // VALUE_ALIAS From 6a706b33bd161422e90d957a4b34ad7ffbd9dc2a Mon Sep 17 00:00:00 2001 From: Vladsz83 Date: Thu, 19 Nov 2020 03:10:12 +0300 Subject: [PATCH 052/110] IGNITE-13663 : Represent in the documenttion affection of several node addresses on failure detection v2. (#8424) --- docs/_docs/clustering/network-configuration.adoc | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/docs/_docs/clustering/network-configuration.adoc b/docs/_docs/clustering/network-configuration.adoc index 8c0e0f8f0f9b8..8d47b60a85398 100644 --- a/docs/_docs/clustering/network-configuration.adoc +++ b/docs/_docs/clustering/network-configuration.adoc @@ -49,10 +49,18 @@ tab:C++[unsupported] The following table describes some most important properties of `TcpDiscoverySpi`. You can find the complete list of properties in the javadoc:org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi[] javadoc. +[CAUTION] +==== +You should initialize the `IgniteConfiguration.localHost` or `TcpDiscoverySpi.localAddress` parameter with the network +interface that will be used for inter-node communication. By default, a node binds to and listens on all available IP +addresses of an environment it's running on. It can prolong node failures detection if some of the node's addresses are +not reachable from other cluster nodes. 
+==== + [cols="1,2,1",opts="header"] |=== |Property | Description| Default Value -| `localAddress`| Local host IP address used for discovery. | By default, the node uses the first non-loopback address it finds. If there is no non-loopback address available, then `java.net.InetAddress.getLocalHost()` is used. +| `localAddress`| Local host IP address used for discovery. If set, overrides the `IgniteConfiguration.localHost` setting. | By default, a node binds to all available network addresses. If there is a non-loopback address available, then java.net.InetAddress.getLocalHost() is used. | `localPort` | The port that the node binds to. If set to a non-default value, other cluster nodes must know this port to be able to discover the node. | `47500` | `localPortRange`| If the `localPort` is busy, the node attempts to bind to the next port (incremented by 1) and continues this process until it finds a free port. The `localPortRange` property defines the number of ports the node will try (starting from `localPort`). | `100` @@ -115,7 +123,7 @@ You can find the list of all properties in the javadoc:org.apache.ignite.spi.com [cols="1,2,1",opts="header"] |=== |Property | Description| Default Value -| `localAddress` | The local address for the communication SPI to bind to. | +| `localAddress` | The local address for the communication SPI to bind to. If set, overrides the `IgniteConfiguration.localHost` setting. | | `localPort` | The local port that the node uses for communication. | `47100` From d3e5b7c11ed037670700eea75851e619d5d1b6b1 Mon Sep 17 00:00:00 2001 From: Vladsz83 Date: Thu, 19 Nov 2020 03:16:31 +0300 Subject: [PATCH 053/110] Fix. 
(#8448) --- .../spi/discovery/tcp/TcpDiscoverySpi.java | 23 +++++++++---------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java index 8a2b4c6790444..98370614dc244 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java @@ -630,12 +630,20 @@ public void setClientReconnectDisabled(boolean clientReconnectDisabled) { /** * Sets local host IP address that discovery SPI uses. *

- * If not provided, by default a first found non-loopback address - * will be used. If there is no non-loopback address available, - * then {@link InetAddress#getLocalHost()} will be used. + * If not provided, the value is resolved from {@link IgniteConfiguration#getLocalHost()}. If it is empty too, by + * default node binds to all available IP addresses. If there is no non-loopback address then + * {@link InetAddress#getLocalHost()} is used. + *

+ * NOTE: You should assign node address through {@link IgniteConfiguration#setLocalHost(String)} or + * {@code setLocalAddress(String)}. Otherwise, several node addresses may be picked up and can prolong + * detection of node failure. Parameters like {@code failureDetectionTimeout} or {@code reconCnt} work per address + * sequentionally. Example: if node is binded to 3 ip addresses, previous node can take up to + * '{@code failureDetectionTimeout} * 3 + {@code connRecoveryTimeout}' to detect failure of malfunctional node and + * establish connection to other one. * * @param locAddr IP address. * @return {@code this} for chaining. + * @see IgniteConfiguration#setLocalHost(String). */ @IgniteSpiConfiguration(optional = true) public TcpDiscoverySpi setLocalAddress(String locAddr) { @@ -894,15 +902,6 @@ public TcpDiscoveryIpFinder getIpFinder() { *

* If not provided {@link org.apache.ignite.spi.discovery.tcp.ipfinder.multicast.TcpDiscoveryMulticastIpFinder} will * be used by default. - *

- * NOTE: You should assing multiple addresses to a node only if they represent some real physical connections - * which can give more reliability. Providing several addresses can prolong failure detection of current node. - * The timeouts and settings on network operations ({@link #failureDetectionTimeout()}, {@link #sockTimeout}, - * {@link #ackTimeout}, {@link #maxAckTimeout}, {@link #reconCnt}) work per connection/address. The exception is - * {@link #connRecoveryTimeout}. And node addresses are sorted out sequentially. - *

- * Example: if you use {@code failureDetectionTimeout} and have set 3 ip addresses for this node, previous node in - * the ring can take up to 'failureDetectionTimeout * 3' to detect failure of current node. * * @param ipFinder IP finder. * @return {@code this} for chaining. From 1654e9fac61842424c08d26a08ef67569f74746a Mon Sep 17 00:00:00 2001 From: Denis Magda Date: Wed, 18 Nov 2020 16:30:31 -0800 Subject: [PATCH 054/110] ignite docs: improved TcpDiscovery.localAdresses documentation --- .../spi/discovery/tcp/TcpDiscoverySpi.java | 22 ++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java index 98370614dc244..543634268829c 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java @@ -628,18 +628,20 @@ public void setClientReconnectDisabled(boolean clientReconnectDisabled) { } /** - * Sets local host IP address that discovery SPI uses. + * Sets network addresses for the Discovery SPI. *

- * If not provided, the value is resolved from {@link IgniteConfiguration#getLocalHost()}. If it is empty too, by - * default node binds to all available IP addresses. If there is no non-loopback address then - * {@link InetAddress#getLocalHost()} is used. + * If not provided, the value is resolved from {@link IgniteConfiguration#getLocalHost()}. If the latter is not set + * as well, the the node binds to all available IP addresses of an environment it's running on. + * If there is no a non-loopback address, then {@link InetAddress#getLocalHost()} is used. *

- * NOTE: You should assign node address through {@link IgniteConfiguration#setLocalHost(String)} or - * {@code setLocalAddress(String)}. Otherwise, several node addresses may be picked up and can prolong - * detection of node failure. Parameters like {@code failureDetectionTimeout} or {@code reconCnt} work per address - * sequentionally. Example: if node is binded to 3 ip addresses, previous node can take up to - * '{@code failureDetectionTimeout} * 3 + {@code connRecoveryTimeout}' to detect failure of malfunctional node and - * establish connection to other one. + * NOTE: You should initialize the {@link IgniteConfiguration#getLocalHost()} or + * {@link TcpDiscoverySpi#getLocalAddress()} parameter with the network + * interface that will be used for inter-node communication. Otherwise, the node can listen on multiple network + * addresses available in the environment and this can prolong node failures detection if some of the addresses are + * not reachable from other cluster nodes. For instance, if the node is bound to 3 network interfaces, + * it can take up to + * '{@link IgniteConfiguration#getFailureDetectionTimeout()} * 3 + {@link TcpDiscoverySpi#getConnectionRecoveryTimeout()}' + * milliseconds for another node to detect a disconnect of the give node. * * @param locAddr IP address. * @return {@code this} for chaining. 
From 905dae7475fc70e0199adc93d8b1671f0a7c31b8 Mon Sep 17 00:00:00 2001 From: Ilya Kazakov Date: Thu, 19 Nov 2020 15:07:13 +0300 Subject: [PATCH 055/110] IGNITE-13591 Added lastCheckPointStarted metric Signed-off-by: Andrey Gura --- .../org/apache/ignite/DataStorageMetrics.java | 7 +++++++ .../persistence/DataStorageMetricsImpl.java | 16 ++++++++++++++ .../DataStorageMetricsSnapshot.java | 9 ++++++++ .../persistence/checkpoint/Checkpointer.java | 1 + .../visor/node/VisorPersistenceMetrics.java | 21 ++++++++++++++++++- .../mxbean/DataStorageMetricsMXBean.java | 4 ++++ 6 files changed, 57 insertions(+), 1 deletion(-) diff --git a/modules/core/src/main/java/org/apache/ignite/DataStorageMetrics.java b/modules/core/src/main/java/org/apache/ignite/DataStorageMetrics.java index c2534cbb3414d..e90d255d1a10e 100644 --- a/modules/core/src/main/java/org/apache/ignite/DataStorageMetrics.java +++ b/modules/core/src/main/java/org/apache/ignite/DataStorageMetrics.java @@ -98,6 +98,13 @@ public interface DataStorageMetrics { */ public long getLastCheckpointDuration(); + /** + * Returns time when the last checkpoint was started. + * + * @return Time when the last checkpoint was started. + * */ + public long getLastCheckpointStarted(); + /** * Gets the duration of last checkpoint lock wait in milliseconds. 
* diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsImpl.java index 059eb88b099c9..80006d8a1c2e2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsImpl.java @@ -62,6 +62,9 @@ public class DataStorageMetricsImpl implements DataStorageMetricsMXBean { /** */ private final AtomicLongMetric lastCpDuration; + /** */ + private final AtomicLongMetric lastCpStart; + /** */ private final AtomicLongMetric lastCpFsyncDuration; @@ -163,6 +166,9 @@ public DataStorageMetricsImpl( lastCpDuration = mreg.longMetric("LastCheckpointDuration", "Duration of the last checkpoint in milliseconds."); + lastCpStart = mreg.longMetric("LastCheckpointStart", + "Start timestamp of the last checkpoint."); + lastCpFsyncDuration = mreg.longMetric("LastCheckpointFsyncDuration", "Duration of the sync phase of the last checkpoint in milliseconds."); @@ -250,6 +256,14 @@ public DataStorageMetricsImpl( return lastCpDuration.value(); } + /** {@inheritDoc} */ + @Override public long getLastCheckpointStarted() { + if (!metricsEnabled) + return 0; + + return lastCpStart.value(); + } + /** {@inheritDoc} */ @Override public long getLastCheckpointLockWaitDuration() { if (!metricsEnabled) @@ -597,6 +611,7 @@ public void onCheckpoint( long pagesWriteDuration, long fsyncDuration, long duration, + long start, long totalPages, long dataPages, long cowPages @@ -607,6 +622,7 @@ public void onCheckpoint( lastCpPagesWriteDuration.value(pagesWriteDuration); lastCpFsyncDuration.value(fsyncDuration); lastCpDuration.value(duration); + lastCpStart.value(start); lastCpTotalPages.value(totalPages); lastCpDataPages.value(dataPages); lastCpCowPages.value(cowPages); diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsSnapshot.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsSnapshot.java index e1aa7469b44ee..ddb5705c1029b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsSnapshot.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsSnapshot.java @@ -43,6 +43,9 @@ public class DataStorageMetricsSnapshot implements DataStorageMetrics { /** */ private long lastCpDuration; + /** */ + private long lastCpStart; + /** */ private long lastCpLockWaitDuration; @@ -119,6 +122,7 @@ public DataStorageMetricsSnapshot(DataStorageMetrics metrics) { walFsyncTimeAvg = metrics.getWalFsyncTimeAverage(); walBuffPollSpinsNum = metrics.getWalBuffPollSpinsRate(); lastCpDuration = metrics.getLastCheckpointDuration(); + lastCpStart = metrics.getLastCheckpointStarted(); lastCpLockWaitDuration = metrics.getLastCheckpointLockWaitDuration(); lastCpMmarkDuration = metrics.getLastCheckpointMarkDuration(); lastCpPagesWriteDuration = metrics.getLastCheckpointPagesWriteDuration(); @@ -173,6 +177,11 @@ public DataStorageMetricsSnapshot(DataStorageMetrics metrics) { return lastCpDuration; } + /** {@inheritDoc} */ + @Override public long getLastCheckpointStarted() { + return lastCpStart; + } + /** {@inheritDoc} */ @Override public long getLastCheckpointLockWaitDuration() { return lastCpLockWaitDuration; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/Checkpointer.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/Checkpointer.java index 9e39dfe143e09..6747e583a8ccc 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/Checkpointer.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/Checkpointer.java @@ -582,6 +582,7 @@ private void updateMetrics(Checkpoint chp, CheckpointMetricsTracker tracker) { tracker.pagesWriteDuration(), tracker.fsyncDuration(), tracker.totalDuration(), + tracker.checkpointStartTime(), chp.pagesSize, tracker.dataPagesWritten(), tracker.cowPagesWritten() diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistenceMetrics.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistenceMetrics.java index bcab3186a89d9..dffe840460426 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistenceMetrics.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistenceMetrics.java @@ -57,6 +57,9 @@ public class VisorPersistenceMetrics extends VisorDataTransferObject { /** */ private long lastCpDuration; + /** */ + private long lastCpStart; + /** */ private long lastCpLockWaitDuration; @@ -136,6 +139,7 @@ public VisorPersistenceMetrics(DataStorageMetrics m) { cpTotalTm = m.getCheckpointTotalTime(); lastCpDuration = m.getLastCheckpointDuration(); + lastCpStart = m.getLastCheckpointStarted(); lastCpLockWaitDuration = m.getLastCheckpointLockWaitDuration(); lastCpMmarkDuration = m.getLastCheckpointMarkDuration(); lastCpPagesWriteDuration = m.getLastCheckpointPagesWriteDuration(); @@ -225,6 +229,15 @@ public long getLastCheckpointingDuration() { return lastCpDuration; } + /** + * Returns time when the last checkpoint was started. + * + * @return Time when the last checkpoint was started. + * */ + public long getLastCheckpointStarted() { + return lastCpStart; + } + /** * @return Checkpoint lock wait time in milliseconds. 
*/ @@ -360,7 +373,7 @@ public long getSparseStorageSize() { /** {@inheritDoc} */ @Override public byte getProtocolVersion() { - return V3; + return V4; } /** {@inheritDoc} */ @@ -397,6 +410,9 @@ public long getSparseStorageSize() { // V3 out.writeLong(storageSize); out.writeLong(sparseStorageSize); + + // V4 + out.writeLong(lastCpStart); } /** {@inheritDoc} */ @@ -435,6 +451,9 @@ public long getSparseStorageSize() { storageSize = in.readLong(); sparseStorageSize = in.readLong(); } + + if (protoVer > V3) + lastCpStart = in.readLong(); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/mxbean/DataStorageMetricsMXBean.java b/modules/core/src/main/java/org/apache/ignite/mxbean/DataStorageMetricsMXBean.java index f48771f38ecc0..be79ec836e66d 100644 --- a/modules/core/src/main/java/org/apache/ignite/mxbean/DataStorageMetricsMXBean.java +++ b/modules/core/src/main/java/org/apache/ignite/mxbean/DataStorageMetricsMXBean.java @@ -76,6 +76,10 @@ public interface DataStorageMetricsMXBean extends DataStorageMetrics { @MXBeanDescription("Duration of the last checkpoint in milliseconds.") @Override long getLastCheckpointDuration(); + /** {@inheritDoc} */ + @MXBeanDescription("Time when the last checkpoint was started.") + @Override long getLastCheckpointStarted(); + /** {@inheritDoc} */ @MXBeanDescription("Duration of the checkpoint lock wait in milliseconds.") @Override long getLastCheckpointLockWaitDuration(); From 39793a70becd153d7773ae2796c9646a636d1ece Mon Sep 17 00:00:00 2001 From: tledkov Date: Thu, 19 Nov 2020 16:32:36 +0300 Subject: [PATCH 056/110] IGNITE-9182 propagate lazy flag to synthetic SELECT used by first step of DML queries (closes #8473) --- .../processors/query/h2/IgniteH2Indexing.java | 19 +- .../processors/query/h2/dml/DmlAstUtils.java | 66 ++++ .../processors/query/h2/dml/UpdatePlan.java | 17 +- .../query/h2/dml/UpdatePlanBuilder.java | 9 +- .../query/h2/sql/GridSqlSelect.java | 26 ++ .../processors/query/LazyOnDmlTest.java | 
328 ++++++++++++++++++ .../IgniteBinaryCacheQueryTestSuite2.java | 3 + 7 files changed, 460 insertions(+), 8 deletions(-) create mode 100644 modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/LazyOnDmlTest.java diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index 0ded892fce376..1154289319a41 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -1600,6 +1600,7 @@ private void checkSecurity(Collection cacheIds) { fldsQry.setTimeout(timeout, TimeUnit.MILLISECONDS); fldsQry.setPageSize(pageSize); fldsQry.setLocal(true); + fldsQry.setLazy(U.isFlagSet(flags, GridH2QueryRequest.FLAG_LAZY)); boolean loc = true; @@ -1643,7 +1644,8 @@ private void checkSecurity(Collection cacheIds) { .setEnforceJoinOrder(fldsQry.isEnforceJoinOrder()) .setLocal(fldsQry.isLocal()) .setPageSize(fldsQry.getPageSize()) - .setTimeout(fldsQry.getTimeout(), TimeUnit.MILLISECONDS); + .setTimeout(fldsQry.getTimeout(), TimeUnit.MILLISECONDS) + .setLazy(fldsQry.isLazy()); QueryCursorImpl> cur; @@ -2881,7 +2883,12 @@ private UpdateResult executeUpdateNonTransactional( .setEnforceJoinOrder(qryDesc.enforceJoinOrder()) .setLocal(qryDesc.local()) .setPageSize(qryParams.pageSize()) - .setTimeout(qryParams.timeout(), TimeUnit.MILLISECONDS); + .setTimeout(qryParams.timeout(), TimeUnit.MILLISECONDS) + // On no MVCC mode we cannot use lazy mode when UPDATE query contains updated columns + // in WHERE condition because it may be cause of update one entry several times + // (when index for such columns is selected for scan): + // e.g. : UPDATE test SET val = val + 1 WHERE val >= ? 
+ .setLazy(qryParams.lazy() && plan.canSelectBeLazy()); Iterable> cur; @@ -3014,7 +3021,10 @@ else if (plan.hasRows()) { .setEnforceJoinOrder(qryDesc.enforceJoinOrder()) .setLocal(qryDesc.local()) .setPageSize(qryParams.pageSize()) - .setTimeout((int)timeout, TimeUnit.MILLISECONDS); + .setTimeout((int)timeout, TimeUnit.MILLISECONDS) + // In MVCC mode we can use lazy mode always (when is set up) without dependency on + // updated columns and WHERE condition. + .setLazy(qryParams.lazy()); FieldsQueryCursor> cur = executeSelectForDml( qryDesc.schemaName(), @@ -3055,6 +3065,9 @@ else if (plan.hasRows()) { if (distributedPlan.isReplicatedOnly()) flags |= GridH2QueryRequest.FLAG_REPLICATED; + if (qryParams.lazy()) + flags |= GridH2QueryRequest.FLAG_LAZY; + flags = GridH2QueryRequest.setDataPageScanEnabled(flags, qryParams.dataPageScanEnabled()); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlAstUtils.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlAstUtils.java index e679ff5d558f0..ee9daa4ca8f51 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlAstUtils.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlAstUtils.java @@ -18,9 +18,11 @@ package org.apache.ignite.internal.processors.query.h2.dml; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.stream.Collectors; import org.apache.ignite.IgniteException; import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; import org.apache.ignite.internal.processors.query.IgniteSQLException; @@ -54,6 +56,7 @@ import org.apache.ignite.lang.IgnitePredicate; import org.h2.command.Parser; import org.h2.expression.Expression; +import org.h2.index.Index; import org.h2.table.Column; import org.h2.table.Table; import org.h2.util.IntArray; 
@@ -349,6 +352,7 @@ public static GridSqlSelect selectForUpdate(GridSqlUpdate update) { for (GridSqlColumn c : update.cols()) { String newColName = Parser.quoteIdentifier("_upd_" + c.columnName()); + // We have to use aliases to cover cases when the user // wants to update _val field directly (if it's a literal) GridSqlAlias alias = new GridSqlAlias(newColName, elementOrDefault(update.set().get(c.columnName()), c), true); @@ -358,12 +362,74 @@ public static GridSqlSelect selectForUpdate(GridSqlUpdate update) { GridSqlElement where = update.where(); + // On no MVCC mode we cannot use lazy mode when UPDATE query contains index with updated columns + // and that index may be chosen to scan by WHERE condition + // because in this case any rows update may be updated several times. + // e.g. in the cases below we cannot use lazy mode: + // + // 1. CREATE INDEX idx on test(val) + // UPDATE test SET val = val + 1 WHERE val >= ? + // + // 2. CREATE INDEX idx on test(val0, val1) + // UPDATE test SET val1 = val1 + 1 WHERE val0 >= ? + mapQry.canBeLazy(!isIndexWithUpdateColumnsMayBeUsed( + gridTbl, + update.cols().stream() + .map(GridSqlColumn::column) + .collect(Collectors.toSet()), + extractColumns(gridTbl, where))); + mapQry.where(where); mapQry.limit(update.limit()); return mapQry; } + /** + * @return Set columns of the specified table that are used in expression. + */ + private static Set extractColumns(GridH2Table tbl, GridSqlAst expr) { + if (expr == null) + return Collections.emptySet(); + + if (expr instanceof GridSqlColumn && ((GridSqlColumn)expr).column().getTable().equals(tbl)) + return Collections.singleton(((GridSqlColumn)expr).column()); + + HashSet set = new HashSet<>(); + + for (int i = 0; i < expr.size(); ++i) + set.addAll(extractColumns(tbl, expr.child(i))); + + return set; + } + + /** + * @return {@code true} if the index contains update columns may be potentially used for scan. 
+ */ + private static boolean isIndexWithUpdateColumnsMayBeUsed( + GridH2Table tbl, + Set updateCols, + Set whereCols) { + if (F.isEmpty(whereCols)) + return false; + + if (updateCols.size() == 1 && whereCols.size() == 1 + && tbl.rowDescriptor().isValueColumn(F.first(updateCols).getColumnId()) + && tbl.rowDescriptor().isValueColumn(F.first(whereCols).getColumnId())) + return true; + + for (Index idx : tbl.getIndexes()) { + if (idx.equals(tbl.getPrimaryKey()) || whereCols.contains(idx.getColumns()[0])) { + for (Column idxCol : idx.getColumns()) { + if (updateCols.contains(idxCol)) + return true; + } + } + } + + return false; + } + /** * Do what we can to compute default value for this column (mimics H2 behavior). * @see Table#getDefaultValue diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java index 66c4bb2c319aa..aeaea29ce723a 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java @@ -93,6 +93,9 @@ public final class UpdatePlan { /** Additional info for distributed update. */ private final DmlDistributedPlanInfo distributed; + /** Additional info for distributed update. */ + private final boolean canSelectBeLazy; + /** * Constructor. 
* @@ -125,7 +128,8 @@ public UpdatePlan( List> rows, int rowsNum, @Nullable FastUpdate fastUpdate, - @Nullable DmlDistributedPlanInfo distributed + @Nullable DmlDistributedPlanInfo distributed, + boolean canSelectBeLazy ) { this.colNames = colNames; this.colTypes = colTypes; @@ -145,6 +149,7 @@ public UpdatePlan( this.isLocSubqry = isLocSubqry; this.fastUpdate = fastUpdate; this.distributed = distributed; + this.canSelectBeLazy = canSelectBeLazy; } /** @@ -177,7 +182,8 @@ public UpdatePlan( null, 0, fastUpdate, - distributed + distributed, + true ); } @@ -599,6 +605,13 @@ public Object processRowForTx(List row) throws IgniteCheckedException { } } + /** + * @return {@code true} is the SELECT query may be executed in lazy mode. + */ + public boolean canSelectBeLazy() { + return canSelectBeLazy; + } + /** * Abstract iterator. */ diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java index 8cc41b216ba94..fd9496ca8a4e7 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java @@ -311,7 +311,8 @@ else if (stmt instanceof GridSqlMerge) { rows, rowsNum, null, - distributed + distributed, + false ); } @@ -472,7 +473,8 @@ else if (stmt instanceof GridSqlDelete) { null, 0, null, - distributed + distributed, + sel.canBeLazy() ); } else { @@ -590,7 +592,8 @@ public static UpdatePlan planForBulkLoad(SqlBulkLoadCommand cmd, GridH2Table tbl null, 0, null, - null + null, + true ); } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlSelect.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlSelect.java index 6ecf002998edd..93c1b8eae6d96 100644 --- 
a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlSelect.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlSelect.java @@ -61,6 +61,13 @@ public class GridSqlSelect extends GridSqlQuery { /** */ private boolean isForUpdate; + /** Used only for SELECT based on UPDATE. + * It cannot be lazy when updated columns are used in the conditions. + * In this case index based on these columns may be chosen to scan and some rows may be updated + * more than once time. + */ + private boolean canBeLazy; + /** * @param colIdx Column index as for {@link #column(int)}. * @return Child index for {@link #child(int)}. @@ -437,4 +444,23 @@ public GridSqlSelect copySelectForUpdate() { return copy; } + + /** + * @param canBeLazy see {@link #canBeLazy()}. + */ + public void canBeLazy(boolean canBeLazy) { + this.canBeLazy = canBeLazy; + } + + /** + * Used only for SELECT based on UPDATE. + * It cannot be lazy when updated columns are used in the conditions. + * In this case index based on these columns may be chosen to scan and some rows may be updated + * more than once time. + * + * @return {@code true} is lazy flag is applicable. + */ + public boolean canBeLazy() { + return canBeLazy; + } } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/LazyOnDmlTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/LazyOnDmlTest.java new file mode 100644 index 0000000000000..bed2178aa1c9f --- /dev/null +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/LazyOnDmlTest.java @@ -0,0 +1,328 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.ignite.internal.processors.query; + +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteDataStreamer; +import org.apache.ignite.binary.BinaryObjectBuilder; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.QueryEntity; +import org.apache.ignite.cache.QueryIndex; +import org.apache.ignite.cache.QueryIndexType; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.cache.query.FieldsQueryCursor; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.index.AbstractIndexingCommonTest; +import org.apache.ignite.internal.processors.query.h2.H2PooledConnection; +import org.apache.ignite.internal.processors.query.h2.H2QueryInfo; +import org.apache.ignite.internal.processors.query.h2.H2Utils; +import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing; +import 
org.jetbrains.annotations.Nullable; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +/** + * Tests for lazy mode for DML queries. + */ +@RunWith(Parameterized.class) +public class LazyOnDmlTest extends AbstractIndexingCommonTest { + /** Keys count. */ + private static final int KEY_CNT = 3_000; + + /** */ + @Parameterized.Parameter + public CacheAtomicityMode atomicityMode; + + /** */ + @Parameterized.Parameter(1) + public CacheMode cacheMode; + + /** + * @return Test parameters. + */ + @Parameterized.Parameters(name = "atomicityMode={0}, cacheMode={1}") + public static Collection parameters() { + Set paramsSet = new LinkedHashSet<>(); + + Object[] paramTemplate = new Object[2]; + + for (CacheAtomicityMode atomicityMode : CacheAtomicityMode.values()) { + paramTemplate = Arrays.copyOf(paramTemplate, paramTemplate.length); + + paramTemplate[0] = atomicityMode; + + for (CacheMode cacheMode : new CacheMode[] {CacheMode.PARTITIONED, CacheMode.REPLICATED}) { + Object[] params = Arrays.copyOf(paramTemplate, paramTemplate.length); + + params[1] = cacheMode; + + paramsSet.add(params); + } + } + + return paramsSet; + } + + /** {@inheritDoc} */ + @Override protected void beforeTestsStarted() throws Exception { + super.beforeTestsStarted(); + + GridQueryProcessor.idxCls = CheckLazyIndexing.class; + + startGrids(3); + } + + /** {@inheritDoc} */ + @Override protected void afterTestsStopped() throws Exception { + stopAllGrids(); + + super.afterTestsStopped(); + } + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + IgniteCache c = grid(0).createCache(new CacheConfiguration() + .setName("test") + .setSqlSchema("TEST") + .setAtomicityMode(atomicityMode) + .setCacheMode(cacheMode) + .setQueryEntities(Collections.singleton(new QueryEntity(Long.class.getName(), "testVal") + .setTableName("test") + .addQueryField("id", Long.class.getName(), null) + .addQueryField("val0", 
Long.class.getName(), null) + .addQueryField("val1", Long.class.getName(), null) + .addQueryField("val2", Long.class.getName(), null) + .setKeyFieldName("id") + .setIndexes(Collections.singletonList( + new QueryIndex(Arrays.asList("val0", "val1"), QueryIndexType.SORTED) + )) + )) + .setBackups(1) + .setAffinity(new RendezvousAffinityFunction(false, 10))); + + try (IgniteDataStreamer streamer = grid(0).dataStreamer("test")) { + for (long i = 0; i < KEY_CNT; ++i) { + BinaryObjectBuilder bob = grid(0).binary().builder("testVal"); + + bob.setField("val0", i); + bob.setField("val1", i); + bob.setField("val2", i); + + streamer.addData(i, bob.build()); + } + } + + sql("CREATE TABLE table1 (id INT PRIMARY KEY, col0 INT, col1 VARCHAR (100))"); + + sql("INSERT INTO table1 (id, col0, col1) " + + "SELECT 1, 11, 'FIRST' " + + "UNION ALL " + + "SELECT 11,12, 'SECOND' " + + "UNION ALL " + + "SELECT 21, 13, 'THIRD' " + + "UNION ALL " + + "SELECT 31, 14, 'FOURTH'"); + + sql("CREATE TABLE table2 (id INT PRIMARY KEY, col0 INT, col1 VARCHAR (100))"); + + sql("INSERT INTO table2 (id, col0, col1) " + + "SELECT 1, 21, 'TWO-ONE' " + + "UNION ALL " + + "SELECT 11, 22, 'TWO-TWO' " + + "UNION ALL " + + "SELECT 21, 23, 'TWO-THREE' " + + "UNION ALL " + + "SELECT 31, 24, 'TWO-FOUR'"); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + for (String cache : grid(0).cacheNames()) + grid(0).cache(cache).destroy(); + + super.afterTest(); + } + + /** + */ + @Test + public void testUpdateNotLazy() throws Exception { + checkUpdateNotLazy("UPDATE test SET val0 = val0 + 1 WHERE val0 >= 0"); + checkUpdateNotLazy("UPDATE test SET val1 = val1 + 1 WHERE val0 >= 0"); + } + + /** + */ + public void checkUpdateNotLazy(String sql) throws Exception { + try (AutoCloseable checker = CheckLazyIndexing.checkLazy(atomicityMode == CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT)) { + List> res = sql(sql).getAll(); + + // Check that all rows updates only ones. 
+ assertEquals((long)KEY_CNT, res.get(0).get(0)); + } + } + + /** + */ + @Test + public void testUpdateLazy() throws Exception { + checkUpdateLazy("UPDATE test SET val0 = val0 + 1"); + checkUpdateLazy("UPDATE test SET val2 = val2 + 1 WHERE val2 >= 0"); + checkUpdateLazy("UPDATE test SET val0 = val0 + 1 WHERE val1 >= 0"); + } + + /** + */ + public void checkUpdateLazy(String sql) throws Exception { + try (AutoCloseable checker = CheckLazyIndexing.checkLazy(true)) { + List> res = sql(sql).getAll(); + + // Check that all rows updates only ones. + assertEquals((long)KEY_CNT, res.get(0).get(0)); + } + } + + /** + */ + @Test + public void testDeleteWithoutReduce() throws Exception { + try (AutoCloseable checker = CheckLazyIndexing.checkLazy(true)) { + List> res = sql("DELETE FROM test WHERE val0 >= 0").getAll(); + + assertEquals((long)KEY_CNT, res.get(0).get(0)); + } + } + + /** + */ + @Test + public void testUpdateFromSubqueryLazy() throws Exception { + try (AutoCloseable checker = CheckLazyIndexing.checkLazy(true)) { + List> res; + + res = sql("UPDATE table1 " + + "SET (col0, col1) = " + + " (SELECT table2.col0, table2.col1 FROM table2 WHERE table2.id = table1.id)" + + "WHERE table1.id in (21, 31)").getAll(); + + assertEquals(2L, res.get(0).get(0)); + + res = sql("UPDATE table1 " + + "SET (col0, col1) = " + + " (SELECT table2.col0, table2.col1 FROM table2 WHERE table2.id = table1.id) " + + "WHERE exists (select * from table2 where table2.id = table1.id) " + + "AND table1.id in (21, 31)").getAll(); + + assertEquals(2L, res.get(0).get(0)); + } + } + + /** + */ + @Test + public void testUpdateValueField() throws Exception { + sql("CREATE TABLE TEST2 (id INT PRIMARY KEY, val INT) " + + "WITH\"WRAP_VALUE=false\""); + + sql("INSERT INTO TEST2 VALUES (0, 0), (1, 1), (2, 2)"); + + try (AutoCloseable checker = CheckLazyIndexing.checkLazy(false)) { + // 'val' field is the alias for _val. There is index for _val. 
+ List> res = sql("UPDATE TEST2 SET _val = _val + 1 WHERE val >=0").getAll(); + + assertEquals(3L, res.get(0).get(0)); + } + } + + /** + * @param sql SQL query. + * @param args Query parameters. + * @return Results cursor. + */ + private FieldsQueryCursor> sql(String sql, Object... args) { + return sql(grid(0), sql, args); + } + + /** + * @param ign Node. + * @param sql SQL query. + * @param args Query parameters. + * @return Results cursor. + */ + private FieldsQueryCursor> sql(IgniteEx ign, String sql, Object... args) { + return ign.context().query().querySqlFields(new SqlFieldsQuery(sql) + .setLazy(true) + .setSchema("TEST") + .setPageSize(1) + .setArgs(args), false); + } + + /** */ + private static class CheckLazyIndexing extends IgniteH2Indexing { + /** */ + private static Boolean expectedLazy; + + /** */ + private static int qryCnt; + + /** {@inheritDoc} */ + @Override public ResultSet executeSqlQueryWithTimer(PreparedStatement stmt, H2PooledConnection conn, String sql, + int timeoutMillis, @Nullable GridQueryCancel cancel, Boolean dataPageScanEnabled, + H2QueryInfo qryInfo) throws IgniteCheckedException { + if (expectedLazy != null) { + assertEquals( + "Unexpected lazy flag [sql=" + sql + ']', + (boolean)expectedLazy, + H2Utils.session(conn.connection()).isLazyQueryExecution() + ); + } + + qryCnt++; + + return super.executeSqlQueryWithTimer(stmt, conn, sql, timeoutMillis, cancel, dataPageScanEnabled, qryInfo); + } + + /** */ + public static AutoCloseable checkLazy(boolean expLazy) { + expectedLazy = expLazy; + + return () -> { + assertTrue("Lazy checker doesn't work properly", CheckLazyIndexing.qryCnt > 0); + + expectedLazy = null; + qryCnt = 0; + }; + } + } +} diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite2.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite2.java index 2022fce59e26d..6676e470bcd54 100644 --- 
a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite2.java +++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite2.java @@ -63,6 +63,7 @@ import org.apache.ignite.internal.processors.query.IgniteCacheGroupsSqlSegmentedIndexMultiNodeSelfTest; import org.apache.ignite.internal.processors.query.IgniteCacheGroupsSqlSegmentedIndexSelfTest; import org.apache.ignite.internal.processors.query.IgniteSqlCreateTableTemplateTest; +import org.apache.ignite.internal.processors.query.LazyOnDmlTest; import org.apache.ignite.internal.processors.query.LocalQueryLazyTest; import org.apache.ignite.internal.processors.query.LongRunningQueryTest; import org.apache.ignite.internal.processors.query.SqlIndexConsistencyAfterInterruptAtomicCacheOperationTest; @@ -92,6 +93,8 @@ */ @RunWith(Suite.class) @Suite.SuiteClasses({ + LazyOnDmlTest.class, + DefaultQueryTimeoutTestSuite.class, CreateIndexOnInvalidDataTypeTest.class, From 533a6365cece7896e6ade969f085ee41b690759d Mon Sep 17 00:00:00 2001 From: Aleksey Plekhanov Date: Thu, 19 Nov 2020 16:47:54 +0300 Subject: [PATCH 057/110] IGNITE-13719 Java thin client: Fix timeout on idle connection - Fixes #8480. 
Signed-off-by: Aleksey Plekhanov --- .../client/thin/TcpClientChannel.java | 15 +- .../client/thin/AbstractThinClientTest.java | 20 +- .../internal/client/thin/TimeoutTest.java | 220 ++++++++++++++++++ .../apache/ignite/client/ClientTestSuite.java | 4 +- 4 files changed, 254 insertions(+), 5 deletions(-) create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/client/thin/TimeoutTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java index c3576720092b9..7c791305cd1c9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java @@ -168,6 +168,9 @@ class TcpClientChannel implements ClientChannel { /** Receiver thread (processes incoming messages). */ private Thread receiverThread; + /** Send/receive timeout in milliseconds. */ + private final int timeout; + /** Constructor. */ TcpClientChannel(ClientChannelConfiguration cfg) throws ClientConnectionException, ClientAuthenticationException, ClientProtocolError { @@ -176,17 +179,23 @@ class TcpClientChannel implements ClientChannel { Executor cfgExec = cfg.getAsyncContinuationExecutor(); asyncContinuationExecutor = cfgExec != null ? cfgExec : ForkJoinPool.commonPool(); + timeout = cfg.getTimeout(); + try { sock = createSocket(cfg); out = sock.getOutputStream(); dataInput = new ByteCountingDataInput(sock.getInputStream()); + + handshake(DEFAULT_VERSION, cfg.getUserName(), cfg.getUserPassword(), cfg.getUserAttributes()); + + // Disable timeout on socket after handshake, instead, get future result with timeout in "receive" method. 
+ if (timeout > 0) + sock.setSoTimeout(0); } catch (IOException e) { throw handleIOError("addr=" + cfg.getAddress(), e); } - - handshake(DEFAULT_VERSION, cfg.getUserName(), cfg.getUserPassword(), cfg.getUserAttributes()); } /** {@inheritDoc} */ @@ -303,7 +312,7 @@ private ClientRequestFuture send(ClientOperation op, Consumer T receive(ClientRequestFuture pendingReq, Function payloadReader) throws ClientException { try { - byte[] payload = pendingReq.get(); + byte[] payload = timeout > 0 ? pendingReq.get(timeout) : pendingReq.get(); if (payload == null || payloadReader == null) return null; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/client/thin/AbstractThinClientTest.java b/modules/core/src/test/java/org/apache/ignite/internal/client/thin/AbstractThinClientTest.java index b985a77d5055a..2e2f21d491c95 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/client/thin/AbstractThinClientTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/client/thin/AbstractThinClientTest.java @@ -38,6 +38,24 @@ protected ClientConfiguration getClientConfiguration() { return new ClientConfiguration(); } + /** + * Return thin client port for given node. + * + * @param node Node. + */ + protected int clientPort(ClusterNode node) { + return node.attribute(ClientListenerProcessor.CLIENT_LISTENER_PORT); + } + + /** + * Return host for given node. + * + * @param node Node. + */ + protected String clientHost(ClusterNode node) { + return F.first(node.addresses()); + } + /** * Start thin client with configured endpoints to specified nodes. * @@ -50,7 +68,7 @@ protected IgniteClient startClient(ClusterNode... 
nodes) { for (int i = 0; i < nodes.length; i++) { ClusterNode node = nodes[i]; - addrs[i] = F.first(node.addresses()) + ":" + node.attribute(ClientListenerProcessor.CLIENT_LISTENER_PORT); + addrs[i] = clientHost(node) + ":" + clientPort(node); } return Ignition.startClient(getClientConfiguration().setAddresses(addrs)); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/client/thin/TimeoutTest.java b/modules/core/src/test/java/org/apache/ignite/internal/client/thin/TimeoutTest.java new file mode 100644 index 0000000000000..2c7bf8847aff3 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/client/thin/TimeoutTest.java @@ -0,0 +1,220 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.client.thin; + +import java.io.InputStream; +import java.io.OutputStream; +import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.net.Socket; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteException; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.client.ClientCache; +import org.apache.ignite.client.ClientCacheConfiguration; +import org.apache.ignite.client.ClientConnectionException; +import org.apache.ignite.client.ClientException; +import org.apache.ignite.client.ClientTransaction; +import org.apache.ignite.client.IgniteClient; +import org.apache.ignite.configuration.ClientConfiguration; +import org.apache.ignite.configuration.ClientConnectorConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.binary.streams.BinaryHeapOutputStream; +import org.apache.ignite.internal.binary.streams.BinaryOutputStream; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.testframework.GridTestUtils; +import org.junit.Test; + +import static org.apache.ignite.configuration.ClientConnectorConfiguration.DFLT_PORT; +import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC; +import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ; + +/** + * Thin client timeouts tests. + */ +public class TimeoutTest extends AbstractThinClientTest { + /** + * Default timeout value. 
+ */ + private static final int TIMEOUT = 500; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + return super.getConfiguration(igniteInstanceName).setClientConnectorConfiguration( + new ClientConnectorConfiguration().setHandshakeTimeout(TIMEOUT)); + } + + /** {@inheritDoc} */ + @Override protected ClientConfiguration getClientConfiguration() { + return super.getClientConfiguration().setTimeout(TIMEOUT); + } + + /** + * Test that server closes thin client connection in case of handshake timeout. + */ + @Test + public void testServerClosesThinClientConnectionOnHandshakeTimeout() { + try (Ignite ignite = startGrid(0)) { + long ts0 = System.currentTimeMillis(); + + Socket s = new Socket(); + + s.connect(new InetSocketAddress(clientHost(ignite.cluster().localNode()), + clientPort(ignite.cluster().localNode())), 0); + + s.setSoTimeout(TIMEOUT * 2); + + OutputStream os = s.getOutputStream(); + + try (BinaryOutputStream bos = new BinaryHeapOutputStream(32)) { + bos.writeInt(1000); // Size. + + os.write(bos.arrayCopy()); + os.flush(); + + InputStream is = s.getInputStream(); + + assertEquals(-1, is.read()); // Connection and stream closed by server after timeout. + + long ts1 = System.currentTimeMillis(); + + assertTrue("Unexpected timeout [ts0=" + ts0 + ", ts1=" + ts1 + ']', + ts1 - ts0 >= TIMEOUT && ts1 - ts0 < TIMEOUT * 2); + } + finally { + s.close(); + } + } + catch (Exception e) { + fail("Exception while sending message: " + e.getMessage()); + } + } + + /** + * Test client timeout on handshake. 
+ */ + @Test + @SuppressWarnings("ThrowableNotThrown") + public void testClientTimeoutOnHandshake() throws Exception { + ServerSocket sock = new ServerSocket(); + + sock.bind(new InetSocketAddress("127.0.0.1", DFLT_PORT)); + + AtomicBoolean connectionAccepted = new AtomicBoolean(); + + CountDownLatch latch = new CountDownLatch(1); + + IgniteInternalFuture fut = GridTestUtils.runAsync(() -> { + try { + Socket accepted = sock.accept(); + + connectionAccepted.set(true); + + latch.await(TIMEOUT * 2, TimeUnit.MILLISECONDS); + + U.closeQuiet(accepted); + } + catch (Exception e) { + throw new IgniteException("Accept thread failed: " + e.getMessage(), e); + } + }); + + long ts0 = System.currentTimeMillis(); + + try { + GridTestUtils.assertThrowsWithCause( + (Runnable)() -> Ignition.startClient(getClientConfiguration().setAddresses("127.0.0.1:" + DFLT_PORT)), + ClientConnectionException.class); + } + finally { + latch.countDown(); + } + + U.closeQuiet(sock); + + assertTrue(connectionAccepted.get()); + + long ts1 = System.currentTimeMillis(); + + assertTrue("Unexpected timeout [ts0=" + ts0 + ", ts1=" + ts1 + ']', + ts1 - ts0 >= TIMEOUT && ts1 - ts0 < TIMEOUT * 2); + + fut.get(); + } + + /** + * Test client timeout on operation. + */ + @Test + @SuppressWarnings("ThrowableNotThrown") + public void testClientTimeoutOnOperation() throws Exception { + try (Ignite ignite = startGrid(0)) { + try (IgniteClient client = startClient(0)) { + ClientCache cache = client.getOrCreateCache(new ClientCacheConfiguration() + .setName("cache").setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL)); + + doSleep(TIMEOUT * 2); + + // Should not fail if connection is idle. 
+ cache.put(0, 0); + + CyclicBarrier barrier = new CyclicBarrier(2); + + IgniteInternalFuture fut = GridTestUtils.runAsync(() -> { + try (ClientTransaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) { + cache.put(0, 0); + + barrier.await(TIMEOUT * 2, TimeUnit.MILLISECONDS); + barrier.await(TIMEOUT * 2, TimeUnit.MILLISECONDS); + } + catch (Exception e) { + throw new IgniteException(e); + } + }); + + // Wait for the key locked. + barrier.await(TIMEOUT * 2, TimeUnit.MILLISECONDS); + + long ts0 = System.currentTimeMillis(); + + try (ClientTransaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) { + try { + GridTestUtils.assertThrowsWithCause(() -> cache.put(0, 0), ClientException.class); + } + finally { + // To unlock another thread. + barrier.await(TIMEOUT * 2, TimeUnit.MILLISECONDS); + } + } + + long ts1 = System.currentTimeMillis(); + + assertTrue("Unexpected timeout [ts0=" + ts0 + ", ts1=" + ts1 + ']', + ts1 - ts0 >= TIMEOUT && ts1 - ts0 < TIMEOUT * 2); + + fut.get(); + } + } + } +} diff --git a/modules/indexing/src/test/java/org/apache/ignite/client/ClientTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/client/ClientTestSuite.java index 55a24617984bb..48d346f38e3f4 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/client/ClientTestSuite.java +++ b/modules/indexing/src/test/java/org/apache/ignite/client/ClientTestSuite.java @@ -27,6 +27,7 @@ import org.apache.ignite.internal.client.thin.ThinClientPartitionAwarenessResourceReleaseTest; import org.apache.ignite.internal.client.thin.ThinClientPartitionAwarenessStableTopologyTest; import org.apache.ignite.internal.client.thin.ThinClientPartitionAwarenessUnstableTopologyTest; +import org.apache.ignite.internal.client.thin.TimeoutTest; import org.junit.runner.RunWith; import org.junit.runners.Suite; @@ -60,7 +61,8 @@ ThinClientPartitionAwarenessResourceReleaseTest.class, ThinClientPartitionAwarenessDiscoveryTest.class, ReliableChannelTest.class, - 
CacheAsyncTest.class + CacheAsyncTest.class, + TimeoutTest.class }) public class ClientTestSuite { // No-op. From 52cb38c641620446edd5a1033a62205deca41f2b Mon Sep 17 00:00:00 2001 From: Ilya Kasnacheev Date: Thu, 19 Nov 2020 18:05:05 +0300 Subject: [PATCH 058/110] IGNITE-13711 Log exception where query initialization failed for cache - Fixes #8463. --- .../ignite/internal/processors/cache/GridCacheProcessor.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java index d3beafcc91e36..01124fbb665db 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java @@ -1711,7 +1711,8 @@ public IgniteInternalFuture startCachesOnLocalJoin( ctx.query().initQueryStructuresForNotStartedCache(cacheDesc); } catch (Exception e) { - log.error("Can't initialize query structures for not started cache [cacheName=" + cacheDesc.cacheName() + "]"); + log.error("Can't initialize query structures for not started cache [cacheName=" + + cacheDesc.cacheName() + "]", e); } }); From 2b2b07842ffc5512b0b1207ac4b03db887899862 Mon Sep 17 00:00:00 2001 From: gurustron Date: Thu, 19 Nov 2020 21:26:48 +0300 Subject: [PATCH 059/110] IGNITE-13336 .NET: Fix misleading exception when LINQ expression can't be translated to SQL (#8459) * IGNITE-13336 - repro. * IGNITE-13336 - exception message.. * IGNITE-13336 - tests. 
--- .../Cache/Query/Linq/CacheLinqTest.Misc.cs | 28 +++++++++++++++++++ .../Impl/CacheQueryExpressionVisitor.cs | 6 ++-- 2 files changed, 31 insertions(+), 3 deletions(-) diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Misc.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Misc.cs index 6d26131b82af8..012f90026ab28 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Misc.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Misc.cs @@ -27,11 +27,14 @@ namespace Apache.Ignite.Core.Tests.Cache.Query.Linq { using System; + using System.Collections.Generic; using System.Linq; + using System.Linq.Expressions; using Apache.Ignite.Core.Cache; using Apache.Ignite.Core.Cache.Configuration; using Apache.Ignite.Linq; using NUnit.Framework; + using NUnit.Framework.Constraints; ///

/// Tests LINQ. @@ -353,5 +356,30 @@ public void TestTimeout() Assert.IsTrue(ex.ToString().Contains("QueryCancelledException: The query was cancelled while executing.")); } + + /// + /// Tests that is not supported. + /// + [Test] + public void TestInvokeThrowsNotSupportedException() + { + var constraint = new ReusableConstraint(Is.TypeOf() + .And.Message.StartsWith("The LINQ expression '") + .And.Message.Contains("Invoke") + .And.Message.Contains( + "could not be translated. Either rewrite the query in a form that can be translated, or switch to client evaluation explicitly by inserting a call to either AsEnumerable() or ToList().")); + + Func, bool> filter = entry => false; + // ReSharper disable once ReturnValueOfPureMethodIsNotUsed + Assert.Throws(constraint, () => GetPersonCache().AsCacheQueryable() + .Where(x => filter(x)) + .ToList()); + + Func, int> selector = x => x.Key; + // ReSharper disable once ReturnValueOfPureMethodIsNotUsed + Assert.Throws(constraint, () => GetPersonCache().AsCacheQueryable() + .Select(x => selector(x)) + .FirstOrDefault()); + } } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheQueryExpressionVisitor.cs b/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheQueryExpressionVisitor.cs index cc14260bc280b..91bde9a0a9d21 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheQueryExpressionVisitor.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheQueryExpressionVisitor.cs @@ -334,6 +334,7 @@ protected override Expression VisitMember(MemberExpression expression) return expression; } + /// /// Gets the name of the field from a member expression, with quotes when necessary. 
/// @@ -515,9 +516,8 @@ protected override Expression VisitNew(NewExpression expression) [SuppressMessage("Microsoft.Design", "CA1062:Validate arguments of public methods")] protected override Expression VisitInvocation(InvocationExpression expression) { - VisitArguments(expression.Arguments); - - return expression; + throw new NotSupportedException("The LINQ expression '" + expression + + "' could not be translated. Either rewrite the query in a form that can be translated, or switch to client evaluation explicitly by inserting a call to either AsEnumerable() or ToList()."); } /** */ From 417a1e0d701f1d535a08580c4e518094575a225c Mon Sep 17 00:00:00 2001 From: Denis Magda Date: Fri, 20 Nov 2020 14:47:08 -0800 Subject: [PATCH 060/110] ignite docs: set the latest spring-data artifact id after receiving user feedback --- .../extensions-and-integrations/spring/spring-data.adoc | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/_docs/extensions-and-integrations/spring/spring-data.adoc b/docs/_docs/extensions-and-integrations/spring/spring-data.adoc index 65b1f23e43872..8216a591bde99 100644 --- a/docs/_docs/extensions-and-integrations/spring/spring-data.adoc +++ b/docs/_docs/extensions-and-integrations/spring/spring-data.adoc @@ -32,12 +32,18 @@ tab:pom.xml[] ---- org.apache.ignite - ignite-spring-data + ignite-spring-data_2.2 {ignite.version} ---- -- +[NOTE] +==== +If your Spring Data version is earlier than Spring Data 2.2 then set `ignite-spring-data_2.0` +or `ignite-spring-data` as an `artifactId` in the pom.xml configuration. +==== + == Apache Ignite Repository Apache Ignite introduces a special `IgniteRepository` interface that extends default `CrudRepository`. This interface From 0a8f535f3f457854e5c9624ba4b6d6df2b29799f Mon Sep 17 00:00:00 2001 From: Slava Koptilin Date: Mon, 23 Nov 2020 10:46:50 +0300 Subject: [PATCH 061/110] IGNITE-13715 Fixed invalid cron expression in ComputeScheduleExample. 
Fixes #8470 --- .../misc/schedule/ComputeScheduleExample.java | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/examples/src/main/java-lgpl/org/apache/ignite/examples/misc/schedule/ComputeScheduleExample.java b/examples/src/main/java-lgpl/org/apache/ignite/examples/misc/schedule/ComputeScheduleExample.java index d480309b84bfb..c58de3bc4b10d 100644 --- a/examples/src/main/java-lgpl/org/apache/ignite/examples/misc/schedule/ComputeScheduleExample.java +++ b/examples/src/main/java-lgpl/org/apache/ignite/examples/misc/schedule/ComputeScheduleExample.java @@ -29,6 +29,9 @@ * Demonstrates a cron-based {@link Runnable} execution scheduling. * Test runnable object broadcasts a phrase to all cluster nodes every minute * three times with initial scheduling delay equal to five seconds. + * This example uses an Ignite extension to Cron syntax, + * which can be used to specify an initial delay in seconds and a number of runs. + * https://apacheignite.readme.io/docs/cron-based-scheduling#syntax-extension *

* Remote nodes should always be started with special configuration file which * enables P2P class loading: {@code 'ignite.{sh|bat} examples/config/example-ignite.xml'}. @@ -68,12 +71,19 @@ public static void main(String[] args) throws IgniteException { return invocations; } }, - "{9, 5, 3} * * * * *" // Cron expression. + // Callable object broadcasts a phrase to all cluster nodes every minute + // three times with initial scheduling delay equal to five seconds. + // https://apacheignite.readme.io/docs/cron-based-scheduling#syntax-extension + "{5, 3} * * * * *" // Cron expression. ); while (!fut.isDone()) System.out.println(">>> Invocation #: " + fut.get()); + // In case the Cron expression is invalid, SchedulerFuture will be immediately completed with an error, + // that provides additional details. + fut.get(); + System.out.println(); System.out.println(">>> Schedule future is done and has been unscheduled."); System.out.println(">>> Check all nodes for hello message output."); From c508f3b101d23760ff10cc664fb6de4cb4b5de8b Mon Sep 17 00:00:00 2001 From: Maxim Muzafarov Date: Mon, 23 Nov 2020 13:59:04 +0300 Subject: [PATCH 062/110] IGNITE-13724 Fix checkstyle config for dymanic source directories in ignite-examples (#8482) --- examples/pom.xml | 35 ++++++++++++++++++++++ parent/pom.xml | 76 ++++++++++++++++++++++++++++++------------------ 2 files changed, 82 insertions(+), 29 deletions(-) diff --git a/examples/pom.xml b/examples/pom.xml index 388a78ad029b5..08fe50ae27ebb 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -248,6 +248,16 @@ net.alchim31.maven scala-maven-plugin + + org.apache.maven.plugins + maven-checkstyle-plugin + + + ${spark.folder} + ${spark.test.folder} + + + @@ -279,6 +289,21 @@ ${project.version} + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + ${lgpl.folder} + ${lgpl.test.folder} + + + + + spark-2.4 @@ -321,6 +346,16 @@ net.alchim31.maven scala-maven-plugin + + org.apache.maven.plugins + maven-checkstyle-plugin + + + 
${spark.folder} + ${spark.test.folder} + + + diff --git a/parent/pom.xml b/parent/pom.xml index 4291becd7901f..ab52dbb6bd199 100644 --- a/parent/pom.xml +++ b/parent/pom.xml @@ -107,7 +107,7 @@ 1.5.0 3.5.0 3.1.1 - 8.35 + 8.37 3.4.6 8.0.13 1.1.2 @@ -226,6 +226,35 @@ + + org.apache.maven.plugins + maven-checkstyle-plugin + ${maven.checkstyle.plugin.version} + + true + + ${project.build.sourceDirectory} + ${project.build.testSourceDirectory} + + true + true + true + true + ${project.build.directory}/checkstyle-result.xml + ../checkstyle/checkstyle.xml + ../checkstyle/checkstyle-suppressions.xml + true + **/generated/**/* + + + + com.puppycrawl.tools + checkstyle + ${checkstyle.puppycrawl.version} + + + + net.alchim31.maven scala-maven-plugin @@ -736,6 +765,20 @@ + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + style + + check + + validate + + + @@ -747,34 +790,9 @@ org.apache.maven.plugins maven-checkstyle-plugin - ${maven.checkstyle.plugin.version} - - - style - - check - - validate - - true - true - true - true - ${project.build.directory}/checkstyle-result.xml - ../checkstyle/checkstyle.xml - ../checkstyle/checkstyle-suppressions.xml - true - **/generated/**/* - - - - - - com.puppycrawl.tools - checkstyle - ${checkstyle.puppycrawl.version} - - + + false + From 21e5ba57db948c0d522f2066e66650c46185e7f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=94=D0=BC=D0=B8=D1=82=D1=80=D0=B8=D0=B9=20=D0=A0=D1=8F?= =?UTF-8?q?=D0=B1=D0=BE=D0=B2?= Date: Tue, 24 Nov 2020 10:00:40 +0300 Subject: [PATCH 063/110] IGNITE-13450 Add event fired before SQL query execution (#8252) Co-authored-by: Nikolay --- .../org/apache/ignite/events/EventType.java | 10 ++ .../ignite/events/SqlQueryExecutionEvent.java | 138 ++++++++++++++++++ .../processors/query/h2/IgniteH2Indexing.java | 24 ++- .../SqlStatisticsUserQueriesFastTest.java | 1 + .../SqlStatisticsUserQueriesLongTest.java | 4 +- .../internal/metric/UserQueriesTestBase.java | 58 +++++++- .../IgniteCacheAbstractQuerySelfTest.java 
| 71 +++++++-- 7 files changed, 287 insertions(+), 19 deletions(-) create mode 100644 modules/core/src/main/java/org/apache/ignite/events/SqlQueryExecutionEvent.java diff --git a/modules/core/src/main/java/org/apache/ignite/events/EventType.java b/modules/core/src/main/java/org/apache/ignite/events/EventType.java index db1a88e443c14..a51c25faeb54c 100644 --- a/modules/core/src/main/java/org/apache/ignite/events/EventType.java +++ b/modules/core/src/main/java/org/apache/ignite/events/EventType.java @@ -922,6 +922,16 @@ public interface EventType { */ public static final int EVT_CLUSTER_SNAPSHOT_FAILED = 151; + /** + * Built-in event type: query execution. + *

+ * NOTE: all types in range from 1 to 1000 are reserved for + * internal Ignite events and should not be used by user-defined events. + * + * @see SqlQueryExecutionEvent + */ + public static final int EVT_SQL_QUERY_EXECUTION = 160; + /** * All cluster snapshot events. This array can be directly passed into * {@link IgniteEvents#localListen(IgnitePredicate, int...)} method to diff --git a/modules/core/src/main/java/org/apache/ignite/events/SqlQueryExecutionEvent.java b/modules/core/src/main/java/org/apache/ignite/events/SqlQueryExecutionEvent.java new file mode 100644 index 0000000000000..0541dd857172e --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/events/SqlQueryExecutionEvent.java @@ -0,0 +1,138 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.events; + +import java.util.UUID; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.internal.util.tostring.GridToStringInclude; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.jetbrains.annotations.Nullable; + +import static org.apache.ignite.events.EventType.EVT_SQL_QUERY_EXECUTION; + +/** + * Query execution event. + *

+ * Grid events are used for notification about what happens within the grid. Note that by + * design Ignite keeps all events generated on the local node locally and it provides + * APIs for performing a distributed queries across multiple nodes: + *

    + *
  • + * {@link org.apache.ignite.IgniteEvents#remoteQuery(org.apache.ignite.lang.IgnitePredicate, long, int...)} - + * asynchronously querying events occurred on the nodes specified, including remote nodes. + *
  • + *
  • + * {@link org.apache.ignite.IgniteEvents#localQuery(org.apache.ignite.lang.IgnitePredicate, int...)} - + * querying only local events stored on this local node. + *
  • + *
  • + * {@link org.apache.ignite.IgniteEvents#localListen(org.apache.ignite.lang.IgnitePredicate, int...)} - + * listening to local grid events (events from remote nodes not included). + *
  • + *
+ * User can also wait for events using method {@link org.apache.ignite.IgniteEvents#waitForLocal(org.apache.ignite.lang.IgnitePredicate, int...)}. + *

Events and Performance

+ * Note that by default all events in Ignite are enabled and therefore generated and stored + * by whatever event storage SPI is configured. Ignite can and often does generate thousands events per seconds + * under the load and therefore it creates a significant additional load on the system. If these events are + * not needed by the application this load is unnecessary and leads to significant performance degradation. + *

+ * It is highly recommended to enable only those events that your application logic requires + * by using {@link org.apache.ignite.configuration.IgniteConfiguration#getIncludeEventTypes()} method in Ignite configuration. Note that certain + * events are required for Ignite's internal operations and such events will still be generated but not stored by + * event storage SPI if they are disabled in Ignite configuration. + * + * @see EventType#EVT_SQL_QUERY_EXECUTION + */ +public class SqlQueryExecutionEvent extends EventAdapter { + /** */ + private static final long serialVersionUID = 0L; + + /** Query text. */ + private final String text; + + /** Query arguments. */ + @GridToStringInclude + private final Object[] args; + + /** Security subject ID. */ + private final UUID subjId; + + /** + * @param node Node where event was fired. + * @param msg Event message. + * @param text Query text. + * @param args Query arguments. + * @param subjId Security subject ID. + */ + public SqlQueryExecutionEvent( + ClusterNode node, + String msg, + @Nullable String text, + @Nullable Object[] args, + @Nullable UUID subjId + ) { + super(node, msg, EVT_SQL_QUERY_EXECUTION); + + this.text = text; + this.args = args; + this.subjId = subjId; + } + + /** + * Gets query text. + *

+ * Applicable for {@code SQL}, {@code SQL fields} queries. + * + * @return Query text. + */ + @Nullable public String text() { + return text; + } + + /** + * Gets query arguments. + *

+ * Applicable for {@code SQL} and {@code SQL fields} queries. + * + * @return Query arguments. + */ + @Nullable public Object[] arguments() { + return args.clone(); + } + + /** + * Gets security subject ID. + * + * @return Security subject ID. + */ + @Nullable public UUID subjectId() { + return subjId; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(SqlQueryExecutionEvent.class, this, + "nodeId8", U.id8(node().id()), + "msg", message(), + "type", name(), + "tstamp", timestamp()); + } +} + diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index 1154289319a41..d06418cb0b3db 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -51,6 +51,7 @@ import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.events.DiscoveryEvent; import org.apache.ignite.events.EventType; +import org.apache.ignite.events.SqlQueryExecutionEvent; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.GridTopic; import org.apache.ignite.internal.IgniteInternalFuture; @@ -200,6 +201,7 @@ import static java.util.Objects.isNull; import static java.util.Objects.nonNull; import static org.apache.ignite.IgniteSystemProperties.IGNITE_MVCC_TX_SIZE_CACHING_THRESHOLD; +import static org.apache.ignite.events.EventType.EVT_SQL_QUERY_EXECUTION; import static org.apache.ignite.internal.processors.cache.mvcc.MvccCachingManager.TX_SIZE_THRESHOLD; import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.checkActive; import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.mvccEnabled; @@ -1019,7 +1021,7 @@ private FieldsQueryCursor> executeCommand( 
IgniteQueryErrorCode.UNSUPPORTED_OPERATION); } - Long qryId = registerRunningQuery(qryDesc, null); + Long qryId = registerRunningQuery(qryDesc, qryParams, null); CommandResult res = null; @@ -1202,7 +1204,7 @@ private List>> executeDml( ) { IndexingQueryFilter filter = (qryDesc.local() ? backupFilter(null, qryParams.partitions()) : null); - Long qryId = registerRunningQuery(qryDesc, cancel); + Long qryId = registerRunningQuery(qryDesc, qryParams, cancel); Exception failReason = null; @@ -1287,7 +1289,7 @@ private List>> executeSelect( assert cancel != null; // Register query. - Long qryId = registerRunningQuery(qryDesc, cancel); + Long qryId = registerRunningQuery(qryDesc, qryParams, cancel); try (TraceSurroundings ignored = MTC.support(ctx.tracing().create(SQL_CURSOR_OPEN, MTC.span()))) { GridNearTxLocal tx = null; @@ -1546,17 +1548,29 @@ private Iterable> lockSelectedRows(Iterable> cur, GridCacheConte * Register running query. * * @param qryDesc Query descriptor. + * @param qryParams Query parameters. * @param cancel Query cancel state holder. * @return Id of registered query or {@code null} if query wasn't registered. */ - private Long registerRunningQuery(QueryDescriptor qryDesc, GridQueryCancel cancel) { - return runningQryMgr.register( + private Long registerRunningQuery(QueryDescriptor qryDesc, QueryParameters qryParams, GridQueryCancel cancel) { + Long res = runningQryMgr.register( qryDesc.sql(), GridCacheQueryType.SQL_FIELDS, qryDesc.schemaName(), qryDesc.local(), cancel ); + + if (ctx.event().isRecordable(EVT_SQL_QUERY_EXECUTION)) { + ctx.event().record(new SqlQueryExecutionEvent( + ctx.discovery().localNode(), + GridCacheQueryType.SQL_FIELDS.name() + " query execution.", + qryDesc.sql(), + qryParams.arguments(), + ctx.security().enabled() ? 
ctx.security().securityContext().subject().id() : null)); + } + + return res; } /** diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/metric/SqlStatisticsUserQueriesFastTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/metric/SqlStatisticsUserQueriesFastTest.java index 5368382d23b57..83e4c2e7ee496 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/metric/SqlStatisticsUserQueriesFastTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/metric/SqlStatisticsUserQueriesFastTest.java @@ -301,6 +301,7 @@ public void testLocalSelectFailed() { public void testLocalSelectCanceled() { assertMetricsIncrementedOnlyOnReducer(() -> startAndKillQuery(new SqlFieldsQuery("SELECT * FROM TAB WHERE ID <> suspendHook(ID)").setLocal(true)), + 2, "success", "failed", "canceled"); diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/metric/SqlStatisticsUserQueriesLongTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/metric/SqlStatisticsUserQueriesLongTest.java index 74b21cb3c42b3..abe9afefcc7bc 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/metric/SqlStatisticsUserQueriesLongTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/metric/SqlStatisticsUserQueriesLongTest.java @@ -92,7 +92,7 @@ public void testMetricsOnRemoteMapFail() throws Exception { SuspendQuerySqlFunctions.setProcessRowsToSuspend(1); assertMetricsIncrementedOnlyOnReducer(() -> - startAndKillQuery(new SqlFieldsQuery("SELECT * FROM TAB WHERE ID < 200 AND suspendHook(ID) <> 5 ")), + startAndKillQuery(new SqlFieldsQuery("SELECT * FROM TAB WHERE ID < 200 AND suspendHook(ID) <> 5 ")), 2, "success", "failed", "canceled"); } @@ -128,7 +128,7 @@ public void testMetricsOnLocalMapFail() throws Exception { SuspendQuerySqlFunctions.setProcessRowsToSuspend(1); assertMetricsIncrementedOnlyOnReducer(() -> - startAndKillQuery(new SqlFieldsQuery("SELECT * FROM TAB WHERE ID < 200 
AND suspendHook(ID) <> 5 ")), + startAndKillQuery(new SqlFieldsQuery("SELECT * FROM TAB WHERE ID < 200 AND suspendHook(ID) <> 5 ")), 2, "success", "failed", "canceled"); } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/metric/UserQueriesTestBase.java b/modules/indexing/src/test/java/org/apache/ignite/internal/metric/UserQueriesTestBase.java index 15a346d393322..611e2849ca96f 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/metric/UserQueriesTestBase.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/metric/UserQueriesTestBase.java @@ -18,21 +18,27 @@ package org.apache.ignite.internal.metric; import java.util.Collection; +import java.util.Collections; import java.util.Map; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.ignite.cache.query.QueryCancelledException; import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.events.SqlQueryExecutionEvent; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.processors.metric.MetricRegistry; import org.apache.ignite.internal.processors.query.GridRunningQueryInfo; +import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.spi.metric.LongMetric; import org.apache.ignite.spi.metric.Metric; import org.apache.ignite.testframework.GridTestUtils; import org.junit.Assert; +import static org.apache.ignite.events.EventType.EVT_SQL_QUERY_EXECUTION; import static org.apache.ignite.internal.processors.query.RunningQueryManager.SQL_USER_QUERIES_REG_NAME; /** @@ -52,13 +58,36 @@ public class UserQueriesTestBase extends SqlStatisticsAbstractTest { /** The second node index. This node should execute only map parts of the queries. 
*/ protected static final int MAPPER_IDX = 1; + /** */ + private static final AtomicInteger SQL_QRY_EXEC_EVT_CNTR = new AtomicInteger(); + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + IgnitePredicate lsnr = evt -> { + assertNotNull(evt.text()); + + SQL_QRY_EXEC_EVT_CNTR.incrementAndGet(); + + return true; + }; + + int[] evts = new int[] {EVT_SQL_QUERY_EXECUTION}; + + cfg.setIncludeEventTypes(evts); + cfg.setLocalEventListeners(Collections.singletonMap(lsnr, evts)); + + return cfg; + } + /** * Verify that after specified action is performed, all metrics are left unchanged. * * @param act Action. */ protected void assertMetricsRemainTheSame(Runnable act) { - assertMetricsAre(fetchAllMetrics(REDUCER_IDX), fetchAllMetrics(MAPPER_IDX), act); + assertMetricsAre(fetchAllMetrics(REDUCER_IDX), fetchAllMetrics(MAPPER_IDX), act, 0); } /** @@ -68,6 +97,21 @@ protected void assertMetricsRemainTheSame(Runnable act) { * @param incrementedMetrics array of metrics to check. */ protected void assertMetricsIncrementedOnlyOnReducer(Runnable act, String... incrementedMetrics) { + assertMetricsIncrementedOnlyOnReducer(act, 1, incrementedMetrics); + } + + /** + * Verify that after action is performed, specified metrics gets incremented only on reducer node. + * + * @param act action (callback) to perform. + * @param qryCnt Amount of queries. + * @param incrementedMetrics array of metrics to check. + */ + protected void assertMetricsIncrementedOnlyOnReducer( + Runnable act, + int qryCnt, + String... incrementedMetrics + ) { Map expValuesMapper = fetchAllMetrics(MAPPER_IDX); Map expValuesReducer = fetchAllMetrics(REDUCER_IDX); @@ -75,7 +119,7 @@ protected void assertMetricsIncrementedOnlyOnReducer(Runnable act, String... 
inc for (String incMet : incrementedMetrics) expValuesReducer.compute(incMet, (name, val) -> val + 1); - assertMetricsAre(expValuesReducer, expValuesMapper, act); + assertMetricsAre(expValuesReducer, expValuesMapper, act, qryCnt); } /** @@ -97,11 +141,16 @@ private Map fetchAllMetrics(int nodeIdx) { * @param expMetricsReducer Expected metrics on reducer. * @param expMetricsMapper Expected metrics on mapper. * @param act callback to perform. Usually sql query execution. + * @param qryEvtCnt Expected sql query events. */ private void assertMetricsAre( Map expMetricsReducer, Map expMetricsMapper, - Runnable act) { + Runnable act, + int qryEvtCnt + ) { + SQL_QRY_EXEC_EVT_CNTR.set(0); + act.run(); expMetricsReducer.forEach((mName, expVal) -> { @@ -115,6 +164,9 @@ private void assertMetricsAre( Assert.assertEquals("Unexpected value for metric " + mName, (long)expVal, actVal); }); + + Assert.assertEquals("Unexpected records for SqlQueryExecutionEvent.", + qryEvtCnt, SQL_QRY_EXEC_EVT_CNTR.get()); } /** diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractQuerySelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractQuerySelfTest.java index 114c4aa6b09c2..36e6d7a547ac5 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractQuerySelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractQuerySelfTest.java @@ -46,6 +46,7 @@ import org.apache.ignite.IgniteBinary; import org.apache.ignite.IgniteCache; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.Ignition; import org.apache.ignite.binary.BinaryObject; import org.apache.ignite.cache.CacheAtomicityMode; import org.apache.ignite.cache.CacheMode; @@ -63,7 +64,10 @@ import org.apache.ignite.cache.query.annotations.QueryTextField; import org.apache.ignite.cache.store.CacheStore; import 
org.apache.ignite.cache.store.CacheStoreAdapter; +import org.apache.ignite.client.Config; +import org.apache.ignite.client.IgniteClient; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.ClientConfiguration; import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.configuration.NearCacheConfiguration; @@ -71,6 +75,7 @@ import org.apache.ignite.events.CacheQueryReadEvent; import org.apache.ignite.events.Event; import org.apache.ignite.events.EventType; +import org.apache.ignite.events.SqlQueryExecutionEvent; import org.apache.ignite.internal.binary.BinaryMarshaller; import org.apache.ignite.internal.processors.cache.query.QueryCursorEx; import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata; @@ -95,6 +100,7 @@ import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; import static org.apache.ignite.events.EventType.EVT_CACHE_QUERY_EXECUTED; import static org.apache.ignite.events.EventType.EVT_CACHE_QUERY_OBJECT_READ; +import static org.apache.ignite.events.EventType.EVT_SQL_QUERY_EXECUTION; import static org.apache.ignite.internal.processors.cache.query.CacheQueryType.FULL_TEXT; import static org.apache.ignite.internal.processors.cache.query.CacheQueryType.SCAN; import static org.apache.ignite.testframework.GridTestUtils.assertThrowsWithCause; @@ -1457,14 +1463,6 @@ public void testArray() throws Exception { assertArrayEquals(new Long[]{4L, 5L, 6L}, e.getValue().arr); } - /** - * @throws Exception If failed. - */ - @Test - public void testSqlQueryEvents() throws Exception { - checkSqlQueryEvents(); - } - /** * @throws Exception If failed. */ @@ -1491,7 +1489,8 @@ public void testFieldsQueryMetadata() throws Exception { /** * @throws Exception If failed. 
*/ - private void checkSqlQueryEvents() throws Exception { + @Test + public void testSqlQueryEvents() throws Exception { final IgniteCache cache = jcache(Integer.class, Integer.class); final boolean evtsDisabled = cache.getConfiguration(CacheConfiguration.class).isEventsDisabled(); final CountDownLatch execLatch = new CountDownLatch(evtsDisabled ? 0 : @@ -1545,6 +1544,60 @@ private void checkSqlQueryEvents() throws Exception { } } + /** + * @throws Exception If failed. + */ + @Test + public void testClientQueryExecutedEvents() throws Exception { + CountDownLatch execLatch = new CountDownLatch(9); + + IgnitePredicate lsnr = evt -> { + assertNotNull(evt.text()); + + execLatch.countDown(); + + return true; + }; + + ignite().events().localListen(lsnr, EVT_SQL_QUERY_EXECUTION); + + ClientConfiguration cc = new ClientConfiguration().setAddresses(Config.SERVER); + + try (IgniteClient client = Ignition.startClient(cc)) { + client.query(new SqlFieldsQuery("create table TEST_TABLE(key int primary key, val int)")) + .getAll(); + + client.query(new SqlFieldsQuery("insert into TEST_TABLE values (?, ?)").setArgs(1, 1)) + .getAll(); + + client.query(new SqlFieldsQuery("update TEST_TABLE set val = ?2 where key = ?1").setArgs(1, 2)) + .getAll(); + + client.query(new SqlFieldsQuery("select * from TEST_TABLE")) + .getAll(); + + client.query(new SqlFieldsQuery("create index idx_1 on TEST_TABLE(key)")) + .getAll(); + + client.query(new SqlFieldsQuery("drop index idx_1")) + .getAll(); + + client.query(new SqlFieldsQuery("alter table TEST_TABLE add column val2 int")) + .getAll(); + + client.query(new SqlFieldsQuery("alter table TEST_TABLE drop val2")) + .getAll(); + + client.query(new SqlFieldsQuery("drop table TEST_TABLE")) + .getAll(); + + assert execLatch.await(3_000, MILLISECONDS); + } + finally { + ignite().events().stopLocalListen(lsnr, EVT_SQL_QUERY_EXECUTION); + } + } + /** * @throws Exception If failed. 
*/ From 2ed8e7c641f8b57986cd50dca55ff4e5026c9fc3 Mon Sep 17 00:00:00 2001 From: Nikolay Izhikov Date: Wed, 25 Nov 2020 10:27:56 +0300 Subject: [PATCH 064/110] [MINOR] IGNITE-13450 Mark query arguments as sensitive info --- .../ignite/events/SqlQueryExecutionEvent.java | 2 +- .../IgniteCacheAbstractQuerySelfTest.java | 23 ++++++++++++++++--- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/events/SqlQueryExecutionEvent.java b/modules/core/src/main/java/org/apache/ignite/events/SqlQueryExecutionEvent.java index 0541dd857172e..4700d7b9fff15 100644 --- a/modules/core/src/main/java/org/apache/ignite/events/SqlQueryExecutionEvent.java +++ b/modules/core/src/main/java/org/apache/ignite/events/SqlQueryExecutionEvent.java @@ -68,7 +68,7 @@ public class SqlQueryExecutionEvent extends EventAdapter { private final String text; /** Query arguments. */ - @GridToStringInclude + @GridToStringInclude(sensitive = true) private final Object[] args; /** Security subject ID. 
*/ diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractQuerySelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractQuerySelfTest.java index 36e6d7a547ac5..660f56e6c392e 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractQuerySelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractQuerySelfTest.java @@ -46,6 +46,7 @@ import org.apache.ignite.IgniteBinary; import org.apache.ignite.IgniteCache; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.Ignition; import org.apache.ignite.binary.BinaryObject; import org.apache.ignite.cache.CacheAtomicityMode; @@ -89,6 +90,7 @@ import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.WithSystemProperty; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.junit.Test; @@ -1544,15 +1546,30 @@ public void testSqlQueryEvents() throws Exception { } } - /** - * @throws Exception If failed. - */ + /** @throws Exception If failed. */ @Test + @WithSystemProperty(key = IgniteSystemProperties.IGNITE_TO_STRING_INCLUDE_SENSITIVE, value = "false") public void testClientQueryExecutedEvents() throws Exception { + doTestClientQueryExecutedEvents(false); + } + + /** @throws Exception If failed. 
*/ + @Test + @WithSystemProperty(key = IgniteSystemProperties.IGNITE_TO_STRING_INCLUDE_SENSITIVE, value = "true") + public void testClientQueryExecutedEventsIncludeSensitive() throws Exception { + doTestClientQueryExecutedEvents(true); + } + + /** */ + public void doTestClientQueryExecutedEvents(boolean inclSens) throws Exception { CountDownLatch execLatch = new CountDownLatch(9); IgnitePredicate lsnr = evt -> { assertNotNull(evt.text()); + if (inclSens) + assertTrue(evt.toString().contains("args=")); + else + assertFalse(evt.toString().contains("args=")); execLatch.countDown(); From 92ea7634a20bb5c7248e2cbe45930bf6b17a87de Mon Sep 17 00:00:00 2001 From: Pavel Tupitsyn Date: Wed, 25 Nov 2020 15:42:14 +0300 Subject: [PATCH 065/110] IGNITE-13755 .NET: Remove unused CacheFlags enum --- .../Cache/Query/Linq/CacheLinqTest.Misc.cs | 1 - .../Apache.Ignite.Core.csproj | 1 - .../Impl/Client/Cache/CacheFlags.cs | 38 ------------------- 3 files changed, 40 deletions(-) delete mode 100644 modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheFlags.cs diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Misc.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Misc.cs index 012f90026ab28..15bcfa6f186ee 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Misc.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Misc.cs @@ -27,7 +27,6 @@ namespace Apache.Ignite.Core.Tests.Cache.Query.Linq { using System; - using System.Collections.Generic; using System.Linq; using System.Linq.Expressions; using Apache.Ignite.Core.Cache; diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj b/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj index 0dec27719260b..ca3f1225c6daf 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj +++ 
b/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj @@ -161,7 +161,6 @@ - diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheFlags.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheFlags.cs deleted file mode 100644 index e24d952a7af09..0000000000000 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheFlags.cs +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -namespace Apache.Ignite.Core.Impl.Client.Cache -{ - using System; - - ///

- /// Cache operation flags. - /// - [Flags] - internal enum CacheFlags : byte - { - /// - /// No flags. - /// - None = 0x00, - - /// - /// Keep binary. - /// - KeepBinary = 0x01 - } -} From ebaa57461e60f12426c5ae7a7d222deb1d77d3b1 Mon Sep 17 00:00:00 2001 From: Mikhail Petrov <32207922+ololo3000@users.noreply.github.com> Date: Wed, 25 Nov 2020 16:52:52 +0300 Subject: [PATCH 066/110] IGNITE-12380: Adds event for node validation failure. (#7057) --- .../org/apache/ignite/events/EventType.java | 14 +++ .../events/NodeValidationFailedEvent.java | 72 ++++++++++++ .../ignite/spi/discovery/tcp/ServerImpl.java | 3 + .../IgniteNodeValidationFailedEventTest.java | 106 ++++++++++++++++++ .../testsuites/IgniteKernalSelfTestSuite.java | 4 +- .../zk/internal/ZookeeperDiscoveryImpl.java | 3 + .../zk/ZookeeperDiscoverySpiTestSuite4.java | 4 +- 7 files changed, 204 insertions(+), 2 deletions(-) create mode 100644 modules/core/src/main/java/org/apache/ignite/events/NodeValidationFailedEvent.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/IgniteNodeValidationFailedEventTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/events/EventType.java b/modules/core/src/main/java/org/apache/ignite/events/EventType.java index a51c25faeb54c..3da980109d906 100644 --- a/modules/core/src/main/java/org/apache/ignite/events/EventType.java +++ b/modules/core/src/main/java/org/apache/ignite/events/EventType.java @@ -24,6 +24,7 @@ import org.apache.ignite.IgniteEvents; import org.apache.ignite.IgniteSnapshot; import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.GridComponent; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.spi.eventstorage.NoopEventStorageSpi; @@ -932,6 +933,19 @@ public interface EventType { */ public static final int EVT_SQL_QUERY_EXECUTION = 160; + /** + * Built-in event type: node validation failed. + *
+ * This event is triggered if a node join fails due to a node validation failure. + *

+ * NOTE: all types in range from 1 to 1000 are reserved for + * internal Ignite events and should not be used by user-defined events. + * + * @see NodeValidationFailedEvent + * @see GridComponent#validateNode + */ + public static final int EVT_NODE_VALIDATION_FAILED = 170; + /** * All cluster snapshot events. This array can be directly passed into * {@link IgniteEvents#localListen(IgnitePredicate, int...)} method to diff --git a/modules/core/src/main/java/org/apache/ignite/events/NodeValidationFailedEvent.java b/modules/core/src/main/java/org/apache/ignite/events/NodeValidationFailedEvent.java new file mode 100644 index 0000000000000..74cf3a9f5a19f --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/events/NodeValidationFailedEvent.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.events; + +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.internal.GridComponent; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.spi.IgniteNodeValidationResult; + +import static org.apache.ignite.events.EventType.EVT_NODE_VALIDATION_FAILED; + +/** + * This event is triggered if any of {@link GridComponent}s fail to validate the joining node + * while join message processing. + * + * @see EventType#EVT_NODE_VALIDATION_FAILED + * @see GridComponent#validateNode + */ +public class NodeValidationFailedEvent extends EventAdapter { + /** */ + private static final long serialVersionUID = 0L; + + /** The node that attempted to join cluster. */ + private final ClusterNode evtNode; + + /** Validation result. */ + private final IgniteNodeValidationResult res; + + /** + * Creates new node validation event with given parameters. + * + * @param node Local node. + * @param evtNode Node which couldn't join the topology due to a validation failure. + * @param res Joining node validation result. + */ + public NodeValidationFailedEvent(ClusterNode node, ClusterNode evtNode, IgniteNodeValidationResult res) { + super(node, res.message(), EVT_NODE_VALIDATION_FAILED); + + this.evtNode = evtNode; + this.res = res; + } + + /** @return Node that couldn't join the topology due to a validation failure. */ + public ClusterNode eventNode() { + return evtNode; + } + + /** @return Joining node validation result. 
*/ + public IgniteNodeValidationResult validationResult() { + return res; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(NodeValidationFailedEvent.class, this, "parent", super.toString()); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java index d0c8e8e49d989..006f1c754e36e 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java @@ -71,6 +71,7 @@ import org.apache.ignite.cache.CacheMetrics; import org.apache.ignite.cluster.ClusterMetrics; import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.events.NodeValidationFailedEvent; import org.apache.ignite.failure.FailureContext; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.IgniteFeatures; @@ -4403,6 +4404,8 @@ else if (node.clientRouterNodeId() == null && utilityPool.execute( new Runnable() { @Override public void run() { + spi.getSpiContext().recordEvent(new NodeValidationFailedEvent(locNode, node, err0)); + boolean ping = node.id().equals(err0.nodeId()) ? pingNode(node) : pingNode(err0.nodeId()); if (!ping) { diff --git a/modules/core/src/test/java/org/apache/ignite/internal/IgniteNodeValidationFailedEventTest.java b/modules/core/src/test/java/org/apache/ignite/internal/IgniteNodeValidationFailedEventTest.java new file mode 100644 index 0000000000000..6989144d01027 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/IgniteNodeValidationFailedEventTest.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.events.Event; +import org.apache.ignite.events.NodeValidationFailedEvent; +import org.apache.ignite.internal.processors.security.impl.TestSecurityPluginProvider; +import org.apache.ignite.spi.IgniteNodeValidationResult; +import org.apache.ignite.spi.IgniteSpiException; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; + +import static org.apache.ignite.events.EventType.EVT_NODE_VALIDATION_FAILED; +import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_NODE_CONSISTENT_ID; +import static org.apache.ignite.plugin.security.SecurityPermissionSetBuilder.ALLOW_ALL; +import static org.apache.ignite.testframework.GridTestUtils.assertThrowsWithCause; + +/** Tests joining node validation failed event. 
*/ +public class IgniteNodeValidationFailedEventTest extends GridCommonAbstractTest { + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + return super.getConfiguration(igniteInstanceName) + .setIncludeEventTypes(EVT_NODE_VALIDATION_FAILED) + .setConsistentId(igniteInstanceName); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + super.afterTest(); + + stopAllGrids(); + } + + /** */ + @Test + public void testNodeValidationFailedEvent() throws Exception { + startGrid(0); + + CountDownLatch evtLatch = new CountDownLatch(1); + + AtomicReference listenedEvtRef = new AtomicReference<>(); + + grid(0).events().localListen(evt -> { + assertTrue(listenedEvtRef.compareAndSet(null, evt)); + + evtLatch.countDown(); + + return true; + }, EVT_NODE_VALIDATION_FAILED); + + startGrid(1); + + String invalidNodeName = getTestIgniteInstanceName(2); + + IgniteConfiguration invalidCfg = getConfiguration(invalidNodeName) + .setPluginProviders(new TestSecurityPluginProvider("login", "", ALLOW_ALL, false)); + + assertThrowsWithCause(() -> startGrid(optimize(invalidCfg)), IgniteSpiException.class); + + evtLatch.await(); + + Event listenedEvt = listenedEvtRef.get(); + + assertTrue(listenedEvt instanceof NodeValidationFailedEvent); + + NodeValidationFailedEvent validationEvt = (NodeValidationFailedEvent)listenedEvt; + + assertEquals(invalidNodeName, validationEvt.eventNode().attribute(ATTR_NODE_CONSISTENT_ID)); + + IgniteNodeValidationResult validationRes = validationEvt.validationResult(); + + assertNotNull(validationRes); + + String errMsg = validationRes.message(); + + assertNotNull(errMsg); + assertTrue(errMsg.contains( + "Local node's grid security processor class is not equal to remote node's grid security processor class")); + } + + /** */ + @Test + public void testEventDisabledByDefault() throws Exception { + IgniteEx ignite = 
startGrid(super.getConfiguration(getTestIgniteInstanceName(0))); + + assertFalse(ignite.context().event().isRecordable(EVT_NODE_VALIDATION_FAILED)); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteKernalSelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteKernalSelfTestSuite.java index 432ee923bce19..b79d174669e9d 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteKernalSelfTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteKernalSelfTestSuite.java @@ -37,6 +37,7 @@ import org.apache.ignite.internal.GridVersionSelfTest; import org.apache.ignite.internal.IgniteConcurrentEntryProcessorAccessStopTest; import org.apache.ignite.internal.IgniteConnectionConcurrentReserveAndRemoveTest; +import org.apache.ignite.internal.IgniteNodeValidationFailedEventTest; import org.apache.ignite.internal.IgniteUpdateNotifierPerClusterSettingSelfTest; import org.apache.ignite.internal.LongJVMPauseDetectorTest; import org.apache.ignite.internal.ThreadNameValidationTest; @@ -111,7 +112,8 @@ DeploymentRequestOfUnknownClassProcessingTest.class, ThreadNameValidationTest.class, NodeWithFilterRestartTest.class, - ClusterActiveStateChangeWithNodeOutOfBaselineTest.class + ClusterActiveStateChangeWithNodeOutOfBaselineTest.class, + IgniteNodeValidationFailedEventTest.class }) public class IgniteKernalSelfTestSuite { } diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryImpl.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryImpl.java index bdd37d1a4c62c..279aa1d80ea07 100644 --- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryImpl.java +++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryImpl.java @@ -54,6 +54,7 @@ import org.apache.ignite.cluster.ClusterNode; import 
org.apache.ignite.configuration.CommunicationFailureResolver; import org.apache.ignite.events.EventType; +import org.apache.ignite.events.NodeValidationFailedEvent; import org.apache.ignite.internal.IgniteClientDisconnectedCheckedException; import org.apache.ignite.internal.IgniteFeatures; import org.apache.ignite.internal.IgniteFutureTimeoutCheckedException; @@ -2092,6 +2093,8 @@ private ZkNodeValidateResult validateJoiningNode(ZkJoiningNodeData joiningNodeDa } if (err != null) { + spi.getSpiContext().recordEvent(new NodeValidationFailedEvent(locNode, node, err)); + LT.warn(log, err.message()); res.err = err.sendMessage(); diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite4.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite4.java index a9146cc3d38f3..ac73c29885d27 100644 --- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite4.java +++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite4.java @@ -18,6 +18,7 @@ package org.apache.ignite.spi.discovery.zk; import org.apache.ignite.internal.ClusterNodeMetricsUpdateTest; +import org.apache.ignite.internal.IgniteNodeValidationFailedEventTest; import org.apache.ignite.internal.processors.cache.distributed.dht.IgniteCachePutRetryAtomicSelfTest; import org.apache.ignite.internal.processors.cache.distributed.dht.IgniteCachePutRetryTransactionalSelfTest; import org.apache.ignite.internal.processors.cache.distributed.near.GridCacheAtomicMultiNodeFullApiSelfTest; @@ -42,7 +43,8 @@ GridCacheReplicatedAtomicMultiNodeFullApiSelfTest.class, IgniteCacheReplicatedQuerySelfTest.class, DistributedMetaStorageTest.class, - DistributedMetaStoragePersistentTest.class + DistributedMetaStoragePersistentTest.class, + IgniteNodeValidationFailedEventTest.class }) public class ZookeeperDiscoverySpiTestSuite4 { /** */ From 
8806703b407c69f4e1fe4eb23d2ea85226f42d9c Mon Sep 17 00:00:00 2001 From: sergeyuttsel Date: Thu, 26 Nov 2020 01:42:11 +0300 Subject: [PATCH 067/110] IGNITE-13731 Used last finished exchange future for cache validation to avoid contention on synchronized method GridCachePartitionExchangeManager.ExchangeFutureSet#values. Fixes #8479 Signed-off-by: Slava Koptilin --- .../dht/GridDhtTransactionalCacheAdapter.java | 18 ++- .../colocated/GridDhtColocatedLockFuture.java | 26 ++-- .../distributed/near/GridNearLockFuture.java | 24 ---- .../cache/TransactionValidationTest.java | 126 ++++++++++++++++++ .../GridExchangeFreeSwitchTest.java | 105 ++++++++++++++- .../testsuites/IgniteCacheTestSuite2.java | 2 + 6 files changed, 257 insertions(+), 44 deletions(-) create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/TransactionValidationTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java index b2e7b30c6b965..1831c1055d2ed 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java @@ -36,6 +36,7 @@ import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.CacheEntryPredicate; +import org.apache.ignite.internal.processors.cache.CacheInvalidStateException; import org.apache.ignite.internal.processors.cache.CacheObject; import org.apache.ignite.internal.processors.cache.CacheOperationContext; import org.apache.ignite.internal.processors.cache.GridCacheConcurrentMap; @@ -52,6 +53,7 
@@ import org.apache.ignite.internal.processors.cache.distributed.GridDistributedUnlockRequest; import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtForceKeysRequest; import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtForceKeysResponse; +import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology; @@ -1145,6 +1147,16 @@ public IgniteInternalFuture lockAllAsync( tx.topologyVersion(req.topologyVersion()); } + + GridDhtPartitionsExchangeFuture lastFinishedFut = ctx.shared().exchange().lastFinishedFuture(); + + CacheOperationContext opCtx = ctx.operationContextPerCall(); + + CacheInvalidStateException validateCacheE = lastFinishedFut + .validateCache(ctx, opCtx != null && opCtx.recovery(), req.txRead(), null, keys); + + if (validateCacheE != null) + throw validateCacheE; } else { fut = new GridDhtLockFuture(ctx, @@ -1299,9 +1311,7 @@ else if (!b) } } catch (IgniteCheckedException | RuntimeException e) { - String err = "Failed to unmarshal at least one of the keys for lock request message: " + req; - - U.error(log, err, e); + U.error(log, req, e); if (tx != null) { try { @@ -1327,7 +1337,7 @@ else if (!b) } return new GridDhtFinishedFuture<>( - new IgniteCheckedException(err, e)); + new IgniteCheckedException(e)); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedLockFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedLockFuture.java index 8d0abde17e20d..7ba91d7e94705 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedLockFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedLockFuture.java @@ -40,7 +40,9 @@ import org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.CacheEntryPredicate; +import org.apache.ignite.internal.processors.cache.CacheInvalidStateException; import org.apache.ignite.internal.processors.cache.CacheObject; +import org.apache.ignite.internal.processors.cache.CacheOperationContext; import org.apache.ignite.internal.processors.cache.CacheStoppedException; import org.apache.ignite.internal.processors.cache.GridCacheCompoundIdentityFuture; import org.apache.ignite.internal.processors.cache.GridCacheContext; @@ -53,6 +55,7 @@ import org.apache.ignite.internal.processors.cache.distributed.GridDistributedCacheEntry; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture; +import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockMapping; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockRequest; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockResponse; @@ -804,20 +807,6 @@ void map() { } } - for (GridDhtTopologyFuture fut : cctx.shared().exchange().exchangeFutures()) { - if (fut.exchangeDone() && fut.topologyVersion().equals(lastChangeVer)) { - Throwable err = fut.validateCache(cctx, recovery, read, null, keys); - - if (err != null) { - onDone(err); - - return; - } - - break; - } - } - // Continue mapping on the same 
topology version as it was before. synchronized (this) { if (this.topVer == null) @@ -1392,6 +1381,15 @@ private boolean mapAsPrimary(Collection keys, AffinityTopologyVe lockLocally(distributedKeys, topVer); } + GridDhtPartitionsExchangeFuture lastFinishedFut = cctx.shared().exchange().lastFinishedFuture(); + + CacheOperationContext opCtx = cctx.operationContextPerCall(); + + CacheInvalidStateException validateCacheE = lastFinishedFut.validateCache(cctx, opCtx != null && opCtx.recovery(), read, null, keys); + + if (validateCacheE != null) + onDone(validateCacheE); + return true; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockFuture.java index 73b3aff5b65e7..b88f74c722e35 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockFuture.java @@ -843,30 +843,6 @@ void map() { topVer = tx.topologyVersionSnapshot(); if (topVer != null) { - for (GridDhtTopologyFuture fut : cctx.shared().exchange().exchangeFutures()) { - if (fut.exchangeDone() && fut.topologyVersion().equals(topVer)) { - Throwable err = null; - - // Before cache validation, make sure that this topology future is already completed. - try { - fut.get(); - } - catch (IgniteCheckedException e) { - err = fut.error(); - } - - err = (err == null) ? fut.validateCache(cctx, recovery, read, null, keys) : err; - - if (err != null) { - onDone(err); - - return; - } - - break; - } - } - // Continue mapping on the same topology version as it was before. 
if (this.topVer == null) this.topVer = topVer; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/TransactionValidationTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/TransactionValidationTest.java new file mode 100644 index 0000000000000..5813aed1652ac --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/TransactionValidationTest.java @@ -0,0 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache; + +import javax.cache.CacheException; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.NearCacheConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTransactionalCache; +import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.apache.ignite.transactions.Transaction; +import org.junit.Test; + +import static org.apache.ignite.testframework.MvccFeatureChecker.Feature.NEAR_CACHE; +import static org.apache.ignite.testframework.MvccFeatureChecker.skipIfNotSupported; + +/** + * Tests check that second operation in transaction fail if it doesn't pass validation. + */ +public class TransactionValidationTest extends GridCommonAbstractTest { + /** + * @throws Exception If failed. + */ + @Test + public void validationOnRemoteNode() throws Exception { + validationTest(true, false); + } + + /** + * @throws Exception If failed. + */ + @Test + public void validationOnLocalNode() throws Exception { + validationTest(false, false); + } + + /** + * @throws Exception If failed. + */ + @Test + public void validationOnNearCache() throws Exception { + skipIfNotSupported(NEAR_CACHE); + + validationTest(true, true); + } + + /** + * @throws Exception If failed. 
+ */ + public void validationTest(boolean distributed, boolean nearCache) throws Exception { + IgniteEx txCrd; + + if (distributed && nearCache) + txCrd = startGrids(2); + else if (distributed && !nearCache) { + startGrids(2); + + txCrd = startClientGrid(2); + } + else + txCrd = startGrid(0); + + CacheConfiguration cfgCache0 = new CacheConfiguration<>("cache0") + .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL); + + CacheConfiguration cfgCache1 = new CacheConfiguration<>("cache1") + .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL) + .setTopologyValidator(nodes -> false); + + if (nearCache) { + cfgCache0.setNearConfiguration(new NearCacheConfiguration<>()); + + cfgCache1.setNearConfiguration(new NearCacheConfiguration<>()); + } + + IgniteCache cache0 = txCrd.createCache(cfgCache0); + + IgniteCache cache1 = txCrd.createCache(cfgCache1); + + try (Transaction tx = txCrd.transactions().txStart()) { + cache0.put(1, 1); + + boolean isNearCache = ((GatewayProtectedCacheProxy) cache1).context().cache() instanceof GridNearTransactionalCache; + + if (nearCache) + assertTrue("Must be near cache", isNearCache); + else + assertTrue("Must not be near cache", !isNearCache); + + try { + cache1.put(1, 1); + + fail("Validation broken"); + } + catch (CacheException e) { + assertTrue(X.getFullStackTrace(e), + X.hasCause(e, "cache topology is not valid", CacheInvalidStateException.class)); + } + } + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + + super.afterTest(); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridExchangeFreeSwitchTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridExchangeFreeSwitchTest.java index 05774ce100f1b..ef94050792764 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridExchangeFreeSwitchTest.java +++ 
b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridExchangeFreeSwitchTest.java @@ -61,6 +61,7 @@ import static org.apache.ignite.internal.IgniteFeatures.PME_FREE_SWITCH; import static org.apache.ignite.internal.IgniteFeatures.allNodesSupports; import static org.apache.ignite.internal.IgniteFeatures.nodeSupports; +import static org.apache.ignite.testframework.GridTestUtils.runAsync; /** * @@ -369,8 +370,11 @@ private void testNoTransactionsWaitAtNodeLeft(int backups, PartitionLossPolicy l Ignite candidate; MvccProcessor proc; + int nodeToStop; + do { - candidate = G.allGrids().get(r.nextInt(nodes)); + nodeToStop = r.nextInt(nodes); + candidate = grid(nodeToStop); proc = ((IgniteEx)candidate).context().coordinators(); } @@ -383,11 +387,33 @@ private void testNoTransactionsWaitAtNodeLeft(int backups, PartitionLossPolicy l AtomicInteger key_from = new AtomicInteger(); - CountDownLatch readyLatch = new CountDownLatch((backups > 0 ? 4 : 2) * multiplicator); + CountDownLatch readyLatch = new CountDownLatch((backups > 0 ? 
6 : 3) * multiplicator); CountDownLatch failedLatch = new CountDownLatch(1); IgniteCache failedCache = failed.getOrCreateCache(cacheName); + int nodeToStop0 = nodeToStop; + + IgniteInternalFuture checkRebalanced = runAsync(() -> { + try { + failedLatch.await(); + } + catch (Exception e) { + fail("Should not happen [exception=" + e + "]"); + } + for (int i = 0; i < nodes; i++) { + if (i != nodeToStop0) { + GridDhtPartitionsExchangeFuture lastFinishedFut = + grid(i).cachex(cacheName).context().shared().exchange().lastFinishedFuture(); + + assertTrue(lastFinishedFut.rebalanced()); + + assertTrue(lastFinishedFut.topologyVersion() + .equals(new AffinityTopologyVersion(nodes + 1, 0))); + } + } + }); + IgniteInternalFuture nearThenNearFut = multithreadedAsync(() -> { try { List keys = nearKeys(failedCache, 2, key_from.addAndGet(100)); @@ -406,6 +432,7 @@ private void testNoTransactionsWaitAtNodeLeft(int backups, PartitionLossPolicy l readyLatch.countDown(); failedLatch.await(); + checkRebalanced.get(); primaryCache.put(key1, key1); @@ -438,6 +465,7 @@ private void testNoTransactionsWaitAtNodeLeft(int backups, PartitionLossPolicy l readyLatch.countDown(); failedLatch.await(); + checkRebalanced.get(); try { backupCache.put(key1, key1); @@ -470,6 +498,7 @@ private void testNoTransactionsWaitAtNodeLeft(int backups, PartitionLossPolicy l readyLatch.countDown(); failedLatch.await(); + checkRebalanced.get(); try { primaryCache.put(key1, key1); @@ -502,6 +531,7 @@ private void testNoTransactionsWaitAtNodeLeft(int backups, PartitionLossPolicy l readyLatch.countDown(); failedLatch.await(); + checkRebalanced.get(); primaryCache.put(key1, key1); @@ -516,6 +546,75 @@ private void testNoTransactionsWaitAtNodeLeft(int backups, PartitionLossPolicy l } }, multiplicator) : new GridFinishedFuture<>(); + IgniteInternalFuture primaryThenNearFut = multithreadedAsync(() -> { + try { + Integer key0 = primaryKeys(failedCache, 1, key_from.addAndGet(100)).get(0); + Integer key1 = 
nearKeys(failedCache, 1, key_from.addAndGet(100)).get(0); + + Ignite primary = primaryNode(key1, cacheName); + + assertNotSame(failed, primary); + + IgniteCache primaryCache = primary.getOrCreateCache(cacheName); + + try (Transaction tx = primary.transactions().txStart()) { + primaryCache.put(key0, key0); + + readyLatch.countDown(); + failedLatch.await(); + checkRebalanced.get(); + + primaryCache.put(key1, key1); + + try { + tx.commit(); + + fail("Should not happen"); + } + catch (Exception ignored) { + // Transaction broken because of primary left. + } + } + } + catch (Exception e) { + fail("Should not happen [exception=" + e + "]"); + } + }, multiplicator); + + IgniteInternalFuture primaryThenPrimaryWithSameKeyFut = backups > 0 ? multithreadedAsync(() -> { + try { + List keys = primaryKeys(failedCache, 2, key_from.addAndGet(100)); + + Integer key0 = keys.get(0); + + Ignite backup = backupNode(key0, cacheName); + + assertNotSame(failed, backup); + + IgniteCache backupCache = backup.getOrCreateCache(cacheName); + + try (Transaction tx = backup.transactions().txStart()) { + backupCache.put(key0, key0); + + readyLatch.countDown(); + failedLatch.await(); + checkRebalanced.get(); + + try { + backupCache.put(key0, key0 + 1); + + fail("Should not happen"); + } + catch (Exception ignored) { + // Transaction broken because of primary left. + } + } + } + catch (Exception e) { + fail("Should not happen [exception=" + e + "]"); + } + }, multiplicator) : new GridFinishedFuture<>(); + readyLatch.await(); failed.close(); // Stopping node. 
@@ -528,6 +627,8 @@ private void testNoTransactionsWaitAtNodeLeft(int backups, PartitionLossPolicy l primaryThenPrimaryFut.get(); nearThenPrimaryFut.get(); nearThenBackupFut.get(); + primaryThenNearFut.get(); + primaryThenPrimaryWithSameKeyFut.get(); int pmeFreeCnt = 0; diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java index 63516f8af6a79..4fb38bd3e7e11 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java @@ -58,6 +58,7 @@ import org.apache.ignite.internal.processors.cache.MemoryPolicyConfigValidationTest; import org.apache.ignite.internal.processors.cache.NoPresentCacheInterceptorOnClientTest; import org.apache.ignite.internal.processors.cache.NonAffinityCoordinatorDynamicStartStopTest; +import org.apache.ignite.internal.processors.cache.TransactionValidationTest; import org.apache.ignite.internal.processors.cache.distributed.CacheDetectLostPartitionsTest; import org.apache.ignite.internal.processors.cache.distributed.CacheLoadingConcurrentGridStartSelfTest; import org.apache.ignite.internal.processors.cache.distributed.CacheLoadingConcurrentGridStartSelfTestAllowOverwrite; @@ -383,6 +384,7 @@ public static List> suite(Collection ignoredTests) { GridTestUtils.addTestIfNeeded(suite, IgniteReflectionFactorySelfTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, NoPresentCacheInterceptorOnClientTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, CacheDetectLostPartitionsTest.class, ignoredTests); + GridTestUtils.addTestIfNeeded(suite, TransactionValidationTest.class, ignoredTests); return suite; } From be3072ff278a2542e41d008b5379473867df3814 Mon Sep 17 00:00:00 2001 From: Ilya Kasnacheev Date: Thu, 26 Nov 2020 18:00:14 +0300 Subject: [PATCH 068/110] IGNITE-13730 Unify fallback 
property for SSL key store provider - Fixes #8477. --- .../org/apache/ignite/IgniteJdbcDriver.java | 12 ++++++--- .../client/GridClientConfiguration.java | 16 +++++++----- .../ssl/GridSslBasicContextFactory.java | 26 ++++++++----------- .../client/thin/TcpClientChannel.java | 8 +++--- .../jdbc/thin/ConnectionPropertiesImpl.java | 2 +- .../internal/jdbc/thin/JdbcThinSSLUtil.java | 13 +++++++--- .../apache/ignite/ssl/SslContextFactory.java | 22 +++++++++++----- .../client/ClientConfigurationTest.java | 8 +++--- .../ignite/testframework/GridTestUtils.java | 9 ++++--- .../apache/ignite/client/SecurityTest.java | 8 +++--- 10 files changed, 77 insertions(+), 47 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java b/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java index 61a944fec126e..9c0948a115fae 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java @@ -31,6 +31,10 @@ import org.apache.ignite.internal.jdbc.JdbcConnection; import org.apache.ignite.internal.jdbc.JdbcDriverPropertyInfo; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_KEY_ALGORITHM; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_SSL_PROTOCOL; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_STORE_TYPE; + /** * JDBC driver implementation for In-Memory Data Grid. *

@@ -485,10 +489,10 @@ public class IgniteJdbcDriver implements Driver { info.getProperty("ignite.client.ssl.enabled", "false"), "Flag indicating that SSL is needed for connection."), new JdbcDriverPropertyInfo("ignite.client.ssl.protocol", - info.getProperty("ignite.client.ssl.protocol", "TLS"), + info.getProperty("ignite.client.ssl.protocol", DFLT_SSL_PROTOCOL), "SSL protocol."), new JdbcDriverPropertyInfo("ignite.client.ssl.key.algorithm", - info.getProperty("ignite.client.ssl.key.algorithm", "SunX509"), + info.getProperty("ignite.client.ssl.key.algorithm", DFLT_KEY_ALGORITHM), "Key manager algorithm."), new JdbcDriverPropertyInfo("ignite.client.ssl.keystore.location", info.getProperty("ignite.client.ssl.keystore.location", ""), @@ -497,7 +501,7 @@ public class IgniteJdbcDriver implements Driver { info.getProperty("ignite.client.ssl.keystore.password", ""), "Key store password."), new JdbcDriverPropertyInfo("ignite.client.ssl.keystore.type", - info.getProperty("ignite.client.ssl.keystore.type", "jks"), + info.getProperty("ignite.client.ssl.keystore.type", DFLT_STORE_TYPE), "Key store type."), new JdbcDriverPropertyInfo("ignite.client.ssl.truststore.location", info.getProperty("ignite.client.ssl.truststore.location", ""), @@ -506,7 +510,7 @@ public class IgniteJdbcDriver implements Driver { info.getProperty("ignite.client.ssl.truststore.password", ""), "Trust store password."), new JdbcDriverPropertyInfo("ignite.client.ssl.truststore.type", - info.getProperty("ignite.client.ssl.truststore.type", "jks"), + info.getProperty("ignite.client.ssl.truststore.type", DFLT_STORE_TYPE), "Trust store type."), new JdbcDriverPropertyInfo("ignite.client.credentials", info.getProperty("ignite.client.credentials", ""), diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/GridClientConfiguration.java b/modules/core/src/main/java/org/apache/ignite/internal/client/GridClientConfiguration.java index 8e373ae235530..c0135a5e57fcd 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/client/GridClientConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/GridClientConfiguration.java @@ -39,6 +39,10 @@ import org.apache.ignite.plugin.security.SecurityCredentialsProvider; import org.jetbrains.annotations.Nullable; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_KEY_ALGORITHM; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_SSL_PROTOCOL; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_STORE_TYPE; + /** * Java client configuration. */ @@ -721,8 +725,8 @@ public void load(String prefix, Properties in) throws GridClientException { String sslEnabled = in.getProperty(prefix + "ssl.enabled"); - String sslProto = in.getProperty(prefix + "ssl.protocol", "TLS"); - String sslKeyAlg = in.getProperty(prefix + "ssl.key.algorithm", "SunX509"); + String sslProto = in.getProperty(prefix + "ssl.protocol"); + String sslKeyAlg = in.getProperty(prefix + "ssl.key.algorithm"); String keyStorePath = in.getProperty(prefix + "ssl.keystore.location"); String keyStorePwd = in.getProperty(prefix + "ssl.keystore.password"); @@ -780,8 +784,8 @@ public void load(String prefix, Properties in) throws GridClientException { if (!F.isEmpty(sslEnabled) && Boolean.parseBoolean(sslEnabled)) { GridSslBasicContextFactory factory = new GridSslBasicContextFactory(); - factory.setProtocol(F.isEmpty(sslProto) ? "TLS" : sslProto); - factory.setKeyAlgorithm(F.isEmpty(sslKeyAlg) ? "SunX509" : sslKeyAlg); + factory.setProtocol(F.isEmpty(sslProto) ? DFLT_SSL_PROTOCOL : sslProto); + factory.setKeyAlgorithm(F.isEmpty(sslKeyAlg) ? 
DFLT_KEY_ALGORITHM : sslKeyAlg); if (F.isEmpty(keyStorePath)) throw new IllegalArgumentException("SSL key store location is not specified."); @@ -791,7 +795,7 @@ public void load(String prefix, Properties in) throws GridClientException { if (keyStorePwd != null) factory.setKeyStorePassword(keyStorePwd.toCharArray()); - factory.setKeyStoreType(F.isEmpty(keyStoreType) ? "jks" : keyStoreType); + factory.setKeyStoreType(F.isEmpty(keyStoreType) ? DFLT_STORE_TYPE : keyStoreType); if (F.isEmpty(trustStorePath)) factory.setTrustManagers(GridSslBasicContextFactory.getDisabledTrustManager()); @@ -801,7 +805,7 @@ public void load(String prefix, Properties in) throws GridClientException { if (trustStorePwd != null) factory.setTrustStorePassword(trustStorePwd.toCharArray()); - factory.setTrustStoreType(F.isEmpty(trustStoreType) ? "jks" : trustStoreType); + factory.setTrustStoreType(F.isEmpty(trustStoreType) ? DFLT_STORE_TYPE : trustStoreType); } setSslContextFactory(factory); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/ssl/GridSslBasicContextFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/client/ssl/GridSslBasicContextFactory.java index d4075c1c71c35..e500705414de1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/ssl/GridSslBasicContextFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/ssl/GridSslBasicContextFactory.java @@ -37,6 +37,11 @@ import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.A; import org.apache.ignite.ssl.SSLContextWrapper; +import org.apache.ignite.ssl.SslContextFactory; + +import static org.apache.ignite.ssl.SslContextFactory.DFLT_KEY_ALGORITHM; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_SSL_PROTOCOL; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_STORE_TYPE; /** * Basic ssl context factory that provides ssl context configuration with specified key @@ -54,15 +59,6 @@ */ 
@Deprecated public class GridSslBasicContextFactory implements GridSslContextFactory { - /** Default key store type. */ - public static final String DFLT_STORE_TYPE = "JKS"; - - /** Default SSL protocol. */ - public static final String DFLT_SSL_PROTOCOL = "TLS"; - - /** Default key manager algorithm. */ - public static final String DFLT_KEY_ALGORITHM = "SunX509"; - /** SSL protocol. */ private String proto = DFLT_SSL_PROTOCOL; @@ -106,8 +102,8 @@ public String getKeyStoreType() { } /** - * Sets key store type used in context initialization. If not provided, {@link #DFLT_STORE_TYPE} will - * be used. + * Sets key store type used in context initialization. If not provided, {@link SslContextFactory#DFLT_STORE_TYPE} + * will be used. * * @param keyStoreType Key store type. */ @@ -127,8 +123,8 @@ public String getTrustStoreType() { } /** - * Sets trust store type used in context initialization. If not provided, {@link #DFLT_STORE_TYPE} will - * be used. + * Sets trust store type used in context initialization. If not provided, {@link SslContextFactory#DFLT_STORE_TYPE} + * will be used. * * @param trustStoreType Trust store type. */ @@ -148,7 +144,7 @@ public String getProtocol() { } /** - * Sets protocol for secure transport. If not specified, {@link #DFLT_SSL_PROTOCOL} will be used. + * Sets protocol for secure transport. If not specified, {@link SslContextFactory#DFLT_SSL_PROTOCOL} will be used. * * @param proto SSL protocol name. */ @@ -159,7 +155,7 @@ public void setProtocol(String proto) { } /** - * Gets algorithm that will be used to create a key manager. If not specified, {@link #DFLT_KEY_ALGORITHM} + * Gets algorithm that will be used to create a key manager. If not specified, {@link SslContextFactory#DFLT_KEY_ALGORITHM} * will be used. * * @return Key manager algorithm. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java index 7c791305cd1c9..25df909af4fee 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java @@ -103,6 +103,8 @@ import static org.apache.ignite.internal.client.thin.ProtocolVersionFeature.AUTHORIZATION; import static org.apache.ignite.internal.client.thin.ProtocolVersionFeature.BITMAP_FEATURES; import static org.apache.ignite.internal.client.thin.ProtocolVersionFeature.PARTITION_AWARENESS; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_KEY_ALGORITHM; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_STORE_TYPE; /** * Implements {@link ClientChannel} over TCP. @@ -931,7 +933,7 @@ private static SSLSocketFactory getSslSocketFactory(ClientChannelConfiguration c String keyStoreType = or.apply( cfg.getSslClientCertificateKeyStoreType(), - or.apply(System.getProperty("javax.net.ssl.keyStoreType"), "JKS") + or.apply(System.getProperty("javax.net.ssl.keyStoreType"), DFLT_STORE_TYPE) ); String keyStorePwd = or.apply( @@ -946,7 +948,7 @@ private static SSLSocketFactory getSslSocketFactory(ClientChannelConfiguration c String trustStoreType = or.apply( cfg.getSslTrustCertificateKeyStoreType(), - or.apply(System.getProperty("javax.net.ssl.trustStoreType"), "JKS") + or.apply(System.getProperty("javax.net.ssl.trustStoreType"), DFLT_STORE_TYPE) ); String trustStorePwd = or.apply( @@ -954,7 +956,7 @@ private static SSLSocketFactory getSslSocketFactory(ClientChannelConfiguration c System.getProperty("javax.net.ssl.trustStorePassword") ); - String algorithm = or.apply(cfg.getSslKeyAlgorithm(), "SunX509"); + String algorithm = or.apply(cfg.getSslKeyAlgorithm(), DFLT_KEY_ALGORITHM); String proto = toString(cfg.getSslProtocol()); diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java index 9cc585331f994..bacb797b93e6e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java @@ -138,7 +138,7 @@ false, new PropertyValidator() { /** SSL: Key algorithm name. */ private StringProperty sslKeyAlgorithm = new StringProperty("sslKeyAlgorithm", - "SSL key algorithm name", "SunX509", null, false, null); + "SSL key algorithm name", null, null, false, null); /** SSL: Client certificate key store url. */ private StringProperty sslClientCertificateKeyStoreUrl = diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinSSLUtil.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinSSLUtil.java index d62f939cbe255..e410e4d1fa2c2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinSSLUtil.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinSSLUtil.java @@ -33,6 +33,10 @@ import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.ssl.SslContextFactory; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_KEY_ALGORITHM; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_SSL_PROTOCOL; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_STORE_TYPE; + /** * SSL utility method to create SSL connetion. 
*/ @@ -136,7 +140,7 @@ private static SSLSocketFactory getSSLSocketFactory(ConnectionProperties connPro cliCertKeyStorePwd = System.getProperty("javax.net.ssl.keyStorePassword"); if (cliCertKeyStoreType == null) - cliCertKeyStoreType = System.getProperty("javax.net.ssl.keyStoreType", "JKS"); + cliCertKeyStoreType = System.getProperty("javax.net.ssl.keyStoreType", DFLT_STORE_TYPE); if (trustCertKeyStoreUrl == null) trustCertKeyStoreUrl = System.getProperty("javax.net.ssl.trustStore"); @@ -145,10 +149,13 @@ private static SSLSocketFactory getSSLSocketFactory(ConnectionProperties connPro trustCertKeyStorePwd = System.getProperty("javax.net.ssl.trustStorePassword"); if (trustCertKeyStoreType == null) - trustCertKeyStoreType = System.getProperty("javax.net.ssl.trustStoreType", "JKS"); + trustCertKeyStoreType = System.getProperty("javax.net.ssl.trustStoreType", DFLT_STORE_TYPE); if (sslProtocol == null) - sslProtocol = "TLS"; + sslProtocol = DFLT_SSL_PROTOCOL; + + if (keyAlgorithm == null) + keyAlgorithm = DFLT_KEY_ALGORITHM; SslContextFactory f = new SslContextFactory(); diff --git a/modules/core/src/main/java/org/apache/ignite/ssl/SslContextFactory.java b/modules/core/src/main/java/org/apache/ignite/ssl/SslContextFactory.java index 7fe7c6fbea7ce..4fef6636b3d4b 100644 --- a/modules/core/src/main/java/org/apache/ignite/ssl/SslContextFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/ssl/SslContextFactory.java @@ -60,14 +60,25 @@ public class SslContextFactory implements Factory { /** */ private static final long serialVersionUID = 0L; - /** Default key store type. */ - public static final String DFLT_STORE_TYPE = "JKS"; + /** Default key / trust store type. */ + public static final String DFLT_STORE_TYPE = System.getProperty("javax.net.ssl.keyStoreType", "JKS"); /** Default SSL protocol. */ public static final String DFLT_SSL_PROTOCOL = "TLS"; - /** Default key manager algorithm. 
*/ - public static final String DFLT_KEY_ALGORITHM = "SunX509"; + /** + * Property name to specify default key/trust manager algorithm. + * + * @deprecated Use {@code "ssl.KeyManagerFactory.algorithm"} instead as per JSSE standard. + * + * Should be considered for deletion in 9.0. + */ + @Deprecated + public static final String IGNITE_KEY_ALGORITHM_PROPERTY = "ssl.key.algorithm"; + + /** Default key manager / trust manager algorithm. Specifying different trust manager algorithm is not supported. */ + public static final String DFLT_KEY_ALGORITHM = System.getProperty("ssl.KeyManagerFactory.algorithm", + System.getProperty(IGNITE_KEY_ALGORITHM_PROPERTY, "SunX509")); /** SSL protocol. */ private String proto = DFLT_SSL_PROTOCOL; @@ -178,8 +189,7 @@ public String getKeyAlgorithm() { } /** - * Sets key manager algorithm that will be used to create a key manager. Notice that in most cased default value - * suites well, however, on Android platform this value need to be set to X509. + * Sets key manager algorithm that will be used to create a key manager. * * @param keyAlgorithm Key algorithm name. 
*/ diff --git a/modules/core/src/test/java/org/apache/ignite/client/ClientConfigurationTest.java b/modules/core/src/test/java/org/apache/ignite/client/ClientConfigurationTest.java index 287c6ec6793bc..dcb78efa5bdcb 100644 --- a/modules/core/src/test/java/org/apache/ignite/client/ClientConfigurationTest.java +++ b/modules/core/src/test/java/org/apache/ignite/client/ClientConfigurationTest.java @@ -42,6 +42,8 @@ import org.junit.Test; import org.junit.rules.Timeout; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_KEY_ALGORITHM; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_STORE_TYPE; import static org.junit.Assert.assertTrue; /** @@ -63,12 +65,12 @@ public void testSerialization() throws IOException, ClassNotFoundException { ) .setSslMode(SslMode.REQUIRED) .setSslClientCertificateKeyStorePath("client.jks") - .setSslClientCertificateKeyStoreType("JKS") + .setSslClientCertificateKeyStoreType(DFLT_STORE_TYPE) .setSslClientCertificateKeyStorePassword("123456") .setSslTrustCertificateKeyStorePath("trust.jks") - .setSslTrustCertificateKeyStoreType("JKS") + .setSslTrustCertificateKeyStoreType(DFLT_STORE_TYPE) .setSslTrustCertificateKeyStorePassword("123456") - .setSslKeyAlgorithm("SunX509"); + .setSslKeyAlgorithm(DFLT_KEY_ALGORITHM); ByteArrayOutputStream outBytes = new ByteArrayOutputStream(); diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java b/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java index 9e68277ed017f..0b19056594c01 100644 --- a/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java +++ b/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java @@ -123,6 +123,9 @@ import org.jetbrains.annotations.Nullable; import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.DFLT_STORE_DIR; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_KEY_ALGORITHM; +import static 
org.apache.ignite.ssl.SslContextFactory.DFLT_SSL_PROTOCOL; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_STORE_TYPE; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -1931,13 +1934,13 @@ public static boolean waitForCondition(GridAbsPredicate cond, BooleanSupplier wa * @throws IOException If keystore cannot be accessed. */ public static SSLContext sslContext() throws GeneralSecurityException, IOException { - SSLContext ctx = SSLContext.getInstance("TLS"); + SSLContext ctx = SSLContext.getInstance(DFLT_SSL_PROTOCOL); char[] storePass = keyStorePassword().toCharArray(); - KeyManagerFactory keyMgrFactory = KeyManagerFactory.getInstance("SunX509"); + KeyManagerFactory keyMgrFactory = KeyManagerFactory.getInstance(DFLT_KEY_ALGORITHM); - KeyStore keyStore = KeyStore.getInstance("JKS"); + KeyStore keyStore = KeyStore.getInstance(DFLT_STORE_TYPE); keyStore.load(new FileInputStream(U.resolveIgnitePath(GridTestProperties.getProperty("ssl.keystore.path"))), storePass); diff --git a/modules/indexing/src/test/java/org/apache/ignite/client/SecurityTest.java b/modules/indexing/src/test/java/org/apache/ignite/client/SecurityTest.java index dae7a9e362532..e64713a5c9baf 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/client/SecurityTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/client/SecurityTest.java @@ -40,6 +40,8 @@ import org.junit.Test; import org.junit.rules.Timeout; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_KEY_ALGORITHM; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_STORE_TYPE; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -113,12 +115,12 @@ public void testEncryption() throws Exception { try (IgniteClient client = Ignition.startClient(clientCfg .setSslMode(SslMode.REQUIRED) .setSslClientCertificateKeyStorePath(rsrcPath.apply("/client.jks")) - 
.setSslClientCertificateKeyStoreType("JKS") + .setSslClientCertificateKeyStoreType(DFLT_STORE_TYPE) .setSslClientCertificateKeyStorePassword("123456") .setSslTrustCertificateKeyStorePath(rsrcPath.apply("/trust.jks")) - .setSslTrustCertificateKeyStoreType("JKS") + .setSslTrustCertificateKeyStoreType(DFLT_STORE_TYPE) .setSslTrustCertificateKeyStorePassword("123456") - .setSslKeyAlgorithm("SunX509") + .setSslKeyAlgorithm(DFLT_KEY_ALGORITHM) .setSslTrustAll(false) .setSslProtocol(SslProtocol.TLS) )) { From 4d28cb5790547ca82a0bcd3d8ededc8856ccb263 Mon Sep 17 00:00:00 2001 From: Pavel Tupitsyn Date: Thu, 26 Nov 2020 18:42:24 +0300 Subject: [PATCH 069/110] IGNITE-13760 .NET: Fix NullPointerException in GetAffinity on client nodes Cache context does not exist on client nodes unless `GetCache` was called, so `GridCacheAffinityManager` retrieval in `PlatformAffinity` constructor can cause NPE. `GridCacheAffinityManager` was added to `PlatformAffinity` solely for Platform Cache needs and is not used otherwise, so it makes sense to move this functionality to a dedicated `PlatformAffinityManager` class. This fixes the bug, because cache context always exists by the time Platform Cache is created. 
--- .../platform/PlatformProcessorImpl.java | 12 ++- .../cache/affinity/PlatformAffinity.java | 48 +----------- .../affinity/PlatformAffinityManager.java | 78 +++++++++++++++++++ .../Cache/Affinity/AffinityTest.cs | 56 +++++++++---- .../Apache.Ignite.Core.csproj | 1 + .../Impl/Cache/CacheAffinityImpl.cs | 23 +----- .../Impl/Cache/CacheAffinityManager.cs | 53 +++++++++++++ .../Impl/Cache/Platform/PlatformCache.cs | 28 +++---- .../Cache/Platform/PlatformCacheManager.cs | 36 ++++----- .../Impl/Client/IgniteClient.cs | 6 ++ .../Impl/IIgniteInternal.cs | 7 ++ .../dotnet/Apache.Ignite.Core/Impl/Ignite.cs | 47 +++++++---- 12 files changed, 265 insertions(+), 130 deletions(-) create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/affinity/PlatformAffinityManager.java create mode 100644 modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheAffinityManager.cs diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/PlatformProcessorImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/PlatformProcessorImpl.java index 61dec3a4768b4..266b9de026183 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/PlatformProcessorImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/PlatformProcessorImpl.java @@ -52,6 +52,7 @@ import org.apache.ignite.internal.processors.platform.cache.PlatformCacheExtension; import org.apache.ignite.internal.processors.platform.cache.PlatformCacheManager; import org.apache.ignite.internal.processors.platform.cache.affinity.PlatformAffinity; +import org.apache.ignite.internal.processors.platform.cache.affinity.PlatformAffinityManager; import org.apache.ignite.internal.processors.platform.cache.store.PlatformCacheStore; import org.apache.ignite.internal.processors.platform.cluster.PlatformClusterGroup; import 
org.apache.ignite.internal.processors.platform.datastreamer.PlatformDataStreamer; @@ -193,6 +194,9 @@ public class PlatformProcessorImpl extends GridProcessorAdapter implements Platf /** */ private static final int OP_GET_OR_CREATE_LOCK = 38; + /** */ + private static final int OP_GET_AFFINITY_MANAGER = 39; + /** Start latch. */ private final CountDownLatch startLatch = new CountDownLatch(1); @@ -637,7 +641,7 @@ private void loggerLog(int level, String message, String category, String errorI } case OP_GET_AFFINITY: { - return new PlatformAffinity(platformCtx, ctx, reader.readString()); + return new PlatformAffinity(platformCtx, reader.readString()); } case OP_GET_DATA_STREAMER: { @@ -739,6 +743,12 @@ private void loggerLog(int level, String message, String category, String errorI return lock == null ? null : new PlatformLock(platformCtx, lock); } + + case OP_GET_AFFINITY_MANAGER: { + int cacheId = reader.readInt(); + + return new PlatformAffinityManager(platformCtx, cacheId); + } } return PlatformAbstractTarget.throwUnsupported(type); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/affinity/PlatformAffinity.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/affinity/PlatformAffinity.java index e18be6485b97c..a0d79f24f5535 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/affinity/PlatformAffinity.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/affinity/PlatformAffinity.java @@ -23,22 +23,17 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.cache.affinity.Affinity; import org.apache.ignite.cluster.ClusterNode; -import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.binary.BinaryRawReaderEx; import org.apache.ignite.internal.binary.BinaryRawWriterEx; import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; -import 
org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; -import org.apache.ignite.internal.processors.cache.GridCacheAffinityManager; -import org.apache.ignite.internal.processors.cache.GridCacheUtils; import org.apache.ignite.internal.processors.platform.PlatformAbstractTarget; import org.apache.ignite.internal.processors.platform.PlatformContext; import org.apache.ignite.internal.processors.platform.utils.PlatformUtils; -import org.apache.ignite.internal.util.typedef.C1; import org.apache.ignite.internal.util.typedef.internal.U; import org.jetbrains.annotations.Nullable; /** - * Native cache wrapper implementation. + * Affinity wrapper for platforms. */ public class PlatformAffinity extends PlatformAbstractTarget { /** */ @@ -86,45 +81,28 @@ public class PlatformAffinity extends PlatformAbstractTarget { /** */ public static final int OP_PARTITIONS = 15; - /** */ - public static final int OP_IS_ASSIGNMENT_VALID = 16; - - /** */ - private static final C1 TO_NODE_ID = new C1() { - @Nullable @Override public UUID apply(ClusterNode node) { - return node != null ? node.id() : null; - } - }; - /** Underlying cache affinity. */ private final Affinity aff; /** Discovery manager */ private final GridDiscoveryManager discovery; - /** Affinity manager. */ - private final GridCacheAffinityManager affMgr; - /** * Constructor. * * @param platformCtx Context. - * @param igniteCtx Ignite context. * @param name Cache name. 
*/ - public PlatformAffinity(PlatformContext platformCtx, GridKernalContext igniteCtx, @Nullable String name) + public PlatformAffinity(PlatformContext platformCtx, @Nullable String name) throws IgniteCheckedException { super(platformCtx); - this.aff = igniteCtx.grid().affinity(name); + aff = platformCtx.kernalContext().grid().affinity(name); if (aff == null) throw new IgniteCheckedException("Cache with the given name doesn't exist: " + name); - this.affMgr = this.platformCtx.kernalContext().cache().context().cacheContext(GridCacheUtils.cacheId(name)) - .affinity(); - - discovery = igniteCtx.discovery(); + discovery = platformCtx.kernalContext().discovery(); } /** {@inheritDoc} */ @@ -172,24 +150,6 @@ public PlatformAffinity(PlatformContext platformCtx, GridKernalContext igniteCtx return aff.isPrimaryOrBackup(node, key) ? TRUE : FALSE; } - case OP_IS_ASSIGNMENT_VALID: { - AffinityTopologyVersion ver = new AffinityTopologyVersion(reader.readLong(), reader.readInt()); - int part = reader.readInt(); - AffinityTopologyVersion endVer = affMgr.affinityTopologyVersion(); - - if (!affMgr.primaryChanged(part, ver, endVer)) { - return TRUE; - } - - if (!affMgr.partitionLocalNode(part, endVer)) { - return FALSE; - } - - // Special case: late affinity assignment when primary changes to local node due to a node join. - // Specified partition is local, and near cache entries are valid for primary keys. - return ver.topologyVersion() == endVer.topologyVersion() ? 
TRUE : FALSE; - } - default: return super.processInStreamOutLong(type, reader); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/affinity/PlatformAffinityManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/affinity/PlatformAffinityManager.java new file mode 100644 index 0000000000000..92306a6326fd0 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/affinity/PlatformAffinityManager.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.platform.cache.affinity; + +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.binary.BinaryRawReaderEx; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; +import org.apache.ignite.internal.processors.cache.GridCacheAffinityManager; +import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.processors.platform.PlatformAbstractTarget; +import org.apache.ignite.internal.processors.platform.PlatformContext; + +/** + * AffinityManager wrapper for platforms. + */ +public class PlatformAffinityManager extends PlatformAbstractTarget { + /** */ + public static final int OP_IS_ASSIGNMENT_VALID = 1; + + /** Affinity manager. */ + private final GridCacheAffinityManager affMgr; + + /** + * Constructor. + * + * @param platformCtx Context. + */ + public PlatformAffinityManager(PlatformContext platformCtx, int cacheId) { + super(platformCtx); + + GridCacheContext ctx = platformCtx.kernalContext().cache().context().cacheContext(cacheId); + + if (ctx == null) + throw new IgniteException("Cache doesn't exist: " + cacheId); + + affMgr = ctx.affinity(); + } + + /** {@inheritDoc} */ + @Override public long processInStreamOutLong(int type, BinaryRawReaderEx reader) throws IgniteCheckedException { + if (type == OP_IS_ASSIGNMENT_VALID) + { + AffinityTopologyVersion ver = new AffinityTopologyVersion(reader.readLong(), reader.readInt()); + int part = reader.readInt(); + AffinityTopologyVersion endVer = affMgr.affinityTopologyVersion(); + + if (!affMgr.primaryChanged(part, ver, endVer)) { + return TRUE; + } + + if (!affMgr.partitionLocalNode(part, endVer)) { + return FALSE; + } + + // Special case: late affinity assignment when primary changes to local node due to a node join. + // Specified partition is local, and near cache entries are valid for primary keys. 
+ return ver.topologyVersion() == endVer.topologyVersion() ? TRUE : FALSE; + } + + return super.processInStreamOutLong(type, reader); + } +} diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Affinity/AffinityTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Affinity/AffinityTest.cs index 73a1c1f8da8be..37bc53be75728 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Affinity/AffinityTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Affinity/AffinityTest.cs @@ -38,15 +38,9 @@ public sealed class AffinityTest [TestFixtureSetUp] public void StartGrids() { - for (int i = 0; i < 3; i++) + for (var i = 0; i < 3; i++) { - var cfg = new IgniteConfiguration(TestUtils.GetTestConfiguration()) - { - SpringConfigUrl = Path.Combine("Config", "native-client-test-cache-affinity.xml"), - IgniteInstanceName = "grid-" + i - }; - - Ignition.Start(cfg); + Ignition.Start(GetConfig(i, client: i == 2)); } } @@ -75,6 +69,21 @@ public void TestAffinity() Assert.AreEqual(node.Id, aff.MapKeyToNode(new AffinityTestKey(i, 1)).Id); } + /// + /// Tests that affinity can be retrieved from client node right after the cache has been started on server node. + /// + [Test] + public void TestAffinityRetrievalForNewCache() + { + var server = Ignition.GetIgnite("grid-0"); + var client = Ignition.GetIgnite("grid-2"); + + var serverCache = server.CreateCache(TestUtils.TestName); + var clientAff = client.GetAffinity(serverCache.Name); + + Assert.IsNotNull(clientAff); + } + /// /// Test affinity with binary flag. /// @@ -101,18 +110,20 @@ public void TestAffinityBinary() /// /// Tests that works when used on a property of a type that is /// specified as or and - /// configured in a Spring XML file. + /// configured in a Spring XML file. 
/// [Test] public void TestAffinityKeyMappedWithQueryEntitySpringXml() { - TestAffinityKeyMappedWithQueryEntity0(Ignition.GetIgnite("grid-0"), "cache1"); - TestAffinityKeyMappedWithQueryEntity0(Ignition.GetIgnite("grid-1"), "cache1"); + foreach (var ignite in Ignition.GetAll()) + { + TestAffinityKeyMappedWithQueryEntity0(ignite, "cache1"); + } } /// /// Tests that works when used on a property of a type that is - /// specified as or . + /// specified as or . /// [Test] public void TestAffinityKeyMappedWithQueryEntity() @@ -194,7 +205,20 @@ public override int GetHashCode() return _id; } } - + + /// + /// Gets Ignite config. + /// + private static IgniteConfiguration GetConfig(int idx, bool client = false) + { + return new IgniteConfiguration(TestUtils.GetTestConfiguration()) + { + SpringConfigUrl = Path.Combine("Config", "native-client-test-cache-affinity.xml"), + IgniteInstanceName = "grid-" + idx, + ClientMode = client + }; + } + /// /// Query entity key. /// @@ -204,12 +228,12 @@ private class QueryEntityKey /** */ [QuerySqlField] public string Data { get; set; } - + /** */ [AffinityKeyMapped] public long AffinityKey { get; set; } } - + /// /// Query entity key. 
/// @@ -219,7 +243,7 @@ private class QueryEntityValue /** */ [QuerySqlField] public string Name { get; set; } - + /** */ [AffinityKeyMapped] public long AffKey { get; set; } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj b/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj index ca3f1225c6daf..58ea5f7aadb57 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj @@ -101,6 +101,7 @@ + diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheAffinityImpl.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheAffinityImpl.cs index 869518eb52909..a49074b219f68 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheAffinityImpl.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheAffinityImpl.cs @@ -20,7 +20,6 @@ namespace Apache.Ignite.Core.Impl.Cache using System; using System.Collections.Generic; using Apache.Ignite.Core.Cache; - using Apache.Ignite.Core.Cache.Affinity; using Apache.Ignite.Core.Cluster; using Apache.Ignite.Core.Impl.Binary; using Apache.Ignite.Core.Impl.Binary.IO; @@ -76,12 +75,9 @@ internal class CacheAffinityImpl : PlatformTargetAdapter, ICacheAffinity /** */ private const int OpPartitions = 15; - /** */ - private const int OpIsAssignmentValid = 16; - /** */ private readonly bool _keepBinary; - + /** Grid. 
*/ private readonly IIgniteInternal _ignite; @@ -115,7 +111,7 @@ public int GetPartition(TK key) public bool IsPrimary(IClusterNode n, TK key) { IgniteArgumentCheck.NotNull(n, "n"); - + IgniteArgumentCheck.NotNull(key, "key"); return DoOutOp(OpIsPrimary, n.Id, key) == True; @@ -220,19 +216,6 @@ public IList MapPartitionToPrimaryAndBackups(int part) return DoOutInOp(OpMapPartitionToPrimaryAndBackups, w => w.WriteObject(part), r => ReadNodes(r)); } - /// - /// Checks whether given partition is still assigned to the same node as in specified version. - /// - internal bool IsAssignmentValid(AffinityTopologyVersion version, int partition) - { - return DoOutOp(OpIsAssignmentValid, (IBinaryStream s) => - { - s.WriteLong(version.Version); - s.WriteInt(version.MinorVersion); - s.WriteInt(partition); - }) != 0; - } - /** */ protected override T Unmarshal(IBinaryStream stream) { @@ -283,4 +266,4 @@ private Dictionary ReadDictionary(IBinaryStream reader, Func + /// Affinity manager. + /// + internal class CacheAffinityManager : PlatformTargetAdapter + { + /** */ + private const int OpIsAssignmentValid = 1; + + /// + /// Initializes a new instance of class. + /// + /// Target. + internal CacheAffinityManager(IPlatformTargetInternal target) : base(target) + { + // No-op. + } + + /// + /// Checks whether given partition is still assigned to the same node as in specified version. 
+ /// + internal bool IsAssignmentValid(AffinityTopologyVersion version, int partition) + { + return DoOutOp(OpIsAssignmentValid, (IBinaryStream s) => + { + s.WriteLong(version.Version); + s.WriteInt(version.MinorVersion); + s.WriteInt(partition); + }) != 0; + } + } +} diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Platform/PlatformCache.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Platform/PlatformCache.cs index d18aad9b50c52..c7bb09fafe522 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Platform/PlatformCache.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Platform/PlatformCache.cs @@ -32,8 +32,8 @@ namespace Apache.Ignite.Core.Impl.Cache.Platform internal sealed class PlatformCache : IPlatformCache { /** Affinity. */ - private readonly CacheAffinityImpl _affinity; - + private readonly CacheAffinityManager _affinity; + /** Keep binary flag. */ private readonly bool _keepBinary; @@ -44,7 +44,7 @@ internal sealed class PlatformCache : IPlatformCache private readonly Func _affinityTopologyVersionFunc; /** Underlying map. */ - private readonly ConcurrentDictionary> _map = + private readonly ConcurrentDictionary> _map = new ConcurrentDictionary>(); /** Stopped flag. */ @@ -52,9 +52,9 @@ internal sealed class PlatformCache : IPlatformCache /// /// Initializes a new instance of the class. - /// Called via reflection from . + /// Called via reflection from . /// - public PlatformCache(Func affinityTopologyVersionFunc, CacheAffinityImpl affinity, bool keepBinary) + public PlatformCache(Func affinityTopologyVersionFunc, CacheAffinityManager affinity, bool keepBinary) { _affinityTopologyVersionFunc = affinityTopologyVersionFunc; _affinity = affinity; @@ -78,7 +78,7 @@ public bool TryGetValue(TKey key, out TVal val) PlatformCacheEntry entry; var key0 = (TK) (object) key; - + if (_map.TryGetValue(key0, out entry)) { if (IsValid(entry)) @@ -106,7 +106,7 @@ public int GetSize(int? 
partition) } var count = 0; - + foreach (var e in _map) { if (!IsValid(e.Value)) @@ -118,7 +118,7 @@ public int GetSize(int? partition) { continue; } - + count++; } @@ -179,7 +179,7 @@ public void Stop() _stopped = true; Clear(); } - + /** */ public void Clear() { @@ -227,19 +227,19 @@ public IEnumerable> GetEntries(int? partitio /// When primary node changes for a key, GridNearCacheEntry stops receiving updates for that key, /// because reader ("subscription") on new primary is not yet established. /// - /// This method is similar to GridNearCacheEntry.valid(). + /// This method is similar to GridNearCacheEntry.valid(). /// /// Entry to validate. /// Value type. /// True if entry is valid and can be returned to the user; false otherwise. private bool IsValid(PlatformCacheEntry entry) { - // See comments on _affinityTopologyVersionFunc about boxed copy approach. + // See comments on _affinityTopologyVersionFunc about boxed copy approach. var currentVerBoxed = _affinityTopologyVersionFunc(); var entryVerBoxed = entry.Version; - + Debug.Assert(currentVerBoxed != null); - + if (ReferenceEquals(currentVerBoxed, entryVerBoxed)) { // Happy path: true on stable topology. @@ -267,7 +267,7 @@ private bool IsValid(PlatformCacheEntry entry) return valid; } - + /// /// Gets boxed affinity version. Reuses existing boxing copy to reduce allocations. /// diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Platform/PlatformCacheManager.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Platform/PlatformCacheManager.cs index f1fe492a06c96..e7f913010fbd4 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Platform/PlatformCacheManager.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Platform/PlatformCacheManager.cs @@ -38,12 +38,12 @@ internal class PlatformCacheManager /// Holds thread-local key/val pair to be used for updating platform cache. 
/// internal static readonly ThreadLocal ThreadLocalPair = new ThreadLocal(); - + /// /// Platform caches per cache id. /// Multiple instances can point to the same Ignite cache, - /// and share one instance. - /// + /// and share one instance. + /// private readonly CopyOnWriteConcurrentDictionary _caches = new CopyOnWriteConcurrentDictionary(); @@ -56,9 +56,9 @@ private readonly CopyOnWriteConcurrentDictionary _caches /// Current topology version. Store as object for atomic updates. /// private volatile object _affinityTopologyVersion; - + /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// Ignite. public PlatformCacheManager(IIgniteInternal ignite) @@ -77,7 +77,7 @@ public IPlatformCache GetOrCreatePlatformCache(CacheConfiguration cacheConfigura Debug.Assert(cacheConfiguration != null); var cacheId = BinaryUtils.GetCacheId(cacheConfiguration.Name); - + return _caches.GetOrAdd(cacheId, _ => CreatePlatformCache(cacheConfiguration)); } @@ -89,15 +89,15 @@ public IPlatformCache TryGetPlatformCache(int cacheId) IPlatformCache platformCache; return _caches.TryGetValue(cacheId, out platformCache) ? platformCache : null; } - + /// /// Reads cache entry from a stream and updates the platform cache. /// public void Update(int cacheId, IBinaryStream stream, Marshaller marshaller) { - var cache = _caches.GetOrAdd(cacheId, + var cache = _caches.GetOrAdd(cacheId, _ => CreatePlatformCache(_ignite.GetCacheConfiguration(cacheId))); - + cache.Update(stream, marshaller); } @@ -133,7 +133,7 @@ public void OnAffinityTopologyVersionChanged(AffinityTopologyVersion affinityTop { _affinityTopologyVersion = affinityTopologyVersion; } - + /// /// Creates platform cache. 
/// @@ -141,9 +141,9 @@ private IPlatformCache CreatePlatformCache(CacheConfiguration cacheConfiguration { var platformCfg = cacheConfiguration.PlatformCacheConfiguration; Debug.Assert(platformCfg != null); - + Func affinityTopologyVersionFunc = () => _affinityTopologyVersion; - var affinity = _ignite.GetAffinity(cacheConfiguration.Name); + var affinity = _ignite.GetAffinityManager(cacheConfiguration.Name); var keepBinary = platformCfg.KeepBinary; TypeResolver resolver = null; @@ -164,7 +164,7 @@ private IPlatformCache CreatePlatformCache(CacheConfiguration cacheConfiguration if (resolved == null) { throw new InvalidOperationException(string.Format( - "Can not create .NET Platform Cache: {0}.{1} is invalid. Failed to resolve type: '{2}'", + "Can not create .NET Platform Cache: {0}.{1} is invalid. Failed to resolve type: '{2}'", typeof(PlatformCacheConfiguration).Name, fieldName, typeName)); } @@ -174,16 +174,16 @@ private IPlatformCache CreatePlatformCache(CacheConfiguration cacheConfiguration var keyType = resolve(platformCfg.KeyTypeName, "KeyTypeName"); var valType = resolve(platformCfg.ValueTypeName, "ValueTypeName"); var cacheType = typeof(PlatformCache<,>).MakeGenericType(keyType, valType); - + var platformCache = Activator.CreateInstance( - cacheType, - affinityTopologyVersionFunc, + cacheType, + affinityTopologyVersionFunc, affinity, keepBinary); - + return (IPlatformCache) platformCache; } - + /// /// Handles client disconnect. 
/// diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/IgniteClient.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/IgniteClient.cs index 7dee984368df8..6f5ec93d2f5f5 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/IgniteClient.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/IgniteClient.cs @@ -229,6 +229,12 @@ public CacheAffinityImpl GetAffinity(string cacheName) throw GetClientNotSupportedException(); } + /** */ + public CacheAffinityManager GetAffinityManager(string cacheName) + { + throw GetClientNotSupportedException(); + } + /** */ public CacheConfiguration GetCacheConfiguration(int cacheId) { diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/IIgniteInternal.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/IIgniteInternal.cs index e62b89c1de1db..d727908ef03f1 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/IIgniteInternal.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/IIgniteInternal.cs @@ -92,6 +92,13 @@ internal interface IIgniteInternal /// Cache data affinity service. CacheAffinityImpl GetAffinity(string cacheName); + /// + /// Gets internal affinity manager for a given cache. + /// + /// Cache name. + /// Cache affinity manager. + CacheAffinityManager GetAffinityManager(string cacheName); + /// /// Gets cache name by id. 
/// diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Ignite.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Ignite.cs index 9dfa40b480779..640f47219c3c7 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Ignite.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Ignite.cs @@ -35,6 +35,7 @@ namespace Apache.Ignite.Core.Impl using Apache.Ignite.Core.DataStructures; using Apache.Ignite.Core.Events; using Apache.Ignite.Core.Impl.Binary; + using Apache.Ignite.Core.Impl.Binary.IO; using Apache.Ignite.Core.Impl.Cache; using Apache.Ignite.Core.Impl.Cache.Platform; using Apache.Ignite.Core.Impl.Cluster; @@ -101,7 +102,8 @@ private enum Op SetBaselineAutoAdjustTimeout = 35, GetCacheConfig = 36, GetThreadLocal = 37, - GetOrCreateLock = 38 + GetOrCreateLock = 38, + GetAffinityManager = 39, } /** */ @@ -139,7 +141,7 @@ private enum Op new ConcurrentDictionary(); /** Client reconnect task completion source. */ - private volatile TaskCompletionSource _clientReconnectTaskCompletionSource = + private volatile TaskCompletionSource _clientReconnectTaskCompletionSource = new TaskCompletionSource(); /** Plugin processor. 
*/ @@ -189,7 +191,7 @@ public Ignite(IgniteConfiguration cfg, string name, IPlatformTargetInternal proc SetCompactFooter(); _pluginProcessor = new PluginProcessor(this); - + _platformCacheManager = new PlatformCacheManager(this); } @@ -470,7 +472,7 @@ public ICache GetOrCreateCache(CacheConfiguration configuration, public ICache GetOrCreateCache(CacheConfiguration configuration, NearCacheConfiguration nearConfiguration, PlatformCacheConfiguration platformCacheConfiguration) { - return GetOrCreateCache(configuration, nearConfiguration, platformCacheConfiguration, + return GetOrCreateCache(configuration, nearConfiguration, platformCacheConfiguration, Op.GetOrCreateCacheFromConfig); } @@ -491,7 +493,7 @@ public ICache CreateCache(CacheConfiguration configuration) } /** */ - public ICache CreateCache(CacheConfiguration configuration, + public ICache CreateCache(CacheConfiguration configuration, NearCacheConfiguration nearConfiguration) { return CreateCache(configuration, nearConfiguration, null); @@ -501,14 +503,14 @@ public ICache CreateCache(CacheConfiguration configuration, public ICache CreateCache(CacheConfiguration configuration, NearCacheConfiguration nearConfiguration, PlatformCacheConfiguration platformCacheConfiguration) { - return GetOrCreateCache(configuration, nearConfiguration, platformCacheConfiguration, + return GetOrCreateCache(configuration, nearConfiguration, platformCacheConfiguration, Op.CreateCacheFromConfig); } /// /// Gets or creates the cache. 
/// - private ICache GetOrCreateCache(CacheConfiguration configuration, + private ICache GetOrCreateCache(CacheConfiguration configuration, NearCacheConfiguration nearConfiguration, PlatformCacheConfiguration platformCacheConfiguration, Op op) { IgniteArgumentCheck.NotNull(configuration, "configuration"); @@ -646,10 +648,21 @@ CacheAffinityImpl IIgniteInternal.GetAffinity(string cacheName) IgniteArgumentCheck.NotNull(cacheName, "cacheName"); var aff = DoOutOpObject((int) Op.GetAffinity, w => w.WriteString(cacheName)); - + return new CacheAffinityImpl(aff, false); } + /** */ + public CacheAffinityManager GetAffinityManager(string cacheName) + { + IgniteArgumentCheck.NotNull(cacheName, "cacheName"); + + var mgr = DoOutOpObject((int) Op.GetAffinityManager, + (IBinaryStream s) => s.WriteInt(BinaryUtils.GetCacheId(cacheName))); + + return new CacheAffinityManager(mgr); + } + /** */ public ICacheAffinity GetAffinity(string cacheName) { @@ -918,7 +931,7 @@ public void DisableWal(string cacheName) public void EnableWal(string cacheName) { IgniteArgumentCheck.NotNull(cacheName, "cacheName"); - + DoOutOp((int) Op.EnableWal, w => w.WriteString(cacheName)); } @@ -933,7 +946,7 @@ public bool IsWalEnabled(string cacheName) /** */ public void SetTxTimeoutOnPartitionMapExchange(TimeSpan timeout) { - DoOutOp((int) Op.SetTxTimeoutOnPartitionMapExchange, + DoOutOp((int) Op.SetTxTimeoutOnPartitionMapExchange, (BinaryWriter w) => w.WriteLong((long) timeout.TotalMilliseconds)); } @@ -1005,7 +1018,7 @@ public IIgniteLock GetOrCreateLock(string name) { Name = name }; - + return GetOrCreateLock(configuration, true); } @@ -1014,7 +1027,7 @@ public IIgniteLock GetOrCreateLock(LockConfiguration configuration, bool create) { IgniteArgumentCheck.NotNull(configuration, "configuration"); IgniteArgumentCheck.NotNullOrEmpty(configuration.Name, "configuration.Name"); - + // Create a copy to ignore modifications from outside. 
var cfg = new LockConfiguration(configuration); @@ -1025,7 +1038,7 @@ public IIgniteLock GetOrCreateLock(LockConfiguration configuration, bool create) w.WriteBoolean(configuration.IsFair); w.WriteBoolean(create); }); - + return target == null ? null : new IgniteLock(target, cfg); } @@ -1123,13 +1136,13 @@ public void UpdateNodeInfo(long memPtr) internal ITransactions GetTransactionsWithLabel(string label) { Debug.Assert(label != null); - + var platformTargetInternal = DoOutOpObject((int) Op.GetTransactions, s => { var w = BinaryUtils.Marshaller.StartMarshal(s); w.WriteString(label); }); - + return new TransactionsImpl(this, platformTargetInternal, GetLocalNode().Id, label); } @@ -1163,7 +1176,7 @@ internal void OnClientDisconnected() // Raise events. _clientReconnectTaskCompletionSource = new TaskCompletionSource(); - + var handler = ClientDisconnected; if (handler != null) handler.Invoke(this, EventArgs.Empty); @@ -1176,7 +1189,7 @@ internal void OnClientDisconnected() internal void OnClientReconnected(bool clusterRestarted) { _marsh.OnClientReconnected(clusterRestarted); - + _clientReconnectTaskCompletionSource.TrySetResult(clusterRestarted); var handler = ClientReconnected; From ea97f8ab24a74eac75b41e41bfa33ada1ee269f4 Mon Sep 17 00:00:00 2001 From: Alexey Goncharuk Date: Thu, 26 Nov 2020 19:22:09 +0300 Subject: [PATCH 070/110] IGNITE-13753 Fix non-thread-safe collection in JmxMetricExporterSpi - Fixes #8492. 
Signed-off-by: Alexey Goncharuk --- .../spi/metric/jmx/JmxMetricExporterSpi.java | 9 +- .../spi/metric/jmx/DummyMBeanServer.java | 291 ++++++++++++++++++ .../metric/jmx/JmxMetricExporterSpiTest.java | 141 +++++++++ .../ignite/testsuites/IgniteSpiTestSuite.java | 5 +- 4 files changed, 443 insertions(+), 3 deletions(-) create mode 100644 modules/core/src/test/java/org/apache/ignite/spi/metric/jmx/DummyMBeanServer.java create mode 100644 modules/core/src/test/java/org/apache/ignite/spi/metric/jmx/JmxMetricExporterSpiTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/spi/metric/jmx/JmxMetricExporterSpi.java b/modules/core/src/main/java/org/apache/ignite/spi/metric/jmx/JmxMetricExporterSpi.java index 7671a81d0f3ce..fe560073fec1f 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/metric/jmx/JmxMetricExporterSpi.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/metric/jmx/JmxMetricExporterSpi.java @@ -18,6 +18,7 @@ package org.apache.ignite.spi.metric.jmx; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.function.Predicate; import javax.management.JMException; @@ -46,7 +47,7 @@ public class JmxMetricExporterSpi extends IgniteSpiAdapter implements MetricExpo private @Nullable Predicate filter; /** Registered beans. */ - private final List mBeans = new ArrayList<>(); + private final List mBeans = Collections.synchronizedList(new ArrayList<>()); /** {@inheritDoc} */ @Override public void spiStart(@Nullable String igniteInstanceName) throws IgniteSpiException { @@ -127,6 +128,10 @@ private void unregister(ReadOnlyMetricRegistry mreg) { unregBean(ignite, bean); } + /** + * @param ignite Ignite instance. + * @param bean Bean name to unregister. 
+ */ private void unregBean(Ignite ignite, ObjectName bean) { MBeanServer jmx = ignite.configuration().getMBeanServer(); @@ -143,7 +148,7 @@ private void unregBean(Ignite ignite, ObjectName bean) { /** {@inheritDoc} */ @Override public void setMetricRegistry(ReadOnlyMetricManager reg) { - this.mreg = reg; + mreg = reg; } /** {@inheritDoc} */ diff --git a/modules/core/src/test/java/org/apache/ignite/spi/metric/jmx/DummyMBeanServer.java b/modules/core/src/test/java/org/apache/ignite/spi/metric/jmx/DummyMBeanServer.java new file mode 100644 index 0000000000000..4d9c467738b99 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/spi/metric/jmx/DummyMBeanServer.java @@ -0,0 +1,291 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.spi.metric.jmx; + +import java.io.ObjectInputStream; +import java.util.Set; +import javax.management.Attribute; +import javax.management.AttributeList; +import javax.management.MBeanInfo; +import javax.management.MBeanServer; +import javax.management.NotificationFilter; +import javax.management.NotificationListener; +import javax.management.ObjectInstance; +import javax.management.ObjectName; +import javax.management.QueryExp; +import javax.management.loading.ClassLoaderRepository; + +/** + * + */ +class DummyMBeanServer implements MBeanServer { + /** */ + public static final String[] DOMAINS = new String[0]; + + /** + * {@inheritDoc} + */ + @Override public ObjectInstance createMBean(String clsName, ObjectName name) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public ObjectInstance createMBean(String clsName, ObjectName name, ObjectName ldrName) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public ObjectInstance createMBean(String clsName, ObjectName name, Object[] params, String[] signature) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public ObjectInstance createMBean(String clsName, ObjectName name, ObjectName ldrName, Object[] params, String[] signature) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public ObjectInstance registerMBean(Object obj, ObjectName name) { + return new ObjectInstance(name, obj.getClass().getName()); + } + + /** + * {@inheritDoc} + */ + @Override public void unregisterMBean(ObjectName name) { + + } + + /** + * {@inheritDoc} + */ + @Override public ObjectInstance getObjectInstance(ObjectName name) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public Set queryMBeans(ObjectName name, QueryExp qry) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public Set queryNames(ObjectName name, QueryExp qry) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public boolean isRegistered(ObjectName 
name) { + return false; + } + + /** + * {@inheritDoc} + */ + @Override public Integer getMBeanCount() { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public Object getAttribute(ObjectName name, String attribute) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public AttributeList getAttributes(ObjectName name, String[] atts) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public void setAttribute(ObjectName name, Attribute attribute) { + + } + + /** + * {@inheritDoc} + */ + @Override public AttributeList setAttributes(ObjectName name, AttributeList atts) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public Object invoke(ObjectName name, String operationName, Object[] params, String[] signature) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public String getDefaultDomain() { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public String[] getDomains() { + return DOMAINS; + } + + /** + * {@inheritDoc} + */ + @Override public void addNotificationListener(ObjectName name, NotificationListener lsnr, NotificationFilter filter, Object handback) { + + } + + /** + * {@inheritDoc} + */ + @Override public void addNotificationListener(ObjectName name, ObjectName lsnr, NotificationFilter filter, Object handback) { + + } + + /** + * {@inheritDoc} + */ + @Override public void removeNotificationListener(ObjectName name, ObjectName lsnr) { + + } + + /** + * {@inheritDoc} + */ + @Override public void removeNotificationListener(ObjectName name, ObjectName lsnr, NotificationFilter filter, Object handback) { + + } + + /** + * {@inheritDoc} + */ + @Override public void removeNotificationListener(ObjectName name, NotificationListener lsnr) { + + } + + /** + * {@inheritDoc} + */ + @Override public void removeNotificationListener(ObjectName name, NotificationListener lsnr, NotificationFilter filter, Object handback) { + + } + + /** + * {@inheritDoc} + */ + @Override public MBeanInfo 
getMBeanInfo(ObjectName name) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public boolean isInstanceOf(ObjectName name, String clsName) { + return false; + } + + /** + * {@inheritDoc} + */ + @Override public Object instantiate(String clsName) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public Object instantiate(String clsName, ObjectName ldrName) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public Object instantiate(String clsName, Object[] params, String[] signature) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public Object instantiate(String clsName, ObjectName ldrName, Object[] params, String[] signature) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public ObjectInputStream deserialize(ObjectName name, byte[] data) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public ObjectInputStream deserialize(String clsName, byte[] data) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public ObjectInputStream deserialize(String clsName, ObjectName ldrName, byte[] data) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public ClassLoader getClassLoaderFor(ObjectName mbeanName) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public ClassLoader getClassLoader(ObjectName ldrName) { + return null; + } + + /** + * {@inheritDoc} + */ + @Override public ClassLoaderRepository getClassLoaderRepository() { + return null; + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/spi/metric/jmx/JmxMetricExporterSpiTest.java b/modules/core/src/test/java/org/apache/ignite/spi/metric/jmx/JmxMetricExporterSpiTest.java new file mode 100644 index 0000000000000..ee6e0f94f8965 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/spi/metric/jmx/JmxMetricExporterSpiTest.java @@ -0,0 +1,141 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.spi.metric.jmx; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Consumer; +import org.apache.commons.collections.iterators.EmptyIterator; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.spi.metric.Metric; +import org.apache.ignite.spi.metric.ReadOnlyMetricManager; +import org.apache.ignite.spi.metric.ReadOnlyMetricRegistry; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.IgniteTestResources; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; +import org.junit.Test; + +/** + * + */ +public class JmxMetricExporterSpiTest extends GridCommonAbstractTest { + /** + * + */ + @Test + public void testConcurrentRegistration() throws IgniteCheckedException { + JmxMetricExporterSpi spi = new JmxMetricExporterSpi(); + + new IgniteTestResources(new DummyMBeanServer()).inject(spi); + + TestMetricsManager testMgr = new TestMetricsManager(); + + spi.setMetricRegistry(testMgr); + + spi.spiStart("testInstance"); + + testMgr.runRegistersConcurrent(); + 
testMgr.runUnregisters(); + } + + /** + * + */ + @SuppressWarnings("unchecked") + private static class TestMetricsManager implements ReadOnlyMetricManager { + /** */ + private final List> creation = new ArrayList<>(); + + /** */ + private final List> rmv = new ArrayList<>(); + + /** {@inheritDoc} */ + @Override public void addMetricRegistryCreationListener(Consumer lsnr) { + creation.add(lsnr); + } + + /** {@inheritDoc} */ + @Override public void addMetricRegistryRemoveListener(Consumer lsnr) { + rmv.add(lsnr); + } + + /** {@inheritDoc} */ + @NotNull @Override public Iterator iterator() { + return EmptyIterator.INSTANCE; + } + + /** + * + */ + public void runRegistersConcurrent() { + final AtomicInteger cntr = new AtomicInteger(); + + GridTestUtils.runMultiThreadedAsync(() -> { + for (int i = 0; i < 20; i++) { + for (Consumer lsnr : creation) + lsnr.accept(new ReadOnlyMetricRegistryStub("stub-" + cntr.getAndIncrement())); + } + }, Runtime.getRuntime().availableProcessors() * 2, "runner-"); + + } + + /** + * + */ + public void runUnregisters() { + for (int i = 0; i < Runtime.getRuntime().availableProcessors() * 2 * 20; i++) { + for (Consumer lsnr : creation) + lsnr.accept(new ReadOnlyMetricRegistryStub("stub-" + i)); + } + } + + /** + * + */ + private static class ReadOnlyMetricRegistryStub implements ReadOnlyMetricRegistry { + /** */ + private final String name; + + /** + * @param name Stub name. 
+ */ + private ReadOnlyMetricRegistryStub(String name) { + this.name = name; + } + + /** {@inheritDoc} */ + @Override public String name() { + return name; + } + + /** {@inheritDoc} */ + @Override public @Nullable M findMetric(String name) { + return null; + } + + /** {@inheritDoc} */ + @NotNull @Override public Iterator iterator() { + return EmptyIterator.INSTANCE; + } + } + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiTestSuite.java index 8a7f0daa7760e..bd53f92355669 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiTestSuite.java @@ -20,6 +20,7 @@ import org.apache.ignite.internal.managers.GridManagerLocalMessageListenerSelfTest; import org.apache.ignite.internal.managers.GridNoopManagerSelfTest; import org.apache.ignite.spi.encryption.KeystoreEncryptionSpiSelfTest; +import org.apache.ignite.spi.metric.jmx.JmxMetricExporterSpiTest; import org.junit.runner.RunWith; import org.junit.runners.Suite; @@ -58,7 +59,9 @@ // Local Message Listener tests. GridManagerLocalMessageListenerSelfTest.class, - KeystoreEncryptionSpiSelfTest.class + KeystoreEncryptionSpiSelfTest.class, + + JmxMetricExporterSpiTest.class }) public class IgniteSpiTestSuite { } From 15a5da500c08948ee081533af97a9f1c2c8330f8 Mon Sep 17 00:00:00 2001 From: samaitra Date: Thu, 26 Nov 2020 14:16:27 -0600 Subject: [PATCH 071/110] IGNITE-12951 Update documents for migrated extensions - Fixes #8488. 
Signed-off-by: samaitra --- .../streaming/camel-streamer.adoc | 6 +++--- .../streaming/flink-streamer.adoc | 6 +++--- .../extensions-and-integrations/streaming/flume-sink.adoc | 2 +- .../extensions-and-integrations/streaming/jms-streamer.adoc | 4 ++-- .../streaming/kafka-streamer.adoc | 6 +++--- .../streaming/mqtt-streamer.adoc | 2 +- .../streaming/rocketmq-streamer.adoc | 6 +++--- .../streaming/storm-streamer.adoc | 6 +++--- .../streaming/twitter-streamer.adoc | 6 +++--- .../streaming/zeromq-streamer.adoc | 4 ++-- 10 files changed, 24 insertions(+), 24 deletions(-) diff --git a/docs/_docs/extensions-and-integrations/streaming/camel-streamer.adoc b/docs/_docs/extensions-and-integrations/streaming/camel-streamer.adoc index 8734d8133e95d..a42129383c68d 100644 --- a/docs/_docs/extensions-and-integrations/streaming/camel-streamer.adoc +++ b/docs/_docs/extensions-and-integrations/streaming/camel-streamer.adoc @@ -43,7 +43,7 @@ to interact with Ignite Caches, Compute, Events, Messaging, etc. from within a C == Maven Dependency -To make use of the `ignite-camel` streamer, you need to add the following dependency: +To make use of the `ignite-camel-ext` streamer, you need to add the following dependency: [tabs] -- @@ -52,8 +52,8 @@ tab:pom.xml[] ---- org.apache.ignite - ignite-camel - ${ignite.version} + ignite-camel-ext + ${ignite-camel-ext.version} ---- -- diff --git a/docs/_docs/extensions-and-integrations/streaming/flink-streamer.adoc b/docs/_docs/extensions-and-integrations/streaming/flink-streamer.adoc index f25ed05dce9b2..92ab398167d67 100644 --- a/docs/_docs/extensions-and-integrations/streaming/flink-streamer.adoc +++ b/docs/_docs/extensions-and-integrations/streaming/flink-streamer.adoc @@ -21,7 +21,7 @@ Starting data transfer to Ignite cache can be done with the following steps. . 
Import Ignite Flink Sink Module in Maven Project If you are using Maven to manage dependencies of your project, you can add Flink module -dependency like this (replace `${ignite.version}` with actual Ignite version you are +dependency like this (replace `${ignite-flink-ext.version}` with actual Ignite Flink Extension version you are interested in): + [tabs] @@ -38,8 +38,8 @@ tab:pom.xml[] ... org.apache.ignite - ignite-flink - ${ignite.version} + ignite-flink-ext + ${ignite-flink-ext.version} ... diff --git a/docs/_docs/extensions-and-integrations/streaming/flume-sink.adoc b/docs/_docs/extensions-and-integrations/streaming/flume-sink.adoc index 97a741df8add7..3697c7cf08e18 100644 --- a/docs/_docs/extensions-and-integrations/streaming/flume-sink.adoc +++ b/docs/_docs/extensions-and-integrations/streaming/flume-sink.adoc @@ -41,7 +41,7 @@ plugins.d/ `-- libext |-- cache-api-1.0.0.jar |-- ignite-core-x.x.x.jar - |-- ignite-flume-x.x.x.jar <-- IgniteSink + |-- ignite-flume-ext.x.x.x.jar <-- IgniteSink |-- ignite-spring-x.x.x.jar |-- spring-aop-4.1.0.RELEASE.jar |-- spring-beans-4.1.0.RELEASE.jar diff --git a/docs/_docs/extensions-and-integrations/streaming/jms-streamer.adoc b/docs/_docs/extensions-and-integrations/streaming/jms-streamer.adoc index 5c7c883ec6e69..b3f9be9864bb7 100644 --- a/docs/_docs/extensions-and-integrations/streaming/jms-streamer.adoc +++ b/docs/_docs/extensions-and-integrations/streaming/jms-streamer.adoc @@ -116,8 +116,8 @@ tab:pom.xml[] ---- org.apache.ignite - ignite-jms11 - ${ignite.version} + ignite-jms11-ext + ${ignite-jms11-ext.version} ---- -- diff --git a/docs/_docs/extensions-and-integrations/streaming/kafka-streamer.adoc b/docs/_docs/extensions-and-integrations/streaming/kafka-streamer.adoc index f00946a78d62e..a45fa4d792f8f 100644 --- a/docs/_docs/extensions-and-integrations/streaming/kafka-streamer.adoc +++ b/docs/_docs/extensions-and-integrations/streaming/kafka-streamer.adoc @@ -153,7 +153,7 @@ 
http://node1:8080/ignite?cmd=size&cacheName=cache1 == Streaming data with Ignite Kafka Streamer Module If you are using Maven to manage dependencies of your project, first of all you will have to add Kafka Streamer module -dependency like this (replace `${ignite.version}` with actual Ignite version you are interested in): +dependency like this (replace `${ignite-kafka-ext.version}` with actual Ignite Kafka Extension version you are interested in): [tabs] -- @@ -169,8 +169,8 @@ tab:pom.xml[] ... org.apache.ignite - ignite-kafka - ${ignite.version} + ignite-kafka-ext + ${ignite-kafka-ext.version} ... diff --git a/docs/_docs/extensions-and-integrations/streaming/mqtt-streamer.adoc b/docs/_docs/extensions-and-integrations/streaming/mqtt-streamer.adoc index f7ec04cf8c306..1339c97fce268 100644 --- a/docs/_docs/extensions-and-integrations/streaming/mqtt-streamer.adoc +++ b/docs/_docs/extensions-and-integrations/streaming/mqtt-streamer.adoc @@ -73,4 +73,4 @@ streamer.start(); ---- -- -Refer to the Javadocs of the `ignite-mqtt` module for more info on the available options. +Refer to the Javadocs of the `ignite-mqtt-ext` module for more info on the available options. diff --git a/docs/_docs/extensions-and-integrations/streaming/rocketmq-streamer.adoc b/docs/_docs/extensions-and-integrations/streaming/rocketmq-streamer.adoc index 4f7dcbb9e5854..a302ca722c4b2 100644 --- a/docs/_docs/extensions-and-integrations/streaming/rocketmq-streamer.adoc +++ b/docs/_docs/extensions-and-integrations/streaming/rocketmq-streamer.adoc @@ -20,7 +20,7 @@ to Ignite. To use Ignite RocketMQ Streamer module . Import it to your Maven project. 
If you are using Maven to manage dependencies of your project, you can add an Ignite -RocketMQ module dependency like this (replace `${ignite.version}` with actual Ignite version you are interested in): +RocketMQ module dependency like this (replace `${ignite-rocketmq-ext.version}` with actual Ignite RocketMQ Extension version you are interested in): + [tabs] -- @@ -36,8 +36,8 @@ tab:pom.xml[] ... org.apache.ignite - ignite-rocketmq - ${ignite.version} + ignite-rocketmq-ext + ${ignite-rocketmq-ext.version} ... diff --git a/docs/_docs/extensions-and-integrations/streaming/storm-streamer.adoc b/docs/_docs/extensions-and-integrations/streaming/storm-streamer.adoc index e6871b7980775..887712e1058fd 100644 --- a/docs/_docs/extensions-and-integrations/streaming/storm-streamer.adoc +++ b/docs/_docs/extensions-and-integrations/streaming/storm-streamer.adoc @@ -19,7 +19,7 @@ Apache Ignite Storm Streamer module provides streaming via http://storm.apache.o Starting data transfer to Ignite can be done with the following steps. . Import Ignite Storm Streamer Module In Maven Project. If you are using Maven to manage dependencies of your project, -you can add Storm module dependency like this (replace `${ignite.version}` with actual Ignite version you are interested in): +you can add Storm module dependency like this (replace `${ignite-storm-ext.version}` with actual Ignite Storm Extension version you are interested in): + [tabs] -- @@ -35,8 +35,8 @@ tab:pom.xml[] ... org.apache.ignite - ignite-storm - ${ignite.version} + ignite-storm-ext + ${ignite-storm-ext.version} ... 
diff --git a/docs/_docs/extensions-and-integrations/streaming/twitter-streamer.adoc b/docs/_docs/extensions-and-integrations/streaming/twitter-streamer.adoc index 8c6e65737f603..4f47c60f25063 100644 --- a/docs/_docs/extensions-and-integrations/streaming/twitter-streamer.adoc +++ b/docs/_docs/extensions-and-integrations/streaming/twitter-streamer.adoc @@ -18,7 +18,7 @@ Ignite Twitter Streamer module consumes tweets from Twitter and feeds the transf To stream data from Twitter into Ignite, you need to: -. Import Ignite Twitter Module with Maven and replace `${ignite.version}` with the actual Ignite version you are interested in. +. Import Ignite Twitter Module with Maven and replace `${ignite-twitter-ext.version}` with the actual Ignite Twitter Extension version you are interested in. + [tabs] -- @@ -27,8 +27,8 @@ tab:pom.xml[] ---- org.apache.ignite - ignite-twitter - ${ignite.version} + ignite-twitter-ext + ${ignite-twitter-ext.version} ---- -- diff --git a/docs/_docs/extensions-and-integrations/streaming/zeromq-streamer.adoc b/docs/_docs/extensions-and-integrations/streaming/zeromq-streamer.adoc index 9432624be1cfa..918c0e827f94e 100644 --- a/docs/_docs/extensions-and-integrations/streaming/zeromq-streamer.adoc +++ b/docs/_docs/extensions-and-integrations/streaming/zeromq-streamer.adoc @@ -29,8 +29,8 @@ tab:pom.xml[] ... org.apache.ignite - ignite-zeromq - ${ignite.version} + ignite-zeromq-ext + ${ignite-zeromq-ext.version} ... From 743195f62716204da61b0356a3b7086eaeb4a595 Mon Sep 17 00:00:00 2001 From: Denis Mekhanikov Date: Mon, 30 Nov 2020 15:53:07 +0300 Subject: [PATCH 072/110] IGNITE-13487 Move logging of delegated errors to debug level. 
(#8283) --- .../processors/job/GridJobWorker.java | 42 ++++++----- .../processors/task/GridTaskWorker.java | 74 +++++++++++-------- 2 files changed, 64 insertions(+), 52 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobWorker.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobWorker.java index b66c65eb35a76..1ff0daad0c7ad 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobWorker.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobWorker.java @@ -38,7 +38,6 @@ import org.apache.ignite.events.JobEvent; import org.apache.ignite.failure.FailureContext; import org.apache.ignite.failure.FailureType; -import org.apache.ignite.internal.GridInternalException; import org.apache.ignite.internal.GridJobContextImpl; import org.apache.ignite.internal.GridJobExecuteResponse; import org.apache.ignite.internal.GridJobSessionImpl; @@ -484,7 +483,8 @@ boolean initialize(GridDeployment dep, Class taskCls) { job = SecurityUtils.sandboxedProxy(ctx, ComputeJob.class, job); } catch (IgniteCheckedException e) { - U.error(log, "Failed to initialize job [jobId=" + ses.getJobId() + ", ses=" + ses + ']', e); + if (log.isDebugEnabled()) + U.error(log, "Failed to initialize job [jobId=" + ses.getJobId() + ", ses=" + ses + ']', e); ex = new IgniteException(e); } @@ -618,28 +618,29 @@ private void execute0(boolean skipNtf) { assert ex != null; } else { - if (X.hasCause(e, GridInternalException.class)) { - // Print exception for internal errors only if debug is enabled. 
- if (log.isDebugEnabled()) - U.error(log, "Failed to execute job [jobId=" + ses.getJobId() + ", ses=" + ses + ']', e); - } - else if (X.hasCause(e, InterruptedException.class)) { - String msg = "Job was cancelled [jobId=" + ses.getJobId() + ", ses=" + ses + ']'; - - if (log.isDebugEnabled()) - U.error(log, msg, e); - else - U.warn(log, msg); + if (X.hasCause(e, InterruptedException.class)) { + if (log.isDebugEnabled()) { + U.error(log, + "Job was cancelled [jobId=" + ses.getJobId() + ", ses=" + ses + ']', e); + } } else if (X.hasCause(e, GridServiceNotFoundException.class) || - X.hasCause(e, ClusterTopologyCheckedException.class)) - // Should be throttled, because GridServiceProxy continuously retry getting service. - LT.error(log, e, "Failed to execute job [jobId=" + ses.getJobId() + ", ses=" + ses + ']'); + X.hasCause(e, ClusterTopologyCheckedException.class)) { + if (log.isDebugEnabled()) { + // Should be throttled, because GridServiceProxy continuously retry getting service. + LT.error(log, e, "Failed to execute job [jobId=" + ses.getJobId() + + ", ses=" + ses + ']'); + } + } else { - U.error(log, "Failed to execute job [jobId=" + ses.getJobId() + ", ses=" + ses + ']', e); + String msg = "Failed to execute job [jobId=" + ses.getJobId() + ", ses=" + ses + ']'; + + if (X.hasCause(e, OutOfMemoryError.class)) { + U.error(log, msg, e); - if (X.hasCause(e, OutOfMemoryError.class)) ctx.failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); + } else if (log.isDebugEnabled()) + U.error(log, msg, e); } ex = e; @@ -722,7 +723,8 @@ else if (sysStopping && X.hasCause(e, InterruptedException.class, IgniteInterrup assert msg != null; assert ex != null; - U.error(log, msg, e); + if (log.isDebugEnabled()) + U.error(log, msg, e); return ex; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskWorker.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskWorker.java index 
ecb48e9b8e652..1b74977c161ed 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskWorker.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskWorker.java @@ -52,7 +52,6 @@ import org.apache.ignite.events.JobEvent; import org.apache.ignite.events.TaskEvent; import org.apache.ignite.internal.ComputeTaskInternalFuture; -import org.apache.ignite.internal.GridInternalException; import org.apache.ignite.internal.GridJobCancelRequest; import org.apache.ignite.internal.GridJobExecuteRequest; import org.apache.ignite.internal.GridJobExecuteResponse; @@ -79,7 +78,6 @@ import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.internal.util.worker.GridWorker; -import org.apache.ignite.internal.visor.util.VisorClusterGroupEmptyException; import org.apache.ignite.lang.IgniteInClosure; import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.marshaller.Marshaller; @@ -423,7 +421,8 @@ public boolean isInternal() { return; } - U.warn(log, "Task has timed out: " + ses); + if (log.isDebugEnabled()) + U.warn(log, "Task has timed out: " + ses); recordTaskEvent(EVT_TASK_TIMEDOUT, "Task has timed out."); @@ -541,13 +540,14 @@ private void initializeSpis() { processDelayedResponses(); } catch (ClusterGroupEmptyCheckedException e) { - U.warn(log, "Failed to map task jobs to nodes (topology projection is empty): " + ses); + if (log.isDebugEnabled()) + U.warn(log, "Failed to map task jobs to nodes (topology projection is empty): " + ses); finishTask(null, e); } catch (IgniteException | IgniteCheckedException e) { if (!fut.isCancelled()) { - if (!(e instanceof VisorClusterGroupEmptyException)) + if (log.isDebugEnabled()) U.error(log, "Failed to map task jobs to nodes: " + ses, e); finishTask(null, e); @@ -560,7 +560,8 @@ else if (log.isDebugEnabled()) String errMsg = "Failed to map task jobs to nodes due to undeclared user 
exception" + " [cause=" + e.getMessage() + ", ses=" + ses + "]"; - U.error(log, errMsg, e); + if (log.isDebugEnabled()) + U.error(log, errMsg, e); finishTask(null, new ComputeUserUndeclaredException(errMsg, e)); @@ -842,7 +843,8 @@ void onResponse(GridJobExecuteResponse msg) { ctx.resource().invokeAnnotated(dep, jobRes.getJob(), ComputeJobAfterSend.class); } catch (IgniteCheckedException e) { - U.error(log, "Error deserializing job response: " + res, e); + if (log.isDebugEnabled()) + U.error(log, "Error deserializing job response: " + res, e); finishTask(null, e); } @@ -975,7 +977,8 @@ else if (plc != null && !waitForAffTop && !retry) { } } catch (IgniteCheckedException e) { - U.error(log, "Failed to obtain topology [ses=" + ses + ", err=" + e + ']', e); + if (log.isDebugEnabled()) + U.error(log, "Failed to obtain topology [ses=" + ses + ", err=" + e + ']', e); finishTask(null, e); @@ -1033,7 +1036,8 @@ private void sendRetryRequest(final long waitms, final GridJobResultImpl jRes, f sendRequest(jRes); } catch (Exception e) { - U.error(log, "Failed to re-map job or retry request [ses=" + ses + "]", e); + if (log.isDebugEnabled()) + U.error(log, "Failed to re-map job or retry request [ses=" + ses + "]", e); finishTask(null, e); } @@ -1080,13 +1084,7 @@ private void sendRetryRequest(final long waitms, final GridJobResultImpl jRes, f return plc; } catch (IgniteException e) { - if (X.hasCause(e, GridInternalException.class)) { - // Print internal exceptions only if debug is enabled. - if (log.isDebugEnabled()) - U.error(log, "Failed to obtain remote job result policy for result from " + - "ComputeTask.result(..) method (will fail the whole task): " + jobRes, e); - } - else if (X.hasCause(e, ComputeJobFailoverException.class)) { + if (X.hasCause(e, ComputeJobFailoverException.class)) { IgniteCheckedException e0 = new IgniteCheckedException(" Job was not failed over because " + "ComputeJobResultPolicy.FAILOVER was not returned from " + "ComputeTask.result(...) 
method for job result with ComputeJobFailoverException.", e); @@ -1097,13 +1095,16 @@ else if (X.hasCause(e, ComputeJobFailoverException.class)) { } else if (X.hasCause(e, GridServiceNotFoundException.class) || X.hasCause(e, ClusterTopologyCheckedException.class)) { - // Should be throttled, because GridServiceProxy continuously retry getting service. - LT.error(log, e, "Failed to obtain remote job result policy for result from " + - "ComputeTask.result(..) method (will fail the whole task): " + jobRes); + if (log.isDebugEnabled()) { + // Should be throttled, because GridServiceProxy continuously retry getting service. + LT.error(log, e, "Failed to obtain remote job result policy for result from " + + "ComputeTask.result(..) method (will fail the whole task): " + jobRes); + } } - else + else if (log.isDebugEnabled()) { U.error(log, "Failed to obtain remote job result policy for result from " + "ComputeTask.result(..) method (will fail the whole task): " + jobRes, e); + } finishTask(null, e); @@ -1114,7 +1115,8 @@ else if (X.hasCause(e, GridServiceNotFoundException.class) || "ComputeTask.result(..) 
method due to undeclared user exception " + "(will fail the whole task): " + jobRes; - U.error(log, errMsg, e); + if (log.isDebugEnabled()) + U.error(log, errMsg, e); Throwable tmp = new ComputeUserUndeclaredException(errMsg, e); @@ -1163,12 +1165,14 @@ private void reduce(final List results) { recordTaskEvent(EVT_TASK_REDUCED, "Task reduced."); } catch (ClusterTopologyCheckedException e) { - U.warn(log, "Failed to reduce job results for task (any nodes from task topology left grid?): " + task); + if (log.isDebugEnabled()) + U.warn(log, "Failed to reduce job results for task (any nodes from task topology left grid?): " + task); userE = e; } catch (IgniteCheckedException e) { - U.error(log, "Failed to reduce job results for task: " + task, e); + if (log.isDebugEnabled()) + U.error(log, "Failed to reduce job results for task: " + task, e); userE = e; } @@ -1177,7 +1181,8 @@ private void reduce(final List results) { String errMsg = "Failed to reduce job results due to undeclared user exception [task=" + task + ", err=" + e + ']'; - U.error(log, errMsg, e); + if (log.isDebugEnabled()) + U.error(log, errMsg, e); userE = new ComputeUserUndeclaredException(errMsg, e); @@ -1215,7 +1220,8 @@ private boolean failover( String errMsg = "Failed to failover job due to undeclared user exception [job=" + jobRes.getJob() + ", err=" + e + ']'; - U.error(log, errMsg, e); + if (log.isDebugEnabled()) + U.error(log, errMsg, e); finishTask(null, new ComputeUserUndeclaredException(errMsg, e)); @@ -1356,9 +1362,11 @@ private void sendRequest(ComputeJobResult res) { // that we make this check because we cannot count on exception being // thrown in case of send failure. 
if (curNode == null) { - U.warn(log, "Failed to send job request because remote node left grid (if fail-over is enabled, " + - "will attempt fail-over to another node) [node=" + node + ", taskName=" + ses.getTaskName() + - ", taskSesId=" + ses.getId() + ", jobSesId=" + res.getJobContext().getJobId() + ']'); + if (log.isDebugEnabled()) { + U.warn(log, "Failed to send job request because remote node left grid (if fail-over is enabled, " + + "will attempt fail-over to another node) [node=" + node + ", taskName=" + ses.getTaskName() + + ", taskSesId=" + ses.getId() + ", jobSesId=" + res.getJobContext().getJobId() + ']'); + } ctx.resource().invokeAnnotated(dep, res.getJob(), ComputeJobAfterSend.class); @@ -1461,13 +1469,15 @@ private void sendRequest(ComputeJobResult res) { // Avoid stack trace if node has left grid. if (deadNode) { - U.warn(log, "Failed to send job request because remote node left grid (if failover is enabled, " + - "will attempt fail-over to another node) [node=" + node + ", taskName=" + ses.getTaskName() + - ", taskSesId=" + ses.getId() + ", jobSesId=" + res.getJobContext().getJobId() + ']'); + if (log.isDebugEnabled()) { + U.warn(log, "Failed to send job request because remote node left grid (if failover is enabled, " + + "will attempt fail-over to another node) [node=" + node + ", taskName=" + ses.getTaskName() + + ", taskSesId=" + ses.getId() + ", jobSesId=" + res.getJobContext().getJobId() + ']'); + } fakeErr = new ClusterTopologyException("Failed to send job due to node failure: " + node, e); } - else + else if (log.isDebugEnabled()) U.error(log, "Failed to send job request: " + req, e); } From 006c566d6743d00c63eb816930b9c41f22e1e7a6 Mon Sep 17 00:00:00 2001 From: korlov42 Date: Tue, 1 Dec 2020 12:26:58 +0300 Subject: [PATCH 073/110] IGNITE-13765 Incorrect work of predicates (< and >) in where clause with compound primary key (closes #8510) --- .../processors/query/h2/database/H2Tree.java | 34 +++++++++++++++++++ 
.../query/h2/database/H2TreeIndex.java | 3 +- .../cache/index/BasicIndexTest.java | 15 ++++++++ 3 files changed, 51 insertions(+), 1 deletion(-) diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java index 99a0add610c4c..4fb9a41a5051a 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java @@ -625,6 +625,40 @@ public int compareRows(H2Row r1, H2Row r2) { return mvccCompare(r1, r2); } + /** + * Checks both rows are the same.

+ * Primarly used to verify both search rows are the same and we can apply + * the single row lookup optimization. + * + * @param r1 The first row. + * @param r2 Another row. + * @return {@code true} in case both rows are efficiently the same, {@code false} otherwise. + */ + boolean checkRowsTheSame(H2Row r1, H2Row r2) { + if (r1 == r2) + return true; + + for (int i = 0, len = cols.length; i < len; i++) { + IndexColumn idxCol = cols[i]; + + int idx = idxCol.column.getColumnId(); + + Value v1 = r1.getValue(idx); + Value v2 = r2.getValue(idx); + + if (v1 == null && v2 == null) + continue; + + if (!(v1 != null && v2 != null)) + return false; + + if (compareValues(v1, v2) != 0) + return false; + } + + return true; + } + /** * @param io IO. * @param pageAddr Page address. diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java index 1b7ffe7046815..4701e8a054a50 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java @@ -26,6 +26,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; + import javax.cache.CacheException; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; @@ -423,7 +424,7 @@ public boolean rebuildRequired() { /** */ private boolean isSingleRowLookup(SearchRow lower, SearchRow upper, H2Tree tree) { return !cctx.mvccEnabled() && indexType.isPrimaryKey() && lower != null && upper != null && - tree.compareRows((H2Row)lower, (H2Row)upper) == 0 && hasAllIndexColumns(lower); + tree.checkRowsTheSame((H2Row)lower, (H2Row)upper) && hasAllIndexColumns(lower); } /** */ diff --git 
a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/BasicIndexTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/BasicIndexTest.java index 24a3759743406..9673af847a6e0 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/BasicIndexTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/BasicIndexTest.java @@ -1372,6 +1372,21 @@ public void testStopNodeOnSqlQueryWithIncompatibleType() throws Exception { assertFalse(grid().context().isStopping()); } + /** */ + @Test + public void testOpenRangePredicateOnCompoundPk() throws Exception { + inlineSize = 10; + + startGrid(); + + sql("create table test (id1 int, id2 int, val int, constraint pk primary key (id1, id2))"); + + for (int i = 1; i <= 5; i++) + sql("insert into test (id1, id2, val) values (?, ?, ?)", 0, i, i); + + assertEquals(5, sql("select * from test where id1 = 0 and id2 > 0").getAll().size()); + } + /** */ private void checkAll() { IgniteCache cache = grid(0).cache(DEFAULT_CACHE_NAME); From 2eaabd94ef5e06a6e3f788aa6aa5114b4eca439b Mon Sep 17 00:00:00 2001 From: "Andrew V. Mashenkov" Date: Tue, 1 Dec 2020 12:54:22 +0300 Subject: [PATCH 074/110] IGNITE-13756: Fix node crash if incorrect SQL query fails. (#8495) Test added. 
--- .../thin/JdbcThinComplexQuerySelfTest.java | 36 ++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java index 610d9008de37b..4b2242e57006b 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java @@ -21,6 +21,7 @@ import java.sql.Connection; import java.sql.DriverManager; import java.sql.ResultSet; +import java.sql.SQLException; import java.sql.Statement; import org.apache.ignite.IgniteCache; import org.apache.ignite.cache.affinity.AffinityKey; @@ -28,6 +29,7 @@ import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.testframework.GridTestUtils; import org.junit.Test; import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; @@ -255,6 +257,38 @@ public void testCalculatedValue() throws Exception { assert cnt == 3; } + /** + * @throws Exception If failed. + */ + @Test + public void testWrongArgumentType() throws Exception { + try (ResultSet rs = stmt.executeQuery("select * from \"org\".Organization where name = '2'")) { + assertFalse(rs.next()); + } + + // Check non-indexed field. + GridTestUtils.assertThrowsWithCause(() -> { + try (ResultSet rs = stmt.executeQuery("select * from \"org\".Organization where name = 2")) { + assertFalse(rs.next()); + } + + return null; + }, SQLException.class); + + // Check indexed field. 
+ try (ResultSet rs = stmt.executeQuery("select * from \"pers\".Person where name = '2'")) { + assertFalse(rs.next()); + } + + GridTestUtils.assertThrowsWithCause(() -> { + try (ResultSet rs = stmt.executeQuery("select * from \"pers\".Person where name = 2")) { + assertFalse(rs.next()); + } + + return null; + }, SQLException.class); + } + /** * Person. */ @@ -264,7 +298,7 @@ private static class Person implements Serializable { private final int id; /** Name. */ - @QuerySqlField(index = false) + @QuerySqlField(index = true) private final String name; /** Age. */ From f5dfac22bd79fd0f13cce9840fe20114ab718976 Mon Sep 17 00:00:00 2001 From: Slava Koptilin Date: Wed, 2 Dec 2020 01:32:03 +0300 Subject: [PATCH 075/110] IGNITE-13716 Fixed an issue where control utility did not hide sensitive information. Fixes #8471 --- .../internal/commandline/CommandHandler.java | 33 ++++++++++- .../internal/commandline/CommonArgParser.java | 15 +++++ ...GridCommandHandlerSslWithSecurityTest.java | 59 ++++++++++++++++++- 3 files changed, 104 insertions(+), 3 deletions(-) diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java index a612e1c15102e..14c8799ecda70 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java @@ -46,6 +46,7 @@ import org.apache.ignite.internal.client.ssl.GridSslBasicContextFactory; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.internal.util.typedef.internal.SB; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.logger.java.JavaLoggerFileHandler; import org.apache.ignite.logger.java.JavaLoggerFormatter; @@ -266,7 +267,7 @@ public int execute(List 
rawArgs) { } logger.info("Command [" + commandName + "] started"); - logger.info("Arguments: " + String.join(" ", rawArgs)); + logger.info("Arguments: " + argumentsToString(rawArgs)); logger.info(DELIM); lastOperationRes = command.execute(clientCfg, logger, args.verbose()); @@ -454,6 +455,36 @@ private boolean isConnectionClosedSilentlyException(Throwable e) { return false; } + /** + * Joins user's arguments and hides sensitive information. + * + * @param rawArgs Arguments which user has provided. + * @return String which could be shown in console and pritned to log. + */ + private String argumentsToString(List rawArgs) { + boolean hide = false; + + SB sb = new SB(); + + for (int i = 0; i < rawArgs.size(); i++) { + if (hide) { + sb.a("***** "); + + hide = false; + + continue; + } + + String arg = rawArgs.get(i); + + sb.a(arg).a(' '); + + hide = CommonArgParser.isSensitiveArgument(arg); + } + + return sb.toString(); + } + /** * Does one of three things: *

    diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommonArgParser.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommonArgParser.java index 52e95f44af5e1..e27179d7191f9 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommonArgParser.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommonArgParser.java @@ -103,6 +103,9 @@ public class CommonArgParser { /** List of optional auxiliary commands. */ private static final Set AUX_COMMANDS = new HashSet<>(); + /** Set of sensitive arguments */ + private static final Set SENSITIVE_ARGUMENTS = new HashSet<>(); + static { AUX_COMMANDS.add(CMD_HOST); AUX_COMMANDS.add(CMD_PORT); @@ -127,6 +130,18 @@ public class CommonArgParser { AUX_COMMANDS.add(CMD_TRUSTSTORE); AUX_COMMANDS.add(CMD_TRUSTSTORE_PASSWORD); AUX_COMMANDS.add(CMD_TRUSTSTORE_TYPE); + + SENSITIVE_ARGUMENTS.add(CMD_PASSWORD); + SENSITIVE_ARGUMENTS.add(CMD_KEYSTORE_PASSWORD); + SENSITIVE_ARGUMENTS.add(CMD_TRUSTSTORE_PASSWORD); + } + + /** + * @param arg To check. + * @return True if provided argument is among sensitive one and not should be displayed. 
+ */ + public static boolean isSensitiveArgument(String arg) { + return SENSITIVE_ARGUMENTS.contains(arg); } /** diff --git a/modules/control-utility/src/test/java/org/apache/ignite/internal/processors/security/GridCommandHandlerSslWithSecurityTest.java b/modules/control-utility/src/test/java/org/apache/ignite/internal/processors/security/GridCommandHandlerSslWithSecurityTest.java index 18a60ca2e2031..a34c619846a06 100644 --- a/modules/control-utility/src/test/java/org/apache/ignite/internal/processors/security/GridCommandHandlerSslWithSecurityTest.java +++ b/modules/control-utility/src/test/java/org/apache/ignite/internal/processors/security/GridCommandHandlerSslWithSecurityTest.java @@ -17,22 +17,29 @@ package org.apache.ignite.internal.processors.security; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.Handler; +import java.util.logging.Logger; import org.apache.ignite.configuration.ConnectorConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.commandline.CommandHandler; import org.apache.ignite.internal.commandline.NoopConsole; import org.apache.ignite.internal.processors.security.impl.TestSecurityPluginProvider; +import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.junit.Test; +import static org.apache.ignite.cluster.ClusterState.ACTIVE; import static org.apache.ignite.internal.commandline.CommandHandler.EXIT_CODE_OK; import static org.apache.ignite.internal.commandline.CommandList.DEACTIVATE; import static org.apache.ignite.plugin.security.SecurityPermissionSetBuilder.ALLOW_ALL; +import static org.apache.ignite.testframework.GridTestUtils.assertContains; import static 
org.apache.ignite.testframework.GridTestUtils.keyStorePassword; import static org.apache.ignite.testframework.GridTestUtils.keyStorePath; import static org.apache.ignite.testframework.GridTestUtils.sslTrustedFactory; @@ -47,13 +54,51 @@ public class GridCommandHandlerSslWithSecurityTest extends GridCommonAbstractTes /** Password. */ private final String pwd = "testPwd"; + /** System out. */ + protected static PrintStream sysOut; + + /** + * Test out - can be injected via {@link #injectTestSystemOut()} instead of System.out and analyzed in test. + * Will be as well passed as a handler output for an anonymous logger in the test. + */ + protected static ByteArrayOutputStream testOut; + + /** {@inheritDoc} */ + @Override protected void beforeTestsStarted() throws Exception { + super.beforeTestsStarted(); + + testOut = new ByteArrayOutputStream(16 * 1024); + + sysOut = System.out; + } + /** {@inheritDoc} */ @Override protected void afterTest() throws Exception { super.afterTest(); + System.setOut(sysOut); + + testOut.reset(); + stopAllGrids(); } + /** + * Sets test output stream. + */ + protected void injectTestSystemOut() { + System.setOut(new PrintStream(testOut)); + } + + /** + * Flushes all Logger handlers to make log data available to test. + * @param hnd Command handler. 
+ */ + private void flushCommandOutput(CommandHandler hnd) { + Logger log = U.field(hnd, "logger"); + Arrays.stream(log.getHandlers()).forEach(Handler::flush); + } + /** {@inheritDoc} */ @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { return super.getConfiguration(igniteInstanceName) @@ -77,7 +122,7 @@ public class GridCommandHandlerSslWithSecurityTest extends GridCommonAbstractTes public void testInputKeyTrustStorePwdOnlyOnce() throws Exception { IgniteEx crd = startGrid(); - crd.cluster().active(true); + crd.cluster().state(ACTIVE); CommandHandler cmd = new CommandHandler(); @@ -129,7 +174,9 @@ else if (fmt.contains("truststore")) { public void testConnector() throws Exception { IgniteEx crd = startGrid(); - crd.cluster().active(true); + crd.cluster().state(ACTIVE); + + injectTestSystemOut(); CommandHandler hnd = new CommandHandler(); @@ -143,5 +190,13 @@ public void testConnector() throws Exception { "--truststore-password", keyStorePassword())); assertEquals(EXIT_CODE_OK, exitCode); + + flushCommandOutput(hnd); + + // Make sure all sensitive information is masked. + String testOutput = testOut.toString(); + assertContains(log, testOutput, "--password *****"); + assertContains(log, testOutput, "--keystore-password *****"); + assertContains(log, testOutput, "--truststore-password *****"); } } From 236bbb22bc3339d3d4b59b3bc3a56c2a0dcfdd98 Mon Sep 17 00:00:00 2001 From: Konstantin Sirotkin Date: Wed, 2 Dec 2020 11:54:56 +0300 Subject: [PATCH 076/110] IGNITE-13572 Don't skip filtering for caches with zero backups - Fixes #8439. 
Signed-off-by: Ilya Kasnacheev --- .../spi/indexing/IndexingQueryFilterImpl.java | 4 - ...namicEnableIndexingConcurrentSelfTest.java | 9 +- .../cache/index/SqlPartitionEvictionTest.java | 215 ++++++++++++++++++ .../IgniteBinaryCacheQueryTestSuite.java | 4 + 4 files changed, 222 insertions(+), 10 deletions(-) create mode 100644 modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/SqlPartitionEvictionTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryFilterImpl.java b/modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryFilterImpl.java index 07366328e3f3b..c1e03cd47d224 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryFilterImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryFilterImpl.java @@ -68,10 +68,6 @@ public IndexingQueryFilterImpl(GridKernalContext ctx, @Nullable AffinityTopology if (cache.context().isReplicated()) return null; - // No backups and explicit partitions -> nothing to filter. 
- if (cache.configuration().getBackups() == 0 && parts == null) - return null; - return new IndexingQueryCacheFilter(cache.context().affinity(), parts, topVer, ctx.discovery().localNode()); } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicEnableIndexingConcurrentSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicEnableIndexingConcurrentSelfTest.java index e28aed9e6bed4..bdaa1ba4d10f7 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicEnableIndexingConcurrentSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicEnableIndexingConcurrentSelfTest.java @@ -226,8 +226,7 @@ public void testNodeJoinOnPendingOperation() throws Exception { ignitionStart(serverConfiguration(2), finishLatch); ignitionStart(serverConfiguration(3), finishLatch); - // TODO: https://issues.apache.org/jira/browse/IGNITE-13572 - awaitPartitionMapExchange(true, true, null); + awaitPartitionMapExchange(); assertFalse(tblFut.isDone()); @@ -332,8 +331,7 @@ public void testConcurrentRebalance() throws Exception { ignitionStart(serverConfiguration(4)); - // TODO: https://issues.apache.org/jira/browse/IGNITE-13572 - awaitPartitionMapExchange(true, true, null); + awaitPartitionMapExchange(); tblFut.get(); @@ -495,8 +493,7 @@ public void testConcurrentEnableIndexing() throws Exception { // Check that only one successful attempt. 
assertEquals(1, success.get()); - // TODO: https://issues.apache.org/jira/browse/IGNITE-13572 - awaitPartitionMapExchange(true, true, null); + awaitPartitionMapExchange(); for (Ignite g: G.allGrids()) { assertEquals(LARGE_NUM_ENTRIES, query(g, SELECT_ALL_QUERY).size()); diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/SqlPartitionEvictionTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/SqlPartitionEvictionTest.java new file mode 100644 index 0000000000000..baeade3d38650 --- /dev/null +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/SqlPartitionEvictionTest.java @@ -0,0 +1,215 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.index; + +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Random; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ThreadLocalRandom; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteDataStreamer; +import org.apache.ignite.binary.BinaryObject; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.CacheWriteSynchronizationMode; +import org.apache.ignite.cache.QueryEntity; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; +import org.apache.ignite.internal.processors.query.GridQueryProcessor; +import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor; +import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing; +import org.apache.ignite.internal.util.typedef.G; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import static java.util.concurrent.TimeUnit.SECONDS; + +/** */ +@RunWith(Parameterized.class) +public class SqlPartitionEvictionTest extends GridCommonAbstractTest { + /** */ + private static final String POI_CACHE_NAME = "POI_CACHE"; + + /** */ + private static final String POI_SCHEMA_NAME = "DOMAIN"; + + /** */ + private static final String POI_TABLE_NAME = "POI"; + + /** */ + private static 
final String POI_CLASS_NAME = "PointOfInterest"; + + /** */ + private static final String ID_FIELD_NAME = "id"; + + /** */ + private static final String NAME_FIELD_NAME = "name"; + + /** */ + private static final String LATITUDE_FIELD_NAME = "latitude"; + + /** */ + private static final String LONGITUDE_FIELD_NAME = "longitude"; + + /** */ + private static final int NUM_ENTITIES = 1_000; + + /** Test parameters. */ + @Parameterized.Parameters(name = "backups_count={0}") + public static Iterable params() { + return Arrays.asList( + new Object[] { 0 }, + new Object[] { 1 }, + new Object[] { 2 } + ); + } + + /** + * Number of partition backups. + */ + @Parameterized.Parameter + public int backupsCount; + + /** + * For awaiting of eviction start. + */ + private static final CountDownLatch LATCH = new CountDownLatch(1); + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + cfg.setCacheConfiguration(new CacheConfiguration<>(POI_CACHE_NAME) + .setAtomicityMode(CacheAtomicityMode.ATOMIC) + .setSqlSchema("DOMAIN") + .setQueryEntities(Collections.singletonList(queryEntity())) + .setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC) + .setCacheMode(CacheMode.PARTITIONED) + .setBackups(backupsCount) + ); + + cfg.setActiveOnStart(true); + + return cfg; + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(true); + + super.afterTest(); + } + + /** + * Tests sql query result after eviction partitions. 
+ */ + @Test + public void testSqlConsistencyOnEviction() throws Exception { + IgniteEx ig = null; + + int idx = 0; + while (idx <= backupsCount) + ig = ignitionStart(idx++); + + loadData(ig, 0, NUM_ENTITIES); + + ignitionStart(idx); + + awaitPartitionMapExchange(); + + U.await(LATCH, 10, SECONDS); + + for (Ignite g: G.allGrids()) + assertEquals(NUM_ENTITIES, query(g, "SELECT * FROM " + POI_TABLE_NAME).size()); + } + + /** */ + private void loadData(IgniteEx node, int start, int end) { + try (IgniteDataStreamer streamer = node.dataStreamer(POI_CACHE_NAME)) { + Random rnd = ThreadLocalRandom.current(); + + for (int i = start; i < end; i++) { + BinaryObject bo = node.binary().builder(POI_CLASS_NAME) + .setField(NAME_FIELD_NAME, "POI_" + i, String.class) + .setField(LATITUDE_FIELD_NAME, rnd.nextDouble(), Double.class) + .setField(LONGITUDE_FIELD_NAME, rnd.nextDouble(), Double.class) + .build(); + + streamer.addData(i, bo); + } + } + } + + /** */ + protected List> query(Ignite ig, String sql) { + IgniteCache cache = ig.cache(POI_CACHE_NAME).withKeepBinary(); + + return cache.query(new SqlFieldsQuery(sql).setSchema(POI_SCHEMA_NAME)).getAll(); + } + + /** */ + private QueryEntity queryEntity() { + LinkedHashMap fields = new LinkedHashMap<>(); + fields.put(ID_FIELD_NAME, Integer.class.getName()); + fields.put(NAME_FIELD_NAME, String.class.getName()); + fields.put(LATITUDE_FIELD_NAME, Double.class.getName()); + fields.put(LONGITUDE_FIELD_NAME, Double.class.getName()); + + return new QueryEntity() + .setKeyType(Integer.class.getName()) + .setKeyFieldName(ID_FIELD_NAME) + .setValueType(POI_CLASS_NAME) + .setTableName(POI_TABLE_NAME) + .setFields(fields); + } + + /** */ + private IgniteEx ignitionStart(int idx) throws Exception { + GridQueryProcessor.idxCls = BlockingIndexing.class; + + IgniteConfiguration cfg = getConfiguration(getTestIgniteInstanceName(idx)); + + return startGrid(cfg); + } + + /** + * Blocking indexing processor. 
+ */ + private static class BlockingIndexing extends IgniteH2Indexing { + @Override public void remove(GridCacheContext cctx, GridQueryTypeDescriptor type, + CacheDataRow row) throws IgniteCheckedException { + U.sleep(50); + + LATCH.countDown(); + + super.remove(cctx, type, row); + } + } +} diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite.java index 457d2cd42f7de..ab6d67ca5c11f 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite.java +++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite.java @@ -168,6 +168,7 @@ import org.apache.ignite.internal.processors.cache.index.OptimizedMarshallerIndexNameTest; import org.apache.ignite.internal.processors.cache.index.QueryEntityValidationSelfTest; import org.apache.ignite.internal.processors.cache.index.SchemaExchangeSelfTest; +import org.apache.ignite.internal.processors.cache.index.SqlPartitionEvictionTest; import org.apache.ignite.internal.processors.cache.index.SqlTransactionCommandsWithMvccDisabledSelfTest; import org.apache.ignite.internal.processors.cache.index.StopNodeOnRebuildIndexFailureTest; import org.apache.ignite.internal.processors.cache.local.IgniteCacheLocalAtomicQuerySelfTest; @@ -579,6 +580,9 @@ // Partition loss. IndexingCachePartitionLossPolicySelfTest.class, + // Partitions eviction + SqlPartitionEvictionTest.class, + // GROUP_CONCAT IgniteSqlGroupConcatCollocatedTest.class, IgniteSqlGroupConcatNotCollocatedTest.class, From edb736dcd8d1d57c875ce7de2b2b2b786d1f8d51 Mon Sep 17 00:00:00 2001 From: Vladsz83 Date: Wed, 2 Dec 2020 12:13:45 +0300 Subject: [PATCH 077/110] IGNITE-13705 : Another node fails with failure of target node. 
(#8484) --- .../ignite/spi/discovery/tcp/ServerImpl.java | 70 +++++++++++-------- .../tcp/TcpDiscoveryNetworkIssuesTest.java | 45 +++++++++--- 2 files changed, 76 insertions(+), 39 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java index 006f1c754e36e..75b55b95bd474 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java @@ -216,12 +216,12 @@ class ServerImpl extends TcpDiscoveryImpl { /** Maximal interval of connection check to next node in the ring. */ private static final long MAX_CON_CHECK_INTERVAL = 500; - /** Minimal timeout to find connection to some next node in the ring while connection recovering. */ - private static final long MIN_RECOVERY_TIMEOUT = 100; - /** Interval of checking connection to next node in the ring. */ private long connCheckInterval; + /** Fundamental value for connection checking actions. */ + private long connCheckTick; + /** */ private IgniteThreadPoolExecutor utilityPool; @@ -394,9 +394,12 @@ class ServerImpl extends TcpDiscoveryImpl { lastRingMsgSentTime = 0; + // Foundumental timeout value for actions related to connection check. + connCheckTick = effectiveExchangeTimeout() / 3; + // Since we take in account time of last sent message, the interval should be quite short to give enough piece // of failure detection timeout as send-and-acknowledge timeout of the message to send. 
- connCheckInterval = Math.min(effectiveExchangeTimeout() / 4, MAX_CON_CHECK_INTERVAL); + connCheckInterval = Math.min(connCheckTick, MAX_CON_CHECK_INTERVAL); utilityPool = new IgniteThreadPoolExecutor("disco-pool", spi.ignite().name(), @@ -3509,12 +3512,19 @@ else if (log.isTraceEnabled()) if (changeTop) hndMsg.changeTopology(ring.previousNodeOf(next).id()); - if (log.isDebugEnabled()) - log.debug("Sending handshake [hndMsg=" + hndMsg + ", sndState=" + sndState + ']'); + if (log.isDebugEnabled()) { + log.debug("Sending handshake [hndMsg=" + hndMsg + ", sndState=" + sndState + + "] with timeout " + timeoutHelper.nextTimeoutChunk(spi.getSocketTimeout())); + } spi.writeToSocket(sock, out, hndMsg, timeoutHelper.nextTimeoutChunk(spi.getSocketTimeout())); + if (log.isDebugEnabled()) { + log.debug("Reading handshake response with timeout " + + timeoutHelper.nextTimeoutChunk(ackTimeout0)); + } + TcpDiscoveryHandshakeResponse res = spi.readMessage(sock, null, timeoutHelper.nextTimeoutChunk(ackTimeout0)); @@ -6526,23 +6536,11 @@ private IgniteSpiOperationTimeoutHelper serverOperationTimeoutHelper(@Nullable C long lastOperationNanos) { long absoluteThreshold = -1; - // Active send-state means we lost connection to next node and have to find another. - // We don't know how many nodes failed. May be several failed in a row. But we got only one - // connectionRecoveryTimeout to establish new connection to the ring. We can't spend this timeout wholly on one - // or two next nodes. We should slice it and try to travers as many as we can. - if (sndState != null) { - int nodesLeft = ring.serverNodes().size() - 1 - sndState.failedNodes; - - assert nodesLeft > 0; - - long now = System.nanoTime(); - - // In case of large cluster and small connectionRecoveryTimeout we have to provide reasonable minimal - // timeout per one of the next nodes. It should not appear too small like 1, 5 or 10ms. 
- long perNodeTimeout = Math.max((sndState.failTimeNanos - now) / nodesLeft, MIN_RECOVERY_TIMEOUT); - - absoluteThreshold = Math.min(sndState.failTimeNanos, now + perNodeTimeout); - } + // Active send-state means we lost connection to next node and have to find another. We don't know how many + // nodes failed. May be several failed in a row. But we got only one connectionRecoveryTimeout to establish new + // connection. We should travers rest of the cluster with sliced timeout for each node. + if (sndState != null) + absoluteThreshold = Math.min(sndState.failTimeNanos, System.nanoTime() + U.millisToNanos(connCheckTick)); return new IgniteSpiOperationTimeoutHelper(spi, true, lastOperationNanos, absoluteThreshold); } @@ -6904,13 +6902,22 @@ else if (req.changeTopology()) { (req.checkPreviousNodeId() == null || previous.id().equals(req.checkPreviousNodeId()))) { Collection nodeAddrs = spi.getNodeAddresses(previous, false); - liveAddr = checkConnection(new ArrayList<>(nodeAddrs), - (int)U.nanosToMillis(timeThreshold - now)); + // The connection recovery connection to one node is connCheckTick. + // We need to suppose network delays. So we use half of this time. + int backwardCheckTimeout = (int)(connCheckTick / 2); + + if (log.isDebugEnabled()) { + log.debug("Remote node requests topology change. 
Checking connection to " + + "previous [" + previous + "] with timeout " + backwardCheckTimeout); + } + + liveAddr = checkConnection(new ArrayList<>(nodeAddrs), backwardCheckTimeout); - if (log.isInfoEnabled()) - log.info("Connection check done [liveAddr=" + liveAddr - + ", previousNode=" + previous + ", addressesToCheck=" + nodeAddrs - + ", connectingNodeId=" + nodeId + ']'); + if (log.isInfoEnabled()) { + log.info("Connection check to previous node done: [liveAddr=" + liveAddr + + ", previousNode=" + U.toShortString(previous) + ", addressesToCheck=" + + nodeAddrs + ", connectingNodeId=" + nodeId + ']'); + } } // If local node was able to connect to previous, confirm that it's alive. @@ -6929,6 +6936,11 @@ else if (req.changeTopology()) { } } + if (log.isDebugEnabled()) { + log.debug("Sending handshake response [" + res + "] with timeout " + + spi.getEffectiveSocketTimeout(srvSock) + " to " + rmtAddr + ":" + sock.getPort()); + } + spi.writeToSocket(sock, res, spi.getEffectiveSocketTimeout(srvSock)); // It can happen if a remote node is stopped and it has a loopback address in the list of addresses, diff --git a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryNetworkIssuesTest.java b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryNetworkIssuesTest.java index a751ac402325e..0db781a4f9eff 100644 --- a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryNetworkIssuesTest.java +++ b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryNetworkIssuesTest.java @@ -21,7 +21,9 @@ import java.net.InetSocketAddress; import java.net.Socket; import java.net.SocketTimeoutException; +import java.util.HashSet; import java.util.Map; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -211,16 +213,31 @@ public void testServerGetsSegmentedOnBecomeDangling() throws Exception { * 
Ensures sequential failure of two nodes has no additional issues. */ @Test - public void testFailTwoNodes() throws Exception { + public void testSequentialFailTwoNodes() throws Exception { + simulateFailureOfTwoNodes(true); + } + + /** + * Ensures sequential failure of two nodes has no additional issues. + */ + @Test + public void testNotSequentialFailTwoNodes() throws Exception { + simulateFailureOfTwoNodes(false); + } + + /** */ + private void simulateFailureOfTwoNodes(boolean sequentionally) throws Exception { failureDetectionTimeout = 1000; - startGrids(5); + int gridCnt = 7; + + startGrids(gridCnt); awaitPartitionMapExchange(); final CountDownLatch failLatch = new CountDownLatch(2); - for (int i = 0; i < 5; i++) { + for (int i = 0; i < gridCnt; i++) { ignite(i).events().localListen(evt -> { failLatch.countDown(); @@ -236,20 +253,28 @@ public void testFailTwoNodes() throws Exception { }, EVT_NODE_SEGMENTED); } - processNetworkThreads(ignite(2), t -> t.suspend()); - processNetworkThreads(ignite(3), t -> t.suspend()); + Set failedNodes = new HashSet<>(); + + failedNodes.add(2); + + if (sequentionally) + failedNodes.add(3); + else + failedNodes.add(4); + + failedNodes.forEach(idx -> processNetworkThreads(ignite(idx), Thread::suspend)); try { failLatch.await(10, TimeUnit.SECONDS); } finally { - processNetworkThreads(ignite(2), t -> t.resume()); - processNetworkThreads(ignite(3), t -> t.resume()); + failedNodes.forEach(idx -> processNetworkThreads(ignite(idx), Thread::resume)); } - assertFalse(segmentedNodes.contains(0)); - assertFalse(segmentedNodes.contains(1)); - assertFalse(segmentedNodes.contains(4)); + for (int i = 0; i < gridCnt; i++) { + if (!failedNodes.contains(i)) + assertFalse(segmentedNodes.contains(i)); + } } /** From 6f9052d6a117a2c851081946d5a9a9095c71d7cb Mon Sep 17 00:00:00 2001 From: Alexey Zinoviev Date: Wed, 2 Dec 2020 13:34:24 +0300 Subject: [PATCH 078/110] IGNITE-13672 [ML]: Add initial JSON export/import support for all models (#8521) * 
[IGNITE-13672] Initial solution * [IGNITE-13672] Added an example * [IGNITE-13672] Added a draft solution * [IGNITE-13672] Updated JSON model * [IGNITE-13672] Updated JSON model * [IGNITE-13672] Removed GMM support * [IGNITE-13672] Fixed blank lines * [IGNITE-13672] Fixed licenses * [IGNITE-13672] Fixed whitespaces * [IGNITE-13672] Fixed whitespaces * [IGNITE-13672] Fixed whitespaces * [IGNITE-13672] Fixed examples * [IGNITE-13672] Fixed examples * [IGNITE-13672] Fixed test --- .../binary-classification/decision-trees.adoc | 6 +- .../model-import-from-apache-spark.adoc | 2 +- .../model-selection/cross-validation.adoc | 4 +- .../model-selection/pipeline-api.adoc | 4 +- .../regression/decision-trees-regression.adoc | 6 +- .../KMeansClusterizationExample.java | 4 +- .../ANNClassificationExportImportExample.java | 339 ++++++++++++++++++ ...CompoundNaiveBayesExportImportExample.java | 129 +++++++ ...TreeClassificationExportImportExample.java | 151 ++++++++ ...sionTreeRegressionExportImportExample.java | 126 +++++++ ...DiscreteNaiveBayesExportImportExample.java | 117 ++++++ ...reesClassificationExportImportExample.java | 147 ++++++++ ...BOnTreesRegressionExportImportExample.java | 145 ++++++++ ...GaussianNaiveBayesExportImportExample.java | 117 ++++++ ...eansClusterizationExportImportExample.java | 99 +++++ .../LinearRegressionExportImportExample.java | 116 ++++++ ...LogisticRegressionExportImportExample.java | 122 +++++++ ...restClassificationExportImportExample.java | 144 ++++++++ ...omForestRegressionExportImportExample.java | 151 ++++++++ .../exchange/SVMExportImportExample.java | 113 ++++++ .../DecisionTreeFromSparkExample.java | 4 +- ...ecisionTreeRegressionFromSparkExample.java | 4 +- .../encoding/EncoderExample.java | 4 +- .../EncoderExampleWithNormalization.java | 4 +- .../encoding/LabelEncoderExample.java | 4 +- .../BostonHousePricesPredictionExample.java | 4 +- .../selection/cv/CrossValidationExample.java | 4 +- ...ssificationTrainerSQLInferenceExample.java | 
4 +- ...eClassificationTrainerSQLTableExample.java | 4 +- ...isionTreeClassificationTrainerExample.java | 4 +- .../DecisionTreeRegressionTrainerExample.java | 4 +- ...DBOnTreesClassificationTrainerExample.java | 8 +- .../GDBOnTreesRegressionTrainerExample.java | 10 +- .../ml/tutorial/Step_11_Boosting.java | 8 +- .../ml/tutorial/Step_1_Read_and_Learn.java | 4 +- .../examples/ml/tutorial/Step_2_Imputing.java | 4 +- .../ml/tutorial/Step_3_Categorial.java | 4 +- ...tep_3_Categorial_with_One_Hot_Encoder.java | 4 +- .../ml/tutorial/Step_4_Add_age_fare.java | 4 +- .../examples/ml/tutorial/Step_5_Scaling.java | 4 +- .../ml/tutorial/Step_7_Split_train_test.java | 4 +- .../examples/ml/tutorial/Step_8_CV.java | 6 +- .../tutorial/Step_8_CV_with_Param_Grid.java | 6 +- ...tep_8_CV_with_Param_Grid_and_pipeline.java | 4 +- .../Step_13_RandomSearch.java | 6 +- .../Step_14_Parallel_Brute_Force_Search.java | 6 +- .../Step_15_Parallel_Random_Search.java | 6 +- .../Step_16_Genetic_Programming_Search.java | 6 +- ...7_Parallel_Genetic_Programming_Search.java | 6 +- modules/ml/pom.xml | 25 ++ .../ml/sparkmodelparser/SparkModelParser.java | 81 +---- .../ignite/ml/clustering/gmm/GmmModel.java | 6 + .../kmeans/ClusterizationModel.java | 4 +- .../ml/clustering/kmeans/KMeansModel.java | 125 ++++++- .../ml/clustering/kmeans/KMeansTrainer.java | 4 +- .../ml/composition/ModelsComposition.java | 16 +- .../composition/ModelsCompositionFormat.java | 6 +- .../boosting/GDBLearningStrategy.java | 4 +- .../ml/composition/boosting/GDBModel.java | 118 ++++++ .../ml/composition/boosting/GDBTrainer.java | 43 +-- .../PredictionsAggregator.java | 9 + .../WeightedPredictionsAggregator.java | 7 +- .../ignite/ml/inference/json/JSONModel.java | 55 +++ .../ml/inference/json/JSONModelMixIn.java | 31 ++ .../ml/inference/json/JSONWritable.java | 37 ++ .../ml/inference/json/JacksonHelper.java | 39 ++ .../ignite/ml/knn/NNClassificationModel.java | 11 + .../ml/knn/ann/ANNClassificationModel.java | 130 ++++++- 
.../ml/knn/ann/ANNClassificationTrainer.java | 14 +- .../ignite/ml/knn/ann/ProbableLabel.java | 5 +- .../ml/math/distances/BrayCurtisDistance.java | 4 + .../ml/math/distances/DistanceMeasure.java | 17 + .../ml/math/distances/MinkowskiDistance.java | 16 +- .../distances/WeightedMinkowskiDistance.java | 35 +- .../ml/math/stat/DistributionMixture.java | 9 +- .../compound/CompoundNaiveBayesModel.java | 73 +++- .../discrete/DiscreteNaiveBayesModel.java | 83 ++++- .../DiscreteNaiveBayesSumsHolder.java | 11 + .../gaussian/GaussianNaiveBayesModel.java | 75 +++- .../GaussianNaiveBayesSumsHolder.java | 15 + .../linear/LinearRegressionLSQRTrainer.java | 8 +- .../linear/LinearRegressionModel.java | 114 +++++- .../linear/LinearRegressionSGDTrainer.java | 4 +- .../logistic/LogisticRegressionModel.java | 112 +++++- .../ignite/ml/structures/DatasetRow.java | 4 + .../ignite/ml/structures/LabeledVector.java | 4 + .../ml/svm/SVMLinearClassificationModel.java | 112 +++++- .../svm/SVMLinearClassificationTrainer.java | 2 +- .../DecisionTreeClassificationTrainer.java | 2 +- .../ml/tree/DecisionTreeConditionalNode.java | 16 +- .../ignite/ml/tree/DecisionTreeLeafNode.java | 10 +- .../ignite/ml/tree/DecisionTreeModel.java | 111 ++++++ .../ignite/ml/tree/DecisionTreeNode.java | 15 +- .../tree/DecisionTreeRegressionTrainer.java | 2 +- ...sionTree.java => DecisionTreeTrainer.java} | 20 +- .../org/apache/ignite/ml/tree/NodeData.java | 90 +++++ .../boosting/GDBOnTreesLearningStrategy.java | 10 +- .../RandomForestClassifierTrainer.java | 7 +- .../tree/randomforest/RandomForestModel.java | 106 ++++++ .../RandomForestRegressionTrainer.java | 7 +- .../randomforest/RandomForestTrainer.java | 37 +- .../ml/tree/randomforest/data/NodeId.java | 11 +- .../ml/tree/randomforest/data/NodeSplit.java | 9 +- ...eeRoot.java => RandomForestTreeModel.java} | 25 +- .../ml/tree/randomforest/data/TreeNode.java | 9 +- .../impurity/ImpurityHistogramsComputer.java | 8 +- .../data/statistics/LeafValuesComputer.java | 8 
+- .../ignite/ml/clustering/KMeansModelTest.java | 4 +- .../ignite/ml/common/KeepBinaryTest.java | 2 +- .../composition/boosting/GDBTrainerTest.java | 6 +- .../ml/math/distances/DistanceTest.java | 6 +- .../WeightedMinkowskiDistanceTest.java | 10 +- .../LinearRegressionLSQRTrainerTest.java | 16 +- .../LinearRegressionSGDTrainerTest.java | 16 +- .../ml/selection/cv/CrossValidationTest.java | 8 +- ...eClassificationTrainerIntegrationTest.java | 7 +- ...DecisionTreeClassificationTrainerTest.java | 6 +- ...nTreeRegressionTrainerIntegrationTest.java | 8 +- .../DecisionTreeRegressionTrainerTest.java | 6 +- .../RandomForestClassifierTrainerTest.java | 13 +- .../RandomForestIntegrationTest.java | 3 +- .../RandomForestRegressionTrainerTest.java | 9 +- .../tree/randomforest/data/TreeNodeTest.java | 14 +- 123 files changed, 3858 insertions(+), 433 deletions(-) create mode 100644 examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/ANNClassificationExportImportExample.java create mode 100644 examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/CompoundNaiveBayesExportImportExample.java create mode 100644 examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/DecisionTreeClassificationExportImportExample.java create mode 100644 examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/DecisionTreeRegressionExportImportExample.java create mode 100644 examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/DiscreteNaiveBayesExportImportExample.java create mode 100644 examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/GDBOnTreesClassificationExportImportExample.java create mode 100644 examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/GDBOnTreesRegressionExportImportExample.java create mode 100644 examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/GaussianNaiveBayesExportImportExample.java create mode 100644 
examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/KMeansClusterizationExportImportExample.java create mode 100644 examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/LinearRegressionExportImportExample.java create mode 100644 examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/LogisticRegressionExportImportExample.java create mode 100644 examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/RandomForestClassificationExportImportExample.java create mode 100644 examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/RandomForestRegressionExportImportExample.java create mode 100644 examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/SVMExportImportExample.java create mode 100644 modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBModel.java create mode 100644 modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JSONModel.java create mode 100644 modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JSONModelMixIn.java create mode 100644 modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JSONWritable.java create mode 100644 modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JacksonHelper.java create mode 100644 modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeModel.java rename modules/ml/src/main/java/org/apache/ignite/ml/tree/{DecisionTree.java => DecisionTreeTrainer.java} (92%) create mode 100644 modules/ml/src/main/java/org/apache/ignite/ml/tree/NodeData.java create mode 100644 modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestModel.java rename modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/{TreeRoot.java => RandomForestTreeModel.java} (88%) diff --git a/docs/_docs/machine-learning/binary-classification/decision-trees.adoc b/docs/_docs/machine-learning/binary-classification/decision-trees.adoc index 57ab7bf21b1d1..bc9ff058402ce 100644 --- 
a/docs/_docs/machine-learning/binary-classification/decision-trees.adoc +++ b/docs/_docs/machine-learning/binary-classification/decision-trees.adoc @@ -39,12 +39,12 @@ The model works this way - the split process stops when either the algorithm has == Model -The Model in a decision tree classification is represented by the class `DecisionTreeNode`. We can make a prediction for a given vector of features in the following way: +The Model in a decision tree classification is represented by the class `DecisionTreeModel`. We can make a prediction for a given vector of features in the following way: [source, java] ---- -DecisionTreeNode mdl = ...; +DecisionTreeModel mdl = ...; double prediction = mdl.apply(observation); ---- @@ -68,7 +68,7 @@ DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTraine ); // Train model. -DecisionTreeNode mdl = trainer.fit(ignite, dataCache, vectorizer); +DecisionTreeModel mdl = trainer.fit(ignite, dataCache, vectorizer); ---- diff --git a/docs/_docs/machine-learning/importing-model/model-import-from-apache-spark.adoc b/docs/_docs/machine-learning/importing-model/model-import-from-apache-spark.adoc index 92992f87200ca..065cb78d35bcb 100644 --- a/docs/_docs/machine-learning/importing-model/model-import-from-apache-spark.adoc +++ b/docs/_docs/machine-learning/importing-model/model-import-from-apache-spark.adoc @@ -71,7 +71,7 @@ To load in Ignite ML you should use SparkModelParser class via method parse() ca [source, java] ---- -DecisionTreeNode mdl = (DecisionTreeNode)SparkModelParser.parse( +DecisionTreeModel mdl = (DecisionTreeModel)SparkModelParser.parse( SPARK_MDL_PATH, SupportedSparkModels.DECISION_TREE ); diff --git a/docs/_docs/machine-learning/model-selection/cross-validation.adoc b/docs/_docs/machine-learning/model-selection/cross-validation.adoc index 8e64c68e67f4e..39e00f1a5c6f3 100644 --- a/docs/_docs/machine-learning/model-selection/cross-validation.adoc +++ 
b/docs/_docs/machine-learning/model-selection/cross-validation.adoc @@ -27,7 +27,7 @@ Let’s imagine that we have a trainer, a training set and we want to make cross DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(4, 0); // Create cross-validation instance -CrossValidation scoreCalculator +CrossValidation scoreCalculator = new CrossValidation<>(); // Set up the cross-validation process @@ -67,7 +67,7 @@ Pipeline pipeline // Create cross-validation instance -CrossValidation scoreCalculator +CrossValidation scoreCalculator = new CrossValidation<>(); // Set up the cross-validation process diff --git a/docs/_docs/machine-learning/model-selection/pipeline-api.adoc b/docs/_docs/machine-learning/model-selection/pipeline-api.adoc index 7f0cb93e3bcba..9b2798c25865d 100644 --- a/docs/_docs/machine-learning/model-selection/pipeline-api.adoc +++ b/docs/_docs/machine-learning/model-selection/pipeline-api.adoc @@ -64,7 +64,7 @@ Preprocessor normalizationPreprocessor = new NormalizationTrain DecisionTreeClassificationTrainer trainerCV = new DecisionTreeClassificationTrainer(); -CrossValidation scoreCalculator = new CrossValidation<>(); +CrossValidation scoreCalculator = new CrossValidation<>(); ParamGrid paramGrid = new ParamGrid() .addHyperParam("maxDeep", trainerCV::withMaxDeep, new Double[] {1.0, 2.0, 3.0, 4.0, 5.0, 10.0}) @@ -101,7 +101,7 @@ Pipeline pipeline = new Pipeline()) .addTrainer(trainer); -CrossValidation scoreCalculator = new CrossValidation<>(); +CrossValidation scoreCalculator = new CrossValidation<>(); ParamGrid paramGrid = new ParamGrid() .addHyperParam("maxDeep", trainer::withMaxDeep, new Double[] {1.0, 2.0, 3.0, 4.0, 5.0, 10.0}) diff --git a/docs/_docs/machine-learning/regression/decision-trees-regression.adoc b/docs/_docs/machine-learning/regression/decision-trees-regression.adoc index 48f9d5cc289e2..2abbaa8dc71dd 100644 --- a/docs/_docs/machine-learning/regression/decision-trees-regression.adoc +++ 
b/docs/_docs/machine-learning/regression/decision-trees-regression.adoc @@ -39,12 +39,12 @@ The model works this way - the split process stops when either the algorithm has == Model -The Model in a decision tree classification is represented by the class `DecisionTreeNode`. We can make a prediction for a given vector of features in the following way: +The Model in a decision tree classification is represented by the class `DecisionTreeModel`. We can make a prediction for a given vector of features in the following way: [source, java] ---- -DecisionTreeNode mdl = ...; +DecisionTreeModel mdl = ...; double prediction = mdl.apply(observation); ---- @@ -67,7 +67,7 @@ DecisionTreeRegressionTrainer trainer = new DecisionTreeRegressionTrainer( ); // Train model. -DecisionTreeNode mdl = trainer.fit(ignite, dataCache, vectorizer); +DecisionTreeModel mdl = trainer.fit(ignite, dataCache, vectorizer); ---- == Examples diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/clustering/KMeansClusterizationExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/clustering/KMeansClusterizationExample.java index beee4f6a721e5..3127418f9653f 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/clustering/KMeansClusterizationExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/clustering/KMeansClusterizationExample.java @@ -73,8 +73,8 @@ public static void main(String[] args) throws IOException { ); System.out.println(">>> KMeans centroids"); - Tracer.showAscii(mdl.getCenters()[0]); - Tracer.showAscii(mdl.getCenters()[1]); + Tracer.showAscii(mdl.centers()[0]); + Tracer.showAscii(mdl.centers()[1]); System.out.println(">>>"); System.out.println(">>> --------------------------------------------"); diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/ANNClassificationExportImportExample.java 
b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/ANNClassificationExportImportExample.java new file mode 100644 index 0000000000000..618e4c6cdaf46 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/ANNClassificationExportImportExample.java @@ -0,0 +1,339 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.UUID; +import javax.cache.Cache; +import org.apache.commons.math3.util.Precision; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.cache.query.QueryCursor; +import org.apache.ignite.cache.query.ScanQuery; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DoubleArrayVectorizer; +import org.apache.ignite.ml.knn.NNClassificationModel; +import org.apache.ignite.ml.knn.ann.ANNClassificationModel; +import org.apache.ignite.ml.knn.ann.ANNClassificationTrainer; +import org.apache.ignite.ml.math.distances.EuclideanDistance; +import org.apache.ignite.ml.math.distances.ManhattanDistance; +import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector; + +/** + * Run ANN multi-class classification trainer ({@link ANNClassificationTrainer}) over distributed dataset. + *

    + * Code in this example launches Ignite grid and fills the cache with test data points (based on the + * Iris dataset).

    + *

    + * After that it trains the model based on the specified data using + * kNN algorithm.

    + *

    + * Finally, this example loops over the test set of data points, applies the trained model to predict which cluster + * this point belongs to, and compares the prediction to the expected outcome (ground truth).

    + *

    + * You can change the test data used in this example and re-run it to explore this algorithm further.

    + */ +public class ANNClassificationExportImportExample { + /** + * Run example. + */ + public static void main(String[] args) throws IOException { + System.out.println(); + System.out.println(">>> ANN multi-class classification algorithm over cached dataset usage example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println(">>> Ignite grid started."); + + IgniteCache dataCache = null; + Path jsonMdlPath = null; + try { + dataCache = getTestCache(ignite); + + ANNClassificationTrainer trainer = new ANNClassificationTrainer() + .withDistance(new ManhattanDistance()) + .withK(50) + .withMaxIterations(1000) + .withEpsilon(1e-2); + + ANNClassificationModel mdl = (ANNClassificationModel) trainer.fit( + ignite, + dataCache, + new DoubleArrayVectorizer().labeled(Vectorizer.LabelCoordinate.FIRST) + ).withK(5) + .withDistanceMeasure(new EuclideanDistance()) + .withWeighted(true); + + System.out.println("\n>>> Exported ANN model: " + mdl.toString(true)); + + double accuracy = evaluateModel(dataCache, mdl); + + System.out.println("\n>>> Accuracy for exported ANN model:" + accuracy); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + ANNClassificationModel modelImportedFromJSON = ANNClassificationModel.fromJSON(jsonMdlPath); + + System.out.println("\n>>> Imported ANN model: " + modelImportedFromJSON.toString(true)); + + accuracy = evaluateModel(dataCache, modelImportedFromJSON); + + System.out.println("\n>>> Accuracy for imported ANN model:" + accuracy); + + System.out.println(">>> ANN multi-class classification algorithm over cached dataset usage example completed."); + } + finally { + if (dataCache != null) + dataCache.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } + + private static double evaluateModel(IgniteCache dataCache, NNClassificationModel knnMdl) { + int amountOfErrors = 
0; + int totalAmount = 0; + + double accuracy; + try (QueryCursor> observations = dataCache.query(new ScanQuery<>())) { + System.out.println(">>> ---------------------------------"); + System.out.println(">>> | Prediction\t| Ground Truth\t|"); + System.out.println(">>> ---------------------------------"); + + for (Cache.Entry observation : observations) { + double[] val = observation.getValue(); + double[] inputs = Arrays.copyOfRange(val, 1, val.length); + double groundTruth = val[0]; + + double prediction = knnMdl.predict(new DenseVector(inputs)); + + totalAmount++; + if (!Precision.equals(groundTruth, prediction, Precision.EPSILON)) + amountOfErrors++; + + System.out.printf(">>> | %.4f\t\t| %.4f\t\t|\n", prediction, groundTruth); + } + + System.out.println(">>> ---------------------------------"); + + accuracy = 1 - amountOfErrors / (double) totalAmount; + + } + return accuracy; + } + + /** + * Fills cache with data and returns it. + * + * @param ignite Ignite instance. + * @return Filled Ignite Cache. + */ + private static IgniteCache getTestCache(Ignite ignite) { + CacheConfiguration cacheConfiguration = new CacheConfiguration<>(); + cacheConfiguration.setName("TEST_" + UUID.randomUUID()); + cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10)); + + IgniteCache cache = ignite.createCache(cacheConfiguration); + + for (int k = 0; k < 10; k++) { // multiplies the Iris dataset k times. + for (int i = 0; i < data.length; i++) + cache.put(k * 10000 + i, mutate(data[i], k)); + } + + return cache; + } + + /** + * Tiny changing of data depending on k parameter. + * + * @param datum The vector data. + * @param k The passed parameter. + * @return The changed vector data. + */ + private static double[] mutate(double[] datum, int k) { + for (int i = 0; i < datum.length; i++) + datum[i] += k / 100000; + return datum; + } + + /** + * The Iris dataset. 
+ */ + private static final double[][] data = { + {1, 5.1, 3.5, 1.4, 0.2}, + {1, 4.9, 3, 1.4, 0.2}, + {1, 4.7, 3.2, 1.3, 0.2}, + {1, 4.6, 3.1, 1.5, 0.2}, + {1, 5, 3.6, 1.4, 0.2}, + {1, 5.4, 3.9, 1.7, 0.4}, + {1, 4.6, 3.4, 1.4, 0.3}, + {1, 5, 3.4, 1.5, 0.2}, + {1, 4.4, 2.9, 1.4, 0.2}, + {1, 4.9, 3.1, 1.5, 0.1}, + {1, 5.4, 3.7, 1.5, 0.2}, + {1, 4.8, 3.4, 1.6, 0.2}, + {1, 4.8, 3, 1.4, 0.1}, + {1, 4.3, 3, 1.1, 0.1}, + {1, 5.8, 4, 1.2, 0.2}, + {1, 5.7, 4.4, 1.5, 0.4}, + {1, 5.4, 3.9, 1.3, 0.4}, + {1, 5.1, 3.5, 1.4, 0.3}, + {1, 5.7, 3.8, 1.7, 0.3}, + {1, 5.1, 3.8, 1.5, 0.3}, + {1, 5.4, 3.4, 1.7, 0.2}, + {1, 5.1, 3.7, 1.5, 0.4}, + {1, 4.6, 3.6, 1, 0.2}, + {1, 5.1, 3.3, 1.7, 0.5}, + {1, 4.8, 3.4, 1.9, 0.2}, + {1, 5, 3, 1.6, 0.2}, + {1, 5, 3.4, 1.6, 0.4}, + {1, 5.2, 3.5, 1.5, 0.2}, + {1, 5.2, 3.4, 1.4, 0.2}, + {1, 4.7, 3.2, 1.6, 0.2}, + {1, 4.8, 3.1, 1.6, 0.2}, + {1, 5.4, 3.4, 1.5, 0.4}, + {1, 5.2, 4.1, 1.5, 0.1}, + {1, 5.5, 4.2, 1.4, 0.2}, + {1, 4.9, 3.1, 1.5, 0.1}, + {1, 5, 3.2, 1.2, 0.2}, + {1, 5.5, 3.5, 1.3, 0.2}, + {1, 4.9, 3.1, 1.5, 0.1}, + {1, 4.4, 3, 1.3, 0.2}, + {1, 5.1, 3.4, 1.5, 0.2}, + {1, 5, 3.5, 1.3, 0.3}, + {1, 4.5, 2.3, 1.3, 0.3}, + {1, 4.4, 3.2, 1.3, 0.2}, + {1, 5, 3.5, 1.6, 0.6}, + {1, 5.1, 3.8, 1.9, 0.4}, + {1, 4.8, 3, 1.4, 0.3}, + {1, 5.1, 3.8, 1.6, 0.2}, + {1, 4.6, 3.2, 1.4, 0.2}, + {1, 5.3, 3.7, 1.5, 0.2}, + {1, 5, 3.3, 1.4, 0.2}, + {2, 7, 3.2, 4.7, 1.4}, + {2, 6.4, 3.2, 4.5, 1.5}, + {2, 6.9, 3.1, 4.9, 1.5}, + {2, 5.5, 2.3, 4, 1.3}, + {2, 6.5, 2.8, 4.6, 1.5}, + {2, 5.7, 2.8, 4.5, 1.3}, + {2, 6.3, 3.3, 4.7, 1.6}, + {2, 4.9, 2.4, 3.3, 1}, + {2, 6.6, 2.9, 4.6, 1.3}, + {2, 5.2, 2.7, 3.9, 1.4}, + {2, 5, 2, 3.5, 1}, + {2, 5.9, 3, 4.2, 1.5}, + {2, 6, 2.2, 4, 1}, + {2, 6.1, 2.9, 4.7, 1.4}, + {2, 5.6, 2.9, 3.6, 1.3}, + {2, 6.7, 3.1, 4.4, 1.4}, + {2, 5.6, 3, 4.5, 1.5}, + {2, 5.8, 2.7, 4.1, 1}, + {2, 6.2, 2.2, 4.5, 1.5}, + {2, 5.6, 2.5, 3.9, 1.1}, + {2, 5.9, 3.2, 4.8, 1.8}, + {2, 6.1, 2.8, 4, 1.3}, + {2, 6.3, 2.5, 4.9, 1.5}, + {2, 6.1, 2.8, 4.7, 1.2}, + {2, 6.4, 
2.9, 4.3, 1.3}, + {2, 6.6, 3, 4.4, 1.4}, + {2, 6.8, 2.8, 4.8, 1.4}, + {2, 6.7, 3, 5, 1.7}, + {2, 6, 2.9, 4.5, 1.5}, + {2, 5.7, 2.6, 3.5, 1}, + {2, 5.5, 2.4, 3.8, 1.1}, + {2, 5.5, 2.4, 3.7, 1}, + {2, 5.8, 2.7, 3.9, 1.2}, + {2, 6, 2.7, 5.1, 1.6}, + {2, 5.4, 3, 4.5, 1.5}, + {2, 6, 3.4, 4.5, 1.6}, + {2, 6.7, 3.1, 4.7, 1.5}, + {2, 6.3, 2.3, 4.4, 1.3}, + {2, 5.6, 3, 4.1, 1.3}, + {2, 5.5, 2.5, 4, 1.3}, + {2, 5.5, 2.6, 4.4, 1.2}, + {2, 6.1, 3, 4.6, 1.4}, + {2, 5.8, 2.6, 4, 1.2}, + {2, 5, 2.3, 3.3, 1}, + {2, 5.6, 2.7, 4.2, 1.3}, + {2, 5.7, 3, 4.2, 1.2}, + {2, 5.7, 2.9, 4.2, 1.3}, + {2, 6.2, 2.9, 4.3, 1.3}, + {2, 5.1, 2.5, 3, 1.1}, + {2, 5.7, 2.8, 4.1, 1.3}, + {3, 6.3, 3.3, 6, 2.5}, + {3, 5.8, 2.7, 5.1, 1.9}, + {3, 7.1, 3, 5.9, 2.1}, + {3, 6.3, 2.9, 5.6, 1.8}, + {3, 6.5, 3, 5.8, 2.2}, + {3, 7.6, 3, 6.6, 2.1}, + {3, 4.9, 2.5, 4.5, 1.7}, + {3, 7.3, 2.9, 6.3, 1.8}, + {3, 6.7, 2.5, 5.8, 1.8}, + {3, 7.2, 3.6, 6.1, 2.5}, + {3, 6.5, 3.2, 5.1, 2}, + {3, 6.4, 2.7, 5.3, 1.9}, + {3, 6.8, 3, 5.5, 2.1}, + {3, 5.7, 2.5, 5, 2}, + {3, 5.8, 2.8, 5.1, 2.4}, + {3, 6.4, 3.2, 5.3, 2.3}, + {3, 6.5, 3, 5.5, 1.8}, + {3, 7.7, 3.8, 6.7, 2.2}, + {3, 7.7, 2.6, 6.9, 2.3}, + {3, 6, 2.2, 5, 1.5}, + {3, 6.9, 3.2, 5.7, 2.3}, + {3, 5.6, 2.8, 4.9, 2}, + {3, 7.7, 2.8, 6.7, 2}, + {3, 6.3, 2.7, 4.9, 1.8}, + {3, 6.7, 3.3, 5.7, 2.1}, + {3, 7.2, 3.2, 6, 1.8}, + {3, 6.2, 2.8, 4.8, 1.8}, + {3, 6.1, 3, 4.9, 1.8}, + {3, 6.4, 2.8, 5.6, 2.1}, + {3, 7.2, 3, 5.8, 1.6}, + {3, 7.4, 2.8, 6.1, 1.9}, + {3, 7.9, 3.8, 6.4, 2}, + {3, 6.4, 2.8, 5.6, 2.2}, + {3, 6.3, 2.8, 5.1, 1.5}, + {3, 6.1, 2.6, 5.6, 1.4}, + {3, 7.7, 3, 6.1, 2.3}, + {3, 6.3, 3.4, 5.6, 2.4}, + {3, 6.4, 3.1, 5.5, 1.8}, + {3, 6, 3, 4.8, 1.8}, + {3, 6.9, 3.1, 5.4, 2.1}, + {3, 6.7, 3.1, 5.6, 2.4}, + {3, 6.9, 3.1, 5.1, 2.3}, + {3, 5.8, 2.7, 5.1, 1.9}, + {3, 6.8, 3.2, 5.9, 2.3}, + {3, 6.7, 3.3, 5.7, 2.5}, + {3, 6.7, 3, 5.2, 2.3}, + {3, 6.3, 2.5, 5, 1.9}, + {3, 6.5, 3, 5.2, 2}, + {3, 6.2, 3.4, 5.4, 2.3}, + {3, 5.9, 3, 5.1, 1.8} + }; +} diff --git 
a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/CompoundNaiveBayesExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/CompoundNaiveBayesExportImportExample.java new file mode 100644 index 0000000000000..7d05f5e82129c --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/CompoundNaiveBayesExportImportExample.java @@ -0,0 +1,129 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.examples.ml.util.MLSandboxDatasets; +import org.apache.ignite.examples.ml.util.SandboxMLCache; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DummyVectorizer; +import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.naivebayes.compound.CompoundNaiveBayesModel; +import org.apache.ignite.ml.naivebayes.compound.CompoundNaiveBayesTrainer; +import org.apache.ignite.ml.naivebayes.discrete.DiscreteNaiveBayesTrainer; +import org.apache.ignite.ml.naivebayes.gaussian.GaussianNaiveBayesTrainer; +import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; +import org.apache.ignite.ml.selection.scoring.metric.MetricName; + +import static java.util.Arrays.asList; + +/** + * Run naive Compound Bayes classification model based on + * Nnaive Bayes classifier algorithm ({@link GaussianNaiveBayesTrainer})and Discrete naive Bayes + * classifier algorithm ({@link DiscreteNaiveBayesTrainer}) over distributed cache. + *

    + * Code in this example launches Ignite grid and fills the cache with test data points. + *

    + * After that it trains the naive Bayes classification model based on the specified data.

    + *

    + * Finally, this example loops over the test set of data points, applies the trained model to predict the target value, + * compares prediction to expected outcome (ground truth), and builds + * confusion matrix.

    + *

    + * You can change the test data used in this example and re-run it to explore this algorithm further.

    + */ +public class CompoundNaiveBayesExportImportExample { + /** Run example. */ + public static void main(String[] args) throws IOException { + System.out.println(); + System.out.println(">>> Compound Naive Bayes classification model over partitioned dataset usage example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println(">>> Ignite grid started."); + + IgniteCache dataCache = null; + Path jsonMdlPath = null; + try { + dataCache = new SandboxMLCache(ignite) + .fillCacheWith(MLSandboxDatasets.MIXED_DATASET); + + double[] priorProbabilities = new double[]{.5, .5}; + double[][] thresholds = new double[][]{{.5}, {.5}, {.5}, {.5}, {.5}}; + + System.out.println("\n>>> Create new naive Bayes classification trainer object."); + CompoundNaiveBayesTrainer trainer = new CompoundNaiveBayesTrainer() + .withPriorProbabilities(priorProbabilities) + .withGaussianNaiveBayesTrainer(new GaussianNaiveBayesTrainer()) + .withGaussianFeatureIdsToSkip(asList(3, 4, 5, 6, 7)) + .withDiscreteNaiveBayesTrainer(new DiscreteNaiveBayesTrainer() + .setBucketThresholds(thresholds)) + .withDiscreteFeatureIdsToSkip(asList(0, 1, 2)); + System.out.println("\n>>> Perform the training to get the model."); + + Vectorizer vectorizer = new DummyVectorizer() + .labeled(Vectorizer.LabelCoordinate.FIRST); + + CompoundNaiveBayesModel mdl = trainer.fit(ignite, dataCache, vectorizer); + + System.out.println("\n>>> Exported Compound Naive Bayes model: " + mdl.toString(true)); + + double accuracy = Evaluator.evaluate( + dataCache, + mdl, + vectorizer, + MetricName.ACCURACY + ); + + System.out.println("\n>>> Accuracy for exported Compound Naive Bayes model:" + accuracy); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + CompoundNaiveBayesModel modelImportedFromJSON = CompoundNaiveBayesModel.fromJSON(jsonMdlPath); + + System.out.println("\n>>> Imported Compound Naive Bayes model: " + 
modelImportedFromJSON.toString(true)); + + accuracy = Evaluator.evaluate( + dataCache, + modelImportedFromJSON, + vectorizer, + MetricName.ACCURACY + ); + + System.out.println("\n>>> Accuracy for imported Compound Naive Bayes model:" + accuracy); + + System.out.println("\n>>> Compound Naive Bayes model over partitioned dataset usage example completed."); + } + finally { + if (dataCache != null) + dataCache.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/DecisionTreeClassificationExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/DecisionTreeClassificationExportImportExample.java new file mode 100644 index 0000000000000..e7ad7ca71e676 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/DecisionTreeClassificationExportImportExample.java @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Random; +import org.apache.commons.math3.util.Precision; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.ml.dataset.feature.extractor.impl.LabeledDummyVectorizer; +import org.apache.ignite.ml.math.primitives.vector.VectorUtils; +import org.apache.ignite.ml.structures.LabeledVector; +import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; +import org.apache.ignite.ml.tree.DecisionTreeModel; + +/** + * Example of using distributed {@link DecisionTreeClassificationTrainer}. + *

    + * Code in this example launches Ignite grid and fills the cache with pseudo random training data points.

    + *

    + * After that it creates classification trainer and uses it to train the model on the training set.

    + *

    + * Finally, this example loops over the pseudo randomly generated test set of data points, applies the trained model, + * and compares prediction to expected outcome.

    + *

    + * You can change the test data used in this example and re-run it to explore this algorithm further.

    + */ +public class DecisionTreeClassificationExportImportExample { + /** + * Executes example. + * + * @param args Command line arguments, none required. + */ + public static void main(String[] args) throws IOException { + System.out.println(">>> Decision tree classification trainer example started."); + + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println("\n>>> Ignite grid started."); + + // Create cache with training data. + CacheConfiguration> trainingSetCfg = new CacheConfiguration<>(); + trainingSetCfg.setName("TRAINING_SET"); + trainingSetCfg.setAffinity(new RendezvousAffinityFunction(false, 10)); + + IgniteCache> trainingSet = null; + Path jsonMdlPath = null; + try { + trainingSet = ignite.createCache(trainingSetCfg); + + Random rnd = new Random(0); + + // Fill training data. + for (int i = 0; i < 1000; i++) + trainingSet.put(i, generatePoint(rnd)); + + // Create classification trainer. + DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(4, 0); + + // Train decision tree model. 
+ LabeledDummyVectorizer vectorizer = new LabeledDummyVectorizer<>(); + DecisionTreeModel mdl = trainer.fit( + ignite, + trainingSet, + vectorizer + ); + + System.out.println("\n>>> Exported Decision tree classification model: " + mdl); + + int correctPredictions = evaluateModel(rnd, mdl); + + System.out.println("\n>>> Accuracy for exported Decision tree classification model: " + correctPredictions / 10.0 + "%"); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + DecisionTreeModel modelImportedFromJSON = DecisionTreeModel.fromJSON(jsonMdlPath); + + System.out.println("\n>>> Imported Decision tree classification model: " + modelImportedFromJSON); + + correctPredictions = evaluateModel(rnd, modelImportedFromJSON); + + System.out.println("\n>>> Accuracy for imported Decision tree classification model: " + correctPredictions / 10.0 + "%"); + + System.out.println("\n>>> Decision tree classification trainer example completed."); + } + finally { + if (trainingSet != null) + trainingSet.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } + + private static int evaluateModel(Random rnd, DecisionTreeModel mdl) { + // Calculate score. + int correctPredictions = 0; + for (int i = 0; i < 1000; i++) { + LabeledVector pnt = generatePoint(rnd); + + double prediction = mdl.predict(pnt.features()); + double lbl = pnt.label(); + + if (i % 50 == 1) + System.out.printf(">>> test #: %d\t\t predicted: %.4f\t\tlabel: %.4f\n", i, prediction, lbl); + + if (Precision.equals(prediction, lbl, Precision.EPSILON)) + correctPredictions++; + } + return correctPredictions; + } + + /** + * Generate point with {@code x} in (-0.5, 0.5) and {@code y} in the same interval. If {@code x * y > 0} then label + * is 1, otherwise 0. + * + * @param rnd Random. + * @return Point with label. 
+ */ + private static LabeledVector generatePoint(Random rnd) { + + double x = rnd.nextDouble() - 0.5; + double y = rnd.nextDouble() - 0.5; + + return new LabeledVector<>(VectorUtils.of(x, y), x * y > 0 ? 1. : 0.); + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/DecisionTreeRegressionExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/DecisionTreeRegressionExportImportExample.java new file mode 100644 index 0000000000000..9857ba9edab55 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/DecisionTreeRegressionExportImportExample.java @@ -0,0 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.ml.dataset.feature.extractor.impl.LabeledDummyVectorizer; +import org.apache.ignite.ml.math.primitives.vector.VectorUtils; +import org.apache.ignite.ml.structures.LabeledVector; +import org.apache.ignite.ml.tree.DecisionTreeModel; +import org.apache.ignite.ml.tree.DecisionTreeRegressionTrainer; + +/** + * Example of using distributed {@link DecisionTreeRegressionTrainer}. + *

    + * Code in this example launches Ignite grid and fills the cache with generated test data points ({@code sin(x)} on + * interval {@code [0, 10)}).

    + *

    + * After that it creates classification trainer and uses it to train the model on the training set.

    + *

    + * Finally, this example loops over the test data points, applies the trained model, and compares prediction to expected + * outcome (ground truth).

    + *

    + * You can change the test data used in this example and re-run it to explore this algorithm further.

    + */ +public class DecisionTreeRegressionExportImportExample { + /** + * Executes example. + * + * @param args Command line arguments, none required. + */ + public static void main(String... args) throws IOException { + System.out.println(">>> Decision tree regression trainer example started."); + + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println("\n>>> Ignite grid started."); + + // Create cache with training data. + CacheConfiguration> trainingSetCfg = new CacheConfiguration<>(); + trainingSetCfg.setName("TRAINING_SET"); + trainingSetCfg.setAffinity(new RendezvousAffinityFunction(false, 10)); + + IgniteCache> trainingSet = null; + Path jsonMdlPath = null; + try { + trainingSet = ignite.createCache(trainingSetCfg); + + // Fill training data. + generatePoints(trainingSet); + + // Create regression trainer. + DecisionTreeRegressionTrainer trainer = new DecisionTreeRegressionTrainer(10, 0); + + // Train decision tree model. + DecisionTreeModel mdl = trainer.fit(ignite, trainingSet, new LabeledDummyVectorizer<>()); + + System.out.println("\n>>> Exported Decision tree regression model: " + mdl); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + DecisionTreeModel modelImportedFromJSON = DecisionTreeModel.fromJSON(jsonMdlPath); + + System.out.println("\n>>> Imported Decision tree regression model: " + modelImportedFromJSON); + + System.out.println(">>> ---------------------------------"); + System.out.println(">>> | Prediction\t| Ground Truth\t|"); + System.out.println(">>> ---------------------------------"); + + // Calculate score. 
+ for (int x = 0; x < 10; x++) { + double predicted = mdl.predict(VectorUtils.of(x)); + + System.out.printf(">>> | %.4f\t\t| %.4f\t\t|\n", predicted, Math.sin(x)); + } + + System.out.println(">>> ---------------------------------"); + + System.out.println("\n>>> Decision tree regression trainer example completed."); + } + finally { + if (trainingSet != null) + trainingSet.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } + + /** + * Generates {@code sin(x)} on interval {@code [0, 10)} and loads into the specified cache. + */ + private static void generatePoints(IgniteCache> trainingSet) { + for (int i = 0; i < 1000; i++) { + double x = i / 100.0; + double y = Math.sin(x); + + trainingSet.put(i, new LabeledVector<>(VectorUtils.of(x), y)); + } + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/DiscreteNaiveBayesExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/DiscreteNaiveBayesExportImportExample.java new file mode 100644 index 0000000000000..c4d44c4568479 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/DiscreteNaiveBayesExportImportExample.java @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.examples.ml.util.MLSandboxDatasets; +import org.apache.ignite.examples.ml.util.SandboxMLCache; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DummyVectorizer; +import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.naivebayes.discrete.DiscreteNaiveBayesModel; +import org.apache.ignite.ml.naivebayes.discrete.DiscreteNaiveBayesTrainer; +import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; +import org.apache.ignite.ml.selection.scoring.metric.MetricName; + +/** + * Run naive Bayes classification model based on + * naive Bayes classifier algorithm ({@link DiscreteNaiveBayesTrainer}) over distributed cache. + *

    + * Code in this example launches Ignite grid and fills the cache with test data points. + *

    + *

    + * After that it trains the Discrete naive Bayes classification model based on the specified data.

    + *

    + * Finally, this example loops over the test set of data points, applies the trained model to predict the target value, + * compares prediction to expected outcome (ground truth), and builds + * confusion matrix.

    + *

    + * You can change the test data used in this example and re-run it to explore this algorithm further.

    + */ +public class DiscreteNaiveBayesExportImportExample { + /** + * Run example. + */ + public static void main(String[] args) throws IOException { + System.out.println(">>> Discrete naive Bayes classification model over partitioned dataset usage example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println(">>> Ignite grid started."); + + IgniteCache dataCache = null; + Path jsonMdlPath = null; + try { + dataCache = new SandboxMLCache(ignite).fillCacheWith(MLSandboxDatasets.ENGLISH_VS_SCOTTISH); + + double[][] thresholds = new double[][] {{.5}, {.5}, {.5}, {.5}, {.5}}; + System.out.println(">>> Create new Discrete naive Bayes classification trainer object."); + DiscreteNaiveBayesTrainer trainer = new DiscreteNaiveBayesTrainer() + .setBucketThresholds(thresholds); + + System.out.println("\n>>> Perform the training to get the model."); + Vectorizer vectorizer = new DummyVectorizer() + .labeled(Vectorizer.LabelCoordinate.FIRST); + + DiscreteNaiveBayesModel mdl = trainer.fit(ignite, dataCache, vectorizer); + System.out.println("\n>>> Exported Discrete Naive Bayes model: " + mdl.toString(true)); + + double accuracy = Evaluator.evaluate( + dataCache, + mdl, + vectorizer, + MetricName.ACCURACY + ); + + System.out.println("\n>>> Accuracy for exported Discrete Naive Bayes model:" + accuracy); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + DiscreteNaiveBayesModel modelImportedFromJSON = DiscreteNaiveBayesModel.fromJSON(jsonMdlPath); + + System.out.println("\n>>> Imported Discrete Naive Bayes model: " + modelImportedFromJSON.toString(true)); + + accuracy = Evaluator.evaluate( + dataCache, + modelImportedFromJSON, + vectorizer, + MetricName.ACCURACY + ); + + System.out.println("\n>>> Accuracy for imported Discrete Naive Bayes model:" + accuracy); + + System.out.println("\n>>> Discrete Naive bayes model over partitioned dataset usage example completed."); 
+ } + finally { + if (dataCache != null) + dataCache.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } + +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/GDBOnTreesClassificationExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/GDBOnTreesClassificationExportImportExample.java new file mode 100644 index 0000000000000..9aa8f228f717a --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/GDBOnTreesClassificationExportImportExample.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.ml.composition.boosting.GDBModel; +import org.apache.ignite.ml.composition.boosting.GDBTrainer; +import org.apache.ignite.ml.composition.boosting.convergence.mean.MeanAbsValueConvergenceCheckerFactory; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DoubleArrayVectorizer; +import org.apache.ignite.ml.math.functions.IgniteFunction; +import org.apache.ignite.ml.math.primitives.vector.VectorUtils; +import org.apache.ignite.ml.tree.boosting.GDBBinaryClassifierOnTreesTrainer; +import org.jetbrains.annotations.NotNull; + +/** + * Example represents a solution for the task of classification learning based on Gradient Boosting on trees + * implementation. It shows an initialization of {@link GDBBinaryClassifierOnTreesTrainer}, initialization of Ignite + * Cache, learning step and comparing of predicted and real values. + *

    + * In this example dataset is created automatically by meander function {@code f(x) = [sin(x) > 0]}.

    + */ +public class GDBOnTreesClassificationExportImportExample { + /** + * Run example. + * + * @param args Command line arguments, none required. + */ + public static void main(String[] args) throws IOException { + System.out.println(); + System.out.println(">>> GDB classification trainer example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println("\n>>> Ignite grid started."); + + // Create cache with training data. + CacheConfiguration trainingSetCfg = createCacheConfiguration(); + IgniteCache trainingSet = null; + Path jsonMdlPath = null; + try { + trainingSet = fillTrainingData(ignite, trainingSetCfg); + + // Create classification trainer. + GDBTrainer trainer = new GDBBinaryClassifierOnTreesTrainer(1.0, 300, 2, 0.) + .withCheckConvergenceStgyFactory(new MeanAbsValueConvergenceCheckerFactory(0.1)); + + // Train decision tree model. + GDBModel mdl = trainer.fit( + ignite, + trainingSet, + new DoubleArrayVectorizer().labeled(Vectorizer.LabelCoordinate.LAST) + ); + + System.out.println("\n>>> Exported GDB classification model: " + mdl.toString(true)); + + predictOnGeneratedData(mdl); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + IgniteFunction lbMapper = lb -> lb > 0.5 ? 
1.0 : 0.0; + GDBModel modelImportedFromJSON = GDBModel.fromJSON(jsonMdlPath).withLblMapping(lbMapper); + + System.out.println("\n>>> Imported GDB classification model: " + modelImportedFromJSON.toString(true)); + + predictOnGeneratedData(modelImportedFromJSON); + + System.out.println(">>> GDB classification trainer example completed."); + } + finally { + if (trainingSet != null) + trainingSet.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } + + private static void predictOnGeneratedData(GDBModel mdl) { + System.out.println(">>> ---------------------------------"); + System.out.println(">>> | Prediction\t| Valid answer\t|"); + System.out.println(">>> ---------------------------------"); + + // Calculate score. + for (int x = -5; x < 5; x++) { + double predicted = mdl.predict(VectorUtils.of(x)); + + System.out.printf(">>> | %.4f\t\t| %.4f\t\t|\n", predicted, Math.sin(x) < 0 ? 0.0 : 1.0); + } + + System.out.println(">>> ---------------------------------"); + System.out.println(">>> Count of trees = " + mdl.getModels().size()); + System.out.println(">>> ---------------------------------"); + } + + /** + * Create cache configuration. + */ + @NotNull private static CacheConfiguration createCacheConfiguration() { + CacheConfiguration trainingSetCfg = new CacheConfiguration<>(); + trainingSetCfg.setName("TRAINING_SET"); + trainingSetCfg.setAffinity(new RendezvousAffinityFunction(false, 10)); + return trainingSetCfg; + } + + /** + * Fill meander-like training data. + * + * @param ignite Ignite instance. + * @param trainingSetCfg Training set config. + */ + @NotNull private static IgniteCache fillTrainingData(Ignite ignite, + CacheConfiguration trainingSetCfg) { + IgniteCache trainingSet = ignite.getOrCreateCache(trainingSetCfg); + for (int i = -50; i <= 50; i++) { + double x = ((double)i) / 10.0; + double y = Math.sin(x) < 0 ? 
0.0 : 1.0; + trainingSet.put(i, new double[] {x, y}); + } + return trainingSet; + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/GDBOnTreesRegressionExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/GDBOnTreesRegressionExportImportExample.java new file mode 100644 index 0000000000000..14233e316e4a3 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/GDBOnTreesRegressionExportImportExample.java @@ -0,0 +1,145 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.ml.composition.boosting.GDBModel; +import org.apache.ignite.ml.composition.boosting.GDBTrainer; +import org.apache.ignite.ml.composition.boosting.convergence.mean.MeanAbsValueConvergenceCheckerFactory; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DoubleArrayVectorizer; +import org.apache.ignite.ml.math.functions.IgniteFunction; +import org.apache.ignite.ml.math.primitives.vector.VectorUtils; +import org.apache.ignite.ml.tree.boosting.GDBRegressionOnTreesTrainer; +import org.jetbrains.annotations.NotNull; + +/** + * Example represents a solution for the task of regression learning based on Gradient Boosting on trees implementation. + * It shows an initialization of {@link GDBRegressionOnTreesTrainer}, initialization of Ignite Cache, learning step and + * comparing of predicted and real values. + *

    + * In this example dataset is created automatically by parabolic function {@code f(x) = x^2}.

    + */ +public class GDBOnTreesRegressionExportImportExample { + /** + * Run example. + * + * @param args Command line arguments, none required. + */ + public static void main(String[] args) throws IOException { + System.out.println(); + System.out.println(">>> GDB regression trainer example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println(">>> Ignite grid started."); + + // Create cache with training data. + CacheConfiguration trainingSetCfg = createCacheConfiguration(); + IgniteCache trainingSet = null; + Path jsonMdlPath = null; + try { + trainingSet = fillTrainingData(ignite, trainingSetCfg); + + // Create regression trainer. + GDBTrainer trainer = new GDBRegressionOnTreesTrainer(1.0, 2000, 1, 0.) + .withCheckConvergenceStgyFactory(new MeanAbsValueConvergenceCheckerFactory(0.001)); + + // Train decision tree model. + GDBModel mdl = trainer.fit( + ignite, + trainingSet, + new DoubleArrayVectorizer().labeled(Vectorizer.LabelCoordinate.LAST) + ); + + System.out.println("\n>>> Exported GDB regression model: " + mdl.toString(true)); + + predictOnGeneratedData(mdl); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + IgniteFunction lbMapper = lb -> lb; + GDBModel modelImportedFromJSON = GDBModel.fromJSON(jsonMdlPath).withLblMapping(lbMapper); + + System.out.println("\n>>> Imported GDB regression model: " + modelImportedFromJSON.toString(true)); + + predictOnGeneratedData(modelImportedFromJSON); + + System.out.println(">>> GDB regression trainer example completed."); + } + finally { + if (trainingSet != null) + trainingSet.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } + + private static void predictOnGeneratedData(GDBModel mdl) { + System.out.println(">>> ---------------------------------"); + System.out.println(">>> | Prediction\t| Valid answer \t|"); + 
System.out.println(">>> ---------------------------------"); + + // Calculate score. + for (int x = -5; x < 5; x++) { + double predicted = mdl.predict(VectorUtils.of(x)); + + System.out.printf(">>> | %.4f\t\t| %.4f\t\t|\n", predicted, Math.pow(x, 2)); + } + + System.out.println(">>> ---------------------------------"); + } + + /** + * Create cache configuration. + */ + @NotNull private static CacheConfiguration createCacheConfiguration() { + CacheConfiguration trainingSetCfg = new CacheConfiguration<>(); + trainingSetCfg.setName("TRAINING_SET"); + trainingSetCfg.setAffinity(new RendezvousAffinityFunction(false, 10)); + return trainingSetCfg; + } + + /** + * Fill parabolic training data. + * + * @param ignite Ignite instance. + * @param trainingSetCfg Training set config. + */ + @NotNull private static IgniteCache fillTrainingData(Ignite ignite, + CacheConfiguration trainingSetCfg) { + IgniteCache trainingSet = ignite.getOrCreateCache(trainingSetCfg); + for (int i = -50; i <= 50; i++) { + double x = ((double)i) / 10.0; + double y = Math.pow(x, 2); + trainingSet.put(i, new double[] {x, y}); + } + return trainingSet; + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/GaussianNaiveBayesExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/GaussianNaiveBayesExportImportExample.java new file mode 100644 index 0000000000000..b6fb9c9fd2097 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/GaussianNaiveBayesExportImportExample.java @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.examples.ml.util.MLSandboxDatasets; +import org.apache.ignite.examples.ml.util.SandboxMLCache; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DummyVectorizer; +import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.naivebayes.gaussian.GaussianNaiveBayesModel; +import org.apache.ignite.ml.naivebayes.gaussian.GaussianNaiveBayesTrainer; +import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; +import org.apache.ignite.ml.selection.scoring.metric.MetricName; + +/** + * Run naive Bayes classification model based on naive + * Bayes classifier algorithm ({@link GaussianNaiveBayesTrainer}) over distributed cache. + *

    + * Code in this example launches Ignite grid and fills the cache with test data points (based on the + * Iris dataset).

    + *

    + * After that it trains the naive Bayes classification model based on the specified data.

    + *

    + * Finally, this example loops over the test set of data points, applies the trained model to predict the target value, + * compares prediction to expected outcome (ground truth), and builds + * confusion matrix.

    + *

    + * You can change the test data used in this example and re-run it to explore this algorithm further.

    + */ +public class GaussianNaiveBayesExportImportExample { + /** + * Run example. + */ + public static void main(String[] args) throws IOException { + System.out.println(); + System.out.println(">>> Naive Bayes classification model over partitioned dataset usage example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println(">>> Ignite grid started."); + + IgniteCache dataCache = null; + Path jsonMdlPath = null; + try { + dataCache = new SandboxMLCache(ignite).fillCacheWith(MLSandboxDatasets.TWO_CLASSED_IRIS); + + System.out.println(">>> Create new Gaussian Naive Bayes classification trainer object."); + GaussianNaiveBayesTrainer trainer = new GaussianNaiveBayesTrainer(); + + System.out.println("\n>>> Perform the training to get the model."); + + Vectorizer vectorizer = new DummyVectorizer() + .labeled(Vectorizer.LabelCoordinate.FIRST); + + GaussianNaiveBayesModel mdl = trainer.fit(ignite, dataCache, vectorizer); + System.out.println("\n>>> Exported Gaussian Naive Bayes model: " + mdl.toString(true)); + + double accuracy = Evaluator.evaluate( + dataCache, + mdl, + vectorizer, + MetricName.ACCURACY + ); + + System.out.println("\n>>> Accuracy for exported Gaussian Naive Bayes model:" + accuracy); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + GaussianNaiveBayesModel modelImportedFromJSON = GaussianNaiveBayesModel.fromJSON(jsonMdlPath); + + System.out.println("\n>>> Imported Gaussian Naive Bayes model: " + modelImportedFromJSON.toString(true)); + + accuracy = Evaluator.evaluate( + dataCache, + modelImportedFromJSON, + vectorizer, + MetricName.ACCURACY + ); + + System.out.println("\n>>> Accuracy for imported Gaussian Naive Bayes model:" + accuracy); + + System.out.println("\n>>> Gaussian Naive bayes model over partitioned dataset usage example completed."); + } + finally { + if (dataCache != null) + dataCache.destroy(); + if (jsonMdlPath != null) + 
Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } + +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/KMeansClusterizationExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/KMeansClusterizationExportImportExample.java new file mode 100644 index 0000000000000..ec5e6899f7eab --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/KMeansClusterizationExportImportExample.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.examples.ml.util.MLSandboxDatasets; +import org.apache.ignite.examples.ml.util.SandboxMLCache; +import org.apache.ignite.ml.clustering.kmeans.KMeansModel; +import org.apache.ignite.ml.clustering.kmeans.KMeansTrainer; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DummyVectorizer; +import org.apache.ignite.ml.math.distances.WeightedMinkowskiDistance; +import org.apache.ignite.ml.math.primitives.vector.Vector; + +/** + * Run KMeans clustering algorithm ({@link KMeansTrainer}) over distributed dataset. + *

    + * Code in this example launches Ignite grid and fills the cache with test data points (based on the + * Iris dataset).

    + *

    + * After that it trains the model based on the specified data using + * KMeans algorithm.

    + *

    + * Finally, this example loops over the test set of data points, applies the trained model to predict what cluster does + * this point belong to, and compares prediction to expected outcome (ground truth).

    + *

    + * You can change the test data used in this example and re-run it to explore this algorithm further.

    + */ +public class KMeansClusterizationExportImportExample { + /** + * Run example. + */ + public static void main(String[] args) throws IOException { + System.out.println(); + System.out.println(">>> KMeans clustering algorithm over cached dataset usage example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println(">>> Ignite grid started."); + + IgniteCache dataCache = null; + Path jsonMdlPath = null; + try { + dataCache = new SandboxMLCache(ignite).fillCacheWith(MLSandboxDatasets.TWO_CLASSED_IRIS); + + Vectorizer vectorizer = new DummyVectorizer().labeled(Vectorizer.LabelCoordinate.FIRST); + + KMeansTrainer trainer = new KMeansTrainer() + .withDistance(new WeightedMinkowskiDistance(2, new double[] {5.9360, 2.7700, 4.2600, 1.3260})); + //.withDistance(new MinkowskiDistance(2)); + + KMeansModel mdl = trainer.fit( + ignite, + dataCache, + vectorizer + ); + + System.out.println("\n>>> Exported KMeans model: " + mdl); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + KMeansModel modelImportedFromJSON = KMeansModel.fromJSON(jsonMdlPath); + + System.out.println("\n>>> Imported KMeans model: " + modelImportedFromJSON); + + System.out.println("\n>>> KMeans clustering algorithm over cached dataset usage example completed."); + } + finally { + if (dataCache != null) + dataCache.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/LinearRegressionExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/LinearRegressionExportImportExample.java new file mode 100644 index 0000000000000..723784bb999c7 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/LinearRegressionExportImportExample.java @@ -0,0 +1,116 @@ +/* + * 
Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.examples.ml.util.MLSandboxDatasets; +import org.apache.ignite.examples.ml.util.SandboxMLCache; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DummyVectorizer; +import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.regressions.linear.LinearRegressionLSQRTrainer; +import org.apache.ignite.ml.regressions.linear.LinearRegressionModel; +import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; +import org.apache.ignite.ml.selection.scoring.metric.MetricName; + +/** + * Run linear regression model based on LSQR algorithm + * ({@link LinearRegressionLSQRTrainer}) over cached dataset. + *

    + * Code in this example launches Ignite grid and fills the cache with simple test data.

    + *

    + * After that it trains the linear regression model based on the specified data.

    + *

    + * Finally, this example loops over the test set of data points, applies the trained model to predict the target value + * and compares prediction to expected outcome (ground truth).

    + *

    + * You can change the test data used in this example and re-run it to explore this algorithm further.

    + */ +public class LinearRegressionExportImportExample { + /** + * Run example. + */ + public static void main(String[] args) throws IOException { + System.out.println(); + System.out.println(">>> Linear regression model over cache based dataset usage example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println(">>> Ignite grid started."); + + IgniteCache dataCache = null; + Path jsonMdlPath = null; + try { + dataCache = new SandboxMLCache(ignite).fillCacheWith(MLSandboxDatasets.MORTALITY_DATA); + + System.out.println("\n>>> Create new linear regression trainer object."); + LinearRegressionLSQRTrainer trainer = new LinearRegressionLSQRTrainer(); + + System.out.println("\n>>> Perform the training to get the model."); + + LinearRegressionModel mdl = trainer.fit( + ignite, + dataCache, + new DummyVectorizer().labeled(Vectorizer.LabelCoordinate.FIRST) + ); + + System.out.println("\n>>> Exported LinearRegression model: " + mdl); + + double rmse = Evaluator.evaluate( + dataCache, + mdl, + new DummyVectorizer().labeled(Vectorizer.LabelCoordinate.FIRST), + MetricName.RMSE + ); + + System.out.println("\n>>> RMSE for exported LinearRegression model: " + rmse); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + LinearRegressionModel modelImportedFromJSON = LinearRegressionModel.fromJSON(jsonMdlPath); + + System.out.println("\n>>> Imported LinearRegression model: " + modelImportedFromJSON); + + rmse = Evaluator.evaluate( + dataCache, + mdl, + new DummyVectorizer().labeled(Vectorizer.LabelCoordinate.FIRST), + MetricName.RMSE + ); + + System.out.println("\n>>> RMSE for imported LinearRegression model: " + rmse); + + System.out.println("\n>>> Linear regression model over cache based dataset usage example completed."); + } + finally { + if (dataCache != null) + dataCache.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { 
+ System.out.flush(); + } + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/LogisticRegressionExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/LogisticRegressionExportImportExample.java new file mode 100644 index 0000000000000..6491f7edd5a3f --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/LogisticRegressionExportImportExample.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.examples.ml.util.MLSandboxDatasets; +import org.apache.ignite.examples.ml.util.SandboxMLCache; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DummyVectorizer; +import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.nn.UpdatesStrategy; +import org.apache.ignite.ml.optimization.updatecalculators.SimpleGDParameterUpdate; +import org.apache.ignite.ml.optimization.updatecalculators.SimpleGDUpdateCalculator; +import org.apache.ignite.ml.regressions.logistic.LogisticRegressionModel; +import org.apache.ignite.ml.regressions.logistic.LogisticRegressionSGDTrainer; +import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; +import org.apache.ignite.ml.selection.scoring.metric.MetricName; + +/** + * Run logistic regression model based on + * stochastic gradient descent algorithm ({@link LogisticRegressionSGDTrainer}) over distributed cache. + *

    + * Code in this example launches Ignite grid and fills the cache with test data points (based on the + * Iris dataset).

    + *

    + * After that it trains the logistic regression model based on the specified data.

    + *

    + * Finally, this example loops over the test set of data points, applies the trained model to predict the target value, + * compares prediction to expected outcome (ground truth), and builds + * confusion matrix.

    + *

    + * You can change the test data used in this example and re-run it to explore this algorithm further.

    + */ +public class LogisticRegressionExportImportExample { + /** + * Run example. + */ + public static void main(String[] args) throws IOException { + System.out.println(); + System.out.println(">>> Logistic regression model over partitioned dataset usage example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println("\n>>> Ignite grid started."); + + IgniteCache dataCache = null; + Path jsonMdlPath = null; + try { + dataCache = new SandboxMLCache(ignite).fillCacheWith(MLSandboxDatasets.TWO_CLASSED_IRIS); + + System.out.println("\n>>> Create new logistic regression trainer object."); + LogisticRegressionSGDTrainer trainer = new LogisticRegressionSGDTrainer() + .withUpdatesStgy(new UpdatesStrategy<>( + new SimpleGDUpdateCalculator(0.2), + SimpleGDParameterUpdate.SUM_LOCAL, + SimpleGDParameterUpdate.AVG + )) + .withMaxIterations(100000) + .withLocIterations(100) + .withBatchSize(10) + .withSeed(123L); + + System.out.println("\n>>> Perform the training to get the model."); + Vectorizer vectorizer = new DummyVectorizer() + .labeled(Vectorizer.LabelCoordinate.FIRST); + + LogisticRegressionModel mdl = trainer.fit(ignite, dataCache, vectorizer); + + System.out.println("\n>>> Exported logistic regression model: " + mdl); + + double accuracy = Evaluator.evaluate(dataCache, + mdl, vectorizer, MetricName.ACCURACY + ); + + System.out.println("\n>>> Accuracy for exported logistic regression model " + accuracy); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + LogisticRegressionModel modelImportedFromJSON = LogisticRegressionModel.fromJSON(jsonMdlPath); + + System.out.println("\n>>> Imported logistic regression model: " + modelImportedFromJSON); + + accuracy = Evaluator.evaluate(dataCache, + modelImportedFromJSON, vectorizer, MetricName.ACCURACY + ); + + System.out.println("\n>>> Accuracy for imported logistic regression model " + accuracy); + + 
System.out.println("\n>>> Logistic regression model over partitioned dataset usage example completed."); + } + finally { + if (dataCache != null) + dataCache.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/RandomForestClassificationExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/RandomForestClassificationExportImportExample.java new file mode 100644 index 0000000000000..6bb368f56f6b2 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/RandomForestClassificationExportImportExample.java @@ -0,0 +1,144 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import javax.cache.Cache; +import org.apache.commons.math3.util.Precision; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.query.QueryCursor; +import org.apache.ignite.cache.query.ScanQuery; +import org.apache.ignite.examples.ml.util.MLSandboxDatasets; +import org.apache.ignite.examples.ml.util.SandboxMLCache; +import org.apache.ignite.ml.dataset.feature.FeatureMeta; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DummyVectorizer; +import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.tree.randomforest.RandomForestClassifierTrainer; +import org.apache.ignite.ml.tree.randomforest.RandomForestModel; +import org.apache.ignite.ml.tree.randomforest.data.FeaturesCountSelectionStrategies; + +/** + * Example represents a solution for the task of wine classification based on a + * Random Forest implementation for + * multi-classification. + *

    + * Code in this example launches Ignite grid and fills the cache with test data points (based on the + * Wine recognition dataset).

    + *

    + * After that it initializes the {@link RandomForestClassifierTrainer} with thread pool for multi-thread learning and + * trains the model based on the specified data using random forest regression algorithm.

    + *

    + * Finally, this example loops over the test set of data points, compares prediction of the trained model to the + * expected outcome (ground truth), and evaluates accuracy of the model.

    + *

    + * You can change the test data used in this example and re-run it to explore this algorithm further.

    + */ +public class RandomForestClassificationExportImportExample { + /** + * Run example. + */ + public static void main(String[] args) throws IOException { + System.out.println(); + System.out.println(">>> Random Forest multi-class classification algorithm over cached dataset usage example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println("\n>>> Ignite grid started."); + + IgniteCache dataCache = null; + Path jsonMdlPath = null; + try { + dataCache = new SandboxMLCache(ignite).fillCacheWith(MLSandboxDatasets.WINE_RECOGNITION); + + AtomicInteger idx = new AtomicInteger(0); + RandomForestClassifierTrainer classifier = new RandomForestClassifierTrainer( + IntStream.range(0, dataCache.get(1).size() - 1).mapToObj( + x -> new FeatureMeta("", idx.getAndIncrement(), false)).collect(Collectors.toList()) + ).withAmountOfTrees(101) + .withFeaturesCountSelectionStrgy(FeaturesCountSelectionStrategies.ONE_THIRD) + .withMaxDepth(4) + .withMinImpurityDelta(0.) 
+ .withSubSampleSize(0.3) + .withSeed(0); + + System.out.println(">>> Configured trainer: " + classifier.getClass().getSimpleName()); + + Vectorizer vectorizer = new DummyVectorizer() + .labeled(Vectorizer.LabelCoordinate.FIRST); + RandomForestModel mdl = classifier.fit(ignite, dataCache, vectorizer); + + System.out.println(">>> Exported Random Forest classification model: " + mdl.toString(true)); + + double accuracy = evaluateModel(dataCache, mdl); + + System.out.println("\n>>> Accuracy for exported Random Forest classification model " + accuracy); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + RandomForestModel modelImportedFromJSON = RandomForestModel.fromJSON(jsonMdlPath); + + System.out.println("\n>>> Imported Random Forest classification model: " + modelImportedFromJSON); + + accuracy = evaluateModel(dataCache, mdl); + + System.out.println("\n>>> Accuracy for imported Random Forest classification model " + accuracy); + + System.out.println("\n>>> Random Forest multi-class classification algorithm over cached dataset usage example completed."); + + } + finally { + if (dataCache != null) + dataCache.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } + + private static double evaluateModel(IgniteCache dataCache, RandomForestModel randomForestMdl) { + int amountOfErrors = 0; + int totalAmount = 0; + + try (QueryCursor> observations = dataCache.query(new ScanQuery<>())) { + for (Cache.Entry observation : observations) { + Vector val = observation.getValue(); + Vector inputs = val.copyOfRange(1, val.size()); + double groundTruth = val.get(0); + + double prediction = randomForestMdl.predict(inputs); + + totalAmount++; + if (!Precision.equals(groundTruth, prediction, Precision.EPSILON)) + amountOfErrors++; + } + } + + return 1 - amountOfErrors / (double) totalAmount; + } +} diff --git 
a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/RandomForestRegressionExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/RandomForestRegressionExportImportExample.java new file mode 100644 index 0000000000000..4d7d4ad738fb8 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/RandomForestRegressionExportImportExample.java @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import javax.cache.Cache; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.query.QueryCursor; +import org.apache.ignite.cache.query.ScanQuery; +import org.apache.ignite.examples.ml.util.MLSandboxDatasets; +import org.apache.ignite.examples.ml.util.SandboxMLCache; +import org.apache.ignite.ml.dataset.feature.FeatureMeta; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DummyVectorizer; +import org.apache.ignite.ml.environment.LearningEnvironmentBuilder; +import org.apache.ignite.ml.environment.logging.ConsoleLogger; +import org.apache.ignite.ml.environment.parallelism.ParallelismStrategy; +import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.tree.randomforest.RandomForestModel; +import org.apache.ignite.ml.tree.randomforest.RandomForestRegressionTrainer; +import org.apache.ignite.ml.tree.randomforest.data.FeaturesCountSelectionStrategies; + +/** + * Example represents a solution for the task of price predictions for houses in Boston based on a + * Random Forest implementation for regression. + *

    + * Code in this example launches Ignite grid and fills the cache with test data points (based on the + * Boston Housing dataset).

    + *

    + * After that it initializes the {@link RandomForestRegressionTrainer} and trains the model based on the specified data + * using random forest regression algorithm.

    + *

    + * Finally, this example loops over the test set of data points, compares prediction of the trained model to the + * expected outcome (ground truth), and evaluates model quality in terms of Mean Squared Error (MSE) and Mean Absolute + * Error (MAE).

    + *

    + * You can change the test data used in this example and re-run it to explore this algorithm further.

    + */ +public class RandomForestRegressionExportImportExample { + /** + * Run example. + */ + public static void main(String[] args) throws IOException { + System.out.println(); + System.out.println(">>> Random Forest regression algorithm over cached dataset usage example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println("\n>>> Ignite grid started."); + + IgniteCache dataCache = null; + Path jsonMdlPath = null; + try { + dataCache = new SandboxMLCache(ignite).fillCacheWith(MLSandboxDatasets.BOSTON_HOUSE_PRICES); + + AtomicInteger idx = new AtomicInteger(0); + RandomForestRegressionTrainer trainer = new RandomForestRegressionTrainer( + IntStream.range(0, dataCache.get(1).size() - 1).mapToObj( + x -> new FeatureMeta("", idx.getAndIncrement(), false)).collect(Collectors.toList()) + ).withAmountOfTrees(101) + .withFeaturesCountSelectionStrgy(FeaturesCountSelectionStrategies.ONE_THIRD) + .withMaxDepth(4) + .withMinImpurityDelta(0.) 
+ .withSubSampleSize(0.3) + .withSeed(0); + + trainer.withEnvironmentBuilder(LearningEnvironmentBuilder.defaultBuilder() + .withParallelismStrategyTypeDependency(ParallelismStrategy.ON_DEFAULT_POOL) + .withLoggingFactoryDependency(ConsoleLogger.Factory.LOW) + ); + + System.out.println("\n>>> Configured trainer: " + trainer.getClass().getSimpleName()); + + Vectorizer vectorizer = new DummyVectorizer() + .labeled(Vectorizer.LabelCoordinate.FIRST); + RandomForestModel mdl = trainer.fit(ignite, dataCache, vectorizer); + + System.out.println("\n>>> Exported Random Forest regression model: " + mdl.toString(true)); + + double mae = evaluateModel(dataCache, mdl); + + System.out.println("\n>>> Mean absolute error (MAE) for exported Random Forest regression model " + mae); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + RandomForestModel modelImportedFromJSON = RandomForestModel.fromJSON(jsonMdlPath); + + System.out.println("\n>>> Exported Random Forest regression model: " + modelImportedFromJSON.toString(true)); + + mae = evaluateModel(dataCache, modelImportedFromJSON); + + System.out.println("\n>>> Mean absolute error (MAE) for exported Random Forest regression model " + mae); + + System.out.println("\n>>> Random Forest regression algorithm over cached dataset usage example completed."); + } + finally { + if (dataCache != null) + dataCache.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } + + private static double evaluateModel(IgniteCache dataCache, RandomForestModel randomForestMdl) { + double mae = 0.0; + int totalAmount = 0; + + try (QueryCursor> observations = dataCache.query(new ScanQuery<>())) { + for (Cache.Entry observation : observations) { + Vector val = observation.getValue(); + Vector inputs = val.copyOfRange(1, val.size()); + double groundTruth = val.get(0); + + double prediction = randomForestMdl.predict(inputs); + + mae += Math.abs(prediction - 
groundTruth); + + totalAmount++; + } + + mae /= totalAmount; + } + return mae; + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/SVMExportImportExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/SVMExportImportExample.java new file mode 100644 index 0000000000000..24262901b8895 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/exchange/SVMExportImportExample.java @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.inference.exchange; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.examples.ml.util.MLSandboxDatasets; +import org.apache.ignite.examples.ml.util.SandboxMLCache; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DummyVectorizer; +import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; +import org.apache.ignite.ml.selection.scoring.metric.MetricName; +import org.apache.ignite.ml.svm.SVMLinearClassificationModel; +import org.apache.ignite.ml.svm.SVMLinearClassificationTrainer; + +/** + * Run SVM binary-class classification model ({@link SVMLinearClassificationModel}) over distributed dataset. + *

    + * Code in this example launches Ignite grid and fills the cache with test data points (based on the + * Iris dataset).

    + *

    + * After that it trains the model based on the specified data using the SVM algorithm.

    + *

    + * Finally, this example loops over the test set of data points, applies the trained model to predict which class + * this point belongs to, compares prediction to expected outcome (ground truth), and builds the + * confusion matrix.

    + *

    + * You can change the test data used in this example and re-run it to explore this algorithm further.

    + */ +public class SVMExportImportExample { + /** + * Run example. + */ + public static void main(String[] args) throws IOException { + System.out.println(); + System.out.println(">>> SVM Binary classification model over cached dataset usage example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println("\n>>> Ignite grid started."); + + IgniteCache dataCache = null; + Path jsonMdlPath = null; + try { + dataCache = new SandboxMLCache(ignite).fillCacheWith(MLSandboxDatasets.TWO_CLASSED_IRIS); + + SVMLinearClassificationTrainer trainer = new SVMLinearClassificationTrainer(); + + Vectorizer vectorizer = new DummyVectorizer() + .labeled(Vectorizer.LabelCoordinate.FIRST); + + SVMLinearClassificationModel mdl = trainer.fit(ignite, dataCache, vectorizer); + + System.out.println("\n>>> Exported SVM model: " + mdl); + + double accuracy = Evaluator.evaluate( + dataCache, + mdl, + vectorizer, + MetricName.ACCURACY + ); + + System.out.println("\n>>> Accuracy for exported SVM model: " + accuracy); + + jsonMdlPath = Files.createTempFile(null, null); + mdl.toJSON(jsonMdlPath); + + SVMLinearClassificationModel modelImportedFromJSON = SVMLinearClassificationModel.fromJSON(jsonMdlPath); + + System.out.println("\n>>> Imported SVM model: " + modelImportedFromJSON); + + accuracy = Evaluator.evaluate( + dataCache, + modelImportedFromJSON, + vectorizer, + MetricName.ACCURACY + ); + + System.out.println("\n>>> Accuracy for imported SVM model: " + accuracy); + + System.out.println("\n>>> SVM Binary classification model over cache based dataset usage example completed."); + } + finally { + if (dataCache != null) + dataCache.destroy(); + if (jsonMdlPath != null) + Files.deleteIfExists(jsonMdlPath); + } + } + finally { + System.out.flush(); + } + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/DecisionTreeFromSparkExample.java 
b/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/DecisionTreeFromSparkExample.java index 3340ed9d33e0a..d03bb966f6a7d 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/DecisionTreeFromSparkExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/DecisionTreeFromSparkExample.java @@ -34,7 +34,7 @@ import org.apache.ignite.ml.selection.scoring.metric.classification.Accuracy; import org.apache.ignite.ml.sparkmodelparser.SparkModelParser; import org.apache.ignite.ml.sparkmodelparser.SupportedSparkModels; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Run Decision Tree model loaded from snappy.parquet file. The snappy.parquet file was generated by Spark MLLib @@ -69,7 +69,7 @@ public static void main(String[] args) throws FileNotFoundException { final Vectorizer vectorizer = new DummyVectorizer(0, 5, 6, 4).labeled(1); - DecisionTreeNode mdl = (DecisionTreeNode)SparkModelParser.parse( + DecisionTreeModel mdl = (DecisionTreeModel)SparkModelParser.parse( SPARK_MDL_PATH, SupportedSparkModels.DECISION_TREE, env diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/DecisionTreeRegressionFromSparkExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/DecisionTreeRegressionFromSparkExample.java index 9c36198b2cf38..5fd446140f38a 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/DecisionTreeRegressionFromSparkExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/DecisionTreeRegressionFromSparkExample.java @@ -35,7 +35,7 @@ import org.apache.ignite.ml.sparkmodelparser.SparkModelParser; import org.apache.ignite.ml.sparkmodelparser.SupportedSparkModels; import org.apache.ignite.ml.structures.LabeledVector; -import 
org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Run Decision tree regression model loaded from snappy.parquet file. The snappy.parquet file was generated by Spark @@ -69,7 +69,7 @@ public static void main(String[] args) throws FileNotFoundException { final Vectorizer vectorizer = new DummyVectorizer(0, 1, 5, 6).labeled(4); - DecisionTreeNode mdl = (DecisionTreeNode)SparkModelParser.parse( + DecisionTreeModel mdl = (DecisionTreeModel)SparkModelParser.parse( SPARK_MDL_PATH, SupportedSparkModels.DECISION_TREE_REGRESSION, env diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/EncoderExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/EncoderExample.java index c24091c253b39..233cb13b9135b 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/EncoderExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/EncoderExample.java @@ -31,7 +31,7 @@ import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; import org.apache.ignite.ml.selection.scoring.metric.classification.Accuracy; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Example that shows how to use String Encoder preprocessor to encode features presented as a strings. @@ -73,7 +73,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(5, 0); // Train decision tree model. 
- DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( ignite, dataCache, encoderPreprocessor diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/EncoderExampleWithNormalization.java b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/EncoderExampleWithNormalization.java index d9482a5123477..7270b03e4016f 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/EncoderExampleWithNormalization.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/EncoderExampleWithNormalization.java @@ -32,7 +32,7 @@ import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; import org.apache.ignite.ml.selection.scoring.metric.classification.Accuracy; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Example that shows how to combine together two preprocessors: String Encoder preprocessor to encode features presented as a strings @@ -80,7 +80,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(5, 0); // Train decision tree model. 
- DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( ignite, dataCache, normalizer diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/LabelEncoderExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/LabelEncoderExample.java index d97c49c78411a..3547d7e20106c 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/LabelEncoderExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/LabelEncoderExample.java @@ -31,7 +31,7 @@ import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; import org.apache.ignite.ml.selection.scoring.metric.classification.Accuracy; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Example that shows how to use Label Encoder preprocessor to encode labels presented as a strings. @@ -79,7 +79,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(5, 0); // Train decision tree model. 
- DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( ignite, dataCache, lbEncoderPreprocessor diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/BostonHousePricesPredictionExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/BostonHousePricesPredictionExample.java index 511eb0501c181..c572d81038741 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/BostonHousePricesPredictionExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/BostonHousePricesPredictionExample.java @@ -105,7 +105,7 @@ public static void main(String[] args) throws IOException { private static String toString(LinearRegressionModel mdl) { BiFunction formatter = (idx, val) -> String.format("%.2f*f%d", val, idx); - Vector weights = mdl.getWeights(); + Vector weights = mdl.weights(); StringBuilder sb = new StringBuilder(formatter.apply(0, weights.get(0))); for (int fid = 1; fid < weights.size(); fid++) { @@ -114,7 +114,7 @@ private static String toString(LinearRegressionModel mdl) { .append(formatter.apply(fid, Math.abs(w))); } - double intercept = mdl.getIntercept(); + double intercept = mdl.intercept(); sb.append(" ").append(intercept > 0 ? 
"+" : "-").append(" ") .append(String.format("%.2f", Math.abs(intercept))); return sb.toString(); diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/selection/cv/CrossValidationExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/selection/cv/CrossValidationExample.java index e6a4461ca64e1..93dc0513ebf80 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/selection/cv/CrossValidationExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/selection/cv/CrossValidationExample.java @@ -30,7 +30,7 @@ import org.apache.ignite.ml.selection.scoring.metric.MetricName; import org.apache.ignite.ml.structures.LabeledVector; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Run decision tree classification with @@ -75,7 +75,7 @@ public static void main(String... args) { LabeledDummyVectorizer vectorizer = new LabeledDummyVectorizer<>(); - CrossValidation> scoreCalculator + CrossValidation> scoreCalculator = new CrossValidation<>(); double[] accuracyScores = scoreCalculator diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/sql/DecisionTreeClassificationTrainerSQLInferenceExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/sql/DecisionTreeClassificationTrainerSQLInferenceExample.java index 543e211f06b3f..68058b75b9eb3 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/sql/DecisionTreeClassificationTrainerSQLInferenceExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/sql/DecisionTreeClassificationTrainerSQLInferenceExample.java @@ -30,7 +30,7 @@ import org.apache.ignite.ml.sql.SQLFunctions; import org.apache.ignite.ml.sql.SqlDatasetBuilder; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; import static 
org.apache.ignite.examples.ml.sql.DecisionTreeClassificationTrainerSQLTableExample.loadTitanicDatasets; @@ -101,7 +101,7 @@ public static void main(String[] args) throws IOException { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(4, 0); System.out.println(">>> Perform training..."); - DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( new SqlDatasetBuilder(ignite, "SQL_PUBLIC_TITANIC_TRAIN"), new BinaryObjectVectorizer<>("pclass", "age", "sibsp", "parch", "fare") .withFeature("sex", BinaryObjectVectorizer.Mapping.create().map("male", 1.0).defaultValue(0.0)) diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/sql/DecisionTreeClassificationTrainerSQLTableExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/sql/DecisionTreeClassificationTrainerSQLTableExample.java index 083608ee23e67..d05d1a91d814f 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/sql/DecisionTreeClassificationTrainerSQLTableExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/sql/DecisionTreeClassificationTrainerSQLTableExample.java @@ -34,7 +34,7 @@ import org.apache.ignite.ml.math.primitives.vector.VectorUtils; import org.apache.ignite.ml.sql.SqlDatasetBuilder; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Example of using distributed {@link DecisionTreeClassificationTrainer} on a data stored in SQL table. 
@@ -101,7 +101,7 @@ public static void main(String[] args) throws IgniteCheckedException, IOExceptio DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(4, 0); System.out.println(">>> Perform training..."); - DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( new SqlDatasetBuilder(ignite, "SQL_PUBLIC_TITANIC_TRAIN"), new BinaryObjectVectorizer<>("pclass", "age", "sibsp", "parch", "fare") .withFeature("sex", BinaryObjectVectorizer.Mapping.create().map("male", 1.0).defaultValue(0.0)) diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeClassificationTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeClassificationTrainerExample.java index 600f4a595e0a4..b1cf23e2de2dd 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeClassificationTrainerExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeClassificationTrainerExample.java @@ -28,7 +28,7 @@ import org.apache.ignite.ml.math.primitives.vector.VectorUtils; import org.apache.ignite.ml.structures.LabeledVector; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Example of using distributed {@link DecisionTreeClassificationTrainer}. @@ -75,7 +75,7 @@ public static void main(String... args) { // Train decision tree model. 
LabeledDummyVectorizer vectorizer = new LabeledDummyVectorizer<>(); - DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( ignite, trainingSet, vectorizer diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeRegressionTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeRegressionTrainerExample.java index 1a1977124879b..5cfb828c8bd87 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeRegressionTrainerExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeRegressionTrainerExample.java @@ -25,7 +25,7 @@ import org.apache.ignite.ml.dataset.feature.extractor.impl.LabeledDummyVectorizer; import org.apache.ignite.ml.math.primitives.vector.VectorUtils; import org.apache.ignite.ml.structures.LabeledVector; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; import org.apache.ignite.ml.tree.DecisionTreeRegressionTrainer; /** @@ -70,7 +70,7 @@ public static void main(String... args) { DecisionTreeRegressionTrainer trainer = new DecisionTreeRegressionTrainer(10, 0); // Train decision tree model. 
- DecisionTreeNode mdl = trainer.fit(ignite, trainingSet, new LabeledDummyVectorizer<>()); + DecisionTreeModel mdl = trainer.fit(ignite, trainingSet, new LabeledDummyVectorizer<>()); System.out.println(">>> Decision tree regression model: " + mdl); diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesClassificationTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesClassificationTrainerExample.java index a2eaf47d46636..7e6c5d3f723d6 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesClassificationTrainerExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesClassificationTrainerExample.java @@ -22,12 +22,12 @@ import org.apache.ignite.Ignition; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; -import org.apache.ignite.ml.composition.ModelsComposition; +import org.apache.ignite.ml.composition.boosting.GDBModel; +import org.apache.ignite.ml.composition.boosting.GDBTrainer; import org.apache.ignite.ml.composition.boosting.convergence.mean.MeanAbsValueConvergenceCheckerFactory; import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; import org.apache.ignite.ml.dataset.feature.extractor.impl.DoubleArrayVectorizer; import org.apache.ignite.ml.math.primitives.vector.VectorUtils; -import org.apache.ignite.ml.trainers.DatasetTrainer; import org.apache.ignite.ml.tree.boosting.GDBBinaryClassifierOnTreesTrainer; import org.jetbrains.annotations.NotNull; @@ -58,11 +58,11 @@ public static void main(String... args) { trainingSet = fillTrainingData(ignite, trainingSetCfg); // Create classification trainer. - DatasetTrainer trainer = new GDBBinaryClassifierOnTreesTrainer(1.0, 300, 2, 0.) + GDBTrainer trainer = new GDBBinaryClassifierOnTreesTrainer(1.0, 300, 2, 0.) 
.withCheckConvergenceStgyFactory(new MeanAbsValueConvergenceCheckerFactory(0.1)); // Train decision tree model. - ModelsComposition mdl = trainer.fit( + GDBModel mdl = trainer.fit( ignite, trainingSet, new DoubleArrayVectorizer().labeled(Vectorizer.LabelCoordinate.LAST) diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesRegressionTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesRegressionTrainerExample.java index 09dd708b0bb89..a6ea135aa8b5d 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesRegressionTrainerExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesRegressionTrainerExample.java @@ -22,14 +22,12 @@ import org.apache.ignite.Ignition; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; -import org.apache.ignite.ml.composition.ModelsComposition; +import org.apache.ignite.ml.composition.boosting.GDBModel; +import org.apache.ignite.ml.composition.boosting.GDBTrainer; import org.apache.ignite.ml.composition.boosting.convergence.mean.MeanAbsValueConvergenceCheckerFactory; import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; import org.apache.ignite.ml.dataset.feature.extractor.impl.DoubleArrayVectorizer; -import org.apache.ignite.ml.inference.Model; -import org.apache.ignite.ml.math.primitives.vector.Vector; import org.apache.ignite.ml.math.primitives.vector.VectorUtils; -import org.apache.ignite.ml.trainers.DatasetTrainer; import org.apache.ignite.ml.tree.boosting.GDBRegressionOnTreesTrainer; import org.jetbrains.annotations.NotNull; @@ -60,11 +58,11 @@ public static void main(String... args) { trainingSet = fillTrainingData(ignite, trainingSetCfg); // Create regression trainer. - DatasetTrainer trainer = new GDBRegressionOnTreesTrainer(1.0, 2000, 1, 0.) 
+ GDBTrainer trainer = new GDBRegressionOnTreesTrainer(1.0, 2000, 1, 0.) .withCheckConvergenceStgyFactory(new MeanAbsValueConvergenceCheckerFactory(0.001)); // Train decision tree model. - Model mdl = trainer.fit( + GDBModel mdl = trainer.fit( ignite, trainingSet, new DoubleArrayVectorizer().labeled(Vectorizer.LabelCoordinate.LAST) diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_11_Boosting.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_11_Boosting.java index b9006f536505f..b8e1d00f690df 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_11_Boosting.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_11_Boosting.java @@ -21,7 +21,8 @@ import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.Ignition; -import org.apache.ignite.ml.composition.ModelsComposition; +import org.apache.ignite.ml.composition.boosting.GDBModel; +import org.apache.ignite.ml.composition.boosting.GDBTrainer; import org.apache.ignite.ml.composition.boosting.convergence.median.MedianOfMedianConvergenceCheckerFactory; import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; import org.apache.ignite.ml.dataset.feature.extractor.impl.DummyVectorizer; @@ -36,7 +37,6 @@ import org.apache.ignite.ml.selection.scoring.metric.MetricName; import org.apache.ignite.ml.selection.split.TrainTestDatasetSplitter; import org.apache.ignite.ml.selection.split.TrainTestSplit; -import org.apache.ignite.ml.trainers.DatasetTrainer; import org.apache.ignite.ml.tree.boosting.GDBBinaryClassifierOnTreesTrainer; /** @@ -102,11 +102,11 @@ public static void main(String[] args) { ); // Create classification trainer. - DatasetTrainer trainer = new GDBBinaryClassifierOnTreesTrainer(0.5, 500, 4, 0.) + GDBTrainer trainer = new GDBBinaryClassifierOnTreesTrainer(0.5, 500, 4, 0.) 
.withCheckConvergenceStgyFactory(new MedianOfMedianConvergenceCheckerFactory(0.1)); // Train decision tree model. - ModelsComposition mdl = trainer.fit( + GDBModel mdl = trainer.fit( ignite, dataCache, split.getTrainFilter(), diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_1_Read_and_Learn.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_1_Read_and_Learn.java index b6df5d68a637a..97ccb5835a721 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_1_Read_and_Learn.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_1_Read_and_Learn.java @@ -27,7 +27,7 @@ import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; import org.apache.ignite.ml.selection.scoring.metric.classification.Accuracy; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Usage of {@link DecisionTreeClassificationTrainer} to predict death in the disaster. 
@@ -56,7 +56,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(5, 0); - DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( ignite, dataCache, vectorizer diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_2_Imputing.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_2_Imputing.java index 094a966e35086..a020dbea669f3 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_2_Imputing.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_2_Imputing.java @@ -29,7 +29,7 @@ import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; import org.apache.ignite.ml.selection.scoring.metric.classification.Accuracy; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Usage of {@link ImputerTrainer} to fill missed data ({@code Double.NaN}) values in the chosen columns. @@ -66,7 +66,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(5, 0); // Train decision tree model. 
- DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( ignite, dataCache, vectorizer diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial.java index 68b05a46d8fd5..c97ee387310e2 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial.java @@ -31,7 +31,7 @@ import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; import org.apache.ignite.ml.selection.scoring.metric.classification.Accuracy; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Let's add two categorial features "sex", "embarked" to predict more precisely than in {@link Step_1_Read_and_Learn}. @@ -80,7 +80,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(5, 0); // Train decision tree model. 
- DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( ignite, dataCache, imputingPreprocessor diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial_with_One_Hot_Encoder.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial_with_One_Hot_Encoder.java index 206d2dcaa06b5..1355979d228fa 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial_with_One_Hot_Encoder.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial_with_One_Hot_Encoder.java @@ -31,7 +31,7 @@ import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; import org.apache.ignite.ml.selection.scoring.metric.classification.Accuracy; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Let's add two categorial features "sex", "embarked" to predict more precisely than in {@link @@ -83,7 +83,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(5, 0); // Train decision tree model. 
- DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( ignite, dataCache, imputingPreprocessor diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_4_Add_age_fare.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_4_Add_age_fare.java index 1d85a14ac74c2..f4763a1f2b66b 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_4_Add_age_fare.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_4_Add_age_fare.java @@ -31,7 +31,7 @@ import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; import org.apache.ignite.ml.selection.scoring.metric.classification.Accuracy; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * Add yet two numerical features "age", "fare" to improve our model over {@link Step_3_Categorial}. @@ -79,7 +79,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(5, 0); // Train decision tree model. 
- DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( ignite, dataCache, imputingPreprocessor diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_5_Scaling.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_5_Scaling.java index dfb6de0c7d543..05d0137e05b2e 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_5_Scaling.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_5_Scaling.java @@ -33,7 +33,7 @@ import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; import org.apache.ignite.ml.selection.scoring.metric.classification.Accuracy; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * {@link MinMaxScalerTrainer} and {@link NormalizationTrainer} are used in this example due to different values @@ -97,7 +97,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(5, 0); // Train decision tree model. 
- DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( ignite, dataCache, normalizationPreprocessor diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_7_Split_train_test.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_7_Split_train_test.java index e104c510b348a..a60a8bac9812b 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_7_Split_train_test.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_7_Split_train_test.java @@ -35,7 +35,7 @@ import org.apache.ignite.ml.selection.split.TrainTestDatasetSplitter; import org.apache.ignite.ml.selection.split.TrainTestSplit; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * The highest accuracy in the previous example ({@link Step_6_KNN}) is the result of @@ -103,7 +103,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(5, 0); // Train decision tree model. 
- DecisionTreeNode mdl = trainer.fit( + DecisionTreeModel mdl = trainer.fit( ignite, dataCache, split.getTrainFilter(), diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV.java index 0da797d06b55f..20f4a7287b7bd 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV.java @@ -38,7 +38,7 @@ import org.apache.ignite.ml.selection.split.TrainTestDatasetSplitter; import org.apache.ignite.ml.selection.split.TrainTestSplit; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * To choose the best hyper-parameters the cross-validation will be used in this example. @@ -126,7 +126,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(maxDeep, 0); - CrossValidation scoreCalculator + CrossValidation scoreCalculator = new CrossValidation<>(); double[] scores = scoreCalculator @@ -167,7 +167,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(bestMaxDeep, 0); // Train decision tree model. 
- DecisionTreeNode bestMdl = trainer.fit( + DecisionTreeModel bestMdl = trainer.fit( ignite, dataCache, split.getTrainFilter(), diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV_with_Param_Grid.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV_with_Param_Grid.java index 5b6271414541e..963e1b7fed1b0 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV_with_Param_Grid.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV_with_Param_Grid.java @@ -40,7 +40,7 @@ import org.apache.ignite.ml.selection.split.TrainTestDatasetSplitter; import org.apache.ignite.ml.selection.split.TrainTestSplit; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * To choose the best hyper-parameters the cross-validation with {@link ParamGrid} will be used in this example. @@ -119,7 +119,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainerCV = new DecisionTreeClassificationTrainer(); - CrossValidation scoreCalculator + CrossValidation scoreCalculator = new CrossValidation<>(); ParamGrid paramGrid = new ParamGrid() @@ -156,7 +156,7 @@ public static void main(String[] args) { -> System.out.println("Score " + Arrays.toString(score) + " for hyper params " + hyperParams)); // Train decision tree model. 
- DecisionTreeNode bestMdl = trainer.fit( + DecisionTreeModel bestMdl = trainer.fit( ignite, dataCache, split.getTrainFilter(), diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV_with_Param_Grid_and_pipeline.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV_with_Param_Grid_and_pipeline.java index 6be849624382a..1aa2d576946cf 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV_with_Param_Grid_and_pipeline.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV_with_Param_Grid_and_pipeline.java @@ -36,7 +36,7 @@ import org.apache.ignite.ml.selection.split.TrainTestDatasetSplitter; import org.apache.ignite.ml.selection.split.TrainTestSplit; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * To choose the best hyper-parameters the cross-validation with {@link ParamGrid} will be used in this example. @@ -91,7 +91,7 @@ public static void main(String[] args) { // Tune hyper-parameters with K-fold Cross-Validation on the split training set. 
- CrossValidation scoreCalculator + CrossValidation scoreCalculator = new CrossValidation<>(); ParamGrid paramGrid = new ParamGrid() diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_13_RandomSearch.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_13_RandomSearch.java index d7e2f27aad900..c489fc962bba7 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_13_RandomSearch.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_13_RandomSearch.java @@ -42,7 +42,7 @@ import org.apache.ignite.ml.selection.split.TrainTestDatasetSplitter; import org.apache.ignite.ml.selection.split.TrainTestSplit; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * To choose the best hyper-parameters the cross-validation with {@link ParamGrid} will be used in this example. @@ -123,7 +123,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainerCV = new DecisionTreeClassificationTrainer(); - CrossValidation scoreCalculator + CrossValidation scoreCalculator = new CrossValidation<>(); ParamGrid paramGrid = new ParamGrid() @@ -166,7 +166,7 @@ public static void main(String[] args) { -> System.out.println("Score " + Arrays.toString(score) + " for hyper params " + hyperParams)); // Train decision tree model. 
- DecisionTreeNode bestMdl = trainer.fit( + DecisionTreeModel bestMdl = trainer.fit( ignite, dataCache, split.getTrainFilter(), diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_14_Parallel_Brute_Force_Search.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_14_Parallel_Brute_Force_Search.java index 017f123674494..b63bf9643be63 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_14_Parallel_Brute_Force_Search.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_14_Parallel_Brute_Force_Search.java @@ -45,7 +45,7 @@ import org.apache.ignite.ml.selection.split.TrainTestDatasetSplitter; import org.apache.ignite.ml.selection.split.TrainTestSplit; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * To choose the best hyper-parameters the cross-validation with {@link ParamGrid} will be used in this example. @@ -126,7 +126,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainerCV = new DecisionTreeClassificationTrainer(); - CrossValidation scoreCalculator + CrossValidation scoreCalculator = new CrossValidation<>(); ParamGrid paramGrid = new ParamGrid() @@ -168,7 +168,7 @@ public static void main(String[] args) { -> System.out.println("Score " + Arrays.toString(score) + " for hyper params " + hyperParams)); // Train decision tree model. 
- DecisionTreeNode bestMdl = trainer.fit( + DecisionTreeModel bestMdl = trainer.fit( ignite, dataCache, split.getTrainFilter(), diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_15_Parallel_Random_Search.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_15_Parallel_Random_Search.java index 3a3e9e8cdddfb..ac6c1eb3c988a 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_15_Parallel_Random_Search.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_15_Parallel_Random_Search.java @@ -45,7 +45,7 @@ import org.apache.ignite.ml.selection.split.TrainTestDatasetSplitter; import org.apache.ignite.ml.selection.split.TrainTestSplit; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * To choose the best hyper-parameters the cross-validation with {@link ParamGrid} will be used in this example. @@ -125,7 +125,7 @@ public static void main(String[] args) { // Tune hyper-parameters with K-fold Cross-Validation on the split training set. DecisionTreeClassificationTrainer trainerCV = new DecisionTreeClassificationTrainer(); - CrossValidation scoreCalculator + CrossValidation scoreCalculator = new CrossValidation<>(); ParamGrid paramGrid = new ParamGrid() @@ -171,7 +171,7 @@ public static void main(String[] args) { -> System.out.println("Score " + Arrays.toString(score) + " for hyper params " + hyperParams)); // Train decision tree model. 
- DecisionTreeNode bestMdl = trainer.fit( + DecisionTreeModel bestMdl = trainer.fit( ignite, dataCache, split.getTrainFilter(), diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_16_Genetic_Programming_Search.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_16_Genetic_Programming_Search.java index bee51e4b1e97f..408eb48289c21 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_16_Genetic_Programming_Search.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_16_Genetic_Programming_Search.java @@ -42,7 +42,7 @@ import org.apache.ignite.ml.selection.split.TrainTestDatasetSplitter; import org.apache.ignite.ml.selection.split.TrainTestSplit; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * To choose the best hyper-parameters the cross-validation with {@link ParamGrid} will be used in this example. @@ -123,7 +123,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainerCV = new DecisionTreeClassificationTrainer(); - CrossValidation scoreCalculator + CrossValidation scoreCalculator = new CrossValidation<>(); ParamGrid paramGrid = new ParamGrid() @@ -162,7 +162,7 @@ public static void main(String[] args) { -> System.out.println("Score " + Arrays.toString(score) + " for hyper params " + hyperParams)); // Train decision tree model. 
- DecisionTreeNode bestMdl = trainer.fit( + DecisionTreeModel bestMdl = trainer.fit( ignite, dataCache, split.getTrainFilter(), diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_17_Parallel_Genetic_Programming_Search.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_17_Parallel_Genetic_Programming_Search.java index 34a8158dec630..a9d39bd309219 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_17_Parallel_Genetic_Programming_Search.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/hyperparametertuning/Step_17_Parallel_Genetic_Programming_Search.java @@ -45,7 +45,7 @@ import org.apache.ignite.ml.selection.split.TrainTestDatasetSplitter; import org.apache.ignite.ml.selection.split.TrainTestSplit; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; /** * To choose the best hyper-parameters the cross-validation with {@link ParamGrid} will be used in this example. @@ -126,7 +126,7 @@ public static void main(String[] args) { DecisionTreeClassificationTrainer trainerCV = new DecisionTreeClassificationTrainer(); - CrossValidation scoreCalculator + CrossValidation scoreCalculator = new CrossValidation<>(); ParamGrid paramGrid = new ParamGrid() @@ -168,7 +168,7 @@ public static void main(String[] args) { -> System.out.println("Score " + Arrays.toString(score) + " for hyper params " + hyperParams)); // Train decision tree model. 
- DecisionTreeNode bestMdl = trainer.fit( + DecisionTreeModel bestMdl = trainer.fit( ignite, dataCache, split.getTrainFilter(), diff --git a/modules/ml/pom.xml b/modules/ml/pom.xml index 338d2542e8269..37d9c107a0b93 100644 --- a/modules/ml/pom.xml +++ b/modules/ml/pom.xml @@ -160,6 +160,31 @@ slf4j-api 1.7.7 + + javax.xml.bind + jaxb-api + 2.3.0 + + + com.sun.xml.bind + jaxb-core + 2.3.0 + + + com.sun.xml.bind + jaxb-impl + 2.3.0 + + + javax.activation + activation + 1.1.1 + + + com.fasterxml.jackson.core + jackson-databind + 2.10.3 + diff --git a/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SparkModelParser.java b/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SparkModelParser.java index 8d349a197fa3f..373da3af67faa 100644 --- a/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SparkModelParser.java +++ b/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SparkModelParser.java @@ -25,7 +25,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.NavigableMap; import java.util.Scanner; import java.util.TreeMap; import org.apache.hadoop.conf.Configuration; @@ -34,7 +33,7 @@ import org.apache.ignite.ml.IgniteModel; import org.apache.ignite.ml.clustering.kmeans.KMeansModel; import org.apache.ignite.ml.composition.ModelsComposition; -import org.apache.ignite.ml.composition.boosting.GDBTrainer; +import org.apache.ignite.ml.composition.boosting.GDBModel; import org.apache.ignite.ml.composition.predictionsaggregator.MeanValuePredictionsAggregator; import org.apache.ignite.ml.composition.predictionsaggregator.OnMajorityPredictionsAggregator; import org.apache.ignite.ml.composition.predictionsaggregator.WeightedPredictionsAggregator; @@ -49,9 +48,7 @@ import org.apache.ignite.ml.regressions.linear.LinearRegressionModel; import org.apache.ignite.ml.regressions.logistic.LogisticRegressionModel; import 
org.apache.ignite.ml.svm.SVMLinearClassificationModel; -import org.apache.ignite.ml.tree.DecisionTreeConditionalNode; -import org.apache.ignite.ml.tree.DecisionTreeLeafNode; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.NodeData; import org.apache.parquet.column.page.PageReadStore; import org.apache.parquet.example.data.Group; import org.apache.parquet.example.data.simple.SimpleGroup; @@ -66,6 +63,8 @@ import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; +import static org.apache.ignite.ml.tree.NodeData.buildDecisionTreeModel; + /** Parser of Spark models. */ public class SparkModelParser { /** @@ -497,7 +496,7 @@ private static Model loadGBTClassifierModel(String pathToMdl, String pathToMdlMe final List> models = new ArrayList<>(); nodesByTreeId.forEach((key, nodes) -> models.add(buildDecisionTreeModel(nodes))); - return new GDBTrainer.GDBModel(models, new WeightedPredictionsAggregator(treeWeights), lbMapper); + return new GDBModel(models, new WeightedPredictionsAggregator(treeWeights), lbMapper); } catch (IOException e) { String msg = "Error reading parquet file: " + e.getMessage(); @@ -604,42 +603,13 @@ private static Model loadDecisionTreeModel(String pathToMdl, LearningEnvironment return null; } - /** - * Builds the DT model by the given sorted map of nodes. - * - * @param nodes The sorted map of nodes. - */ - private static DecisionTreeNode buildDecisionTreeModel(Map nodes) { - DecisionTreeNode mdl = null; - if (!nodes.isEmpty()) { - NodeData rootNodeData = (NodeData)((NavigableMap)nodes).firstEntry().getValue(); - mdl = buildTree(nodes, rootNodeData); - return mdl; - } - return mdl; - } - - /** - * Build tree or sub-tree based on indices and nodes sorted map as a dictionary. - * - * @param nodes The sorted map of nodes. - * @param rootNodeData Root node data. - */ - @NotNull private static DecisionTreeNode buildTree(Map nodes, - NodeData rootNodeData) { - return rootNodeData.isLeafNode ? 
new DecisionTreeLeafNode(rootNodeData.prediction) : new DecisionTreeConditionalNode(rootNodeData.featureIdx, - rootNodeData.threshold, - buildTree(nodes, nodes.get(rootNodeData.rightChildId)), - buildTree(nodes, nodes.get(rootNodeData.leftChildId)), - null); - } /** * Form the node data according data in parquet row. * * @param g The given group presenting the node data from Spark DT model. */ - @NotNull private static SparkModelParser.NodeData extractNodeDataFromParquetRow(SimpleGroup g) { + @NotNull private static NodeData extractNodeDataFromParquetRow(SimpleGroup g) { NodeData nodeData = new NodeData(); nodeData.id = g.getInteger(0, 0); @@ -888,43 +858,4 @@ private static Vector readCoefficients(SimpleGroup g) { } return coefficients; } - - /** - * Presenting data from one parquet row filled with NodeData in Spark DT model. - */ - private static class NodeData { - /** Id. */ - int id; - - /** Prediction. */ - double prediction; - - /** Left child id. */ - int leftChildId; - - /** Right child id. */ - int rightChildId; - - /** Threshold. */ - double threshold; - - /** Feature index. */ - int featureIdx; - - /** Is leaf node. 
*/ - boolean isLeafNode; - - /** {@inheritDoc} */ - @Override public String toString() { - return "NodeData{" + - "id=" + id + - ", prediction=" + prediction + - ", leftChildId=" + leftChildId + - ", rightChildId=" + rightChildId + - ", threshold=" + threshold + - ", featureIdx=" + featureIdx + - ", isLeafNode=" + isLeafNode + - '}'; - } - } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/gmm/GmmModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/gmm/GmmModel.java index fda08b399b588..2546d0c70cbfe 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/gmm/GmmModel.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/gmm/GmmModel.java @@ -19,6 +19,7 @@ import java.util.Collections; import java.util.List; +import com.fasterxml.jackson.annotation.JsonIgnore; import org.apache.ignite.ml.IgniteModel; import org.apache.ignite.ml.environment.deploy.DeployableObject; import org.apache.ignite.ml.math.primitives.vector.Vector; @@ -47,12 +48,17 @@ public GmmModel(Vector componentProbs, List di super(componentProbs, distributions); } + /** */ + public GmmModel() { + } + /** {@inheritDoc} */ @Override public Double predict(Vector input) { return (double)likelihood(input).maxElement().index(); } /** {@inheritDoc} */ + @JsonIgnore @Override public List getDependencies() { return Collections.emptyList(); } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/ClusterizationModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/ClusterizationModel.java index 42b0823dc39cc..4fba73936f8af 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/ClusterizationModel.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/ClusterizationModel.java @@ -22,8 +22,8 @@ /** Base interface for all clusterization models. */ public interface ClusterizationModel extends IgniteModel { /** Gets the clusters count. 
*/ - public int getAmountOfClusters(); + public int amountOfClusters(); /** Get cluster centers. */ - public P[] getCenters(); + public P[] centers(); } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansModel.java index f1f677f63a861..de473c914fb70 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansModel.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansModel.java @@ -17,28 +17,41 @@ package org.apache.ignite.ml.clustering.kmeans; +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.UUID; import java.util.stream.Collectors; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializationFeature; import org.apache.ignite.ml.Exportable; import org.apache.ignite.ml.Exporter; import org.apache.ignite.ml.environment.deploy.DeployableObject; +import org.apache.ignite.ml.inference.json.JSONModel; +import org.apache.ignite.ml.inference.json.JSONWritable; import org.apache.ignite.ml.math.Tracer; import org.apache.ignite.ml.math.distances.DistanceMeasure; +import org.apache.ignite.ml.math.distances.EuclideanDistance; import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.math.primitives.vector.VectorUtils; +import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector; import org.apache.ignite.ml.util.ModelTrace; /** * This class encapsulates result of clusterization by KMeans algorithm. */ public final class KMeansModel implements ClusterizationModel, Exportable, - DeployableObject { + JSONWritable, DeployableObject { /** Centers of clusters. 
*/ - private final Vector[] centers; + private Vector[] centers; /** Distance measure. */ - private final DistanceMeasure distanceMeasure; + private DistanceMeasure distanceMeasure = new EuclideanDistance(); /** * Construct KMeans model with given centers and distanceMeasure measure. @@ -51,18 +64,45 @@ public KMeansModel(Vector[] centers, DistanceMeasure distanceMeasure) { this.distanceMeasure = distanceMeasure; } + /** {@inheritDoc} */ + private KMeansModel() { + + } + /** Distance measure. */ public DistanceMeasure distanceMeasure() { return distanceMeasure; } /** {@inheritDoc} */ - @Override public int getAmountOfClusters() { + @Override public int amountOfClusters() { return centers.length; } + /** + * Set up the centroids. + * + * @param centers The parameter value. + * @return Model with new centers parameter value. + */ + public KMeansModel withCentroids(Vector[] centers) { + this.centers = centers; + return this; + } + + /** + * Set up the distance measure. + * + * @param distanceMeasure The parameter value. + * @return Model with new distance measure parameter value. 
+ */ + public KMeansModel withDistanceMeasure(DistanceMeasure distanceMeasure) { + this.distanceMeasure = distanceMeasure; + return this; + } + /** {@inheritDoc} */ - @Override public Vector[] getCenters() { + @Override public Vector[] centers() { return Arrays.copyOf(centers, centers.length); } @@ -119,12 +159,11 @@ public DistanceMeasure distanceMeasure() { /** {@inheritDoc} */ @Override public String toString(boolean pretty) { - String measureName = distanceMeasure.getClass().getSimpleName(); List centersList = Arrays.stream(centers).map(x -> Tracer.asAscii(x, "%.4f", false)) .collect(Collectors.toList()); return ModelTrace.builder("KMeansModel", pretty) - .addField("distance measure", measureName) + .addField("distance measure", distanceMeasure.toString()) .addField("centroids", centersList) .toString(); } @@ -133,4 +172,76 @@ public DistanceMeasure distanceMeasure() { @Override public List getDependencies() { return Collections.singletonList(distanceMeasure); } + + /** Loads KMeansModel from JSON file. 
*/ + public static KMeansModel fromJSON(Path path) { + ObjectMapper mapper = new ObjectMapper().configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); + + KMeansJSONExportModel exportModel; + try { + exportModel = mapper + .readValue(new File(path.toAbsolutePath().toString()), KMeansJSONExportModel.class); + + return exportModel.convert(); + } catch (IOException e) { + e.printStackTrace(); + } + return null; + } + + // TODO: https://github.com/apache/spark/blob/master/mllib/src/main/scala/org/apache/spark/mllib/pmml/export/KMeansPMMLModelExport.scala + /** {@inheritDoc} */ + @Override public void toJSON(Path path) { + ObjectMapper mapper = new ObjectMapper(); + + try { + KMeansJSONExportModel exportModel = new KMeansJSONExportModel(System.currentTimeMillis(), "ann_" + UUID.randomUUID().toString(), KMeansModel.class.getSimpleName()); + List listOfCenters = new ArrayList<>(); + for (int i = 0; i < centers.length; i++) { + listOfCenters.add(centers[i].asArray()); + } + + exportModel.mdlCenters = listOfCenters; + exportModel.distanceMeasure = distanceMeasure; + + File file = new File(path.toAbsolutePath().toString()); + mapper.writeValue(file, exportModel); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** */ + public static class KMeansJSONExportModel extends JSONModel { + /** Centers of clusters. */ + public List mdlCenters; + + /** Distance measure. 
*/ + public DistanceMeasure distanceMeasure; + + /** */ + public KMeansJSONExportModel(Long timestamp, String uid, String modelClass) { + super(timestamp, uid, modelClass); + } + + /** */ + @JsonCreator + public KMeansJSONExportModel() { + } + + /** {@inheritDoc} */ + @Override public KMeansModel convert() { + KMeansModel mdl = new KMeansModel(); + Vector[] centers = new DenseVector[mdlCenters.size()]; + for (int i = 0; i < mdlCenters.size(); i++) { + centers[i] = VectorUtils.of(mdlCenters.get(i)); + } + + DistanceMeasure distanceMeasure = this.distanceMeasure; + + mdl.withCentroids(centers); + mdl.withDistanceMeasure(distanceMeasure); + return mdl; + } + } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansTrainer.java index caec370e63472..c36dd341c81f4 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansTrainer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansTrainer.java @@ -102,7 +102,7 @@ public class KMeansTrainer extends SingleLabelDatasetTrainer { return getLastTrainedModelOrThrowEmptyDatasetException(mdl); centers = Optional.ofNullable(mdl) - .map(KMeansModel::getCenters) + .map(KMeansModel::centers) .orElseGet(() -> initClusterCentersRandomly(dataset, k)); boolean converged = false; @@ -139,7 +139,7 @@ public class KMeansTrainer extends SingleLabelDatasetTrainer { /** {@inheritDoc} */ @Override public boolean isUpdateable(KMeansModel mdl) { - return mdl.getCenters().length == k && mdl.distanceMeasure().equals(distance); + return mdl.centers().length == k && mdl.distanceMeasure().equals(distance); } /** diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/ModelsComposition.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/ModelsComposition.java index 3942b9ee907d4..190203c1fbe44 100644 --- 
a/modules/ml/src/main/java/org/apache/ignite/ml/composition/ModelsComposition.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/ModelsComposition.java @@ -19,6 +19,8 @@ import java.util.Collections; import java.util.List; + +import com.fasterxml.jackson.annotation.JsonIgnore; import org.apache.ignite.ml.Exportable; import org.apache.ignite.ml.Exporter; import org.apache.ignite.ml.IgniteModel; @@ -30,17 +32,17 @@ /** * Model consisting of several models and prediction aggregation strategy. */ -public class ModelsComposition implements IgniteModel, Exportable, +public class ModelsComposition> implements IgniteModel, Exportable, DeployableObject { /** * Predictions aggregator. */ - private final PredictionsAggregator predictionsAggregator; + protected PredictionsAggregator predictionsAggregator; /** * Models. */ - private final List> models; + protected List models; /** * Constructs a new instance of composition of models. @@ -48,11 +50,14 @@ public class ModelsComposition implements IgniteModel, Exportabl * @param models Basic models. * @param predictionsAggregator Predictions aggregator. */ - public ModelsComposition(List> models, PredictionsAggregator predictionsAggregator) { + public ModelsComposition(List models, PredictionsAggregator predictionsAggregator) { this.predictionsAggregator = predictionsAggregator; this.models = Collections.unmodifiableList(models); } + public ModelsComposition() { + } + /** * Applies containing models to features and aggregate them to one prediction. * @@ -78,7 +83,7 @@ public PredictionsAggregator getPredictionsAggregator() { /** * Returns containing models. 
*/ - public List> getModels() { + public List getModels() { return models; } @@ -102,6 +107,7 @@ public List> getModels() { } /** {@inheritDoc} */ + @JsonIgnore @Override public List getDependencies() { return Collections.singletonList(predictionsAggregator); } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/ModelsCompositionFormat.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/ModelsCompositionFormat.java index ba71afa14cd25..c49638cc51e2d 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/composition/ModelsCompositionFormat.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/ModelsCompositionFormat.java @@ -33,7 +33,7 @@ public class ModelsCompositionFormat implements Serializable { private static final long serialVersionUID = 9115341364082681837L; /** Models. */ - private List> models; + private List> models; /** Predictions aggregator. */ private PredictionsAggregator predictionsAggregator; @@ -44,13 +44,13 @@ public class ModelsCompositionFormat implements Serializable { * @param models Models. * @param predictionsAggregator Predictions aggregator. 
*/ - public ModelsCompositionFormat(List> models,PredictionsAggregator predictionsAggregator) { + public ModelsCompositionFormat(List> models, PredictionsAggregator predictionsAggregator) { this.models = models; this.predictionsAggregator = predictionsAggregator; } /** */ - public List> models() { + public List> models() { return models; } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBLearningStrategy.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBLearningStrategy.java index 44137f7da934f..45b43181f258e 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBLearningStrategy.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBLearningStrategy.java @@ -103,7 +103,7 @@ public List> learnModels(DatasetBuilder * @param Type of a value in {@code upstream} data. * @return Updated models list. */ - public List> update(GDBTrainer.GDBModel mdlToUpdate, + public List> update(GDBModel mdlToUpdate, DatasetBuilder datasetBuilder, Preprocessor preprocessor) { if (trainerEnvironment == null) throw new IllegalStateException("Learning environment builder is not set."); @@ -148,7 +148,7 @@ public List> update(GDBTrainer.GDBModel mdlTo * @param mdlToUpdate Model to update. * @return List of already learned models. 
*/ - @NotNull protected List> initLearningState(GDBTrainer.GDBModel mdlToUpdate) { + @NotNull protected List> initLearningState(GDBModel mdlToUpdate) { List> models = new ArrayList<>(); if (mdlToUpdate != null) { models.addAll(mdlToUpdate.getModels()); diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBModel.java new file mode 100644 index 0000000000000..35cb70e77c54b --- /dev/null +++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBModel.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.ml.composition.boosting; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import java.util.List; +import java.util.UUID; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectWriter; +import com.fasterxml.jackson.databind.SerializationFeature; +import org.apache.ignite.ml.IgniteModel; +import org.apache.ignite.ml.composition.ModelsComposition; +import org.apache.ignite.ml.composition.predictionsaggregator.WeightedPredictionsAggregator; +import org.apache.ignite.ml.inference.json.JSONModel; +import org.apache.ignite.ml.inference.json.JSONModelMixIn; +import org.apache.ignite.ml.inference.json.JSONWritable; +import org.apache.ignite.ml.inference.json.JacksonHelper; +import org.apache.ignite.ml.math.functions.IgniteFunction; +import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.tree.DecisionTreeModel; + +/** + * GDB model. + */ +public final class GDBModel extends ModelsComposition implements JSONWritable { + /** Serial version uid. */ + private static final long serialVersionUID = 3476661240155508004L; + + /** Internal to external lbl mapping. */ + @JsonIgnore private IgniteFunction internalToExternalLblMapping; + + /** + * Creates an instance of GDBModel. + * + * @param models Models. + * @param predictionsAggregator Predictions aggregator. + * @param internalToExternalLblMapping Internal to external lbl mapping. 
+ */ + public GDBModel(List> models, + WeightedPredictionsAggregator predictionsAggregator, + IgniteFunction internalToExternalLblMapping) { + + super((List) models, predictionsAggregator); + this.internalToExternalLblMapping = internalToExternalLblMapping; + } + + private GDBModel() { + } + + public GDBModel withLblMapping(IgniteFunction internalToExternalLblMapping) { + this.internalToExternalLblMapping = internalToExternalLblMapping; + return this; + } + + /** {@inheritDoc} */ + @Override public Double predict(Vector features) { + if (internalToExternalLblMapping == null) { + throw new IllegalArgumentException("The mapping should not be empty. Initialize it with apropriate function. "); + } else { + return internalToExternalLblMapping.apply(super.predict(features)); + } + } + + /** {@inheritDoc} */ + @Override public void toJSON(Path path) { + ObjectMapper mapper = new ObjectMapper().configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); + mapper.addMixIn(GDBModel.class, JSONModelMixIn.class); + + ObjectWriter writer = mapper + .writerFor(GDBModel.class) + .withAttribute("formatVersion", JSONModel.JSON_MODEL_FORMAT_VERSION) + .withAttribute("timestamp", System.currentTimeMillis()) + .withAttribute("uid", "dt_" + UUID.randomUUID().toString()) + .withAttribute("modelClass", GDBModel.class.getSimpleName()); + + try { + File file = new File(path.toAbsolutePath().toString()); + writer.writeValue(file, this); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** Loads RandomForestModel from JSON file. 
*/ + public static GDBModel fromJSON(Path path) { + ObjectMapper mapper = new ObjectMapper(); + mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + + GDBModel mdl; + try { + JacksonHelper.readAndValidateBasicJsonModelProperties(path, mapper, GDBModel.class.getSimpleName()); + mdl = mapper.readValue(new File(path.toAbsolutePath().toString()), GDBModel.class); + return mdl; + } catch (IOException e) { + e.printStackTrace(); + } + return null; + } +} diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBTrainer.java index ad35d809d4839..a36feec1a8d1b 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBTrainer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBTrainer.java @@ -22,7 +22,6 @@ import java.util.List; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.ml.IgniteModel; -import org.apache.ignite.ml.composition.ModelsComposition; import org.apache.ignite.ml.composition.boosting.convergence.ConvergenceCheckerFactory; import org.apache.ignite.ml.composition.boosting.convergence.mean.MeanAbsValueConvergenceCheckerFactory; import org.apache.ignite.ml.composition.boosting.loss.Loss; @@ -34,7 +33,6 @@ import org.apache.ignite.ml.environment.LearningEnvironmentBuilder; import org.apache.ignite.ml.environment.logging.MLLogger; import org.apache.ignite.ml.knn.regression.KNNRegressionTrainer; -import org.apache.ignite.ml.math.functions.IgniteFunction; import org.apache.ignite.ml.math.primitives.vector.Vector; import org.apache.ignite.ml.preprocessing.Preprocessor; import org.apache.ignite.ml.regressions.linear.LinearRegressionLSQRTrainer; @@ -57,7 +55,7 @@ * * But in practice Decision Trees is most used regressors (see: {@link DecisionTreeRegressionTrainer}). 
*/ -public abstract class GDBTrainer extends DatasetTrainer { +public abstract class GDBTrainer extends DatasetTrainer { /** Gradient step. */ private final double gradientStep; @@ -87,13 +85,13 @@ public GDBTrainer(double gradStepSize, Integer cntOfIterations, Loss loss) { } /** {@inheritDoc} */ - @Override public ModelsComposition fitWithInitializedDeployingContext(DatasetBuilder datasetBuilder, + @Override public GDBModel fitWithInitializedDeployingContext(DatasetBuilder datasetBuilder, Preprocessor preprocessor) { return updateModel(null, datasetBuilder, preprocessor); } /** {@inheritDoc} */ - @Override protected ModelsComposition updateModel(ModelsComposition mdl, + @Override protected GDBModel updateModel(GDBModel mdl, DatasetBuilder datasetBuilder, Preprocessor preprocessor) { if (!learnLabels(datasetBuilder, preprocessor)) @@ -121,7 +119,7 @@ public GDBTrainer(double gradStepSize, Integer cntOfIterations, Loss loss) { List> models; if (mdl != null) - models = stgy.update((GDBModel) mdl, datasetBuilder, preprocessor); + models = stgy.update(mdl, datasetBuilder, preprocessor); else models = stgy.learnModels(datasetBuilder, preprocessor); @@ -136,7 +134,7 @@ public GDBTrainer(double gradStepSize, Integer cntOfIterations, Loss loss) { } /** {@inheritDoc} */ - @Override public boolean isUpdateable(ModelsComposition mdl) { + @Override public boolean isUpdateable(GDBModel mdl) { return mdl instanceof GDBModel; } @@ -239,35 +237,4 @@ public GDBTrainer withCheckConvergenceStgyFactory(ConvergenceCheckerFactory fact protected GDBLearningStrategy getLearningStrategy() { return new GDBLearningStrategy(); } - - /** - * GDB model. - */ - public static final class GDBModel extends ModelsComposition { - /** Serial version uid. */ - private static final long serialVersionUID = 3476661240155508004L; - - /** Internal to external lbl mapping. */ - private final IgniteFunction internalToExternalLblMapping; - - /** - * Creates an instance of GDBModel. 
- * - * @param models Models. - * @param predictionsAggregator Predictions aggregator. - * @param internalToExternalLblMapping Internal to external lbl mapping. - */ - public GDBModel(List> models, - WeightedPredictionsAggregator predictionsAggregator, - IgniteFunction internalToExternalLblMapping) { - - super(models, predictionsAggregator); - this.internalToExternalLblMapping = internalToExternalLblMapping; - } - - /** {@inheritDoc} */ - @Override public Double predict(Vector features) { - return internalToExternalLblMapping.apply(super.predict(features)); - } - } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/predictionsaggregator/PredictionsAggregator.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/predictionsaggregator/PredictionsAggregator.java index d996a2aeb6d84..1490b7c82dccc 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/composition/predictionsaggregator/PredictionsAggregator.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/predictionsaggregator/PredictionsAggregator.java @@ -17,11 +17,20 @@ package org.apache.ignite.ml.composition.predictionsaggregator; +import com.fasterxml.jackson.annotation.JsonSubTypes; +import com.fasterxml.jackson.annotation.JsonTypeInfo; import org.apache.ignite.ml.math.functions.IgniteFunction; /** * Predictions aggregator interface. */ +@JsonTypeInfo( use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "type") +@JsonSubTypes( + { + @JsonSubTypes.Type(value = MeanValuePredictionsAggregator.class, name = "MeanValuePredictionsAggregator"), + @JsonSubTypes.Type(value = OnMajorityPredictionsAggregator.class, name = "OnMajorityPredictionsAggregator"), + @JsonSubTypes.Type(value = WeightedPredictionsAggregator.class, name = "WeightedPredictionsAggregator"), + }) public interface PredictionsAggregator extends IgniteFunction { /** * Represents aggregator as String. 
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/predictionsaggregator/WeightedPredictionsAggregator.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/predictionsaggregator/WeightedPredictionsAggregator.java index 555ff3c1eaed6..257c635f79c3e 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/composition/predictionsaggregator/WeightedPredictionsAggregator.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/predictionsaggregator/WeightedPredictionsAggregator.java @@ -25,10 +25,13 @@ */ public final class WeightedPredictionsAggregator implements PredictionsAggregator { /** Weights for predictions. */ - private final double[] weights; + private double[] weights; /** Bias. */ - private final double bias; + private double bias; + + public WeightedPredictionsAggregator() { + } /** * Constructs WeightedPredictionsAggregator instance. diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JSONModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JSONModel.java new file mode 100644 index 0000000000000..ac733988c32c4 --- /dev/null +++ b/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JSONModel.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.ml.inference.json; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonIgnore; +import org.apache.ignite.ml.IgniteModel; + +/** Basic class for all non-trivial model data serialization. */ +public abstract class JSONModel { + /** Basic Ignite version. */ + @JsonIgnore + public static final String JSON_MODEL_FORMAT_VERSION = "1"; + + /** Ignite version. */ + public String formatVersion = JSON_MODEL_FORMAT_VERSION; + + /** Timestamp in ms from System.currentTimeMillis() method. */ + public Long timestamp; + + /** Unique string indetifier. */ + public String uid; + + /** String description of model class. */ + public String modelClass; + + /** Convert JSON string to IgniteModel object. */ + public abstract IgniteModel convert(); + + /** */ + public JSONModel(Long timestamp, String uid, String modelClass) { + this.timestamp = timestamp; + this.uid = uid; + this.modelClass = modelClass; + } + + @JsonCreator + public JSONModel() { + } +} diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JSONModelMixIn.java b/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JSONModelMixIn.java new file mode 100644 index 0000000000000..843b5942f7be9 --- /dev/null +++ b/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JSONModelMixIn.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.ml.inference.json; + +import com.fasterxml.jackson.databind.annotation.JsonAppend; + +/** Just a mixin class to add a few configuration properties. */ +@JsonAppend( + attrs = { + @JsonAppend.Attr(value = "formatVersion"), + @JsonAppend.Attr(value = "timestamp"), + @JsonAppend.Attr(value = "uid"), + @JsonAppend.Attr(value = "modelClass") + } +) +public class JSONModelMixIn { } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JSONWritable.java b/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JSONWritable.java new file mode 100644 index 0000000000000..fcc30379dded9 --- /dev/null +++ b/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JSONWritable.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.ml.inference.json; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializationFeature; + +public interface JSONWritable { + default void toJSON(Path path) { + ObjectMapper mapper = new ObjectMapper().configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); + + try { + File file = new File(path.toAbsolutePath().toString()); + mapper.writeValue(file, this); + } catch (IOException e) { + e.printStackTrace(); + } + } +} diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JacksonHelper.java b/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JacksonHelper.java new file mode 100644 index 0000000000000..654ade44d774d --- /dev/null +++ b/modules/ml/src/main/java/org/apache/ignite/ml/inference/json/JacksonHelper.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.ml.inference.json; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import java.util.LinkedHashMap; +import java.util.Map; +import com.fasterxml.jackson.databind.ObjectMapper; + +public class JacksonHelper { + public static void readAndValidateBasicJsonModelProperties(Path path, ObjectMapper mapper, String className) throws IOException { + Map jsonAsMap = mapper.readValue(new File(path.toAbsolutePath().toString()), LinkedHashMap.class); + String formatVersion = jsonAsMap.get("formatVersion").toString(); + Long timestamp = (Long) jsonAsMap.get("timestamp"); + String uid = jsonAsMap.get("uid").toString(); + String modelClass = jsonAsMap.get("modelClass").toString(); + + if (!modelClass.equals(className)) { + throw new IllegalArgumentException("You are trying to load " + modelClass + " model to " + className); + } + } +} diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/knn/NNClassificationModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/knn/NNClassificationModel.java index 2ad0c46aedd57..922630ea1615a 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/knn/NNClassificationModel.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/knn/NNClassificationModel.java @@ -180,6 +180,17 @@ public DistanceMeasure getDistanceMeasure() { return distanceMeasure; } + /** */ + public int getK() { + return k; + } + + /** */ + public boolean isWeighted() { + return weighted; + } + + /** {@inheritDoc} */ @Override public int hashCode() { int res = 1; diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNClassificationModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNClassificationModel.java index 2c820b7423af4..6015900259809 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNClassificationModel.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNClassificationModel.java @@ -17,33 +17,47 @@ package 
org.apache.ignite.ml.knn.ann; +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeMap; +import java.util.UUID; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializationFeature; import org.apache.ignite.ml.Exporter; +import org.apache.ignite.ml.environment.deploy.DeployableObject; +import org.apache.ignite.ml.inference.json.JSONModel; +import org.apache.ignite.ml.inference.json.JSONWritable; import org.apache.ignite.ml.knn.NNClassificationModel; +import org.apache.ignite.ml.math.distances.DistanceMeasure; import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.math.primitives.vector.VectorUtils; import org.apache.ignite.ml.structures.LabeledVector; import org.apache.ignite.ml.structures.LabeledVectorSet; import org.apache.ignite.ml.util.ModelTrace; -import org.jetbrains.annotations.NotNull; /** * ANN model to predict labels in multi-class classification task. */ -public final class ANNClassificationModel extends NNClassificationModel { +public final class ANNClassificationModel extends NNClassificationModel implements JSONWritable, DeployableObject { /** */ private static final long serialVersionUID = -127312378991350345L; /** The labeled set of candidates. */ - private final LabeledVectorSet candidates; + private LabeledVectorSet candidates; /** Centroid statistics. */ - private final ANNClassificationTrainer.CentroidStat centroindsStat; + private ANNClassificationTrainer.CentroidStat centroindsStat; /** * Build the model based on a candidates set. 
@@ -56,6 +70,10 @@ public ANNClassificationModel(LabeledVectorSet centers, this.centroindsStat = centroindsStat; } + /** */ + private ANNClassificationModel() { + } + /** */ public LabeledVectorSet getCandidates() { return candidates; @@ -94,7 +112,7 @@ private List findKNearestNeighbors(Vector v) { * @param distanceIdxPairs The distance map. * @return K-nearest neighbors. */ - @NotNull private LabeledVector[] getKClosestVectors( + private LabeledVector[] getKClosestVectors( TreeMap> distanceIdxPairs) { LabeledVector[] res; @@ -129,7 +147,7 @@ private List findKNearestNeighbors(Vector v) { * @return Key - distanceMeasure from given features before features with idx stored in value. Value is presented * with Set because there can be a few vectors with the same distance. */ - @NotNull private TreeMap> getDistances(Vector v) { + private TreeMap> getDistances(Vector v) { TreeMap> distanceIdxPairs = new TreeMap<>(); for (int i = 0; i < candidates.rowSize(); i++) { @@ -203,4 +221,104 @@ private double classify(List neighbors, Vector v, boolean weighte .addField("amount of candidates", String.valueOf(candidates.rowSize())) .toString(); } + + /** {@inheritDoc} */ + @JsonIgnore + @Override public List getDependencies() { + return Collections.emptyList(); + } + + /** Loads ANNClassificationModel from JSON file. 
*/ + public static ANNClassificationModel fromJSON(Path path) { + ObjectMapper mapper = new ObjectMapper().configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); + + ANNJSONExportModel exportModel; + try { + exportModel = mapper + .readValue(new File(path.toAbsolutePath().toString()), ANNJSONExportModel.class); + + return exportModel.convert(); + } catch (IOException e) { + e.printStackTrace(); + } + return null; + } + + /** {@inheritDoc} */ + @Override public void toJSON(Path path) { + ObjectMapper mapper = new ObjectMapper().configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); + + try { + ANNJSONExportModel exportModel = new ANNJSONExportModel(System.currentTimeMillis(), "ann_" + UUID.randomUUID().toString(), ANNClassificationModel.class.getSimpleName()); + List listOfCandidates = new ArrayList<>(); + ProbableLabel[] labels = new ProbableLabel[candidates.rowSize()]; + for (int i = 0; i < candidates.rowSize(); i++) { + labels[i] = (ProbableLabel) candidates.getRow(i).getLb(); + listOfCandidates.add(candidates.features(i).asArray()); + } + + exportModel.candidateFeatures = listOfCandidates; + exportModel.distanceMeasure = distanceMeasure; + exportModel.k = k; + exportModel.weighted = weighted; + exportModel.candidateLabels = labels; + exportModel.centroindsStat = centroindsStat; + + File file = new File(path.toAbsolutePath().toString()); + mapper.writeValue(file, exportModel); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** */ + public static class ANNJSONExportModel extends JSONModel { + /** Centers of clusters. */ + public List candidateFeatures; + + public ProbableLabel[] candidateLabels; + + /** Distance measure. */ + public DistanceMeasure distanceMeasure; + + /** Amount of nearest neighbors. */ + public int k; + + /** kNN strategy. */ + public boolean weighted; + + /** Centroid statistics. 
*/ + public ANNClassificationTrainer.CentroidStat centroindsStat; + + /** */ + public ANNJSONExportModel(Long timestamp, String uid, String modelClass) { + super(timestamp, uid, modelClass); + } + + /** */ + @JsonCreator + public ANNJSONExportModel() { + } + + /** {@inheritDoc} */ + @Override public ANNClassificationModel convert() { + if (candidateFeatures == null || candidateFeatures.isEmpty()) + throw new IllegalArgumentException("Loaded list of candidates is empty. It should not be empty."); + + double[] firstRow = candidateFeatures.get(0); + LabeledVectorSet candidatesForANN = new LabeledVectorSet<>(candidateFeatures.size(), firstRow.length); + LabeledVector[] data = new LabeledVector[candidateFeatures.size()]; + for (int i = 0; i < candidateFeatures.size(); i++) { + data[i] = new LabeledVector(VectorUtils.of(candidateFeatures.get(i)), candidateLabels[i]); + } + candidatesForANN.setData(data); + + ANNClassificationModel mdl = new ANNClassificationModel(candidatesForANN, centroindsStat); + + mdl.withDistanceMeasure(distanceMeasure); + mdl.withK(k); + mdl.withWeighted(weighted); + return mdl; + } + } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNClassificationTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNClassificationTrainer.java index 22192224c760a..eec871386d1f8 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNClassificationTrainer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNClassificationTrainer.java @@ -24,6 +24,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentSkipListSet; import java.util.stream.Collectors; +import com.fasterxml.jackson.annotation.JsonIgnore; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.ml.clustering.kmeans.KMeansModel; import org.apache.ignite.ml.clustering.kmeans.KMeansTrainer; @@ -139,7 +140,7 @@ private List getCentroids(Preprocessor> centroidStat = new ConcurrentHashMap<>(); 
+ public ConcurrentHashMap> centroidStat = new ConcurrentHashMap<>(); /** Count of points closest to the center with a given index. */ - ConcurrentHashMap counts = new ConcurrentHashMap<>(); + public ConcurrentHashMap counts = new ConcurrentHashMap<>(); /** Set of unique labels. */ - ConcurrentSkipListSet clsLblsSet = new ConcurrentSkipListSet<>(); + public ConcurrentSkipListSet clsLblsSet = new ConcurrentSkipListSet<>(); /** Merge current */ CentroidStat merge(CentroidStat other) { diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ProbableLabel.java b/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ProbableLabel.java index 1fee123d19385..49f56b8815f64 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ProbableLabel.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ProbableLabel.java @@ -25,7 +25,10 @@ */ public class ProbableLabel { /** Key is label, value is probability to be this class */ - TreeMap clsLbls; + public TreeMap clsLbls; + + public ProbableLabel() { + } /** * The key is class label, diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/BrayCurtisDistance.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/BrayCurtisDistance.java index 0b431597a81d7..2c32ee6ebcfed 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/BrayCurtisDistance.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/BrayCurtisDistance.java @@ -51,4 +51,8 @@ public class BrayCurtisDistance implements DistanceMeasure { @Override public int hashCode() { return getClass().hashCode(); } + + @Override public String toString() { + return "BrayCurtisDistance{}"; + } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/DistanceMeasure.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/DistanceMeasure.java index 392e7b0a2247e..4176d971adfc9 100644 --- 
a/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/DistanceMeasure.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/DistanceMeasure.java @@ -20,6 +20,8 @@ import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; +import com.fasterxml.jackson.annotation.JsonSubTypes; +import com.fasterxml.jackson.annotation.JsonTypeInfo; import org.apache.ignite.ml.math.exceptions.math.CardinalityException; import org.apache.ignite.ml.math.primitives.vector.Vector; import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector; @@ -28,6 +30,21 @@ * This class is based on the corresponding class from Apache Common Math lib. Interface for distance measures of * n-dimensional vectors. */ +@JsonTypeInfo( use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "type") +@JsonSubTypes( + { + @JsonSubTypes.Type(value = BrayCurtisDistance.class, name = "BrayCurtisDistance"), + @JsonSubTypes.Type(value = CanberraDistance.class, name = "CanberraDistance"), + @JsonSubTypes.Type(value = ChebyshevDistance.class, name = "ChebyshevDistance"), + @JsonSubTypes.Type(value = CosineSimilarity.class, name = "CosineSimilarity"), + @JsonSubTypes.Type(value = EuclideanDistance.class, name = "EuclideanDistance"), + @JsonSubTypes.Type(value = HammingDistance.class, name = "HammingDistance"), + @JsonSubTypes.Type(value = JaccardIndex.class, name = "JaccardIndex"), + @JsonSubTypes.Type(value = JensenShannonDistance.class, name = "JensenShannonDistance"), + @JsonSubTypes.Type(value = ManhattanDistance.class, name = "ManhattanDistance"), + @JsonSubTypes.Type(value = MinkowskiDistance.class, name = "MinkowskiDistance"), + @JsonSubTypes.Type(value = WeightedMinkowskiDistance.class, name = "WeightedMinkowskiDistance"), + }) public interface DistanceMeasure extends Externalizable { /** * Compute the distance between two n-dimensional vectors. 
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/MinkowskiDistance.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/MinkowskiDistance.java index b382112964ec4..20c1c02fe06ce 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/MinkowskiDistance.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/MinkowskiDistance.java @@ -17,6 +17,8 @@ package org.apache.ignite.ml.math.distances; import java.util.Objects; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; import org.apache.ignite.ml.math.exceptions.math.CardinalityException; import org.apache.ignite.ml.math.functions.IgniteDoubleFunction; import org.apache.ignite.ml.math.primitives.vector.Vector; @@ -35,10 +37,16 @@ public class MinkowskiDistance implements DistanceMeasure { private final double p; /** @param p norm */ - public MinkowskiDistance(double p) { + @JsonCreator + public MinkowskiDistance(@JsonProperty("p")double p) { this.p = p; } + /** Returns p-norm. 
*/ + public double getP() { + return p; + } + /** {@inheritDoc} */ @Override public double compute(Vector a, Vector b) throws CardinalityException { assert a.size() == b.size(); @@ -60,4 +68,10 @@ public MinkowskiDistance(double p) { @Override public int hashCode() { return Objects.hash(p); } + + @Override public String toString() { + return "MinkowskiDistance{" + + "p=" + p + + '}'; + } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/WeightedMinkowskiDistance.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/WeightedMinkowskiDistance.java index 662bf907021e4..61e2125f70744 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/WeightedMinkowskiDistance.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/WeightedMinkowskiDistance.java @@ -16,8 +16,13 @@ */ package org.apache.ignite.ml.math.distances; +import java.util.Arrays; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; import org.apache.ignite.ml.math.exceptions.math.CardinalityException; import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.math.primitives.vector.VectorUtils; import org.apache.ignite.ml.math.util.MatrixUtil; /** @@ -29,13 +34,18 @@ public class WeightedMinkowskiDistance implements DistanceMeasure { */ private static final long serialVersionUID = 1771556549784040096L; - private final int p; + private int p = 1; - private final Vector weight; + private final double[] weights; - public WeightedMinkowskiDistance(int p, Vector weight) { + @JsonIgnore + private final Vector internalWeights; + + @JsonCreator + public WeightedMinkowskiDistance(@JsonProperty("p")int p, @JsonProperty("weights")double[] weights) { this.p = p; - this.weight = weight.copy().map(x -> Math.pow(Math.abs(x), p)); + this.weights = weights.clone(); + internalWeights = 
VectorUtils.of(weights).copy().map(x -> Math.pow(Math.abs(x), p)); } /** @@ -47,12 +57,20 @@ public WeightedMinkowskiDistance(int p, Vector weight) { return Math.pow( MatrixUtil.localCopyOf(a).minus(b) .map(x -> Math.pow(Math.abs(x), p)) - .times(weight) + .times(internalWeights) .sum(), 1 / (double) p ); } + /** Returns p-norm. */ + public int getP() { + return p; + } + + /** Returns weights. */ + public double[] getWeights() { return weights.clone(); } + /** * {@inheritDoc} */ @@ -70,4 +88,11 @@ public WeightedMinkowskiDistance(int p, Vector weight) { @Override public int hashCode() { return getClass().hashCode(); } + + @Override public String toString() { + return "WeightedMinkowskiDistance{" + + "p=" + p + + ", weights=" + Arrays.toString(weights) + + '}'; + } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/stat/DistributionMixture.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/stat/DistributionMixture.java index abd39df19b0f3..4a915fa7fc460 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/math/stat/DistributionMixture.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/math/stat/DistributionMixture.java @@ -32,13 +32,13 @@ */ public abstract class DistributionMixture implements Distribution { /** Component probabilities. */ - private final Vector componentProbs; + private Vector componentProbs; /** Distributions. */ - private final List distributions; + private List distributions; /** Dimension. */ - private final int dimension; + private int dimension; /** * Creates an instance of DistributionMixture. 
@@ -61,6 +61,9 @@ public DistributionMixture(Vector componentProbs, List distributions) { this.dimension = dimension; } + public DistributionMixture() { + } + /** {@inheritDoc} */ @Override public double prob(Vector x) { return likelihood(x).sum(); diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/compound/CompoundNaiveBayesModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/compound/CompoundNaiveBayesModel.java index 6cdc637b59973..a9fc2d0dea0a9 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/compound/CompoundNaiveBayesModel.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/compound/CompoundNaiveBayesModel.java @@ -17,14 +17,27 @@ package org.apache.ignite.ml.naivebayes.compound; +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.UUID; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectWriter; +import com.fasterxml.jackson.databind.SerializationFeature; import org.apache.ignite.ml.Exportable; import org.apache.ignite.ml.Exporter; import org.apache.ignite.ml.IgniteModel; import org.apache.ignite.ml.environment.deploy.DeployableObject; +import org.apache.ignite.ml.inference.json.JSONModel; +import org.apache.ignite.ml.inference.json.JSONModelMixIn; +import org.apache.ignite.ml.inference.json.JSONWritable; +import org.apache.ignite.ml.inference.json.JacksonHelper; import org.apache.ignite.ml.math.primitives.vector.Vector; import org.apache.ignite.ml.math.primitives.vector.VectorUtils; import org.apache.ignite.ml.naivebayes.discrete.DiscreteNaiveBayesModel; @@ -34,7 +47,8 @@ * A compound Naive Bayes model which uses a composition of{@code GaussianNaiveBayesModel} and {@code * 
DiscreteNaiveBayesModel}. */ -public class CompoundNaiveBayesModel implements IgniteModel, Exportable, DeployableObject { +public class CompoundNaiveBayesModel implements IgniteModel, Exportable, + JSONWritable, DeployableObject { /** Serial version uid. */ private static final long serialVersionUID = -5045925321135798960L; @@ -56,6 +70,10 @@ public class CompoundNaiveBayesModel implements IgniteModel, Exp /** Feature ids which should be skipped in Discrete model. */ private Collection discreteFeatureIdsToSkip = Collections.emptyList(); + /** */ + public CompoundNaiveBayesModel() { + } + /** {@inheritDoc} */ @Override public

    void saveModel(Exporter exporter, P path) { exporter.save(this, path); @@ -91,6 +109,22 @@ public DiscreteNaiveBayesModel getDiscreteModel() { return discreteModel; } + public double[] getPriorProbabilities() { + return priorProbabilities; + } + + public double[] getLabels() { + return labels; + } + + public Collection getGaussianFeatureIdsToSkip() { + return gaussianFeatureIdsToSkip; + } + + public Collection getDiscreteFeatureIdsToSkip() { + return discreteFeatureIdsToSkip; + } + /** Sets prior probabilities. */ public CompoundNaiveBayesModel withPriorProbabilities(double[] priorProbabilities) { this.priorProbabilities = priorProbabilities.clone(); @@ -155,7 +189,44 @@ private static Vector skipFeatures(Vector vector, Collection featureIds } /** {@inheritDoc} */ + @JsonIgnore @Override public List getDependencies() { return Arrays.asList(discreteModel, gaussianModel); } + + /** {@inheritDoc} */ + @Override public void toJSON(Path path) { + ObjectMapper mapper = new ObjectMapper().configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); + mapper.addMixIn(CompoundNaiveBayesModel.class, JSONModelMixIn.class); + + ObjectWriter writer = mapper + .writerFor(CompoundNaiveBayesModel.class) + .withAttribute("formatVersion", JSONModel.JSON_MODEL_FORMAT_VERSION) + .withAttribute("timestamp", System.currentTimeMillis()) + .withAttribute("uid", "dt_" + UUID.randomUUID().toString()) + .withAttribute("modelClass", CompoundNaiveBayesModel.class.getSimpleName()); + + try { + File file = new File(path.toAbsolutePath().toString()); + writer.writeValue(file, this); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** Loads CompoundNaiveBayesModel from JSON file. 
*/ + public static CompoundNaiveBayesModel fromJSON(Path path) { + ObjectMapper mapper = new ObjectMapper(); + mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + + CompoundNaiveBayesModel mdl; + try { + JacksonHelper.readAndValidateBasicJsonModelProperties(path, mapper, CompoundNaiveBayesModel.class.getSimpleName()); + mdl = mapper.readValue(new File(path.toAbsolutePath().toString()), CompoundNaiveBayesModel.class); + return mdl; + } catch (IOException e) { + e.printStackTrace(); + } + return null; + } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/discrete/DiscreteNaiveBayesModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/discrete/DiscreteNaiveBayesModel.java index b7eb5d383ab30..3d5edce45b755 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/discrete/DiscreteNaiveBayesModel.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/discrete/DiscreteNaiveBayesModel.java @@ -17,10 +17,23 @@ package org.apache.ignite.ml.naivebayes.discrete; +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; import java.util.Collections; import java.util.List; +import java.util.UUID; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectWriter; +import com.fasterxml.jackson.databind.SerializationFeature; import org.apache.ignite.ml.Exporter; import org.apache.ignite.ml.environment.deploy.DeployableObject; +import org.apache.ignite.ml.inference.json.JSONModel; +import org.apache.ignite.ml.inference.json.JSONModelMixIn; +import org.apache.ignite.ml.inference.json.JSONWritable; +import org.apache.ignite.ml.inference.json.JacksonHelper; import org.apache.ignite.ml.math.primitives.vector.Vector; import org.apache.ignite.ml.naivebayes.BayesModel; @@ -29,7 +42,8 @@ * {@code p(C_k,y) =x_1*p_k1^x 
*...*x_i*p_ki^x_i}. Where {@code x_i} is a discrete feature, {@code p_ki} is a prior * probability probability of class {@code p(x|C_k)}. Returns the number of the most possible class. */ -public class DiscreteNaiveBayesModel implements BayesModel, DeployableObject { +public class DiscreteNaiveBayesModel implements BayesModel, + JSONWritable, DeployableObject { /** Serial version uid. */ private static final long serialVersionUID = -127386523291350345L; @@ -37,23 +51,23 @@ public class DiscreteNaiveBayesModel implements BayesModel void saveModel(Exporter exporter, P path) { exporter.save(this, path); @@ -111,22 +129,22 @@ public DiscreteNaiveBayesModel(double[][][] probabilities, double[] clsProbabili /** A getter for probabilities.*/ public double[][][] getProbabilities() { - return probabilities; + return probabilities.clone(); } /** A getter for clsProbabilities.*/ public double[] getClsProbabilities() { - return clsProbabilities; + return clsProbabilities.clone(); } /** A getter for bucketThresholds.*/ public double[][] getBucketThresholds() { - return bucketThresholds; + return bucketThresholds.clone(); } /** A getter for labels.*/ public double[] getLabels() { - return labels; + return labels.clone(); } /** A getter for sumsHolder.*/ @@ -145,7 +163,44 @@ private int toBucketNumber(double val, double[] thresholds) { } /** {@inheritDoc} */ + @JsonIgnore @Override public List getDependencies() { return Collections.emptyList(); } + + /** {@inheritDoc} */ + @Override public void toJSON(Path path) { + ObjectMapper mapper = new ObjectMapper().configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); + mapper.addMixIn(DiscreteNaiveBayesModel.class, JSONModelMixIn.class); + + ObjectWriter writer = mapper + .writerFor(DiscreteNaiveBayesModel.class) + .withAttribute("formatVersion", JSONModel.JSON_MODEL_FORMAT_VERSION) + .withAttribute("timestamp", System.currentTimeMillis()) + .withAttribute("uid", "dt_" + UUID.randomUUID().toString()) + .withAttribute("modelClass", 
DiscreteNaiveBayesModel.class.getSimpleName()); + + try { + File file = new File(path.toAbsolutePath().toString()); + writer.writeValue(file, this); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** Loads DiscreteNaiveBayesModel from JSON file. */ + public static DiscreteNaiveBayesModel fromJSON(Path path) { + ObjectMapper mapper = new ObjectMapper(); + mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + + DiscreteNaiveBayesModel mdl; + try { + JacksonHelper.readAndValidateBasicJsonModelProperties(path, mapper, DiscreteNaiveBayesModel.class.getSimpleName()); + mdl = mapper.readValue(new File(path.toAbsolutePath().toString()), DiscreteNaiveBayesModel.class); + return mdl; + } catch (IOException e) { + e.printStackTrace(); + } + return null; + } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/discrete/DiscreteNaiveBayesSumsHolder.java b/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/discrete/DiscreteNaiveBayesSumsHolder.java index 50b335eaa6539..060d1889d9e37 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/discrete/DiscreteNaiveBayesSumsHolder.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/discrete/DiscreteNaiveBayesSumsHolder.java @@ -32,6 +32,17 @@ public class DiscreteNaiveBayesSumsHolder implements AutoCloseable, Serializable /** Rows count for each label */ Map featureCountersPerLbl = new HashMap<>(); + public DiscreteNaiveBayesSumsHolder() { + } + + public Map getValuesInBucketPerLbl() { + return valuesInBucketPerLbl; + } + + public Map getFeatureCountersPerLbl() { + return featureCountersPerLbl; + } + /** Merge to current */ DiscreteNaiveBayesSumsHolder merge(DiscreteNaiveBayesSumsHolder other) { valuesInBucketPerLbl = MapUtil.mergeMaps(valuesInBucketPerLbl, other.valuesInBucketPerLbl, this::sum, HashMap::new); diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/gaussian/GaussianNaiveBayesModel.java 
b/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/gaussian/GaussianNaiveBayesModel.java index d0a647093f0e0..0627ce52677df 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/gaussian/GaussianNaiveBayesModel.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/gaussian/GaussianNaiveBayesModel.java @@ -17,10 +17,23 @@ package org.apache.ignite.ml.naivebayes.gaussian; +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; import java.util.Collections; import java.util.List; +import java.util.UUID; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectWriter; +import com.fasterxml.jackson.databind.SerializationFeature; import org.apache.ignite.ml.Exporter; import org.apache.ignite.ml.environment.deploy.DeployableObject; +import org.apache.ignite.ml.inference.json.JSONModel; +import org.apache.ignite.ml.inference.json.JSONModelMixIn; +import org.apache.ignite.ml.inference.json.JSONWritable; +import org.apache.ignite.ml.inference.json.JacksonHelper; import org.apache.ignite.ml.math.primitives.vector.Vector; import org.apache.ignite.ml.naivebayes.BayesModel; @@ -28,24 +41,25 @@ * Simple naive Bayes model which predicts result value {@code y} belongs to a class {@code C_k, k in [0..K]} as {@code * p(C_k,y) = p(C_k)*p(y_1,C_k) *...*p(y_n,C_k) / p(y)}. Return the number of the most possible class. */ -public class GaussianNaiveBayesModel implements BayesModel, DeployableObject { +public class GaussianNaiveBayesModel implements BayesModel, + JSONWritable, DeployableObject { /** Serial version uid. */ private static final long serialVersionUID = -127386523291350345L; /** Means of features for all classes. kth row contains means for labels[k] class. */ - private final double[][] means; + private double[][] means; /** Variances of features for all classes. 
kth row contains variances for labels[k] class */ - private final double[][] variances; + private double[][] variances; /** Prior probabilities of each class */ - private final double[] classProbabilities; + private double[] classProbabilities; /** Labels. */ - private final double[] labels; + private double[] labels; /** Feature sum, squared sum and count per label. */ - private final GaussianNaiveBayesSumsHolder sumsHolder; + private GaussianNaiveBayesSumsHolder sumsHolder; /** * @param means Means of features for all classes. @@ -56,13 +70,17 @@ public class GaussianNaiveBayesModel implements BayesModel void saveModel(Exporter exporter, P path) { exporter.save(this, path); @@ -127,7 +145,44 @@ private static double gauss(double x, double mean, double variance) { } /** {@inheritDoc} */ + @JsonIgnore @Override public List getDependencies() { return Collections.emptyList(); } + + /** {@inheritDoc} */ + @Override public void toJSON(Path path) { + ObjectMapper mapper = new ObjectMapper().configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); + mapper.addMixIn(GaussianNaiveBayesModel.class, JSONModelMixIn.class); + + ObjectWriter writer = mapper + .writerFor(GaussianNaiveBayesModel.class) + .withAttribute("formatVersion", JSONModel.JSON_MODEL_FORMAT_VERSION) + .withAttribute("timestamp", System.currentTimeMillis()) + .withAttribute("uid", "dt_" + UUID.randomUUID().toString()) + .withAttribute("modelClass", GaussianNaiveBayesModel.class.getSimpleName()); + + try { + File file = new File(path.toAbsolutePath().toString()); + writer.writeValue(file, this); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** Loads GaussianNaiveBayesModel from JSON file. 
*/ + public static GaussianNaiveBayesModel fromJSON(Path path) { + ObjectMapper mapper = new ObjectMapper(); + mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + + GaussianNaiveBayesModel mdl; + try { + JacksonHelper.readAndValidateBasicJsonModelProperties(path, mapper, GaussianNaiveBayesModel.class.getSimpleName()); + mdl = mapper.readValue(new File(path.toAbsolutePath().toString()), GaussianNaiveBayesModel.class); + return mdl; + } catch (IOException e) { + e.printStackTrace(); + } + return null; + } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/gaussian/GaussianNaiveBayesSumsHolder.java b/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/gaussian/GaussianNaiveBayesSumsHolder.java index 7b95ff8f9354f..1d85832ceec90 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/gaussian/GaussianNaiveBayesSumsHolder.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/naivebayes/gaussian/GaussianNaiveBayesSumsHolder.java @@ -35,6 +35,21 @@ class GaussianNaiveBayesSumsHolder implements Serializable, AutoCloseable { /** Rows count for each label */ Map featureCountersPerLbl = new HashMap<>(); + public GaussianNaiveBayesSumsHolder() { + } + + public Map getFeatureSumsPerLbl() { + return featureSumsPerLbl; + } + + public Map getFeatureSquaredSumsPerLbl() { + return featureSquaredSumsPerLbl; + } + + public Map getFeatureCountersPerLbl() { + return featureCountersPerLbl; + } + /** Merge to current */ GaussianNaiveBayesSumsHolder merge(GaussianNaiveBayesSumsHolder other) { featureSumsPerLbl = MapUtil.mergeMaps(featureSumsPerLbl, other.featureSumsPerLbl, this::sum, HashMap::new); diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainer.java index 9ecc257492b19..d28a2a958a1ce 100644 --- 
a/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainer.java @@ -72,10 +72,10 @@ private static LabeledVector extendLabeledVector(LabeledVector double[] x0 = null; if (mdl != null) { - int x0Size = mdl.getWeights().size() + 1; - Vector weights = mdl.getWeights().like(x0Size); - mdl.getWeights().nonZeroes().forEach(ith -> weights.set(ith.index(), ith.get())); - weights.set(weights.size() - 1, mdl.getIntercept()); + int x0Size = mdl.weights().size() + 1; + Vector weights = mdl.weights().like(x0Size); + mdl.weights().nonZeroes().forEach(ith -> weights.set(ith.index(), ith.get())); + weights.set(weights.size() - 1, mdl.intercept()); x0 = weights.asArray(); } res = lsqr.solve(0, 1e-12, 1e-12, 1e8, -1, false, x0); diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionModel.java index 150b6d763a20b..4cb53403d96b8 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionModel.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionModel.java @@ -17,25 +17,35 @@ package org.apache.ignite.ml.regressions.linear; +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; import java.util.Objects; +import java.util.UUID; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.ignite.ml.Exportable; import org.apache.ignite.ml.Exporter; import org.apache.ignite.ml.IgniteModel; +import org.apache.ignite.ml.inference.json.JSONModel; +import org.apache.ignite.ml.inference.json.JSONWritable; import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.math.primitives.vector.VectorUtils; /** * Simple linear regression model 
which predicts result value Y as a linear combination of input variables: * Y = weights * X + intercept. */ -public final class LinearRegressionModel implements IgniteModel, Exportable { +public final class LinearRegressionModel implements IgniteModel, Exportable, + JSONWritable { /** */ private static final long serialVersionUID = -105984600091550226L; /** Multiplier of the objects's vector required to make prediction. */ - private final Vector weights; + private Vector weights; /** Intercept of the linear regression model */ - private final double intercept; + private double intercept; /** */ public LinearRegressionModel(Vector weights, double intercept) { @@ -44,15 +54,41 @@ public LinearRegressionModel(Vector weights, double intercept) { } /** */ - public Vector getWeights() { + private LinearRegressionModel() { + } + + /** */ + public Vector weights() { return weights; } /** */ - public double getIntercept() { + public double intercept() { return intercept; } + /** + * Set up the weights. + * + * @param weights The parameter value. + * @return Model with new weights parameter value. + */ + public LinearRegressionModel withWeights(Vector weights) { + this.weights = weights; + return this; + } + + /** + * Set up the intercept. + * + * @param intercept The parameter value. + * @return Model with new intercept parameter value. + */ + public LinearRegressionModel withIntercept(double intercept) { + this.intercept = intercept; + return this; + } + /** {@inheritDoc} */ @Override public Double predict(Vector input) { return input.dot(weights) + intercept; @@ -108,4 +144,72 @@ public double getIntercept() { @Override public String toString(boolean pretty) { return toString(); } + + /** Loads LinearRegressionModel from JSON file. 
*/ + public static LinearRegressionModel fromJSON(Path path) { + ObjectMapper mapper = new ObjectMapper(); + + LinearRegressionModelJSONExportModel linearRegressionJSONExportModel; + try { + linearRegressionJSONExportModel = mapper + .readValue(new File(path.toAbsolutePath().toString()), LinearRegressionModelJSONExportModel.class); + + return linearRegressionJSONExportModel.convert(); + } catch (IOException e) { + e.printStackTrace(); + } + + return null; + } + + /** {@inheritDoc} */ + @Override public void toJSON(Path path) { + ObjectMapper mapper = new ObjectMapper(); + try { + LinearRegressionModelJSONExportModel exportModel = new LinearRegressionModelJSONExportModel( + System.currentTimeMillis(), + "linreg_" + UUID.randomUUID().toString(), + LinearRegressionModel.class.getSimpleName() + ); + exportModel.intercept = intercept; + exportModel.weights = weights.asArray(); + + File file = new File(path.toAbsolutePath().toString()); + mapper.writeValue(file, exportModel); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** */ + public static class LinearRegressionModelJSONExportModel extends JSONModel { + /** + * Multiplier of the objects's vector required to make prediction. + */ + public double[] weights; + + /** + * Intercept of the linear regression model. 
+ */ + public double intercept; + + /** */ + public LinearRegressionModelJSONExportModel(Long timestamp, String uid, String modelClass) { + super(timestamp, uid, modelClass); + } + + /** */ + @JsonCreator + public LinearRegressionModelJSONExportModel() { + } + + /** {@inheritDoc} */ + @Override public LinearRegressionModel convert() { + LinearRegressionModel linRegMdl = new LinearRegressionModel(); + linRegMdl.withWeights(VectorUtils.of(weights)); + linRegMdl.withIntercept(intercept); + + return linRegMdl; + } + } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainer.java index da813fc1953e8..d98267152e3c4 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainer.java @@ -148,8 +148,8 @@ public LinearRegressionSGDTrainer(UpdatesStrategy, Exportable { +public final class LogisticRegressionModel implements IgniteModel, Exportable, + JSONWritable { /** */ private static final long serialVersionUID = -133984600091550776L; @@ -42,6 +53,10 @@ public final class LogisticRegressionModel implements IgniteModel, JSONWritable { + /** Root node. */ + private DecisionTreeNode rootNode; + + /** + * Creates the model. + * + * @param rootNode Root node of the tree. + */ + public DecisionTreeModel(DecisionTreeNode rootNode) { + this.rootNode = rootNode; + } + + /** */ + private DecisionTreeModel() { + + } + + /** Returns the root node. 
*/ + public DecisionTreeNode getRootNode() { + return rootNode; + } + + /** {@inheritDoc} */ + @Override public Double predict(Vector features) { + return rootNode.predict(features); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return toString(false); + } + + /** {@inheritDoc} */ + @Override public String toString(boolean pretty) { + return DecisionTreeTrainer.printTree(rootNode, pretty); + } + + /** {@inheritDoc} */ + @Override public void toJSON(Path path) { + ObjectMapper mapper = new ObjectMapper().configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); + mapper.addMixIn(DecisionTreeModel.class, JSONModelMixIn.class); + + ObjectWriter writer = mapper + .writerFor(DecisionTreeModel.class) + .withAttribute("formatVersion", JSONModel.JSON_MODEL_FORMAT_VERSION) + .withAttribute("timestamp", System.currentTimeMillis()) + .withAttribute("uid", "dt_" + UUID.randomUUID().toString()) + .withAttribute("modelClass", DecisionTreeModel.class.getSimpleName()); + + try { + File file = new File(path.toAbsolutePath().toString()); + writer.writeValue(file, this); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** Loads DecisionTreeModel from JSON file. 
*/ + public static DecisionTreeModel fromJSON(Path path) { + ObjectMapper mapper = new ObjectMapper(); + mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + + DecisionTreeModel mdl; + try { + JacksonHelper.readAndValidateBasicJsonModelProperties(path, mapper, DecisionTreeModel.class.getSimpleName()); + mdl = mapper.readValue(new File(path.toAbsolutePath().toString()), DecisionTreeModel.class); + return mdl; + } catch (IOException e) { + e.printStackTrace(); + } + return null; + } +} diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeNode.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeNode.java index 80036ba4da2d8..8d705e4c9bbe5 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeNode.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeNode.java @@ -17,11 +17,24 @@ package org.apache.ignite.ml.tree; +import com.fasterxml.jackson.annotation.JsonSubTypes; +import com.fasterxml.jackson.annotation.JsonTypeInfo; import org.apache.ignite.ml.IgniteModel; import org.apache.ignite.ml.math.primitives.vector.Vector; /** * Base interface for decision tree nodes. */ -public interface DecisionTreeNode extends IgniteModel { +@JsonTypeInfo( use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "type") +@JsonSubTypes( + { + @JsonSubTypes.Type(value = DecisionTreeLeafNode.class, name = "leaf"), + @JsonSubTypes.Type(value = DecisionTreeConditionalNode.class, name = "conditional"), + }) +public abstract class DecisionTreeNode implements IgniteModel { + /** + * Empty constructor for serialization needs. 
+ */ + protected DecisionTreeNode() { + } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainer.java index 2b259f24cb3d0..7ae86fcceea19 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainer.java @@ -31,7 +31,7 @@ * Decision tree regressor based on distributed decision tree trainer that allows to fit trees using row-partitioned * dataset. */ -public class DecisionTreeRegressionTrainer extends DecisionTree { +public class DecisionTreeRegressionTrainer extends DecisionTreeTrainer { /** * Constructs a new decision tree regressor with default impurity function compressor. * diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTree.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeTrainer.java similarity index 92% rename from modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTree.java rename to modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeTrainer.java index eb2f1e5e2eba2..0692ec62ac853 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTree.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeTrainer.java @@ -41,7 +41,7 @@ * * @param Type of impurity measure. */ -public abstract class DecisionTree> extends SingleLabelDatasetTrainer { +public abstract class DecisionTreeTrainer> extends SingleLabelDatasetTrainer { /** Max tree deep. */ int maxDeep; @@ -65,8 +65,8 @@ public abstract class DecisionTree> extends SingleL * @param compressor Impurity function compressor. * @param decisionTreeLeafBuilder Decision tree leaf builder. 
*/ - DecisionTree(int maxDeep, double minImpurityDecrease, StepFunctionCompressor compressor, - DecisionTreeLeafBuilder decisionTreeLeafBuilder) { + DecisionTreeTrainer(int maxDeep, double minImpurityDecrease, StepFunctionCompressor compressor, + DecisionTreeLeafBuilder decisionTreeLeafBuilder) { this.maxDeep = maxDeep; this.minImpurityDecrease = minImpurityDecrease; this.compressor = compressor; @@ -108,7 +108,7 @@ else if (node instanceof DecisionTreeConditionalNode) { } /** {@inheritDoc} */ - @Override public DecisionTreeNode fitWithInitializedDeployingContext(DatasetBuilder datasetBuilder, + @Override public DecisionTreeModel fitWithInitializedDeployingContext(DatasetBuilder datasetBuilder, Preprocessor preprocessor) { try (Dataset dataset = datasetBuilder.build( envBuilder, @@ -124,13 +124,13 @@ else if (node instanceof DecisionTreeConditionalNode) { } /** {@inheritDoc} */ - @Override public boolean isUpdateable(DecisionTreeNode mdl) { + @Override public boolean isUpdateable(DecisionTreeModel mdl) { return true; } /** {@inheritDoc} */ - @Override public DecisionTree withEnvironmentBuilder(LearningEnvironmentBuilder envBuilder) { - return (DecisionTree)super.withEnvironmentBuilder(envBuilder); + @Override public DecisionTreeTrainer withEnvironmentBuilder(LearningEnvironmentBuilder envBuilder) { + return (DecisionTreeTrainer)super.withEnvironmentBuilder(envBuilder); } /** @@ -143,7 +143,7 @@ else if (node instanceof DecisionTreeConditionalNode) { * @param Type of a value in {@code upstream} data. * @return New model based on new dataset. 
*/ - @Override protected DecisionTreeNode updateModel(DecisionTreeNode mdl, + @Override protected DecisionTreeModel updateModel(DecisionTreeModel mdl, DatasetBuilder datasetBuilder, Preprocessor preprocessor) { @@ -151,8 +151,8 @@ else if (node instanceof DecisionTreeConditionalNode) { } /** */ - public DecisionTreeNode fit(Dataset dataset) { - return split(dataset, e -> true, 0, getImpurityMeasureCalculator(dataset)); + public DecisionTreeModel fit(Dataset dataset) { + return new DecisionTreeModel(split(dataset, e -> true, 0, getImpurityMeasureCalculator(dataset))); } /** diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/NodeData.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/NodeData.java new file mode 100644 index 0000000000000..885a14d788a57 --- /dev/null +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/NodeData.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.ml.tree; + +import java.util.Map; +import java.util.NavigableMap; + +/** + * Presenting decision tree data in plain manner (For example: from one parquet row filled with NodeData in Spark DT model). + */ +public class NodeData { + /** Id. 
*/ + public int id; + + /** Prediction. */ + public double prediction; + + /** Left child id. */ + public int leftChildId; + + /** Right child id. */ + public int rightChildId; + + /** Threshold. */ + public double threshold; + + /** Feature index. */ + public int featureIdx; + + /** Is leaf node. */ + public boolean isLeafNode; + + /**{@inheritDoc}*/ + @Override public String toString() { + return "NodeData{" + + "id=" + id + + ", prediction=" + prediction + + ", leftChildId=" + leftChildId + + ", rightChildId=" + rightChildId + + ", threshold=" + threshold + + ", featureIdx=" + featureIdx + + ", isLeafNode=" + isLeafNode + + '}'; + } + + /** + * Build tree or sub-tree based on indices and nodes sorted map as a dictionary. + * + * @param nodes The sorted map of nodes. + * @param rootNodeData Root node data. + */ + public static DecisionTreeNode buildTree(Map nodes, + NodeData rootNodeData) { + return rootNodeData.isLeafNode ? new DecisionTreeLeafNode(rootNodeData.prediction) : new DecisionTreeConditionalNode(rootNodeData.featureIdx, + rootNodeData.threshold, + buildTree(nodes, nodes.get(rootNodeData.rightChildId)), + buildTree(nodes, nodes.get(rootNodeData.leftChildId)), + null); + } + + /** + * Builds the DT model by the given sorted map of nodes. + * + * @param nodes The sorted map of nodes. 
+ */ + public static DecisionTreeModel buildDecisionTreeModel(Map nodes) { + DecisionTreeModel mdl = null; + if (!nodes.isEmpty()) { + NodeData rootNodeData = (NodeData)((NavigableMap)nodes).firstEntry().getValue(); + mdl = new DecisionTreeModel(buildTree(nodes, rootNodeData)); + return mdl; + } + return mdl; + } +} diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/boosting/GDBOnTreesLearningStrategy.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/boosting/GDBOnTreesLearningStrategy.java index 1c25f73b352c4..a2438e517ae01 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/boosting/GDBOnTreesLearningStrategy.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/boosting/GDBOnTreesLearningStrategy.java @@ -22,7 +22,7 @@ import org.apache.ignite.ml.IgniteModel; import org.apache.ignite.ml.composition.ModelsComposition; import org.apache.ignite.ml.composition.boosting.GDBLearningStrategy; -import org.apache.ignite.ml.composition.boosting.GDBTrainer; +import org.apache.ignite.ml.composition.boosting.GDBModel; import org.apache.ignite.ml.composition.boosting.convergence.ConvergenceChecker; import org.apache.ignite.ml.composition.predictionsaggregator.WeightedPredictionsAggregator; import org.apache.ignite.ml.dataset.Dataset; @@ -35,7 +35,7 @@ import org.apache.ignite.ml.math.primitives.vector.VectorUtils; import org.apache.ignite.ml.preprocessing.Preprocessor; import org.apache.ignite.ml.trainers.DatasetTrainer; -import org.apache.ignite.ml.tree.DecisionTree; +import org.apache.ignite.ml.tree.DecisionTreeTrainer; import org.apache.ignite.ml.tree.data.DecisionTreeData; import org.apache.ignite.ml.tree.data.DecisionTreeDataBuilder; @@ -57,15 +57,15 @@ public GDBOnTreesLearningStrategy(boolean useIdx) { } /** {@inheritDoc} */ - @Override public List> update(GDBTrainer.GDBModel mdlToUpdate, + @Override public List> update(GDBModel mdlToUpdate, DatasetBuilder datasetBuilder, Preprocessor vectorizer) { LearningEnvironment 
environment = envBuilder.buildForTrainer(); environment.initDeployingContext(vectorizer); DatasetTrainer, Double> trainer = baseMdlTrainerBuilder.get(); - assert trainer instanceof DecisionTree; - DecisionTree decisionTreeTrainer = (DecisionTree)trainer; + assert trainer instanceof DecisionTreeTrainer; + DecisionTreeTrainer decisionTreeTrainer = (DecisionTreeTrainer)trainer; List> models = initLearningState(mdlToUpdate); diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainer.java index fb118ec327e29..ab8db2e563114 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainer.java @@ -22,7 +22,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import org.apache.ignite.ml.composition.ModelsComposition; import org.apache.ignite.ml.composition.predictionsaggregator.OnMajorityPredictionsAggregator; import org.apache.ignite.ml.dataset.Dataset; import org.apache.ignite.ml.dataset.feature.FeatureMeta; @@ -31,7 +30,7 @@ import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedVector; import org.apache.ignite.ml.dataset.primitive.context.EmptyContext; import org.apache.ignite.ml.environment.LearningEnvironmentBuilder; -import org.apache.ignite.ml.tree.randomforest.data.TreeRoot; +import org.apache.ignite.ml.tree.randomforest.data.RandomForestTreeModel; import org.apache.ignite.ml.tree.randomforest.data.impurity.GiniHistogram; import org.apache.ignite.ml.tree.randomforest.data.impurity.GiniHistogramsComputer; import org.apache.ignite.ml.tree.randomforest.data.impurity.ImpurityHistogramsComputer; @@ -98,8 +97,8 @@ public RandomForestClassifierTrainer(List meta) { } /** {@inheritDoc} */ - @Override protected ModelsComposition buildComposition(List 
models) { - return new ModelsComposition(models, new OnMajorityPredictionsAggregator()); + @Override protected RandomForestModel buildComposition(List models) { + return new RandomForestModel(models, new OnMajorityPredictionsAggregator()); } /** {@inheritDoc} */ diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestModel.java new file mode 100644 index 0000000000000..1ae95762b925a --- /dev/null +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestModel.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.ml.tree.randomforest; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectWriter; +import com.fasterxml.jackson.databind.SerializationFeature; +import org.apache.ignite.ml.composition.ModelsComposition; +import org.apache.ignite.ml.composition.predictionsaggregator.MeanValuePredictionsAggregator; +import org.apache.ignite.ml.composition.predictionsaggregator.PredictionsAggregator; +import org.apache.ignite.ml.inference.json.JSONModel; +import org.apache.ignite.ml.inference.json.JSONModelMixIn; +import org.apache.ignite.ml.inference.json.JSONWritable; +import org.apache.ignite.ml.inference.json.JacksonHelper; +import org.apache.ignite.ml.tree.randomforest.data.RandomForestTreeModel; + +/** + * Random Forest Model class. + */ +public class RandomForestModel extends ModelsComposition implements JSONWritable { + /** Serial version uid. */ + private static final long serialVersionUID = 3476345240155508004L; + + /** */ + public RandomForestModel() { + super(new ArrayList<>(), new MeanValuePredictionsAggregator()); + + } + + /** */ + public RandomForestModel(List oldModels, PredictionsAggregator predictionsAggregator) { + super(oldModels, predictionsAggregator); + } + + /** + * Returns predictions aggregator. + */ + @Override public PredictionsAggregator getPredictionsAggregator() { + return predictionsAggregator; + } + + /** + * Returns containing models. 
+ */ + @Override public List getModels() { + return models; + } + + /** {@inheritDoc} */ + @Override public void toJSON(Path path) { + ObjectMapper mapper = new ObjectMapper().configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); + mapper.addMixIn(RandomForestModel.class, JSONModelMixIn.class); + + ObjectWriter writer = mapper + .writerFor(RandomForestModel.class) + .withAttribute("formatVersion", JSONModel.JSON_MODEL_FORMAT_VERSION) + .withAttribute("timestamp", System.currentTimeMillis()) + .withAttribute("uid", "dt_" + UUID.randomUUID().toString()) + .withAttribute("modelClass", RandomForestModel.class.getSimpleName()); + + try { + File file = new File(path.toAbsolutePath().toString()); + writer.writeValue(file, this); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** Loads RandomForestModel from JSON file. */ + public static RandomForestModel fromJSON(Path path) { + ObjectMapper mapper = new ObjectMapper(); + mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + + RandomForestModel mdl; + try { + JacksonHelper.readAndValidateBasicJsonModelProperties(path, mapper, RandomForestModel.class.getSimpleName()); + mdl = mapper.readValue(new File(path.toAbsolutePath().toString()), RandomForestModel.class); + return mdl; + } catch (IOException e) { + e.printStackTrace(); + } + return null; + } +} diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainer.java index ab1d0361ee4c7..4b0499f5c8f73 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainer.java @@ -18,10 +18,9 @@ package org.apache.ignite.ml.tree.randomforest; import java.util.List; -import org.apache.ignite.ml.composition.ModelsComposition; import 
org.apache.ignite.ml.composition.predictionsaggregator.MeanValuePredictionsAggregator; import org.apache.ignite.ml.dataset.feature.FeatureMeta; -import org.apache.ignite.ml.tree.randomforest.data.TreeRoot; +import org.apache.ignite.ml.tree.randomforest.data.RandomForestTreeModel; import org.apache.ignite.ml.tree.randomforest.data.impurity.ImpurityHistogramsComputer; import org.apache.ignite.ml.tree.randomforest.data.impurity.MSEHistogram; import org.apache.ignite.ml.tree.randomforest.data.impurity.MSEHistogramComputer; @@ -49,8 +48,8 @@ public RandomForestRegressionTrainer(List meta) { } /** {@inheritDoc} */ - @Override protected ModelsComposition buildComposition(List models) { - return new ModelsComposition(models, new MeanValuePredictionsAggregator()); + @Override protected RandomForestModel buildComposition(List models) { + return new RandomForestModel(models, new MeanValuePredictionsAggregator()); } /** {@inheritDoc} */ diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestTrainer.java index fe860ca62866d..481c22b15673b 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestTrainer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestTrainer.java @@ -30,8 +30,6 @@ import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; -import org.apache.ignite.ml.IgniteModel; -import org.apache.ignite.ml.composition.ModelsComposition; import org.apache.ignite.ml.dataset.Dataset; import org.apache.ignite.ml.dataset.DatasetBuilder; import org.apache.ignite.ml.dataset.feature.BucketMeta; @@ -41,14 +39,13 @@ import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedVector; import org.apache.ignite.ml.dataset.primitive.builder.context.EmptyContextBuilder; import org.apache.ignite.ml.dataset.primitive.context.EmptyContext; -import 
org.apache.ignite.ml.math.primitives.vector.Vector; import org.apache.ignite.ml.preprocessing.Preprocessor; import org.apache.ignite.ml.trainers.SingleLabelDatasetTrainer; import org.apache.ignite.ml.tree.randomforest.data.FeaturesCountSelectionStrategies; import org.apache.ignite.ml.tree.randomforest.data.NodeId; import org.apache.ignite.ml.tree.randomforest.data.NodeSplit; +import org.apache.ignite.ml.tree.randomforest.data.RandomForestTreeModel; import org.apache.ignite.ml.tree.randomforest.data.TreeNode; -import org.apache.ignite.ml.tree.randomforest.data.TreeRoot; import org.apache.ignite.ml.tree.randomforest.data.impurity.ImpurityComputer; import org.apache.ignite.ml.tree.randomforest.data.impurity.ImpurityHistogramsComputer; import org.apache.ignite.ml.tree.randomforest.data.statistics.LeafValuesComputer; @@ -68,7 +65,7 @@ * @param Type of child of RandomForestTrainer using in with-methods. */ public abstract class RandomForestTrainer, - T extends RandomForestTrainer> extends SingleLabelDatasetTrainer { + T extends RandomForestTrainer> extends SingleLabelDatasetTrainer { /** Bucket size factor. */ private static final double BUCKET_SIZE_FACTOR = (1 / 10.0); @@ -110,9 +107,9 @@ public RandomForestTrainer(List meta) { } /** {@inheritDoc} */ - @Override public ModelsComposition fitWithInitializedDeployingContext(DatasetBuilder datasetBuilder, - Preprocessor preprocessor) { - List models = null; + @Override public RandomForestModel fitWithInitializedDeployingContext(DatasetBuilder datasetBuilder, + Preprocessor preprocessor) { + List models = null; try (Dataset dataset = datasetBuilder.build( envBuilder, new EmptyContextBuilder<>(), @@ -215,9 +212,9 @@ protected boolean init(Dataset datas * @param dataset Dataset. * @return list of decision trees. 
*/ - private List fit(Dataset dataset) { + private List fit(Dataset dataset) { Queue treesQueue = createRootsQueue(); - ArrayList roots = initTrees(treesQueue); + ArrayList roots = initTrees(treesQueue); Map histMeta = computeHistogramMeta(meta, dataset); if (histMeta.isEmpty()) return Collections.emptyList(); @@ -239,20 +236,20 @@ private List fit(Dataset d } /** {@inheritDoc} */ - @Override public boolean isUpdateable(ModelsComposition mdl) { - ModelsComposition fakeComposition = buildComposition(Collections.emptyList()); + @Override public boolean isUpdateable(RandomForestModel mdl) { + RandomForestModel fakeComposition = buildComposition(Collections.emptyList()); return mdl.getPredictionsAggregator().getClass() == fakeComposition.getPredictionsAggregator().getClass(); } /** {@inheritDoc} */ - @Override protected ModelsComposition updateModel(ModelsComposition mdl, DatasetBuilder datasetBuilder, + @Override protected RandomForestModel updateModel(RandomForestModel mdl, DatasetBuilder datasetBuilder, Preprocessor preprocessor) { - ArrayList> oldModels = new ArrayList<>(mdl.getModels()); - ModelsComposition newModels = fit(datasetBuilder, preprocessor); + List oldModels = new ArrayList<>(mdl.getModels()); + RandomForestModel newModels = fit(datasetBuilder, preprocessor); oldModels.addAll(newModels.getModels()); - return new ModelsComposition(oldModels, mdl.getPredictionsAggregator()); + return new RandomForestModel(oldModels, mdl.getPredictionsAggregator()); } /** @@ -297,16 +294,16 @@ private void split(Queue learningQueue, Map nodesToL * @param treesQueue Trees queue. * @return List of trees. 
*/ - protected ArrayList initTrees(Queue treesQueue) { + protected ArrayList initTrees(Queue treesQueue) { assert featuresPerTree > 0; - ArrayList roots = new ArrayList<>(); + ArrayList roots = new ArrayList<>(); List allFeatureIds = IntStream.range(0, meta.size()).boxed().collect(Collectors.toList()); for (TreeNode node : treesQueue) { Collections.shuffle(allFeatureIds, random); Set featuresSubspace = allFeatureIds.stream() .limit(featuresPerTree).collect(Collectors.toSet()); - roots.add(new TreeRoot(node, featuresSubspace)); + roots.add(new RandomForestTreeModel(node, featuresSubspace)); } return roots; @@ -394,6 +391,6 @@ boolean needSplit(TreeNode parentNode, Optional split) { * @param models Models. * @return composition of built trees. */ - protected abstract ModelsComposition buildComposition(List models); + protected abstract RandomForestModel buildComposition(List models); } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/NodeId.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/NodeId.java index f0ecd628009fe..a8bc849bb7ab7 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/NodeId.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/NodeId.java @@ -29,10 +29,10 @@ public class NodeId implements Serializable { private static final long serialVersionUID = 4400852013136423333L; /** Tree id. */ - private final int treeId; + private int treeId; /** Node id. */ - private final long nodeId; + private long nodeId; /** * Create an instance of NodeId. @@ -45,11 +45,14 @@ public NodeId(int treeId, long nodeId) { this.nodeId = nodeId; } + public NodeId() { + } + /** * * @return Tree id. */ - public int treeId() { + public int getTreeId() { return treeId; } @@ -57,7 +60,7 @@ public int treeId() { * * @return Node id. 
*/ - public long nodeId() { + public long getNodeId() { return nodeId; } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/NodeSplit.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/NodeSplit.java index 6bdf9a9dce3ad..8146df01638e0 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/NodeSplit.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/NodeSplit.java @@ -28,13 +28,16 @@ public class NodeSplit implements Serializable { private static final long serialVersionUID = 1331311529596106124L; /** Feature id in feature vector. */ - private final int featureId; + private int featureId; /** Feature split value. */ - private final double val; + private double val; /** Impurity at this split point. */ - private final double impurity; + private double impurity; + + public NodeSplit() { + } /** * Creates an instance of NodeSplit. diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/TreeRoot.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/RandomForestTreeModel.java similarity index 88% rename from modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/TreeRoot.java rename to modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/RandomForestTreeModel.java index 53a2d66c2278e..563080ad69774 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/TreeRoot.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/RandomForestTreeModel.java @@ -27,12 +27,12 @@ /** * Tree root class. */ -public class TreeRoot implements IgniteModel { +public class RandomForestTreeModel implements IgniteModel { /** Serial version uid. */ private static final long serialVersionUID = 531797299171329057L; /** Root node. */ - private TreeNode node; + private TreeNode rootNode; /** Used features. 
*/ private Set usedFeatures; @@ -43,14 +43,17 @@ public class TreeRoot implements IgniteModel { * @param root Root. * @param usedFeatures Used features. */ - public TreeRoot(TreeNode root, Set usedFeatures) { - this.node = root; + public RandomForestTreeModel(TreeNode root, Set usedFeatures) { + this.rootNode = root; this.usedFeatures = usedFeatures; } + public RandomForestTreeModel() { + } + /** {@inheritDoc} */ @Override public Double predict(Vector vector) { - return node.predict(vector); + return rootNode.predict(vector); } /** */ @@ -60,15 +63,15 @@ public Set getUsedFeatures() { /** */ public TreeNode getRootNode() { - return node; + return rootNode; } /** * @return All leafs in tree. */ - public List getLeafs() { + public List leafs() { List res = new ArrayList<>(); - getLeafs(node, res); + leafs(rootNode, res); return res; } @@ -76,12 +79,12 @@ public List getLeafs() { * @param root Root. * @param res Result list. */ - private void getLeafs(TreeNode root, List res) { + private void leafs(TreeNode root, List res) { if (root.getType() == TreeNode.Type.LEAF) res.add(root); else { - getLeafs(root.getLeft(), res); - getLeafs(root.getRight(), res); + leafs(root.getLeft(), res); + leafs(root.getRight(), res); } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/TreeNode.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/TreeNode.java index b373596e8f900..7a480e60f8587 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/TreeNode.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/TreeNode.java @@ -44,7 +44,7 @@ public enum Type { } /** Id. */ - private final NodeId id; + private NodeId id; /** Feature id. 
*/ private int featureId; @@ -81,6 +81,9 @@ public TreeNode(long id, int treeId) { this.depth = 1; } + public TreeNode() { + } + /** {@inheritDoc} */ @Override public Double predict(Vector features) { assert type != Type.UNKNOWN; @@ -125,8 +128,8 @@ public List toConditional(int featureId, double val) { assert type == Type.UNKNOWN; toLeaf(val); - left = new TreeNode(2 * id.nodeId(), id.treeId()); - right = new TreeNode(2 * id.nodeId() + 1, id.treeId()); + left = new TreeNode(2 * id.getNodeId(), id.getTreeId()); + right = new TreeNode(2 * id.getNodeId() + 1, id.getTreeId()); this.type = Type.CONDITIONAL; this.featureId = featureId; diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/ImpurityHistogramsComputer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/ImpurityHistogramsComputer.java index bc22ee1669223..521b42622a4e0 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/ImpurityHistogramsComputer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/ImpurityHistogramsComputer.java @@ -32,8 +32,8 @@ import org.apache.ignite.ml.dataset.primitive.context.EmptyContext; import org.apache.ignite.ml.tree.randomforest.data.NodeId; import org.apache.ignite.ml.tree.randomforest.data.NodeSplit; +import org.apache.ignite.ml.tree.randomforest.data.RandomForestTreeModel; import org.apache.ignite.ml.tree.randomforest.data.TreeNode; -import org.apache.ignite.ml.tree.randomforest.data.TreeRoot; /** * Class containing logic of aggregation impurity statistics within learning dataset. @@ -52,7 +52,7 @@ public abstract class ImpurityHistogramsComputer> aggregateImpurityStatistics(ArrayList roots, + public Map> aggregateImpurityStatistics(ArrayList roots, Map histMeta, Map nodesToLearn, Dataset dataset) { @@ -73,7 +73,7 @@ public Map> aggregateImpurityStatistics(ArrayL * @return Leaf statistics for impurity computing. 
*/ private Map> aggregateImpurityStatisticsOnPartition( - BootstrappedDatasetPartition dataset, ArrayList roots, + BootstrappedDatasetPartition dataset, ArrayList roots, Map histMeta, Map part) { @@ -85,7 +85,7 @@ private Map> aggregateImpurityStatisticsOnPart if (vector.counters()[sampleId] == 0) continue; - TreeRoot root = roots.get(sampleId); + RandomForestTreeModel root = roots.get(sampleId); NodeId key = root.getRootNode().predictNextNodeKey(vector.features()); if (!part.containsKey(key)) //if we didn't take all nodes from learning queue continue; diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/LeafValuesComputer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/LeafValuesComputer.java index 98c2abacb221d..7c8f7e7dd72ec 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/LeafValuesComputer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/LeafValuesComputer.java @@ -30,8 +30,8 @@ import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedVector; import org.apache.ignite.ml.dataset.primitive.context.EmptyContext; import org.apache.ignite.ml.tree.randomforest.data.NodeId; +import org.apache.ignite.ml.tree.randomforest.data.RandomForestTreeModel; import org.apache.ignite.ml.tree.randomforest.data.TreeNode; -import org.apache.ignite.ml.tree.randomforest.data.TreeRoot; /** * Class containing logic of leaf values computing after building of all trees in random forest. @@ -49,11 +49,11 @@ public abstract class LeafValuesComputer implements Serializable { * @param roots Learned trees. * @param dataset Dataset. 
*/ - public void setValuesForLeaves(ArrayList roots, + public void setValuesForLeaves(ArrayList roots, Dataset dataset) { Map leafs = roots.stream() - .flatMap(r -> r.getLeafs().stream()) + .flatMap(r -> r.leafs().stream()) .collect(Collectors.toMap(TreeNode::getId, Function.identity())); Map stats = dataset.compute( @@ -78,7 +78,7 @@ public void setValuesForLeaves(ArrayList roots, * @param data Data. * @return Statistics on labels for each leaf nodes. */ - private Map computeLeafsStatisticsInPartition(ArrayList roots, + private Map computeLeafsStatisticsInPartition(ArrayList roots, Map leafs, BootstrappedDatasetPartition data) { Map res = new HashMap<>(); diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/clustering/KMeansModelTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/clustering/KMeansModelTest.java index cc652e8e50b69..5c7f8dad79905 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/clustering/KMeansModelTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/clustering/KMeansModelTest.java @@ -54,7 +54,7 @@ public void predictClusters() { Assert.assertEquals(mdl.predict(new DenseVector(new double[]{-1.1, -1.1})), 3.0, PRECISION); Assert.assertEquals(mdl.distanceMeasure(), distanceMeasure); - Assert.assertEquals(mdl.getAmountOfClusters(), 4); - Assert.assertArrayEquals(mdl.getCenters(), centers); + Assert.assertEquals(mdl.amountOfClusters(), 4); + Assert.assertArrayEquals(mdl.centers(), centers); } } diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/common/KeepBinaryTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/common/KeepBinaryTest.java index 0d35df58b84cb..ef33acae1a6ed 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/common/KeepBinaryTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/common/KeepBinaryTest.java @@ -83,7 +83,7 @@ public void test() { Integer zeroCentre = mdl.predict(VectorUtils.num2Vec(0.0)); - assertTrue(mdl.getCenters()[zeroCentre].get(0) == 0); + 
assertTrue(mdl.centers()[zeroCentre].get(0) == 0); } /** diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/composition/boosting/GDBTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/composition/boosting/GDBTrainerTest.java index e5170505f08c9..9bd9509255e7d 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/composition/boosting/GDBTrainerTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/composition/boosting/GDBTrainerTest.java @@ -32,7 +32,7 @@ import org.apache.ignite.ml.math.functions.IgniteBiFunction; import org.apache.ignite.ml.math.primitives.vector.Vector; import org.apache.ignite.ml.math.primitives.vector.VectorUtils; -import org.apache.ignite.ml.tree.DecisionTreeConditionalNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; import org.apache.ignite.ml.tree.boosting.GDBBinaryClassifierOnTreesTrainer; import org.apache.ignite.ml.tree.boosting.GDBRegressionOnTreesTrainer; import org.junit.Test; @@ -83,7 +83,7 @@ public void testFitRegression() { assertTrue(!composition.toString(true).isEmpty()); assertTrue(!composition.toString(false).isEmpty()); - composition.getModels().forEach(m -> assertTrue(m instanceof DecisionTreeConditionalNode)); + composition.getModels().forEach(m -> assertTrue(m instanceof DecisionTreeModel)); assertEquals(2000, composition.getModels().size()); assertTrue(composition.getPredictionsAggregator() instanceof WeightedPredictionsAggregator); @@ -145,7 +145,7 @@ private void testClassifier(BiFunction, assertTrue(mdl instanceof ModelsComposition); ModelsComposition composition = (ModelsComposition)mdl; - composition.getModels().forEach(m -> assertTrue(m instanceof DecisionTreeConditionalNode)); + composition.getModels().forEach(m -> assertTrue(m instanceof DecisionTreeModel)); assertTrue(composition.getModels().size() < 500); assertTrue(composition.getPredictionsAggregator() instanceof WeightedPredictionsAggregator); diff --git 
a/modules/ml/src/test/java/org/apache/ignite/ml/math/distances/DistanceTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/math/distances/DistanceTest.java index 0be0b5472b255..40949c493b125 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/math/distances/DistanceTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/math/distances/DistanceTest.java @@ -43,7 +43,7 @@ public class DistanceTest { new BrayCurtisDistance(), new CanberraDistance(), new JensenShannonDistance(), - new WeightedMinkowskiDistance(4, new DenseVector(new double[]{1, 1, 1})), + new WeightedMinkowskiDistance(4, new double[]{1, 1, 1}), new MinkowskiDistance(Math.random())); /** */ @@ -197,9 +197,9 @@ public void weightedMinkowskiDistance() { double precistion = 0.01; int p = 2; double expRes = 5.0; - Vector v = new DenseVector(new double[]{2, 3, 4}); + double[] weights = new double[]{2, 3, 4}; - DistanceMeasure distanceMeasure = new WeightedMinkowskiDistance(p, v); + DistanceMeasure distanceMeasure = new WeightedMinkowskiDistance(p, weights); assertEquals(expRes, distanceMeasure.compute(v1, data2), precistion); assertEquals(expRes, distanceMeasure.compute(v1, v2), precistion); diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/math/distances/WeightedMinkowskiDistanceTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/math/distances/WeightedMinkowskiDistanceTest.java index 1ab93a195b69a..c6a1d1826d8a3 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/math/distances/WeightedMinkowskiDistanceTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/math/distances/WeightedMinkowskiDistanceTest.java @@ -72,7 +72,7 @@ public WeightedMinkowskiDistanceTest(TestData testData) { /** */ @Test public void testWeightedMinkowski() { - DistanceMeasure distanceMeasure = new WeightedMinkowskiDistance(testData.p, testData.weight); + DistanceMeasure distanceMeasure = new WeightedMinkowskiDistance(testData.p, testData.weights); assertEquals(testData.expRes, 
distanceMeasure.compute(testData.vectorA, testData.vectorB), PRECISION); @@ -87,15 +87,15 @@ private static class TestData { public final Integer p; - public final Vector weight; + public final double[] weights; public final Double expRes; - private TestData(double[] vectorA, double[] vectorB, Integer p, double[] weight, double expRes) { + private TestData(double[] vectorA, double[] vectorB, Integer p, double[] weights, double expRes) { this.vectorA = new DenseVector(vectorA); this.vectorB = new DenseVector(vectorB); this.p = p; - this.weight = new DenseVector(weight); + this.weights = weights; this.expRes = expRes; } @@ -104,7 +104,7 @@ private TestData(double[] vectorA, double[] vectorB, Integer p, double[] weight, Arrays.toString(vectorA.asArray()), Arrays.toString(vectorB.asArray()), p, - Arrays.toString(weight.asArray()), + Arrays.toString(weights), expRes ); } diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainerTest.java index 96c7158da4a9f..a64651af1e29c 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainerTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainerTest.java @@ -59,11 +59,11 @@ public void testSmallDataFit() { assertArrayEquals( new double[]{72.26948107, 15.95144674, 24.07403921, 66.73038781}, - mdl.getWeights().getStorage().data(), + mdl.weights().getStorage().data(), 1e-6 ); - assertEquals(2.8421709430404007e-14, mdl.getIntercept(), 1e-6); + assertEquals(2.8421709430404007e-14, mdl.intercept(), 1e-6); } /** @@ -95,9 +95,9 @@ public void testBigDataFit() { new DoubleArrayVectorizer().labeled(Vectorizer.LabelCoordinate.LAST) ); - assertArrayEquals(coef, mdl.getWeights().getStorage().data(), 1e-6); + assertArrayEquals(coef, mdl.weights().getStorage().data(), 1e-6); - assertEquals(intercept, 
mdl.getIntercept(), 1e-6); + assertEquals(intercept, mdl.intercept(), 1e-6); } /** */ @@ -142,10 +142,10 @@ public void testUpdate() { vectorizer ); - assertArrayEquals(originalMdl.getWeights().getStorage().data(), updatedOnSameDS.getWeights().getStorage().data(), 1e-6); - assertEquals(originalMdl.getIntercept(), updatedOnSameDS.getIntercept(), 1e-6); + assertArrayEquals(originalMdl.weights().getStorage().data(), updatedOnSameDS.weights().getStorage().data(), 1e-6); + assertEquals(originalMdl.intercept(), updatedOnSameDS.intercept(), 1e-6); - assertArrayEquals(originalMdl.getWeights().getStorage().data(), updatedOnEmptyDS.getWeights().getStorage().data(), 1e-6); - assertEquals(originalMdl.getIntercept(), updatedOnEmptyDS.getIntercept(), 1e-6); + assertArrayEquals(originalMdl.weights().getStorage().data(), updatedOnEmptyDS.weights().getStorage().data(), 1e-6); + assertEquals(originalMdl.intercept(), updatedOnEmptyDS.intercept(), 1e-6); } } diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainerTest.java index 22b16d15829fc..9f503697697af 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainerTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainerTest.java @@ -64,11 +64,11 @@ public void testSmallDataFit() { assertArrayEquals( new double[]{72.26948107, 15.95144674, 24.07403921, 66.73038781}, - mdl.getWeights().getStorage().data(), + mdl.weights().getStorage().data(), 1e-1 ); - assertEquals(2.8421709430404007e-14, mdl.getIntercept(), 1e-1); + assertEquals(2.8421709430404007e-14, mdl.intercept(), 1e-1); } /** */ @@ -112,19 +112,19 @@ public void testUpdate() { ); assertArrayEquals( - originalMdl.getWeights().getStorage().data(), - updatedOnSameDS.getWeights().getStorage().data(), + 
originalMdl.weights().getStorage().data(), + updatedOnSameDS.weights().getStorage().data(), 1.0 ); - assertEquals(originalMdl.getIntercept(), updatedOnSameDS.getIntercept(), 1.0); + assertEquals(originalMdl.intercept(), updatedOnSameDS.intercept(), 1.0); assertArrayEquals( - originalMdl.getWeights().getStorage().data(), - updatedOnEmptyDS.getWeights().getStorage().data(), + originalMdl.weights().getStorage().data(), + updatedOnEmptyDS.weights().getStorage().data(), 1e-1 ); - assertEquals(originalMdl.getIntercept(), updatedOnEmptyDS.getIntercept(), 1e-1); + assertEquals(originalMdl.intercept(), updatedOnEmptyDS.intercept(), 1e-1); } } diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/selection/cv/CrossValidationTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/selection/cv/CrossValidationTest.java index 7122c6907d104..bfccc71591067 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/selection/cv/CrossValidationTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/selection/cv/CrossValidationTest.java @@ -31,7 +31,7 @@ import org.apache.ignite.ml.selection.paramgrid.RandomStrategy; import org.apache.ignite.ml.selection.scoring.metric.MetricName; import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; -import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeModel; import org.junit.Test; import static org.apache.ignite.ml.common.TrainerTest.twoLinearlySeparableClasses; @@ -53,7 +53,7 @@ public void testScoreWithGoodDataset() { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(1, 0); - DebugCrossValidation scoreCalculator = + DebugCrossValidation scoreCalculator = new DebugCrossValidation<>(); Vectorizer vectorizer = new DoubleArrayVectorizer().labeled(Vectorizer.LabelCoordinate.FIRST); @@ -84,7 +84,7 @@ public void testScoreWithGoodDatasetAndBinaryMetrics() { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(1, 0); - 
DebugCrossValidation scoreCalculator = + DebugCrossValidation scoreCalculator = new DebugCrossValidation<>(); int folds = 4; @@ -298,7 +298,7 @@ public void testScoreWithBadDataset() { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(1, 0); - DebugCrossValidation scoreCalculator = + DebugCrossValidation scoreCalculator = new DebugCrossValidation<>(); int folds = 4; diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainerIntegrationTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainerIntegrationTest.java index d64c35ede37af..1c3f1407f7dfc 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainerIntegrationTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainerIntegrationTest.java @@ -72,11 +72,12 @@ public void testFit() { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(1, 0); - DecisionTreeNode tree = trainer.fit(ignite, data, new DoubleArrayVectorizer().labeled(1)); + DecisionTreeModel tree = trainer.fit(ignite, data, new DoubleArrayVectorizer().labeled(1)); - assertTrue(tree instanceof DecisionTreeConditionalNode); + DecisionTreeNode decisionTreeNode = tree.getRootNode(); + assertTrue(decisionTreeNode instanceof DecisionTreeConditionalNode); - DecisionTreeConditionalNode node = (DecisionTreeConditionalNode) tree; + DecisionTreeConditionalNode node = (DecisionTreeConditionalNode) decisionTreeNode; assertEquals(0, node.getThreshold(), 1e-3); diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainerTest.java index ed7c4fe856d28..e618f634c0234 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainerTest.java +++ 
b/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainerTest.java @@ -75,11 +75,11 @@ public void testFit() { DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(1, 0) .withUseIndex(useIdx == 1); - DecisionTreeNode tree = trainer.fit(data, parts, new DoubleArrayVectorizer().labeled(1)); + DecisionTreeNode treeNode = trainer.fit(data, parts, new DoubleArrayVectorizer().labeled(1)).getRootNode(); - assertTrue(tree instanceof DecisionTreeConditionalNode); + assertTrue(treeNode instanceof DecisionTreeConditionalNode); - DecisionTreeConditionalNode node = (DecisionTreeConditionalNode)tree; + DecisionTreeConditionalNode node = (DecisionTreeConditionalNode)treeNode; assertEquals(0, node.getThreshold(), 1e-3); assertEquals(0, node.getCol()); diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainerIntegrationTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainerIntegrationTest.java index 587dacdc4af2c..686949f2652a1 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainerIntegrationTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainerIntegrationTest.java @@ -78,15 +78,15 @@ public void testFit() { DecisionTreeRegressionTrainer trainer = new DecisionTreeRegressionTrainer(1, 0); - DecisionTreeNode tree = trainer.fit( + DecisionTreeNode treeNode = trainer.fit( ignite, data, new DoubleArrayVectorizer().labeled(Vectorizer.LabelCoordinate.LAST) - ); + ).getRootNode(); - assertTrue(tree instanceof DecisionTreeConditionalNode); + assertTrue(treeNode instanceof DecisionTreeConditionalNode); - DecisionTreeConditionalNode node = (DecisionTreeConditionalNode) tree; + DecisionTreeConditionalNode node = (DecisionTreeConditionalNode) treeNode; assertEquals(0, node.getThreshold(), 1e-3); diff --git 
a/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainerTest.java index 6466350078445..98e3e7a6f4200 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainerTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainerTest.java @@ -74,11 +74,11 @@ public void testFit() { DecisionTreeRegressionTrainer trainer = new DecisionTreeRegressionTrainer(1, 0) .withUsingIdx(useIdx == 1); - DecisionTreeNode tree = trainer.fit(data, parts, new DoubleArrayVectorizer().labeled(1)); + DecisionTreeNode treeNode = trainer.fit(data, parts, new DoubleArrayVectorizer().labeled(1)).getRootNode(); - assertTrue(tree instanceof DecisionTreeConditionalNode); + assertTrue(treeNode instanceof DecisionTreeConditionalNode); - DecisionTreeConditionalNode node = (DecisionTreeConditionalNode) tree; + DecisionTreeConditionalNode node = (DecisionTreeConditionalNode) treeNode; assertEquals(0, node.getThreshold(), 1e-3); diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainerTest.java index c94799a76f153..cb5961dcaa5a0 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainerTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainerTest.java @@ -22,7 +22,6 @@ import java.util.Map; import org.apache.ignite.ml.TestUtils; import org.apache.ignite.ml.common.TrainerTest; -import org.apache.ignite.ml.composition.ModelsComposition; import org.apache.ignite.ml.composition.predictionsaggregator.OnMajorityPredictionsAggregator; import org.apache.ignite.ml.dataset.feature.FeatureMeta; import org.apache.ignite.ml.dataset.feature.extractor.impl.LabeledDummyVectorizer; @@ 
-56,12 +55,12 @@ public void testFit() { ArrayList meta = new ArrayList<>(); for (int i = 0; i < 4; i++) meta.add(new FeatureMeta("", i, false)); - DatasetTrainer trainer = new RandomForestClassifierTrainer(meta) + DatasetTrainer trainer = new RandomForestClassifierTrainer(meta) .withAmountOfTrees(5) .withFeaturesCountSelectionStrgy(x -> 2) .withEnvironmentBuilder(TestUtils.testEnvBuilder()); - ModelsComposition mdl = trainer.fit(sample, parts, new LabeledDummyVectorizer<>()); + RandomForestModel mdl = trainer.fit(sample, parts, new LabeledDummyVectorizer<>()); assertTrue(mdl.getPredictionsAggregator() instanceof OnMajorityPredictionsAggregator); assertEquals(5, mdl.getModels().size()); @@ -84,14 +83,14 @@ public void testUpdate() { ArrayList meta = new ArrayList<>(); for (int i = 0; i < 4; i++) meta.add(new FeatureMeta("", i, false)); - DatasetTrainer trainer = new RandomForestClassifierTrainer(meta) + DatasetTrainer trainer = new RandomForestClassifierTrainer(meta) .withAmountOfTrees(100) .withFeaturesCountSelectionStrgy(x -> 2) .withEnvironmentBuilder(TestUtils.testEnvBuilder()); - ModelsComposition originalMdl = trainer.fit(sample, parts, new LabeledDummyVectorizer<>()); - ModelsComposition updatedOnSameDS = trainer.update(originalMdl, sample, parts, new LabeledDummyVectorizer<>()); - ModelsComposition updatedOnEmptyDS = trainer.update(originalMdl, new HashMap>(), parts, new LabeledDummyVectorizer<>()); + RandomForestModel originalMdl = trainer.fit(sample, parts, new LabeledDummyVectorizer<>()); + RandomForestModel updatedOnSameDS = trainer.update(originalMdl, sample, parts, new LabeledDummyVectorizer<>()); + RandomForestModel updatedOnEmptyDS = trainer.update(originalMdl, new HashMap>(), parts, new LabeledDummyVectorizer<>()); Vector v = VectorUtils.of(5, 0.5, 0.05, 0.005); assertEquals(originalMdl.predict(v), updatedOnSameDS.predict(v), 0.01); diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestIntegrationTest.java 
b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestIntegrationTest.java index 8bb0894b1aa9f..dc2be8536dd0f 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestIntegrationTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestIntegrationTest.java @@ -24,7 +24,6 @@ import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.internal.util.IgniteUtils; -import org.apache.ignite.ml.composition.ModelsComposition; import org.apache.ignite.ml.composition.predictionsaggregator.MeanValuePredictionsAggregator; import org.apache.ignite.ml.dataset.feature.FeatureMeta; import org.apache.ignite.ml.dataset.feature.extractor.impl.DoubleArrayVectorizer; @@ -85,7 +84,7 @@ public void testFit() { .withAmountOfTrees(5) .withFeaturesCountSelectionStrgy(x -> 2); - ModelsComposition mdl = trainer.fit(ignite, data, new DoubleArrayVectorizer().labeled(1)); + RandomForestModel mdl = trainer.fit(ignite, data, new DoubleArrayVectorizer().labeled(1)); assertTrue(mdl.getPredictionsAggregator() instanceof MeanValuePredictionsAggregator); assertEquals(5, mdl.getModels().size()); diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainerTest.java index 8ea027fa0631d..d501dbabcda9c 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainerTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainerTest.java @@ -21,7 +21,6 @@ import java.util.HashMap; import java.util.Map; import org.apache.ignite.ml.common.TrainerTest; -import org.apache.ignite.ml.composition.ModelsComposition; import 
org.apache.ignite.ml.composition.predictionsaggregator.MeanValuePredictionsAggregator; import org.apache.ignite.ml.dataset.feature.FeatureMeta; import org.apache.ignite.ml.dataset.feature.extractor.impl.LabeledDummyVectorizer; @@ -58,7 +57,7 @@ public void testFit() { .withAmountOfTrees(5) .withFeaturesCountSelectionStrgy(x -> 2); - ModelsComposition mdl = trainer.fit(sample, parts, new LabeledDummyVectorizer<>()); + RandomForestModel mdl = trainer.fit(sample, parts, new LabeledDummyVectorizer<>()); assertTrue(mdl.getPredictionsAggregator() instanceof MeanValuePredictionsAggregator); assertEquals(5, mdl.getModels().size()); } @@ -84,9 +83,9 @@ public void testUpdate() { .withAmountOfTrees(100) .withFeaturesCountSelectionStrgy(x -> 2); - ModelsComposition originalMdl = trainer.fit(sample, parts, new LabeledDummyVectorizer<>()); - ModelsComposition updatedOnSameDS = trainer.update(originalMdl, sample, parts, new LabeledDummyVectorizer<>()); - ModelsComposition updatedOnEmptyDS = trainer.update(originalMdl, new HashMap>(), parts, new LabeledDummyVectorizer<>()); + RandomForestModel originalMdl = trainer.fit(sample, parts, new LabeledDummyVectorizer<>()); + RandomForestModel updatedOnSameDS = trainer.update(originalMdl, sample, parts, new LabeledDummyVectorizer<>()); + RandomForestModel updatedOnEmptyDS = trainer.update(originalMdl, new HashMap>(), parts, new LabeledDummyVectorizer<>()); Vector v = VectorUtils.of(5, 0.5, 0.05, 0.005); assertEquals(originalMdl.predict(v), updatedOnSameDS.predict(v), 0.1); diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/data/TreeNodeTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/data/TreeNodeTest.java index 0b199ff05463b..0550eca187d31 100644 --- a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/data/TreeNodeTest.java +++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/data/TreeNodeTest.java @@ -38,8 +38,8 @@ public void 
testPredictNextIdCondNodeAtTreeCorner() { TreeNode node = new TreeNode(5, 1); assertEquals(TreeNode.Type.UNKNOWN, node.getType()); - assertEquals(5, node.predictNextNodeKey(features1).nodeId()); - assertEquals(5, node.predictNextNodeKey(features2).nodeId()); + assertEquals(5, node.predictNextNodeKey(features1).getNodeId()); + assertEquals(5, node.predictNextNodeKey(features2).getNodeId()); } /** */ @@ -49,8 +49,8 @@ public void testPredictNextIdForLeaf() { node.toLeaf(0.5); assertEquals(TreeNode.Type.LEAF, node.getType()); - assertEquals(5, node.predictNextNodeKey(features1).nodeId()); - assertEquals(5, node.predictNextNodeKey(features2).nodeId()); + assertEquals(5, node.predictNextNodeKey(features1).getNodeId()); + assertEquals(5, node.predictNextNodeKey(features2).getNodeId()); } /** */ @@ -60,8 +60,8 @@ public void testPredictNextIdForTree() { root.toConditional(0, 0.1); assertEquals(TreeNode.Type.CONDITIONAL, root.getType()); - assertEquals(2, root.predictNextNodeKey(features1).nodeId()); - assertEquals(3, root.predictNextNodeKey(features2).nodeId()); + assertEquals(2, root.predictNextNodeKey(features1).getNodeId()); + assertEquals(3, root.predictNextNodeKey(features2).getNodeId()); } /** */ @@ -69,7 +69,7 @@ public void testPredictNextIdForTree() { public void testPredictProba() { TreeNode root = new TreeNode(1, 1); List leaves = root.toConditional(0, 0.1); - leaves.forEach(leaf -> leaf.toLeaf(leaf.getId().nodeId() % 2)); + leaves.forEach(leaf -> leaf.toLeaf(leaf.getId().getNodeId() % 2)); assertEquals(TreeNode.Type.CONDITIONAL, root.getType()); assertEquals(0.0, root.predict(features1), 0.001); From dad79aa6ea741d1da6630035ff24dc714b4e7932 Mon Sep 17 00:00:00 2001 From: Peter Ivanov Date: Wed, 2 Dec 2020 14:11:59 +0300 Subject: [PATCH 079/110] IGNITE-13388 Fix apache-ignite deb package dependency on JVM package - Fixes #8191. 
Signed-off-by: Ilya Kasnacheev --- packaging/deb/control | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/deb/control b/packaging/deb/control index 40467c0f5a744..0c258f8ab9f7a 100644 --- a/packaging/deb/control +++ b/packaging/deb/control @@ -8,7 +8,7 @@ Package: apache-ignite Architecture: all Section: misc Priority: optional -Depends: openjdk-8-jdk | oracle-java8-installer, systemd, passwd +Depends: openjdk-8-jdk | openjdk-11-jdk | default-jdk | java-sdk, systemd, passwd Description: Apache Ignite In-Memory Computing, Database and Caching Platform Ignite™ is a memory-centric distributed database, caching, and processing platform for transactional, analytical, and streaming workloads, delivering From 77b90b1431a8e3a611e44497729b271f8059d2dc Mon Sep 17 00:00:00 2001 From: Ilya Kasnacheev Date: Wed, 2 Dec 2020 15:05:55 +0300 Subject: [PATCH 080/110] IGNITE-13770 Fix NPE in Ignite.dataRegionMetrics with empty persistent region - Fixes #8506. Signed-off-by: Ilya Kasnacheev --- .../cache/persistence/pagemem/PageMemoryImpl.java | 4 ++-- .../persistence/db/IgnitePdsDataRegionMetricsTest.java | 7 +++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java index c6d4d87f4c795..b0b28bee9d800 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java @@ -1829,14 +1829,14 @@ public int activePagesCount() { /** {@inheritDoc} */ @Override public int checkpointBufferPagesCount() { - return checkpointPool.size(); + return checkpointPool == null ? 0 : checkpointPool.size(); } /** * Number of used pages in checkpoint buffer. 
*/ public int checkpointBufferPagesSize() { - return checkpointPool.pages(); + return checkpointPool == null ? 0 : checkpointPool.pages(); } /** diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsDataRegionMetricsTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsDataRegionMetricsTest.java index 3da0c181340b0..63fea4494f816 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsDataRegionMetricsTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsDataRegionMetricsTest.java @@ -89,6 +89,13 @@ public class IgnitePdsDataRegionMetricsTest extends GridCommonAbstractTest { .setMaxSize(MAX_REGION_SIZE) .setPersistenceEnabled(true) .setMetricsEnabled(true)) + .setDataRegionConfigurations( + new DataRegionConfiguration() + .setName("EmptyRegion") + .setInitialSize(INIT_REGION_SIZE) + .setMaxSize(MAX_REGION_SIZE) + .setPersistenceEnabled(true) + .setMetricsEnabled(true)) .setCheckpointFrequency(1000); cfg.setDataStorageConfiguration(memCfg); From 897dca9aae523d992b1b550ced8eb7d571badd03 Mon Sep 17 00:00:00 2001 From: Alexander Lapin Date: Wed, 2 Dec 2020 15:19:19 +0300 Subject: [PATCH 081/110] IGNITE-13640 Added runtime dependencies to opencensus module. 
Fixes #8406 Signed-off-by: Slava Koptilin --- modules/opencensus/pom.xml | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/modules/opencensus/pom.xml b/modules/opencensus/pom.xml index 648c2bcd4d91a..9a3503a0225ce 100644 --- a/modules/opencensus/pom.xml +++ b/modules/opencensus/pom.xml @@ -116,4 +116,27 @@ test + + + + + maven-dependency-plugin + + + copy-libs + package + + copy-dependencies + + + org.apache.ignite + target/libs + runtime + false + + + + + + From bd4fb3c162e17601fa65d108d553937366bb1ea7 Mon Sep 17 00:00:00 2001 From: Pavel Pereslegin Date: Wed, 2 Dec 2020 15:54:13 +0300 Subject: [PATCH 082/110] IGNITE-13520 Skip generating encryption keys on the client node. (#8317) --- .../encryption/GridEncryptionManager.java | 2 +- .../processors/cache/ClusterCachesInfo.java | 18 ++- .../processors/cache/GridCacheProcessor.java | 2 +- .../EncryptedCacheNodeJoinTest.java | 118 +++++++++++++++++- .../src/test/config/enc/enc-cache-client.xml | 2 +- 5 files changed, 136 insertions(+), 6 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java index 368331368659e..2c31dcdf7f34c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java @@ -501,7 +501,7 @@ public void onLocalJoin() { /** {@inheritDoc} */ @Override public void collectJoiningNodeData(DiscoveryDataBag dataBag) { - if (dataBag.isJoiningNodeClient()) + if (ctx.clientNode()) return; Set grpIds = grpKeys.groupIds(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java index 88a9fde320f28..96ca0072e3585 
100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java @@ -1864,9 +1864,10 @@ else if (joiningNodeData instanceof CacheJoinNodeDiscoveryData) /** * @param data Joining node data. + * @param joiningNodeClient Joining node is client flag. * @return Message with error or null if everything was OK. */ - public String validateJoiningNodeData(DiscoveryDataBag.JoiningNodeDiscoveryData data) { + public String validateJoiningNodeData(DiscoveryDataBag.JoiningNodeDiscoveryData data, boolean joiningNodeClient) { if (data.hasJoiningNodeData()) { Serializable joiningNodeData = data.joiningNodeData(); @@ -1874,6 +1875,7 @@ public String validateJoiningNodeData(DiscoveryDataBag.JoiningNodeDiscoveryData CacheJoinNodeDiscoveryData joinData = (CacheJoinNodeDiscoveryData)joiningNodeData; Set problemCaches = null; + Set encClientCaches = null; for (CacheJoinNodeDiscoveryData.CacheInfo cacheInfo : joinData.caches().values()) { CacheConfiguration cfg = cacheInfo.cacheData().config(); @@ -1895,6 +1897,12 @@ public String validateJoiningNodeData(DiscoveryDataBag.JoiningNodeDiscoveryData problemCaches.add(cfg.getName()); } + else if (joiningNodeClient && cfg.isEncryptionEnabled()) { + if (encClientCaches == null) + encClientCaches = new HashSet<>(); + + encClientCaches.add(cfg.getName()); + } } } @@ -1903,6 +1911,14 @@ public String validateJoiningNodeData(DiscoveryDataBag.JoiningNodeDiscoveryData "Joining node has caches with data which are not presented on cluster, " + "it could mean that they were already destroyed, to add the node to cluster - " + "remove directories with the caches[", "]")); + + if (!F.isEmpty(encClientCaches)) { + return encClientCaches.stream().collect(Collectors.joining(", ", + "Joining node has encrypted caches which are not presented on the cluster, " + + "encrypted caches configured on client node cannot be 
started when such node joins " + + "the cluster, these caches can be started manually (dynamically) after node joined" + + "[caches=", "]")); + } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java index 01124fbb665db..4a1aceb068bf2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java @@ -3107,7 +3107,7 @@ private GridCacheSharedContext createSharedContext( if (!cachesInfo.isMergeConfigSupports(node)) return null; - String validationRes = cachesInfo.validateJoiningNodeData(discoData); + String validationRes = cachesInfo.validateJoiningNodeData(discoData, node.isClient()); if (validationRes != null) return new IgniteNodeValidationResult(node.id(), validationRes, validationRes); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheNodeJoinTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheNodeJoinTest.java index cdf802bf17f99..2391bdb0643be 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheNodeJoinTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheNodeJoinTest.java @@ -17,12 +17,16 @@ package org.apache.ignite.internal.encryption; +import org.apache.ignite.IgniteCache; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.cluster.ClusterState; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.util.IgniteUtils; +import org.apache.ignite.spi.IgniteSpiException; import org.apache.ignite.spi.encryption.keystore.KeystoreEncryptionSpi; 
+import org.apache.ignite.testframework.GridTestUtils; import org.junit.Test; import static org.apache.ignite.testframework.GridTestUtils.assertThrowsWithCause; @@ -42,6 +46,12 @@ public class EncryptedCacheNodeJoinTest extends AbstractEncryptionTest { /** */ private static final String GRID_5 = "grid-5"; + /** */ + private static final String GRID_6 = "grid-6"; + + /** */ + private static final String GRID_7 = "grid-7"; + /** */ public static final String CLIENT = "client"; @@ -76,7 +86,9 @@ public class EncryptedCacheNodeJoinTest extends AbstractEncryptionTest { grid.equals(GRID_2) || grid.equals(GRID_3) || grid.equals(GRID_4) || - grid.equals(GRID_5)) { + grid.equals(GRID_5) || + grid.equals(GRID_6) || + grid.equals(GRID_7)) { KeystoreEncryptionSpi encSpi = new KeystoreEncryptionSpi(); encSpi.setKeyStorePath(grid.equals(GRID_2) ? KEYSTORE_PATH_2 : KEYSTORE_PATH); @@ -98,7 +110,12 @@ protected CacheConfiguration cacheConfiguration(String gridName) { CacheConfiguration ccfg = defaultCacheConfiguration(); ccfg.setName(cacheName()); - ccfg.setEncryptionEnabled(gridName.equals(GRID_0)); + + if (gridName.startsWith(CLIENT) || + gridName.equals(GRID_0) || + gridName.equals(GRID_6) || + gridName.equals(GRID_7)) + ccfg.setEncryptionEnabled(true); return ccfg; } @@ -204,6 +221,103 @@ public void testClientNodeJoin() throws Exception { createEncryptedCache(client, grid0, cacheName(), null); } + /** */ + @Test + public void testClientNodeJoinActiveClusterWithNewStaticCacheConfig() throws Exception { + checkNodeJoinWithStaticCacheConfig(true, true, true); + } + + /** */ + @Test + public void testClientNodeJoinActiveClusterWithExistingStaticCacheConfig() throws Exception { + checkNodeJoinWithStaticCacheConfig(true, true, false); + } + + /** */ + @Test + public void testClientNodeJoinInactiveClusterWithNewStaticCacheConfig() throws Exception { + checkNodeJoinWithStaticCacheConfig(true, false, true); + } + + /** */ + @Test + public void 
testClientNodeJoinInactiveClusterWithExistingStaticCacheConfig() throws Exception { + checkNodeJoinWithStaticCacheConfig(true, false, false); + } + + /** */ + @Test + public void testServerNodeJoinActiveClusterWithNewStaticCacheConfig() throws Exception { + checkNodeJoinWithStaticCacheConfig(false, true, true); + } + + /** */ + @Test + public void testServerNodeJoinInactiveClusterWithNewStaticCacheConfig() throws Exception { + checkNodeJoinWithStaticCacheConfig(false, false, true); + } + + /** + * @param client {@code True} to test client node join, {@code False} to test server node join. + * @param activateBeforeJoin {@code True} to activate the server before joining the client node. + * @param newCfg {@code True} to configure cache on the last joined node. {@code False} to configure on all nodes. + */ + private void checkNodeJoinWithStaticCacheConfig( + boolean client, + boolean activateBeforeJoin, + boolean newCfg + ) throws Exception { + if (!newCfg) + configureCache = true; + + startGrid(GRID_0); + startGrid(GRID_6); + + IgniteEx client1 = startClientGrid("client1"); + + if (newCfg) + configureCache = true; + + if (activateBeforeJoin) + grid(GRID_0).cluster().state(ClusterState.ACTIVE); + + if (client && newCfg) { + String expErrMsg = "Joining node has encrypted caches which are not presented on the cluster, " + + "encrypted caches configured on client node cannot be started when such node joins " + + "the cluster, these caches can be started manually (dynamically) after node is joined " + + "[caches=" + cacheName() + ']'; + + GridTestUtils.assertThrowsAnyCause(log, () -> startClientGrid(CLIENT), IgniteSpiException.class, expErrMsg); + + return; + } + + IgniteEx node = client ? 
startClientGrid(CLIENT) : startGrid(GRID_7); + + if (!activateBeforeJoin) + grid(GRID_0).cluster().state(ClusterState.ACTIVE); + + awaitPartitionMapExchange(); + + IgniteCache cache = node.cache(cacheName()); + + assertNotNull(cache); + + for (long i = 0; i < 100; i++) + cache.put(i, String.valueOf(i)); + + checkEncryptedCaches(grid(GRID_0), grid(GRID_6)); + checkEncryptedCaches(grid(GRID_0), client1); + checkData(client1); + + if (client) { + checkEncryptedCaches(grid(GRID_0), grid(CLIENT)); + checkData(grid(CLIENT)); + } + else + checkEncryptedCaches(grid(GRID_7), grid(GRID_0)); + } + /** */ @Test public void testNodeCantJoinWithSameNameButNotEncCache() throws Exception { diff --git a/modules/spring/src/test/config/enc/enc-cache-client.xml b/modules/spring/src/test/config/enc/enc-cache-client.xml index ba4068a4606a7..6ebef07a3a84d 100644 --- a/modules/spring/src/test/config/enc/enc-cache-client.xml +++ b/modules/spring/src/test/config/enc/enc-cache-client.xml @@ -23,7 +23,7 @@ http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd"> - + From 338165afadd3b6979b4655ee2f03f3b9c2228236 Mon Sep 17 00:00:00 2001 From: Pavel Tupitsyn Date: Wed, 2 Dec 2020 19:39:25 +0300 Subject: [PATCH 083/110] IGNITE-13496 Java thin: make async API non-blocking with GridNioServer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Refactor Java Thin Client to use GridNioServer in client mode: * Client threads are never blocked * Single worker thread is shared across all connections within `IgniteClient` Benchmark results (i7-9700K, Ubuntu 20.04.1, JDK 1.8.0_275): Before Benchmark Mode Cnt Score Error Units JmhThinClientCacheBenchmark.get thrpt 10 65916.805 ± 2118.954 ops/s JmhThinClientCacheBenchmark.put thrpt 10 62304.444 ± 2521.371 ops/s After Benchmark Mode Cnt Score Error Units JmhThinClientCacheBenchmark.get thrpt 10 92501.557 ± 1380.384 ops/s JmhThinClientCacheBenchmark.put thrpt 10 82907.446 
± 7572.537 ops/s --- .../thin/JmhThinClientAbstractBenchmark.java | 135 ++++ .../jmh/thin/JmhThinClientCacheBenchmark.java | 81 +++ .../streams/BinaryByteBufferInputStream.java | 91 +-- .../client/thin/ClientComputeImpl.java | 7 +- .../internal/client/thin/ClientSslUtils.java | 293 ++++++++ .../client/thin/NotificationListener.java | 4 +- .../client/thin/PayloadInputChannel.java | 8 +- .../internal/client/thin/ReliableChannel.java | 63 +- .../client/thin/TcpClientChannel.java | 679 ++---------------- .../internal/client/thin/TcpIgniteClient.java | 27 +- .../client/thin/io/ClientConnection.java | 39 + .../thin/io/ClientConnectionMultiplexer.java | 52 ++ .../thin/io/ClientConnectionStateHandler.java | 31 + .../client/thin/io/ClientMessageDecoder.java | 92 +++ .../client/thin/io/ClientMessageHandler.java | 31 + .../GridNioClientConnection.java | 93 +++ .../GridNioClientConnectionMultiplexer.java | 147 ++++ .../gridnioserver/GridNioClientListener.java | 73 ++ .../io/gridnioserver/GridNioClientParser.java | 59 ++ .../client/ConnectToStartingNodeTest.java | 18 +- .../ignite/client/SslParametersTest.java | 4 +- .../client/thin/ReliableChannelTest.java | 9 +- ...nClientAbstractPartitionAwarenessTest.java | 9 +- ...PartitionAwarenessResourceReleaseTest.java | 14 +- 24 files changed, 1298 insertions(+), 761 deletions(-) create mode 100644 modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/thin/JmhThinClientAbstractBenchmark.java create mode 100644 modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/thin/JmhThinClientCacheBenchmark.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientSslUtils.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientConnection.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientConnectionMultiplexer.java create mode 100644 
modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientConnectionStateHandler.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientMessageDecoder.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientMessageHandler.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientConnection.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientConnectionMultiplexer.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientListener.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientParser.java diff --git a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/thin/JmhThinClientAbstractBenchmark.java b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/thin/JmhThinClientAbstractBenchmark.java new file mode 100644 index 0000000000000..6b6dc53dffd54 --- /dev/null +++ b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/thin/JmhThinClientAbstractBenchmark.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.benchmarks.jmh.thin; + +import java.util.stream.IntStream; + +import org.apache.ignite.Ignite; +import org.apache.ignite.Ignition; +import org.apache.ignite.client.ClientCache; +import org.apache.ignite.client.IgniteClient; +import org.apache.ignite.configuration.ClientConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.benchmarks.jmh.JmhAbstractBenchmark; +import org.apache.ignite.internal.util.typedef.internal.A; +import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.TearDown; + +/** + * Base class for thin client benchmarks. + */ +@State(Scope.Benchmark) +public abstract class JmhThinClientAbstractBenchmark extends JmhAbstractBenchmark { + /** Property: nodes count. */ + protected static final String PROP_DATA_NODES = "ignite.jmh.thin.dataNodes"; + + /** Default amount of nodes. */ + protected static final int DFLT_DATA_NODES = 4; + + /** Items count. */ + protected static final int CNT = 1000; + + /** Cache value. */ + protected static final byte[] PAYLOAD = new byte[1000]; + + /** IP finder shared across nodes. */ + private static final TcpDiscoveryVmIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true); + + /** Default cache name. 
*/ + private static final String DEFAULT_CACHE_NAME = "default"; + + /** Target node. */ + protected Ignite node; + + /** Target cache. */ + protected ClientCache cache; + + /** Thin client. */ + protected IgniteClient client; + + /** + * Setup routine. Child classes must invoke this method first. + * + */ + @Setup + public void setup() { + System.out.println(); + System.out.println("--------------------"); + System.out.println("IGNITE BENCHMARK INFO: "); + System.out.println("\tdata nodes: " + intProperty(PROP_DATA_NODES, DFLT_DATA_NODES)); + System.out.println("--------------------"); + System.out.println(); + + int nodesCnt = intProperty(PROP_DATA_NODES, DFLT_DATA_NODES); + + A.ensure(nodesCnt >= 1, "nodesCnt >= 1"); + + node = Ignition.start(configuration("node0")); + + for (int i = 1; i < nodesCnt; i++) + Ignition.start(configuration("node" + i)); + + String[] addrs = IntStream + .range(10800, 10800 + nodesCnt) + .mapToObj(p -> "127.0.0.1:" + p) + .toArray(String[]::new); + + ClientConfiguration cfg = new ClientConfiguration() + .setAddresses(addrs) + .setPartitionAwarenessEnabled(true); + + client = Ignition.startClient(cfg); + + cache = client.getOrCreateCache(DEFAULT_CACHE_NAME); + + System.out.println("Loading test data..."); + + for (int i = 0; i < CNT; i++) + cache.put(i, PAYLOAD); + + System.out.println("Test data loaded: " + CNT); + } + + /** + * Tear down routine. + * + */ + @TearDown + public void tearDown() throws Exception { + client.close(); + Ignition.stopAll(true); + } + + /** + * Create Ignite configuration. + * + * @param igniteInstanceName Ignite instance name. + * @return Configuration. 
+ */ + protected IgniteConfiguration configuration(String igniteInstanceName) { + + return new IgniteConfiguration() + .setIgniteInstanceName(igniteInstanceName) + .setLocalHost("127.0.0.1") + .setDiscoverySpi(new TcpDiscoverySpi().setIpFinder(IP_FINDER)); + } +} diff --git a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/thin/JmhThinClientCacheBenchmark.java b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/thin/JmhThinClientCacheBenchmark.java new file mode 100644 index 0000000000000..88e6a87171d9c --- /dev/null +++ b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/thin/JmhThinClientCacheBenchmark.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.benchmarks.jmh.thin; + +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; + +import org.apache.ignite.internal.benchmarks.jmh.runner.JmhIdeBenchmarkRunner; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.Mode; + +/** + * Thin client cache benchmark. 
+ * + * Results on i7-9700K, Ubuntu 20.04.1, JDK 1.8.0_275: + * Benchmark Mode Cnt Score Error Units + * JmhThinClientCacheBenchmark.get thrpt 10 92501.557 ± 1380.384 ops/s + * JmhThinClientCacheBenchmark.put thrpt 10 82907.446 ± 7572.537 ops/s + * + * JmhThinClientCacheBenchmark.get avgt 10 41.505 ± 1.018 us/op + * JmhThinClientCacheBenchmark.put avgt 10 44.623 ± 0.779 us/op + */ +public class JmhThinClientCacheBenchmark extends JmhThinClientAbstractBenchmark { + /** + * Cache put benchmark. + */ + @Benchmark + public void put() { + int key = ThreadLocalRandom.current().nextInt(CNT); + + cache.put(key, PAYLOAD); + } + + /** + * Cache get benchmark. + */ + @Benchmark + public Object get() { + int key = ThreadLocalRandom.current().nextInt(CNT); + + return cache.get(key); + } + + /** + * Run benchmarks. + * + * @param args Arguments. + * @throws Exception If failed. + */ + public static void main(String[] args) throws Exception { + JmhIdeBenchmarkRunner runner = JmhIdeBenchmarkRunner.create() + .forks(1) + .threads(4) + .benchmarks(JmhThinClientCacheBenchmark.class.getSimpleName()) + .jvmArguments("-Xms4g", "-Xmx4g"); + + runner + .benchmarkModes(Mode.Throughput) + .run(); + + runner + .benchmarkModes(Mode.AverageTime) + .outputTimeUnit(TimeUnit.MICROSECONDS) + .run(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/streams/BinaryByteBufferInputStream.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/streams/BinaryByteBufferInputStream.java index d277948212f1d..fe138e6507a33 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/streams/BinaryByteBufferInputStream.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/streams/BinaryByteBufferInputStream.java @@ -18,14 +18,14 @@ package org.apache.ignite.internal.binary.streams; import java.nio.ByteBuffer; -import org.apache.ignite.binary.BinaryObjectException; +import java.util.Arrays; /** - * + * Input stream over {@link 
ByteBuffer}. */ public class BinaryByteBufferInputStream implements BinaryInputStream { /** */ - private ByteBuffer buf; + private final ByteBuffer buf; /** * @param buf Buffer to wrap. @@ -44,15 +44,11 @@ public static BinaryByteBufferInputStream create(ByteBuffer buf) { /** {@inheritDoc} */ @Override public byte readByte() { - ensureHasData(1); - return buf.get(); } /** {@inheritDoc} */ @Override public byte[] readByteArray(int cnt) { - ensureHasData(cnt); - byte[] data = new byte[cnt]; buf.get(data); @@ -62,22 +58,16 @@ public static BinaryByteBufferInputStream create(ByteBuffer buf) { /** {@inheritDoc} */ @Override public int read(byte[] arr, int off, int cnt) { - ensureHasData(cnt); - return 0; } /** {@inheritDoc} */ @Override public boolean readBoolean() { - ensureHasData(1); - - return false; + return readByte() == 1; } /** {@inheritDoc} */ @Override public boolean[] readBooleanArray(int cnt) { - ensureHasData(cnt); - boolean[] res = new boolean[cnt]; for (int i = 0; i < cnt; i++) @@ -88,15 +78,11 @@ public static BinaryByteBufferInputStream create(ByteBuffer buf) { /** {@inheritDoc} */ @Override public short readShort() { - ensureHasData(2); - return buf.getShort(); } /** {@inheritDoc} */ @Override public short[] readShortArray(int cnt) { - ensureHasData(2 * cnt); - short[] res = new short[cnt]; for (int i = 0; i < cnt; i++) @@ -107,15 +93,11 @@ public static BinaryByteBufferInputStream create(ByteBuffer buf) { /** {@inheritDoc} */ @Override public char readChar() { - ensureHasData(2); - return buf.getChar(); } /** {@inheritDoc} */ @Override public char[] readCharArray(int cnt) { - ensureHasData(2 * cnt); - char[] res = new char[cnt]; for (int i = 0; i < cnt; i++) @@ -126,15 +108,11 @@ public static BinaryByteBufferInputStream create(ByteBuffer buf) { /** {@inheritDoc} */ @Override public int readInt() { - ensureHasData(4); - return buf.getInt(); } /** {@inheritDoc} */ @Override public int[] readIntArray(int cnt) { - ensureHasData(4 * cnt); - int[] res = 
new int[cnt]; for (int i = 0; i < cnt; i++) @@ -145,15 +123,11 @@ public static BinaryByteBufferInputStream create(ByteBuffer buf) { /** {@inheritDoc} */ @Override public float readFloat() { - ensureHasData(4); - return buf.getFloat(); } /** {@inheritDoc} */ @Override public float[] readFloatArray(int cnt) { - ensureHasData(4 * cnt); - float[] res = new float[cnt]; for (int i = 0; i < cnt; i++) @@ -164,15 +138,11 @@ public static BinaryByteBufferInputStream create(ByteBuffer buf) { /** {@inheritDoc} */ @Override public long readLong() { - ensureHasData(8); - return buf.getLong(); } /** {@inheritDoc} */ @Override public long[] readLongArray(int cnt) { - ensureHasData(8 * cnt); - long[] res = new long[cnt]; for (int i = 0; i < cnt; i++) @@ -183,15 +153,11 @@ public static BinaryByteBufferInputStream create(ByteBuffer buf) { /** {@inheritDoc} */ @Override public double readDouble() { - ensureHasData(8); - return buf.getDouble(); } /** {@inheritDoc} */ @Override public double[] readDoubleArray(int cnt) { - ensureHasData(8 * cnt); - double[] res = new double[cnt]; for (int i = 0; i < cnt; i++) @@ -207,47 +173,17 @@ public static BinaryByteBufferInputStream create(ByteBuffer buf) { /** {@inheritDoc} */ @Override public byte readBytePositioned(int pos) { - int oldPos = buf.position(); - - buf.position(pos); - - ensureHasData(1); - - byte res = buf.get(); - - buf.position(oldPos); - - return res; + return buf.get(pos); } /** {@inheritDoc} */ @Override public short readShortPositioned(int pos) { - int oldPos = buf.position(); - - buf.position(pos); - - ensureHasData(2); - - short res = buf.getShort(); - - buf.position(oldPos); - - return res; + return buf.getShort(pos); } /** {@inheritDoc} */ @Override public int readIntPositioned(int pos) { - int oldPos = buf.position(); - - buf.position(pos); - - ensureHasData(4); - - byte res = buf.get(); - - buf.position(oldPos); - - return res; + return buf.getInt(pos); } /** {@inheritDoc} */ @@ -277,7 +213,9 @@ public static 
BinaryByteBufferInputStream create(ByteBuffer buf) { /** {@inheritDoc} */ @Override public byte[] arrayCopy() { - return buf.array(); + byte[] arr = buf.array(); + + return Arrays.copyOf(arr, arr.length); } /** {@inheritDoc} */ @@ -289,13 +227,4 @@ public static BinaryByteBufferInputStream create(ByteBuffer buf) { @Override public boolean hasArray() { return false; } - - /** - * @param cnt Remaining bytes. - */ - private void ensureHasData(int cnt) { - if (buf.remaining() < cnt) - throw new BinaryObjectException("Not enough data to read the value " + - "[requiredBytes=" + cnt + ", remainingBytes=" + buf.remaining() + ']'); - } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientComputeImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientComputeImpl.java index d4cb4153d1f96..65d1c2d618e03 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientComputeImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientComputeImpl.java @@ -17,6 +17,7 @@ package org.apache.ignite.internal.client.thin; +import java.nio.ByteBuffer; import java.util.Collection; import java.util.HashMap; import java.util.Map; @@ -40,7 +41,7 @@ import org.apache.ignite.client.IgniteClientFuture; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.binary.BinaryRawWriterEx; -import org.apache.ignite.internal.binary.streams.BinaryHeapInputStream; +import org.apache.ignite.internal.binary.streams.BinaryByteBufferInputStream; import org.apache.ignite.internal.processors.platform.client.ClientStatus; import org.apache.ignite.internal.util.IgniteUtils; import org.apache.ignite.internal.util.future.GridFutureAdapter; @@ -353,11 +354,11 @@ private void writeExecuteTaskRequest( ClientChannel ch, ClientOperation op, long rsrcId, - byte[] payload, + ByteBuffer payload, Exception err ) { if (op == ClientOperation.COMPUTE_TASK_FINISHED) { - Object res = 
payload == null ? null : utils.readObject(new BinaryHeapInputStream(payload), false); + Object res = payload == null ? null : utils.readObject(BinaryByteBufferInputStream.create(payload), false); ClientComputeTask task = addTask(ch, rsrcId); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientSslUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientSslUtils.java new file mode 100644 index 0000000000000..4f964d86908f4 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientSslUtils.java @@ -0,0 +1,293 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.client.thin; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.security.KeyManagementException; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.UnrecoverableKeyException; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; +import java.util.function.BiFunction; +import java.util.function.Predicate; +import java.util.stream.Stream; +import javax.cache.configuration.Factory; +import javax.net.ssl.KeyManager; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManager; +import javax.net.ssl.TrustManagerFactory; +import javax.net.ssl.X509TrustManager; + +import org.apache.ignite.client.SslMode; +import org.apache.ignite.client.SslProtocol; +import org.apache.ignite.configuration.ClientConfiguration; + +import static org.apache.ignite.ssl.SslContextFactory.DFLT_KEY_ALGORITHM; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_STORE_TYPE; + +public class ClientSslUtils { + /** */ + public static final char[] EMPTY_CHARS = new char[0]; + + /** Trust manager ignoring all certificate checks. */ + private static final TrustManager ignoreErrorsTrustMgr = new X509TrustManager() { + /** */ + @Override public X509Certificate[] getAcceptedIssuers() { + return null; + } + + /** */ + @Override public void checkServerTrusted(X509Certificate[] arg0, String arg1) { + // No-op. + } + + /** */ + @Override public void checkClientTrusted(X509Certificate[] arg0, String arg1) { + // No-op. + } + }; + + /** + * Gets SSL context for the given client configuration. + * + * @param cfg Configuration. + * @return {@link SSLContext} when SSL is enabled in the configuration; null otherwise. 
+ */ + public static SSLContext getSslContext(ClientConfiguration cfg) { + if (cfg.getSslMode() == SslMode.DISABLED) + return null; + + Factory sslCtxFactory = cfg.getSslContextFactory(); + + if (sslCtxFactory != null) { + try { + return sslCtxFactory.create(); + } + catch (Exception e) { + throw new ClientError("SSL Context Factory failed", e); + } + } + + BiFunction or = (val, dflt) -> val == null || val.isEmpty() ? dflt : val; + + String keyStore = or.apply( + cfg.getSslClientCertificateKeyStorePath(), + System.getProperty("javax.net.ssl.keyStore") + ); + + String keyStoreType = or.apply( + cfg.getSslClientCertificateKeyStoreType(), + or.apply(System.getProperty("javax.net.ssl.keyStoreType"), DFLT_STORE_TYPE) + ); + + String keyStorePwd = or.apply( + cfg.getSslClientCertificateKeyStorePassword(), + System.getProperty("javax.net.ssl.keyStorePassword") + ); + + String trustStore = or.apply( + cfg.getSslTrustCertificateKeyStorePath(), + System.getProperty("javax.net.ssl.trustStore") + ); + + String trustStoreType = or.apply( + cfg.getSslTrustCertificateKeyStoreType(), + or.apply(System.getProperty("javax.net.ssl.trustStoreType"), DFLT_STORE_TYPE) + ); + + String trustStorePwd = or.apply( + cfg.getSslTrustCertificateKeyStorePassword(), + System.getProperty("javax.net.ssl.trustStorePassword") + ); + + String algorithm = or.apply(cfg.getSslKeyAlgorithm(), DFLT_KEY_ALGORITHM); + + String proto = toString(cfg.getSslProtocol()); + + if (Stream.of(keyStore, keyStorePwd, keyStoreType, trustStore, trustStorePwd, trustStoreType) + .allMatch(s -> s == null || s.isEmpty()) + ) { + try { + return SSLContext.getDefault(); + } + catch (NoSuchAlgorithmException e) { + throw new ClientError("Default SSL context cryptographic algorithm is not available", e); + } + } + + KeyManager[] keyManagers = getKeyManagers(algorithm, keyStore, keyStoreType, keyStorePwd); + + TrustManager[] trustManagers = cfg.isSslTrustAll() ? 
+ new TrustManager[] {ignoreErrorsTrustMgr} : + getTrustManagers(algorithm, trustStore, trustStoreType, trustStorePwd); + + try { + SSLContext sslCtx = SSLContext.getInstance(proto); + + sslCtx.init(keyManagers, trustManagers, null); + + return sslCtx; + } + catch (NoSuchAlgorithmException e) { + throw new ClientError("SSL context cryptographic algorithm is not available", e); + } + catch (KeyManagementException e) { + throw new ClientError("Failed to create SSL Context", e); + } + } + + /** + * @return String representation of {@link SslProtocol} as required by {@link SSLContext}. + */ + private static String toString(SslProtocol proto) { + switch (proto) { + case TLSv1_1: + return "TLSv1.1"; + + case TLSv1_2: + return "TLSv1.2"; + + default: + return proto.toString(); + } + } + + /** */ + private static KeyManager[] getKeyManagers( + String algorithm, + String keyStore, + String keyStoreType, + String keyStorePwd + ) { + KeyManagerFactory keyMgrFactory; + + try { + keyMgrFactory = KeyManagerFactory.getInstance(algorithm); + } + catch (NoSuchAlgorithmException e) { + throw new ClientError("Key manager cryptographic algorithm is not available", e); + } + + Predicate empty = s -> s == null || s.isEmpty(); + + if (!empty.test(keyStore) && !empty.test(keyStoreType)) { + char[] pwd = (keyStorePwd == null) ? 
EMPTY_CHARS : keyStorePwd.toCharArray(); + + KeyStore store = loadKeyStore("Client", keyStore, keyStoreType, pwd); + + try { + keyMgrFactory.init(store, pwd); + } + catch (UnrecoverableKeyException e) { + throw new ClientError("Could not recover key store key", e); + } + catch (KeyStoreException e) { + throw new ClientError( + String.format("Client key store provider of type [%s] is not available", keyStoreType), + e + ); + } + catch (NoSuchAlgorithmException e) { + throw new ClientError("Client key store integrity check algorithm is not available", e); + } + } + + return keyMgrFactory.getKeyManagers(); + } + + /** */ + private static TrustManager[] getTrustManagers( + String algorithm, + String trustStore, + String trustStoreType, + String trustStorePwd + ) { + TrustManagerFactory trustMgrFactory; + + try { + trustMgrFactory = TrustManagerFactory.getInstance(algorithm); + } + catch (NoSuchAlgorithmException e) { + throw new ClientError("Trust manager cryptographic algorithm is not available", e); + } + + Predicate empty = s -> s == null || s.isEmpty(); + + if (!empty.test(trustStore) && !empty.test(trustStoreType)) { + char[] pwd = (trustStorePwd == null) ? 
EMPTY_CHARS : trustStorePwd.toCharArray(); + + KeyStore store = loadKeyStore("Trust", trustStore, trustStoreType, pwd); + + try { + trustMgrFactory.init(store); + } + catch (KeyStoreException e) { + throw new ClientError( + String.format("Trust key store provider of type [%s] is not available", trustStoreType), + e + ); + } + } + + return trustMgrFactory.getTrustManagers(); + } + + /** */ + private static KeyStore loadKeyStore(String lb, String path, String type, char[] pwd) { + KeyStore store; + + try { + store = KeyStore.getInstance(type); + } + catch (KeyStoreException e) { + throw new ClientError( + String.format("%s key store provider of type [%s] is not available", lb, type), + e + ); + } + + try (InputStream in = new FileInputStream(new File(path))) { + + store.load(in, pwd); + + return store; + } + catch (FileNotFoundException e) { + throw new ClientError(String.format("%s key store file [%s] does not exist", lb, path), e); + } + catch (NoSuchAlgorithmException e) { + throw new ClientError( + String.format("%s key store integrity check algorithm is not available", lb), + e + ); + } + catch (CertificateException e) { + throw new ClientError(String.format("Could not load certificate from %s key store", lb), e); + } + catch (IOException e) { + throw new ClientError(String.format("Could not read %s key store", lb), e); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/NotificationListener.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/NotificationListener.java index ae1b7fac9b473..3aee48304efa0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/NotificationListener.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/NotificationListener.java @@ -17,6 +17,8 @@ package org.apache.ignite.internal.client.thin; +import java.nio.ByteBuffer; + /** * Server to client notification listener. 
*/ @@ -30,5 +32,5 @@ interface NotificationListener { * @param payload Notification payload or {@code null} if there is no payload. * @param err Error. */ - public void acceptNotification(ClientChannel ch, ClientOperation op, long rsrcId, byte[] payload, Exception err); + public void acceptNotification(ClientChannel ch, ClientOperation op, long rsrcId, ByteBuffer payload, Exception err); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/PayloadInputChannel.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/PayloadInputChannel.java index 76af7f2ae92e2..f9d5978f4ec67 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/PayloadInputChannel.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/PayloadInputChannel.java @@ -17,7 +17,9 @@ package org.apache.ignite.internal.client.thin; -import org.apache.ignite.internal.binary.streams.BinaryHeapInputStream; +import java.nio.ByteBuffer; + +import org.apache.ignite.internal.binary.streams.BinaryByteBufferInputStream; import org.apache.ignite.internal.binary.streams.BinaryInputStream; /** @@ -33,8 +35,8 @@ class PayloadInputChannel { /** * Constructor. 
*/ - PayloadInputChannel(ClientChannel ch, byte[] payload) { - in = new BinaryHeapInputStream(payload); + PayloadInputChannel(ClientChannel ch, ByteBuffer payload) { + in = BinaryByteBufferInputStream.create(payload); this.ch = ch; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ReliableChannel.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ReliableChannel.java index e7005be764d54..195088de3cd40 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ReliableChannel.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ReliableChannel.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.client.thin; import java.net.InetSocketAddress; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -31,13 +32,11 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; +import java.util.concurrent.ForkJoinPool; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Collectors; @@ -51,26 +50,21 @@ import org.apache.ignite.client.IgniteClientFuture; import org.apache.ignite.configuration.ClientConfiguration; import org.apache.ignite.configuration.ClientConnectorConfiguration; +import org.apache.ignite.internal.client.thin.io.ClientConnectionMultiplexer; +import org.apache.ignite.internal.client.thin.io.gridnioserver.GridNioClientConnectionMultiplexer; import org.apache.ignite.internal.util.HostAndPortRange; import 
org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.U; -import org.jetbrains.annotations.NotNull; /** * Communication channel with failover and partition awareness. */ final class ReliableChannel implements AutoCloseable, NotificationListener { - /** Timeout to wait for executor service to shutdown (in milliseconds). */ - private static final long EXECUTOR_SHUTDOWN_TIMEOUT = 10_000L; - /** Do nothing helper function. */ private static final Consumer DO_NOTHING = (v) -> {}; - /** Async runner thread name. */ - static final String ASYNC_RUNNER_THREAD_NAME = "thin-client-channel-async-init"; - /** Channel factory. */ - private final Function chFactory; + private final BiFunction chFactory; /** Client channel holders for each configured address. */ private volatile List channels; @@ -96,19 +90,6 @@ final class ReliableChannel implements AutoCloseable, NotificationListener { /** Listeners of channel close events. */ private final Collection> channelCloseLsnrs = new CopyOnWriteArrayList<>(); - /** Async tasks thread pool. */ - private final ExecutorService asyncRunner = Executors.newSingleThreadExecutor( - new ThreadFactory() { - @Override public Thread newThread(@NotNull Runnable r) { - Thread thread = new Thread(r, ASYNC_RUNNER_THREAD_NAME); - - thread.setDaemon(true); - - return thread; - } - } - ); - /** Channels reinit was scheduled. */ private final AtomicBoolean scheduledChannelsReinit = new AtomicBoolean(); @@ -130,6 +111,9 @@ final class ReliableChannel implements AutoCloseable, NotificationListener { /** Guard channels and curChIdx together. */ private final ReadWriteLock curChannelsGuard = new ReentrantReadWriteLock(); + /** Connection manager. */ + private final ClientConnectionMultiplexer connMgr; + /** Cache addresses returned by {@code ThinClientAddressFinder}. 
*/ private volatile String[] prevHostAddrs; @@ -137,9 +121,9 @@ final class ReliableChannel implements AutoCloseable, NotificationListener { * Constructor. */ ReliableChannel( - Function chFactory, - ClientConfiguration clientCfg, - IgniteBinary binary + BiFunction chFactory, + ClientConfiguration clientCfg, + IgniteBinary binary ) { if (chFactory == null) throw new NullPointerException("chFactory"); @@ -153,20 +137,16 @@ final class ReliableChannel implements AutoCloseable, NotificationListener { partitionAwarenessEnabled = clientCfg.isPartitionAwarenessEnabled(); affinityCtx = new ClientCacheAffinityContext(binary); + + connMgr = new GridNioClientConnectionMultiplexer(clientCfg); + connMgr.start(); } /** {@inheritDoc} */ @Override public synchronized void close() { closed = true; - asyncRunner.shutdown(); - - try { - asyncRunner.awaitTermination(EXECUTOR_SHUTDOWN_TIMEOUT, TimeUnit.MILLISECONDS); - } - catch (InterruptedException ignore) { - // No-op. - } + connMgr.stop(); List holders = channels; @@ -430,7 +410,7 @@ public void addChannelCloseListener(Consumer lsnr) { ClientChannel ch, ClientOperation op, long rsrcId, - byte[] payload, + ByteBuffer payload, Exception err ) { for (NotificationListener lsnr : notificationLsnrs) { @@ -579,7 +559,7 @@ private void onChannelFailure(ClientChannelHolder hld, ClientChannel ch) { * Asynchronously try to establish a connection to all configured servers. */ private void initAllChannelsAsync() { - asyncRunner.submit( + ForkJoinPool.commonPool().submit( () -> { List holders = channels; @@ -608,7 +588,7 @@ private void onTopologyChanged(ClientChannel ch) { if (scheduledChannelsReinit.compareAndSet(false, true)) { // If partition awareness is disabled then only schedule and wait for the default channel to fail. if (partitionAwarenessEnabled) - asyncRunner.submit(this::channelsInit); + ForkJoinPool.commonPool().submit(this::channelsInit); } } } @@ -867,6 +847,7 @@ private int getRetryLimit() { /** * Channels holder. 
*/ + @SuppressWarnings("PackageVisibleInnerClass") // Visible for tests. class ClientChannelHolder { /** Channel configuration. */ private final ClientChannelConfiguration chCfg; @@ -937,7 +918,7 @@ private ClientChannel getOrCreateChannel(boolean ignoreThrottling) if (!ignoreThrottling && applyReconnectionThrottling()) throw new ClientConnectionException("Reconnect is not allowed due to applied throttling"); - ClientChannel channel = chFactory.apply(chCfg); + ClientChannel channel = chFactory.apply(chCfg, connMgr); if (channel.serverNodeId() != null) { channel.addTopologyChangeListener(ReliableChannel.this::onTopologyChanged); @@ -1008,6 +989,7 @@ InetSocketAddress getAddress() { /** * Get holders reference. For test purposes. */ + @SuppressWarnings("AssignmentOrReturnOfFieldWithMutableType") // For tests. List getChannelHolders() { return channels; } @@ -1015,6 +997,7 @@ List getChannelHolders() { /** * Get node channels reference. For test purposes. */ + @SuppressWarnings("AssignmentOrReturnOfFieldWithMutableType") // For tests. 
Map getNodeChannels() { return nodeChannels; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java index 25df909af4fee..109c2a9b0a08c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java @@ -17,21 +17,9 @@ package org.apache.ignite.internal.client.thin; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; import java.net.InetSocketAddress; -import java.net.Socket; -import java.security.KeyManagementException; -import java.security.KeyStore; -import java.security.KeyStoreException; -import java.security.NoSuchAlgorithmException; -import java.security.UnrecoverableKeyException; -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; +import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Collection; import java.util.EnumSet; @@ -44,22 +32,8 @@ import java.util.concurrent.ForkJoinPool; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; -import java.util.function.Predicate; -import java.util.stream.Stream; -import javax.cache.configuration.Factory; -import javax.net.ssl.KeyManager; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLSocket; -import javax.net.ssl.SSLSocketFactory; -import javax.net.ssl.TrustManager; -import javax.net.ssl.TrustManagerFactory; -import javax.net.ssl.X509TrustManager; import 
org.apache.ignite.IgniteCheckedException; import org.apache.ignite.client.ClientAuthenticationException; import org.apache.ignite.client.ClientAuthorizationException; @@ -67,19 +41,20 @@ import org.apache.ignite.client.ClientException; import org.apache.ignite.client.ClientFeatureNotSupportedByServerException; import org.apache.ignite.client.ClientReconnectedException; -import org.apache.ignite.client.SslMode; -import org.apache.ignite.client.SslProtocol; import org.apache.ignite.configuration.ClientConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.binary.BinaryCachingMetadataHandler; import org.apache.ignite.internal.binary.BinaryContext; -import org.apache.ignite.internal.binary.BinaryPrimitives; import org.apache.ignite.internal.binary.BinaryReaderExImpl; import org.apache.ignite.internal.binary.BinaryWriterExImpl; -import org.apache.ignite.internal.binary.streams.BinaryHeapInputStream; +import org.apache.ignite.internal.binary.streams.BinaryByteBufferInputStream; import org.apache.ignite.internal.binary.streams.BinaryHeapOutputStream; import org.apache.ignite.internal.binary.streams.BinaryInputStream; import org.apache.ignite.internal.binary.streams.BinaryOutputStream; +import org.apache.ignite.internal.client.thin.io.ClientConnection; +import org.apache.ignite.internal.client.thin.io.ClientConnectionMultiplexer; +import org.apache.ignite.internal.client.thin.io.ClientConnectionStateHandler; +import org.apache.ignite.internal.client.thin.io.ClientMessageHandler; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.odbc.ClientListenerNioListener; import org.apache.ignite.internal.processors.odbc.ClientListenerRequest; @@ -103,19 +78,14 @@ import static org.apache.ignite.internal.client.thin.ProtocolVersionFeature.AUTHORIZATION; import static org.apache.ignite.internal.client.thin.ProtocolVersionFeature.BITMAP_FEATURES; import static 
org.apache.ignite.internal.client.thin.ProtocolVersionFeature.PARTITION_AWARENESS; -import static org.apache.ignite.ssl.SslContextFactory.DFLT_KEY_ALGORITHM; -import static org.apache.ignite.ssl.SslContextFactory.DFLT_STORE_TYPE; /** * Implements {@link ClientChannel} over TCP. */ -class TcpClientChannel implements ClientChannel { +class TcpClientChannel implements ClientChannel, ClientMessageHandler, ClientConnectionStateHandler { /** Protocol version used by default on first connection attempt. */ private static final ProtocolVersion DEFAULT_VERSION = LATEST_VER; - /** Receiver thread prefix. */ - static final String RECEIVER_THREAD_PREFIX = "thin-client-channel#"; - /** Supported protocol versions. */ private static final Collection supportedVers = Arrays.asList( V1_7_0, @@ -128,30 +98,24 @@ class TcpClientChannel implements ClientChannel { V1_0_0 ); + /** Preallocated empty bytes. */ + public static final byte[] EMPTY_BYTES = new byte[0]; + /** Protocol context. */ - private ProtocolContext protocolCtx; + private volatile ProtocolContext protocolCtx; /** Server node ID. */ - private UUID srvNodeId; + private volatile UUID srvNodeId; /** Server topology version. */ - private AffinityTopologyVersion srvTopVer; + private volatile AffinityTopologyVersion srvTopVer; /** Channel. */ - private final Socket sock; - - /** Output stream. */ - private final OutputStream out; - - /** Data input. */ - private final ByteCountingDataInput dataInput; + private final ClientConnection sock; /** Request id. */ private final AtomicLong reqId = new AtomicLong(1); - /** Send lock. */ - private final Lock sndLock = new ReentrantLock(); - /** Pending requests. */ private final Map pendingReqs = new ConcurrentHashMap<>(); @@ -167,14 +131,11 @@ class TcpClientChannel implements ClientChannel { /** Executor for async operation listeners. */ private final Executor asyncContinuationExecutor; - /** Receiver thread (processes incoming messages). 
*/ - private Thread receiverThread; - /** Send/receive timeout in milliseconds. */ private final int timeout; /** Constructor. */ - TcpClientChannel(ClientChannelConfiguration cfg) + TcpClientChannel(ClientChannelConfiguration cfg, ClientConnectionMultiplexer connMgr) throws ClientConnectionException, ClientAuthenticationException, ClientProtocolError { validateConfiguration(cfg); @@ -183,21 +144,9 @@ class TcpClientChannel implements ClientChannel { timeout = cfg.getTimeout(); - try { - sock = createSocket(cfg); - - out = sock.getOutputStream(); - dataInput = new ByteCountingDataInput(sock.getInputStream()); - - handshake(DEFAULT_VERSION, cfg.getUserName(), cfg.getUserPassword(), cfg.getUserAttributes()); + sock = connMgr.open(cfg.getAddress(), this, this); - // Disable timeout on socket after handshake, instead, get future result with timeout in "receive" method. - if (timeout > 0) - sock.setSoTimeout(0); - } - catch (IOException e) { - throw handleIOError("addr=" + cfg.getAddress(), e); - } + handshake(DEFAULT_VERSION, cfg.getUserName(), cfg.getUserPassword(), cfg.getUserAttributes()); } /** {@inheritDoc} */ @@ -205,28 +154,25 @@ class TcpClientChannel implements ClientChannel { close(null); } + /** {@inheritDoc} */ + @Override public void onMessage(ByteBuffer buf) { + processNextMessage(buf); + } + + /** {@inheritDoc} */ + @Override public void onDisconnected(@Nullable Exception e) { + close(e); + } + /** * Close the channel with cause. */ private void close(Throwable cause) { if (closed.compareAndSet(false, true)) { - U.closeQuiet(dataInput); - U.closeQuiet(out); U.closeQuiet(sock); - sndLock.lock(); // Lock here to prevent creation of new pending requests. 
- - try { - for (ClientRequestFuture pendingReq : pendingReqs.values()) - pendingReq.onDone(new ClientConnectionException("Channel is closed", cause)); - - if (receiverThread != null) - receiverThread.interrupt(); - } - finally { - sndLock.unlock(); - } - + for (ClientRequestFuture pendingReq : pendingReqs.values()) + pendingReq.onDone(new ClientConnectionException("Channel is closed", cause)); } } @@ -251,7 +197,8 @@ private void close(Throwable cause) { ClientRequestFuture fut = send(op, payloadWriter); return receiveAsync(fut, payloadReader); - } catch (Throwable t) { + } + catch (Throwable t) { CompletableFuture fut = new CompletableFuture<>(); fut.completeExceptionally(t); @@ -268,15 +215,10 @@ private ClientRequestFuture send(ClientOperation op, Consumer T receive(ClientRequestFuture pendingReq, Function payloadReader) throws ClientException { try { - byte[] payload = timeout > 0 ? pendingReq.get(timeout) : pendingReq.get(); + ByteBuffer payload = timeout > 0 ? pendingReq.get(timeout) : pendingReq.get(); if (payload == null || payloadReader == null) return null; @@ -338,7 +278,7 @@ private CompletableFuture receiveAsync(ClientRequestFuture pendingReq, Fu pendingReq.listen(payloadFut -> asyncContinuationExecutor.execute(() -> { try { - byte[] payload = payloadFut.get(); + ByteBuffer payload = payloadFut.get(); if (payload == null || payloadReader == null) fut.complete(null); @@ -346,7 +286,8 @@ private CompletableFuture receiveAsync(ClientRequestFuture pendingReq, Fu T res = payloadReader.apply(new PayloadInputChannel(this, payload)); fut.complete(res); } - } catch (Throwable t) { + } + catch (Throwable t) { fut.completeExceptionally(convertException(t)); } })); @@ -388,59 +329,30 @@ private RuntimeException convertException(Throwable e) { return new ClientException(e.getMessage(), e); } - /** - * Init and start receiver thread if it wasn't started before. - * - * Note: Method should be called only under external synchronization. 
- */ - private void initReceiverThread() { - if (receiverThread == null) { - Socket sock = this.sock; - - String sockInfo = sock == null ? null : sock.getInetAddress().getHostName() + ":" + sock.getPort(); - - receiverThread = new Thread(() -> { - try { - while (!closed()) - processNextMessage(); - } - catch (Throwable e) { - close(e); - } - }, RECEIVER_THREAD_PREFIX + sockInfo); - - receiverThread.setDaemon(true); - - receiverThread.start(); - } - } - /** * Process next message from the input stream and complete corresponding future. */ - private void processNextMessage() throws ClientProtocolError, ClientConnectionException { - // blocking read a message header not to fall into a busy loop - int msgSize = dataInput.readInt(2048); - - if (msgSize <= 0) - throw new ClientProtocolError(String.format("Invalid message size: %s", msgSize)); + private void processNextMessage(ByteBuffer buf) throws ClientProtocolError, ClientConnectionException { + BinaryInputStream dataInput = BinaryByteBufferInputStream.create(buf); - long bytesReadOnStartMsg = dataInput.totalBytesRead(); + if (protocolCtx == null) { + // Process handshake. 
+ pendingReqs.remove(-1L).onDone(buf); + return; + } - long resId = dataInput.spinReadLong(); + long resId = dataInput.readLong(); int status = 0; ClientOperation notificationOp = null; - BinaryInputStream resIn; - if (protocolCtx.isFeatureSupported(PARTITION_AWARENESS)) { - short flags = dataInput.spinReadShort(); + short flags = dataInput.readShort(); if ((flags & ClientFlag.AFFINITY_TOPOLOGY_CHANGED) != 0) { - long topVer = dataInput.spinReadLong(); - int minorTopVer = dataInput.spinReadInt(); + long topVer = dataInput.readLong(); + int minorTopVer = dataInput.readInt(); srvTopVer = new AffinityTopologyVersion(topVer, minorTopVer); @@ -449,7 +361,7 @@ private void processNextMessage() throws ClientProtocolError, ClientConnectionEx } if ((flags & ClientFlag.NOTIFICATION) != 0) { - short notificationCode = dataInput.spinReadShort(); + short notificationCode = dataInput.readShort(); notificationOp = ClientOperation.fromCode(notificationCode); @@ -458,28 +370,25 @@ private void processNextMessage() throws ClientProtocolError, ClientConnectionEx } if ((flags & ClientFlag.ERROR) != 0) - status = dataInput.spinReadInt(); + status = dataInput.readInt(); } else - status = dataInput.spinReadInt(); + status = dataInput.readInt(); - int hdrSize = (int)(dataInput.totalBytesRead() - bytesReadOnStartMsg); + int hdrSize = dataInput.position(); + int msgSize = buf.limit(); - byte[] res = null; + ByteBuffer res = null; Exception err = null; if (status == 0) { if (msgSize > hdrSize) - res = dataInput.spinRead(msgSize - hdrSize); + res = buf; } - else if (status == ClientStatus.SECURITY_VIOLATION) { - dataInput.spinRead(msgSize - hdrSize); // Read message to the end. 
- + else if (status == ClientStatus.SECURITY_VIOLATION) err = new ClientAuthorizationException(); - } else { - resIn = new BinaryHeapInputStream(dataInput.spinRead(msgSize - hdrSize)); - - String errMsg = ClientUtils.createBinaryReader(null, resIn).readString(); + else { + String errMsg = ClientUtils.createBinaryReader(null, dataInput).readString(); err = new ClientServerError(errMsg, status, resId); } @@ -543,31 +452,21 @@ else if (addr.getPort() < 1024 || addr.getPort() > 49151) throw new IllegalArgumentException(error); } - /** Create socket. */ - private static Socket createSocket(ClientChannelConfiguration cfg) throws IOException { - Socket sock = cfg.getSslMode() == SslMode.REQUIRED ? - new ClientSslSocketFactory(cfg).create() : - new Socket(cfg.getAddress().getHostName(), cfg.getAddress().getPort()); - - sock.setTcpNoDelay(cfg.isTcpNoDelay()); - - if (cfg.getTimeout() > 0) - sock.setSoTimeout(cfg.getTimeout()); - - if (cfg.getSendBufferSize() > 0) - sock.setSendBufferSize(cfg.getSendBufferSize()); - - if (cfg.getReceiveBufferSize() > 0) - sock.setReceiveBufferSize(cfg.getReceiveBufferSize()); - - return sock; - } - /** Client handshake. */ private void handshake(ProtocolVersion ver, String user, String pwd, Map userAttrs) throws ClientConnectionException, ClientAuthenticationException, ClientProtocolError { + ClientRequestFuture fut = new ClientRequestFuture(); + pendingReqs.put(-1L, fut); + handshakeReq(ver, user, pwd, userAttrs); - handshakeRes(ver, user, pwd, userAttrs); + + try { + ByteBuffer res = timeout > 0 ? fut.get(timeout) : fut.get(); + handshakeRes(res, ver, user, pwd, userAttrs); + } + catch (IgniteCheckedException e) { + throw new ClientConnectionException(e.getMessage(), e); + } } /** Send handshake request. 
*/ @@ -604,7 +503,7 @@ private void handshakeReq(ProtocolVersion proposedVer, String user, String pwd, writer.out().writeInt(0, writer.out().position() - 4);// actual size - write(writer.array(), writer.out().position()); + write(writer.out().arrayCopy(), writer.out().position()); } } @@ -621,20 +520,15 @@ private ProtocolContext protocolContextFromVersion(ProtocolVersion ver) { } /** Receive and handle handshake response. */ - private void handshakeRes(ProtocolVersion proposedVer, String user, String pwd, Map userAttrs) + private void handshakeRes(ByteBuffer buf, ProtocolVersion proposedVer, String user, String pwd, Map userAttrs) throws ClientConnectionException, ClientAuthenticationException, ClientProtocolError { - int resSize = dataInput.readInt(); - - if (resSize <= 0) - throw new ClientProtocolError(String.format("Invalid handshake response size: %s", resSize)); - - BinaryInputStream res = new BinaryHeapInputStream(dataInput.read(resSize)); + BinaryInputStream res = BinaryByteBufferInputStream.create(buf); try (BinaryReaderExImpl reader = ClientUtils.createBinaryReader(null, res)) { boolean success = res.readBoolean(); if (success) { - byte[] features = new byte[0]; + byte[] features = EMPTY_BYTES; if (ProtocolContext.isFeatureSupported(proposedVer, BITMAP_FEATURES)) features = reader.readByteArray(); @@ -680,12 +574,13 @@ else if (!supportedVers.contains(srvVer) || /** Write bytes to the output stream. 
*/ private void write(byte[] bytes, int len) throws ClientConnectionException { + ByteBuffer buf = ByteBuffer.wrap(bytes, 0, len); + try { - out.write(bytes, 0, len); - out.flush(); + sock.send(buf); } - catch (IOException e) { - throw handleIOError(e); + catch (IgniteCheckedException e) { + throw new ClientConnectionException(e.getMessage(), e); } } @@ -704,425 +599,9 @@ private ClientException handleIOError(String chInfo, @Nullable IOException ex) { return new ClientConnectionException("Ignite cluster is unavailable [" + chInfo + ']', ex); } - /** - * Auxiliary class to read byte buffers and numeric values, counting total bytes read. - * Numeric values are read in the little-endian byte order. - */ - private class ByteCountingDataInput implements AutoCloseable { - /** Input stream. */ - private final InputStream in; - - /** Total bytes read from the input stream. */ - private long totalBytesRead; - - /** Temporary buffer to read long, int and short values. */ - private final byte[] tmpBuf = new byte[Long.BYTES]; - - /** - * @param in Input stream. - */ - public ByteCountingDataInput(InputStream in) { - this.in = in; - } - - /** Read bytes from the input stream. */ - public byte[] read(int len) throws ClientConnectionException { - byte[] bytes = new byte[len]; - - read(bytes, len, 0); - - return bytes; - } - - /** Read bytes from the input stream. */ - public byte[] spinRead(int len) { - byte[] bytes = new byte[len]; - - read(bytes, len, Integer.MAX_VALUE); - - return bytes; - } - - /** - * Read bytes from the input stream to the buffer. - * - * @param bytes Bytes buffer. - * @param len Length. - * @param tryReadCnt Number of reads before falling into blocking read. 
- */ - public void read(byte[] bytes, int len, int tryReadCnt) throws ClientConnectionException { - int offset = 0; - - try { - while (offset < len) { - int toRead; - - if (tryReadCnt == 0) - toRead = len - offset; - else if ((toRead = Math.min(in.available(), len - offset)) == 0) { - tryReadCnt--; - - continue; - } - - int read = in.read(bytes, offset, toRead); - - if (read < 0) - throw handleIOError(null); - - offset += read; - totalBytesRead += read; - } - } - catch (IOException e) { - throw handleIOError(e); - } - } - - /** - * Read long value from the input stream. - */ - public long readLong() throws ClientConnectionException { - return readLong(0); - } - - /** - * Read long value from the input stream. - */ - public long spinReadLong() throws ClientConnectionException { - return readLong(Integer.MAX_VALUE); - } - - /** - * Read long value from the input stream. - * - * @param tryReadCnt Number of reads before falling into blocking read. - */ - private long readLong(int tryReadCnt) throws ClientConnectionException { - read(tmpBuf, Long.BYTES, tryReadCnt); - - return BinaryPrimitives.readLong(tmpBuf, 0); - } - - /** - * Read int value from the input stream. - */ - public int readInt() throws ClientConnectionException { - return readInt(0); - } - - /** - * Read int value from the input stream. - */ - public int spinReadInt() throws ClientConnectionException { - return readInt(Integer.MAX_VALUE); - } - - /** - * Read int value from the input stream. - * - * @param tryReadCnt Number of reads before falling into blocking read. - */ - private int readInt(int tryReadCnt) throws ClientConnectionException { - read(tmpBuf, Integer.BYTES, tryReadCnt); - - return BinaryPrimitives.readInt(tmpBuf, 0); - } - - /** - * Read short value from the input stream. - */ - public short readShort() throws ClientConnectionException { - return readShort(0); - } - - /** - * Read short value from the input stream. 
- */ - public short spinReadShort() throws ClientConnectionException { - return readShort(Integer.MAX_VALUE); - } - - /** - * Read short value from the input stream. - * - * @param tryReadCnt Number of reads before falling into blocking read. - */ - public short readShort(int tryReadCnt) throws ClientConnectionException { - read(tmpBuf, Short.BYTES, tryReadCnt); - - return BinaryPrimitives.readShort(tmpBuf, 0); - } - - /** - * Gets total bytes read from the input stream. - */ - public long totalBytesRead() { - return totalBytesRead; - } - - /** - * Close input stream. - */ - @Override public void close() throws IOException { - in.close(); - } - } - /** * */ - private static class ClientRequestFuture extends GridFutureAdapter { - } - - /** SSL Socket Factory. */ - private static class ClientSslSocketFactory { - /** Trust manager ignoring all certificate checks. */ - private static final TrustManager ignoreErrorsTrustMgr = new X509TrustManager() { - @Override public X509Certificate[] getAcceptedIssuers() { - return null; - } - - @Override public void checkServerTrusted(X509Certificate[] arg0, String arg1) { - } - - @Override public void checkClientTrusted(X509Certificate[] arg0, String arg1) { - } - }; - - /** Config. */ - private final ClientChannelConfiguration cfg; - - /** Constructor. */ - ClientSslSocketFactory(ClientChannelConfiguration cfg) { - this.cfg = cfg; - } - - /** Create SSL socket. */ - SSLSocket create() throws IOException { - InetSocketAddress addr = cfg.getAddress(); - - SSLSocket sock = (SSLSocket)getSslSocketFactory(cfg).createSocket(addr.getHostName(), addr.getPort()); - - sock.setUseClientMode(true); - - sock.startHandshake(); - - return sock; - } - - /** Create SSL socket factory. 
*/ - private static SSLSocketFactory getSslSocketFactory(ClientChannelConfiguration cfg) { - Factory sslCtxFactory = cfg.getSslContextFactory(); - - if (sslCtxFactory != null) { - try { - return sslCtxFactory.create().getSocketFactory(); - } - catch (Exception e) { - throw new ClientError("SSL Context Factory failed", e); - } - } - - BiFunction or = (val, dflt) -> val == null || val.isEmpty() ? dflt : val; - - String keyStore = or.apply( - cfg.getSslClientCertificateKeyStorePath(), - System.getProperty("javax.net.ssl.keyStore") - ); - - String keyStoreType = or.apply( - cfg.getSslClientCertificateKeyStoreType(), - or.apply(System.getProperty("javax.net.ssl.keyStoreType"), DFLT_STORE_TYPE) - ); - - String keyStorePwd = or.apply( - cfg.getSslClientCertificateKeyStorePassword(), - System.getProperty("javax.net.ssl.keyStorePassword") - ); - - String trustStore = or.apply( - cfg.getSslTrustCertificateKeyStorePath(), - System.getProperty("javax.net.ssl.trustStore") - ); - - String trustStoreType = or.apply( - cfg.getSslTrustCertificateKeyStoreType(), - or.apply(System.getProperty("javax.net.ssl.trustStoreType"), DFLT_STORE_TYPE) - ); - - String trustStorePwd = or.apply( - cfg.getSslTrustCertificateKeyStorePassword(), - System.getProperty("javax.net.ssl.trustStorePassword") - ); - - String algorithm = or.apply(cfg.getSslKeyAlgorithm(), DFLT_KEY_ALGORITHM); - - String proto = toString(cfg.getSslProtocol()); - - if (Stream.of(keyStore, keyStorePwd, keyStoreType, trustStore, trustStorePwd, trustStoreType) - .allMatch(s -> s == null || s.isEmpty()) - ) { - try { - return SSLContext.getDefault().getSocketFactory(); - } - catch (NoSuchAlgorithmException e) { - throw new ClientError("Default SSL context cryptographic algorithm is not available", e); - } - } - - KeyManager[] keyManagers = getKeyManagers(algorithm, keyStore, keyStoreType, keyStorePwd); - - TrustManager[] trustManagers = cfg.isSslTrustAll() ? 
- new TrustManager[] {ignoreErrorsTrustMgr} : - getTrustManagers(algorithm, trustStore, trustStoreType, trustStorePwd); - - try { - SSLContext sslCtx = SSLContext.getInstance(proto); - - sslCtx.init(keyManagers, trustManagers, null); - - return sslCtx.getSocketFactory(); - } - catch (NoSuchAlgorithmException e) { - throw new ClientError("SSL context cryptographic algorithm is not available", e); - } - catch (KeyManagementException e) { - throw new ClientError("Failed to create SSL Context", e); - } - } - - /** - * @return String representation of {@link SslProtocol} as required by {@link SSLContext}. - */ - private static String toString(SslProtocol proto) { - switch (proto) { - case TLSv1_1: - return "TLSv1.1"; - - case TLSv1_2: - return "TLSv1.2"; - - default: - return proto.toString(); - } - } - - /** */ - private static KeyManager[] getKeyManagers( - String algorithm, - String keyStore, - String keyStoreType, - String keyStorePwd - ) { - KeyManagerFactory keyMgrFactory; - - try { - keyMgrFactory = KeyManagerFactory.getInstance(algorithm); - } - catch (NoSuchAlgorithmException e) { - throw new ClientError("Key manager cryptographic algorithm is not available", e); - } - - Predicate empty = s -> s == null || s.isEmpty(); - - if (!empty.test(keyStore) && !empty.test(keyStoreType)) { - char[] pwd = (keyStorePwd == null) ? 
new char[0] : keyStorePwd.toCharArray(); - - KeyStore store = loadKeyStore("Client", keyStore, keyStoreType, pwd); - - try { - keyMgrFactory.init(store, pwd); - } - catch (UnrecoverableKeyException e) { - throw new ClientError("Could not recover key store key", e); - } - catch (KeyStoreException e) { - throw new ClientError( - String.format("Client key store provider of type [%s] is not available", keyStoreType), - e - ); - } - catch (NoSuchAlgorithmException e) { - throw new ClientError("Client key store integrity check algorithm is not available", e); - } - } - - return keyMgrFactory.getKeyManagers(); - } - - /** */ - private static TrustManager[] getTrustManagers( - String algorithm, - String trustStore, - String trustStoreType, - String trustStorePwd - ) { - TrustManagerFactory trustMgrFactory; - - try { - trustMgrFactory = TrustManagerFactory.getInstance(algorithm); - } - catch (NoSuchAlgorithmException e) { - throw new ClientError("Trust manager cryptographic algorithm is not available", e); - } - - Predicate empty = s -> s == null || s.isEmpty(); - - if (!empty.test(trustStore) && !empty.test(trustStoreType)) { - char[] pwd = (trustStorePwd == null) ? 
new char[0] : trustStorePwd.toCharArray(); - - KeyStore store = loadKeyStore("Trust", trustStore, trustStoreType, pwd); - - try { - trustMgrFactory.init(store); - } - catch (KeyStoreException e) { - throw new ClientError( - String.format("Trust key store provider of type [%s] is not available", trustStoreType), - e - ); - } - } - - return trustMgrFactory.getTrustManagers(); - } - - /** */ - private static KeyStore loadKeyStore(String lb, String path, String type, char[] pwd) { - KeyStore store; - - try { - store = KeyStore.getInstance(type); - } - catch (KeyStoreException e) { - throw new ClientError( - String.format("%s key store provider of type [%s] is not available", lb, type), - e - ); - } - - try (InputStream in = new FileInputStream(new File(path))) { - - store.load(in, pwd); - - return store; - } - catch (FileNotFoundException e) { - throw new ClientError(String.format("%s key store file [%s] does not exist", lb, path), e); - } - catch (NoSuchAlgorithmException e) { - throw new ClientError( - String.format("%s key store integrity check algorithm is not available", lb), - e - ); - } - catch (CertificateException e) { - throw new ClientError(String.format("Could not load certificate from %s key store", lb), e); - } - catch (IOException e) { - throw new ClientError(String.format("Could not read %s key store", lb), e); - } - } + private static class ClientRequestFuture extends GridFutureAdapter { } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpIgniteClient.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpIgniteClient.java index 9cea6a47ef2f3..c67184accc968 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpIgniteClient.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpIgniteClient.java @@ -24,8 +24,8 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import java.util.function.BiFunction; 
import java.util.function.Consumer; -import java.util.function.Function; import org.apache.ignite.IgniteBinary; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.binary.BinaryObjectException; @@ -55,6 +55,7 @@ import org.apache.ignite.internal.binary.BinaryWriterExImpl; import org.apache.ignite.internal.binary.streams.BinaryInputStream; import org.apache.ignite.internal.binary.streams.BinaryOutputStream; +import org.apache.ignite.internal.client.thin.io.ClientConnectionMultiplexer; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.marshaller.MarshallerContext; @@ -101,8 +102,8 @@ private TcpIgniteClient(ClientConfiguration cfg) throws ClientException { * Constructor with custom channel factory. */ TcpIgniteClient( - Function chFactory, - ClientConfiguration cfg + BiFunction chFactory, + ClientConfiguration cfg ) throws ClientException { final ClientBinaryMetadataHandler metadataHandler = new ClientBinaryMetadataHandler(); @@ -116,18 +117,24 @@ private TcpIgniteClient(ClientConfiguration cfg) throws ClientException { ch = new ReliableChannel(chFactory, cfg, binary); - ch.channelsInit(); + try { + ch.channelsInit(); - ch.addChannelFailListener(() -> metadataHandler.onReconnect()); + ch.addChannelFailListener(() -> metadataHandler.onReconnect()); - transactions = new TcpClientTransactions(ch, marsh, - new ClientTransactionConfiguration(cfg.getTransactionConfiguration())); + transactions = new TcpClientTransactions(ch, marsh, + new ClientTransactionConfiguration(cfg.getTransactionConfiguration())); - cluster = new ClientClusterImpl(ch, marsh); + cluster = new ClientClusterImpl(ch, marsh); - compute = new ClientComputeImpl(ch, marsh, cluster.defaultClusterGroup()); + compute = new ClientComputeImpl(ch, marsh, cluster.defaultClusterGroup()); - services = new ClientServicesImpl(ch, marsh, cluster.defaultClusterGroup()); + services = new ClientServicesImpl(ch, marsh, 
cluster.defaultClusterGroup()); + } + catch (Exception e) { + ch.close(); + throw e; + } } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientConnection.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientConnection.java new file mode 100644 index 0000000000000..eed90b6bc7756 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientConnection.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.client.thin.io; + +import java.nio.ByteBuffer; + +import org.apache.ignite.IgniteCheckedException; + +/** + * Client connection: abstracts away sending and receiving messages. + */ +public interface ClientConnection extends AutoCloseable { + /** + * Sends a message. + * + * @param msg Message buffer. + */ + void send(ByteBuffer msg) throws IgniteCheckedException; + + /** + * Closes the connection. 
+ */ + @Override void close(); +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientConnectionMultiplexer.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientConnectionMultiplexer.java new file mode 100644 index 0000000000000..891e2b350ec6b --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientConnectionMultiplexer.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.client.thin.io; + +import java.net.InetSocketAddress; + +import org.apache.ignite.client.ClientConnectionException; + +/** + * Client connection multiplexer: manages multiple connections with a shared resource pool (worker threads, etc). + */ +public interface ClientConnectionMultiplexer { + /** + * Initializes this instance. + */ + void start(); + + /** + * Stops this instance. + */ + void stop(); + + /** + * Opens a new connection. + * + * @param addr Address. + * @param msgHnd Incoming message handler. + * @param stateHnd Connection state handler. + * @return Created connection. + * @throws ClientConnectionException when connection can't be established. 
+ */ + ClientConnection open( + InetSocketAddress addr, + ClientMessageHandler msgHnd, + ClientConnectionStateHandler stateHnd) + throws ClientConnectionException; +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientConnectionStateHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientConnectionStateHandler.java new file mode 100644 index 0000000000000..3f9481e525d0a --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientConnectionStateHandler.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.client.thin.io; + +import org.jetbrains.annotations.Nullable; + +/** + * Handles thin client connection state. + */ +public interface ClientConnectionStateHandler { + /** + * Handles connection loss. + * @param e Exception that caused the disconnect, can be null. 
+ */ + void onDisconnected(@Nullable Exception e); +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientMessageDecoder.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientMessageDecoder.java new file mode 100644 index 0000000000000..06ab441db7621 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientMessageDecoder.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.client.thin.io; + +import java.nio.ByteBuffer; + +/** + * Decodes thin client messages from partial buffers. + */ +public class ClientMessageDecoder { + /** */ + private byte[] data; + + /** */ + private int cnt = -4; + + /** */ + private int msgSize; + + /** + * Applies the next partial buffer. + * + * @param buf Buffer. + * @return Decoded message, or null when not yet complete. + */ + public byte[] apply(ByteBuffer buf) { + boolean msgReady = read(buf); + + return msgReady ? data : null; + } + + /** + * Reads the buffer. + * + * @param buf Buffer. + * @return True when a complete message has been received; false otherwise. 
+ */ + @SuppressWarnings("DuplicatedCode") // A little duplication is better than a little dependency. + private boolean read(ByteBuffer buf) { + if (cnt < 0) { + for (; cnt < 0 && buf.hasRemaining(); cnt++) + msgSize |= (buf.get() & 0xFF) << (8 * (4 + cnt)); + + if (cnt < 0) + return false; + + data = new byte[msgSize]; + } + + assert data != null; + assert cnt >= 0; + assert msgSize > 0; + + int remaining = buf.remaining(); + + if (remaining > 0) { + int missing = msgSize - cnt; + + if (missing > 0) { + int len = Math.min(missing, remaining); + + buf.get(data, cnt, len); + + cnt += len; + } + } + + if (cnt == msgSize) { + cnt = -4; + msgSize = 0; + + return true; + } + + return false; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientMessageHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientMessageHandler.java new file mode 100644 index 0000000000000..a52859ff43ce9 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/ClientMessageHandler.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.client.thin.io; + +import java.nio.ByteBuffer; + +/** + * Handles thin client responses and server -> client notifications. + */ +public interface ClientMessageHandler { + /** + * Handles messages from the server. + * @param buf Buffer. + */ + void onMessage(ByteBuffer buf); +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientConnection.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientConnection.java new file mode 100644 index 0000000000000..e81d6f4c3a321 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientConnection.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.client.thin.io.gridnioserver; + +import java.nio.ByteBuffer; + +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.client.thin.io.ClientConnection; +import org.apache.ignite.internal.client.thin.io.ClientConnectionStateHandler; +import org.apache.ignite.internal.client.thin.io.ClientMessageHandler; +import org.apache.ignite.internal.util.nio.GridNioSession; +import org.apache.ignite.internal.util.nio.GridNioSessionMetaKey; + +/** + * Client connection. + */ +class GridNioClientConnection implements ClientConnection { + /** */ + static final int SES_META_CONN = GridNioSessionMetaKey.nextUniqueKey(); + + /** */ + private final GridNioSession ses; + + /** */ + private final ClientMessageHandler msgHnd; + + /** */ + private final ClientConnectionStateHandler stateHnd; + + /** + * Ctor. + * + * @param ses Session. + */ + public GridNioClientConnection(GridNioSession ses, + ClientMessageHandler msgHnd, + ClientConnectionStateHandler stateHnd) { + assert ses != null; + assert msgHnd != null; + assert stateHnd != null; + + this.ses = ses; + this.msgHnd = msgHnd; + this.stateHnd = stateHnd; + + ses.addMeta(SES_META_CONN, this); + } + + /** {@inheritDoc} */ + @Override public void send(ByteBuffer msg) throws IgniteCheckedException { + ses.sendNoFuture(msg, null); + } + + /** {@inheritDoc} */ + @Override public void close() { + ses.close(); + } + + /** + * Handles incoming message. + * + * @param msg Message. + */ + void onMessage(ByteBuffer msg) { + assert msg != null; + + msgHnd.onMessage(msg); + } + + /** + * Handles disconnect. + * + * @param e Exception that caused the disconnect. 
+ */ + void onDisconnected(Exception e) { + stateHnd.onDisconnected(e); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientConnectionMultiplexer.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientConnectionMultiplexer.java new file mode 100644 index 0000000000000..74a70251e96cb --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientConnectionMultiplexer.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.client.thin.io.gridnioserver; + +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.channels.SocketChannel; +import java.util.HashMap; +import java.util.Map; +import javax.net.ssl.SSLContext; + +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.client.ClientConnectionException; +import org.apache.ignite.configuration.ClientConfiguration; +import org.apache.ignite.internal.client.thin.ClientSslUtils; +import org.apache.ignite.internal.client.thin.io.ClientConnection; +import org.apache.ignite.internal.client.thin.io.ClientConnectionMultiplexer; +import org.apache.ignite.internal.client.thin.io.ClientConnectionStateHandler; +import org.apache.ignite.internal.client.thin.io.ClientMessageHandler; +import org.apache.ignite.internal.util.nio.GridNioCodecFilter; +import org.apache.ignite.internal.util.nio.GridNioFilter; +import org.apache.ignite.internal.util.nio.GridNioFuture; +import org.apache.ignite.internal.util.nio.GridNioFutureImpl; +import org.apache.ignite.internal.util.nio.GridNioServer; +import org.apache.ignite.internal.util.nio.GridNioSession; +import org.apache.ignite.internal.util.nio.ssl.GridNioSslFilter; +import org.apache.ignite.logger.NullLogger; + +/** + * Client connection multiplexer based on {@link org.apache.ignite.internal.util.nio.GridNioServer}. + */ +public class GridNioClientConnectionMultiplexer implements ClientConnectionMultiplexer { + /** Worker thread prefix. */ + private static final String THREAD_PREFIX = "thin-client-channel"; + + /** */ + private static final int CLIENT_MODE_PORT = -1; + + /** */ + private final GridNioServer srv; + + /** */ + private final SSLContext sslCtx; + + /** + * Constructor. + * + * @param cfg Client config. 
+ */ + public GridNioClientConnectionMultiplexer(ClientConfiguration cfg) { + IgniteLogger gridLog = new NullLogger(); + + GridNioFilter[] filters; + + GridNioFilter codecFilter = new GridNioCodecFilter(new GridNioClientParser(), gridLog, false); + + sslCtx = ClientSslUtils.getSslContext(cfg); + + if (sslCtx != null) { + GridNioSslFilter sslFilter = new GridNioSslFilter(sslCtx, true, ByteOrder.nativeOrder(), gridLog); + sslFilter.directMode(false); + filters = new GridNioFilter[] {codecFilter, sslFilter}; + } + else + filters = new GridNioFilter[] {codecFilter}; + + try { + srv = GridNioServer.builder() + .port(CLIENT_MODE_PORT) + .listener(new GridNioClientListener()) + .filters(filters) + .logger(gridLog) + .selectorCount(1) // Using more selectors does not seem to improve performance. + .byteOrder(ByteOrder.nativeOrder()) + .directBuffer(true) + .directMode(false) + .igniteInstanceName("thinClient") + .serverName(THREAD_PREFIX) + .idleTimeout(Long.MAX_VALUE) + .socketReceiveBufferSize(cfg.getReceiveBufferSize()) + .socketSendBufferSize(cfg.getSendBufferSize()) + .tcpNoDelay(true) + .build(); + } + catch (IgniteCheckedException e) { + throw new IgniteException(e); + } + } + + /** {@inheritDoc} */ + @Override public void start() { + srv.start(); + } + + /** {@inheritDoc} */ + @Override public void stop() { + srv.stop(); + } + + /** {@inheritDoc} */ + @Override public ClientConnection open(InetSocketAddress addr, + ClientMessageHandler msgHnd, + ClientConnectionStateHandler stateHnd) + throws ClientConnectionException { + try { + SocketChannel ch = SocketChannel.open(); + ch.socket().connect(new InetSocketAddress(addr.getHostName(), addr.getPort()), Integer.MAX_VALUE); + + Map meta = new HashMap<>(); + GridNioFuture sslHandshakeFut = null; + + if (sslCtx != null) { + sslHandshakeFut = new GridNioFutureImpl<>(null); + + meta.put(GridNioSslFilter.HANDSHAKE_FUT_META_KEY, sslHandshakeFut); + } + + GridNioSession ses = srv.createSession(ch, meta, false, null).get(); + + 
if (sslHandshakeFut != null) + sslHandshakeFut.get(); + + return new GridNioClientConnection(ses, msgHnd, stateHnd); + } + catch (Exception e) { + throw new ClientConnectionException(e.getMessage(), e); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientListener.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientListener.java new file mode 100644 index 0000000000000..f33835d909754 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientListener.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.client.thin.io.gridnioserver; + +import java.nio.ByteBuffer; + +import org.apache.ignite.failure.FailureType; +import org.apache.ignite.internal.util.nio.GridNioServerListener; +import org.apache.ignite.internal.util.nio.GridNioSession; +import org.jetbrains.annotations.Nullable; + +/** + * Client event listener. + */ +class GridNioClientListener implements GridNioServerListener { + /** {@inheritDoc} */ + @Override public void onConnected(GridNioSession ses) { + // No-op. 
+ } + + /** {@inheritDoc} */ + @Override public void onDisconnected(GridNioSession ses, @Nullable Exception e) { + GridNioClientConnection conn = ses.meta(GridNioClientConnection.SES_META_CONN); + + // Conn can be null when connection fails during initialization in open method. + if (conn != null) + conn.onDisconnected(e); + } + + /** {@inheritDoc} */ + @Override public void onMessageSent(GridNioSession ses, ByteBuffer msg) { + // No-op. + } + + /** {@inheritDoc} */ + @Override public void onMessage(GridNioSession ses, ByteBuffer msg) { + GridNioClientConnection conn = ses.meta(GridNioClientConnection.SES_META_CONN); + + assert conn != null : "Session must have an associated connection"; + + conn.onMessage(msg); + } + + /** {@inheritDoc} */ + @Override public void onSessionWriteTimeout(GridNioSession ses) { + // No-op. + } + + /** {@inheritDoc} */ + @Override public void onSessionIdleTimeout(GridNioSession ses) { + // No-op. + } + + /** {@inheritDoc} */ + @Override public void onFailure(FailureType failureType, Throwable failure) { + // No-op. + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientParser.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientParser.java new file mode 100644 index 0000000000000..439c78a72c306 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/io/gridnioserver/GridNioClientParser.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.client.thin.io.gridnioserver; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; + +import org.apache.ignite.internal.client.thin.io.ClientMessageDecoder; +import org.apache.ignite.internal.util.nio.GridNioParser; +import org.apache.ignite.internal.util.nio.GridNioSession; +import org.apache.ignite.internal.util.nio.GridNioSessionMetaKey; +import org.jetbrains.annotations.Nullable; + +/** + * Client message parser. + */ +class GridNioClientParser implements GridNioParser { + /** */ + private static final int SES_META_DECODER = GridNioSessionMetaKey.nextUniqueKey(); + + /** {@inheritDoc} */ + @Override public @Nullable Object decode(GridNioSession ses, ByteBuffer buf) { + ClientMessageDecoder decoder = ses.meta(SES_META_DECODER); + + if (decoder == null) { + decoder = new ClientMessageDecoder(); + + ses.addMeta(SES_META_DECODER, decoder); + } + + byte[] bytes = decoder.apply(buf); + + if (bytes == null) + return null; // Message is not yet completely received. + + // Thin client protocol is little-endian. ByteBuffer will handle conversion as necessary on big-endian systems. 
+ return ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN); + } + + /** {@inheritDoc} */ + @Override public ByteBuffer encode(GridNioSession ses, Object msg) { + return (ByteBuffer)msg; + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/client/ConnectToStartingNodeTest.java b/modules/core/src/test/java/org/apache/ignite/client/ConnectToStartingNodeTest.java index bbb2c87ceafea..2d75d5c5266f1 100644 --- a/modules/core/src/test/java/org/apache/ignite/client/ConnectToStartingNodeTest.java +++ b/modules/core/src/test/java/org/apache/ignite/client/ConnectToStartingNodeTest.java @@ -71,14 +71,20 @@ public void testClientConnectBeforeDiscoveryStart() throws Exception { IgniteInternalFuture futStartClient = GridTestUtils.runAsync( () -> startClient(grid())); - // Server doesn't accept connection before discovery SPI started. - assertFalse(GridTestUtils.waitForCondition(futStartClient::isDone, 500L)); + try { + // Server doesn't accept connection before discovery SPI started. + assertFalse(GridTestUtils.waitForCondition(futStartClient::isDone, 500L)); - barrier.await(); + barrier.await(); - futStartGrid.get(); + futStartGrid.get(); - // Server accept connection after discovery SPI started. - assertTrue(GridTestUtils.waitForCondition(futStartClient::isDone, 500L)); + // Server accept connection after discovery SPI started. 
+ assertTrue(GridTestUtils.waitForCondition(futStartClient::isDone, 500L)); + } + finally { + if (futStartClient.isDone()) + futStartClient.get().close(); + } } } diff --git a/modules/core/src/test/java/org/apache/ignite/client/SslParametersTest.java b/modules/core/src/test/java/org/apache/ignite/client/SslParametersTest.java index 0f0791b8cb826..c6def06f3f559 100644 --- a/modules/core/src/test/java/org/apache/ignite/client/SslParametersTest.java +++ b/modules/core/src/test/java/org/apache/ignite/client/SslParametersTest.java @@ -288,7 +288,7 @@ private void checkClientStartFailure(String[] cipherSuites, String[] protocols) cipherSuites, protocols, ClientConnectionException.class, - "Ignite cluster is unavailable" + "SSL handshake failed" ); } @@ -307,7 +307,7 @@ private void checkClientStartFailure( this.cipherSuites = F.isEmpty(cipherSuites) ? null : cipherSuites; this.protocols = F.isEmpty(protocols) ? null : protocols; - GridTestUtils.assertThrows( + GridTestUtils.assertThrowsAnyCause( null, new Callable() { @Override public Object call() { diff --git a/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ReliableChannelTest.java b/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ReliableChannelTest.java index 61adf6684e5a1..686a193f7cd52 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ReliableChannelTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ReliableChannelTest.java @@ -26,6 +26,7 @@ import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; @@ -35,6 +36,7 @@ import org.apache.ignite.client.ClientConnectionException; import org.apache.ignite.client.ClientException; import org.apache.ignite.configuration.ClientConfiguration; +import 
org.apache.ignite.internal.client.thin.io.ClientConnectionMultiplexer; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.testframework.GridTestUtils; @@ -51,7 +53,8 @@ */ public class ReliableChannelTest { /** Mock factory for creating new channels. */ - private final Function chFactory = cfg -> new TestClientChannel(); + private final BiFunction chFactory = + (cfg, hnd) -> new TestClientChannel(); /** */ private final String[] dfltAddrs = new String[]{"127.0.0.1:10800", "127.0.0.1:10801", "127.0.0.1:10802"}; @@ -259,7 +262,7 @@ public void testFailOnInitIfDefaultChannelFailed() { .setAddresses(dfltAddrs) .setPartitionAwarenessEnabled(true); - ReliableChannel rc = new ReliableChannel(cfg -> new TestFailureClientChannel(), ccfg, null); + ReliableChannel rc = new ReliableChannel((cfg, hnd) -> new TestFailureClientChannel(), ccfg, null); rc.channelsInit(); } @@ -302,7 +305,7 @@ private void checkFailAfterSendOperation(Consumer op, boolean ch // Emulate cluster is down after TcpClientChannel#send operation. 
AtomicInteger step = new AtomicInteger(); - ReliableChannel rc = new ReliableChannel(cfg -> { + ReliableChannel rc = new ReliableChannel((cfg, hnd) -> { if (step.getAndIncrement() == 0) return new TestAsyncServiceFailureClientChannel(); else diff --git a/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ThinClientAbstractPartitionAwarenessTest.java b/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ThinClientAbstractPartitionAwarenessTest.java index dd716d616afeb..7eda71fd4d597 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ThinClientAbstractPartitionAwarenessTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ThinClientAbstractPartitionAwarenessTest.java @@ -36,6 +36,7 @@ import org.apache.ignite.configuration.ClientConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.IgniteInterruptedCheckedException; +import org.apache.ignite.internal.client.thin.io.ClientConnectionMultiplexer; import org.apache.ignite.internal.processors.cache.IgniteInternalCache; import org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.internal.U; @@ -185,11 +186,11 @@ protected ClientConfiguration getClientConfiguration(int... nodeIdxs) { * @param chIdxs Channels to wait for initialization. */ protected void initClient(ClientConfiguration clientCfg, int... chIdxs) throws IgniteInterruptedCheckedException { - client = new TcpIgniteClient(cfg -> { + client = new TcpIgniteClient((cfg, hnd) -> { try { log.info("Establishing connection to " + cfg.getAddress()); - TcpClientChannel ch = new TestTcpClientChannel(cfg); + TcpClientChannel ch = new TestTcpClientChannel(cfg, hnd); log.info("Channel initialized: " + ch); @@ -323,8 +324,8 @@ protected class TestTcpClientChannel extends TcpClientChannel { /** * @param cfg Config. 
*/ - public TestTcpClientChannel(ClientChannelConfiguration cfg) { - super(cfg); + public TestTcpClientChannel(ClientChannelConfiguration cfg, ClientConnectionMultiplexer hnd) { + super(cfg, hnd); this.cfg = cfg; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ThinClientPartitionAwarenessResourceReleaseTest.java b/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ThinClientPartitionAwarenessResourceReleaseTest.java index 7dc6222003470..2909c4e9d60cf 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ThinClientPartitionAwarenessResourceReleaseTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/client/thin/ThinClientPartitionAwarenessResourceReleaseTest.java @@ -23,13 +23,13 @@ import org.apache.ignite.testframework.GridTestUtils; import org.junit.Test; -import static org.apache.ignite.internal.client.thin.ReliableChannel.ASYNC_RUNNER_THREAD_NAME; -import static org.apache.ignite.internal.client.thin.TcpClientChannel.RECEIVER_THREAD_PREFIX; - /** * Test resource releasing by thin client. */ public class ThinClientPartitionAwarenessResourceReleaseTest extends ThinClientAbstractPartitionAwarenessTest { + /** Worker thread prefix. */ + private static final String THREAD_PREFIX = "thin-client-channel"; + /** * Test that resources are correctly released after closing client with partition awareness. 
*/ @@ -46,15 +46,13 @@ public void testResourcesReleasedAfterClientClosed() throws Exception { assertFalse(channels[0].isClosed()); assertFalse(channels[1].isClosed()); - assertEquals(1, threadsCount(ASYNC_RUNNER_THREAD_NAME)); - assertEquals(2, threadsCount(RECEIVER_THREAD_PREFIX)); + assertEquals(1, threadsCount(THREAD_PREFIX)); client.close(); assertTrue(channels[0].isClosed()); assertTrue(channels[1].isClosed()); - assertTrue(GridTestUtils.waitForCondition(() -> threadsCount(ASYNC_RUNNER_THREAD_NAME) == 0, 1_000L)); - assertTrue(GridTestUtils.waitForCondition(() -> threadsCount(RECEIVER_THREAD_PREFIX) == 0, 1_000L)); + assertTrue(GridTestUtils.waitForCondition(() -> threadsCount(THREAD_PREFIX) == 0, 1_000L)); } /** @@ -68,7 +66,7 @@ private static int threadsCount(String name) { for (long id : threadIds) { ThreadInfo info = U.getThreadMx().getThreadInfo(id); - if (info != null && info.getThreadState() != Thread.State.TERMINATED && info.getThreadName().startsWith(name)) + if (info != null && info.getThreadState() != Thread.State.TERMINATED && info.getThreadName().contains(name)) cnt++; } From 77ffffc2e9f92eab3b77fdce85a9ea50579b7122 Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Wed, 2 Dec 2020 19:38:30 +0300 Subject: [PATCH 084/110] IGNITE-13793: Implement SQLRowCount for SELECT This closes #8525 --- .../cpp/odbc-test/src/queries_test.cpp | 38 ++++++++++++++++++- modules/platforms/cpp/odbc/src/cursor.cpp | 7 +++- .../cpp/odbc/src/query/data_query.cpp | 6 ++- 3 files changed, 46 insertions(+), 5 deletions(-) diff --git a/modules/platforms/cpp/odbc-test/src/queries_test.cpp b/modules/platforms/cpp/odbc-test/src/queries_test.cpp index 6cded84775148..60333ff32c515 100644 --- a/modules/platforms/cpp/odbc-test/src/queries_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/queries_test.cpp @@ -1629,7 +1629,7 @@ BOOST_AUTO_TEST_CASE(TestErrorMessage) BOOST_AUTO_TEST_CASE(TestAffectedRows) { - Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + 
Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache;PAGE_SIZE=1024"); const int recordsNum = 100; @@ -1670,7 +1670,41 @@ BOOST_AUTO_TEST_CASE(TestAffectedRows) if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); - BOOST_CHECK_EQUAL(affected, 0); + BOOST_CHECK_EQUAL(affected, 1024); +} + +BOOST_AUTO_TEST_CASE(TestAffectedRowsOnSelect) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache;PAGE_SIZE=123"); + + const int recordsNum = 1000; + + // Inserting values. + InsertTestStrings(recordsNum); + + // Just selecting everything to make sure everything is OK + SQLCHAR selectReq[] = "SELECT _key, strField FROM TestType ORDER BY _key"; + + SQLRETURN ret = SQLExecDirect(stmt, selectReq, sizeof(selectReq)); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + for (int i = 0; i < 200; ++i) + { + SQLLEN affected = -1; + ret = SQLRowCount(stmt, &affected); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(affected, 123); + + ret = SQLFetch(stmt); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + } } BOOST_AUTO_TEST_CASE(TestMultipleSelects) diff --git a/modules/platforms/cpp/odbc/src/cursor.cpp b/modules/platforms/cpp/odbc/src/cursor.cpp index b41f5b1f3c895..cee18d80e3c50 100644 --- a/modules/platforms/cpp/odbc/src/cursor.cpp +++ b/modules/platforms/cpp/odbc/src/cursor.cpp @@ -21,8 +21,11 @@ namespace ignite { namespace odbc { - Cursor::Cursor(int64_t queryId) : queryId(queryId), currentPage(), - currentPagePos(0), currentRow() + Cursor::Cursor(int64_t queryId) : + queryId(queryId), + currentPage(), + currentPagePos(0), + currentRow() { // No-op. 
} diff --git a/modules/platforms/cpp/odbc/src/query/data_query.cpp b/modules/platforms/cpp/odbc/src/query/data_query.cpp index a93e5a379c180..54723a1093ba9 100644 --- a/modules/platforms/cpp/odbc/src/query/data_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/data_query.cpp @@ -189,7 +189,11 @@ namespace ignite int64_t DataQuery::AffectedRows() const { int64_t affected = rowsAffectedIdx < rowsAffected.size() ? rowsAffected[rowsAffectedIdx] : 0; - return affected < 0 ? 0 : affected; + + if (affected >= 0) + return affected; + + return connection.GetConfiguration().GetPageSize(); } SqlResult::Type DataQuery::NextResultSet() From b4e46f3cbd482afc830c8ab06e6b15d6e23481a7 Mon Sep 17 00:00:00 2001 From: Alexey Zinoviev Date: Wed, 2 Dec 2020 19:58:52 +0300 Subject: [PATCH 085/110] [IGNITE-13803] Fixed Scalar test failed due to incorrect Jackson dependency (#8529) * [IGNITE-13803] Changed dependency * [IGNITE-13803] Exclude dependency --- examples/pom.xml | 6 ++++++ modules/ml/pom.xml | 22 +--------------------- 2 files changed, 7 insertions(+), 21 deletions(-) diff --git a/examples/pom.xml b/examples/pom.xml index 08fe50ae27ebb..25a5b87852a8b 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -106,6 +106,12 @@ org.apache.ignite ignite-ml ${project.version} + + + com.fasterxml.jackson.core + * + + diff --git a/modules/ml/pom.xml b/modules/ml/pom.xml index 37d9c107a0b93..ad9f8dcf0945e 100644 --- a/modules/ml/pom.xml +++ b/modules/ml/pom.xml @@ -160,30 +160,10 @@ slf4j-api 1.7.7 - - javax.xml.bind - jaxb-api - 2.3.0 - - - com.sun.xml.bind - jaxb-core - 2.3.0 - - - com.sun.xml.bind - jaxb-impl - 2.3.0 - - - javax.activation - activation - 1.1.1 - com.fasterxml.jackson.core jackson-databind - 2.10.3 + ${jackson.version} From 09d5c73c467acf13408d22ab4198bff6c2c7d229 Mon Sep 17 00:00:00 2001 From: ibessonov Date: Thu, 3 Dec 2020 10:34:58 +0300 Subject: [PATCH 086/110] IGNITE-13190 Native Persistence Defragmentation core functionality - Fixes #7984. 
Signed-off-by: Sergey Chugunov --- .../apache/ignite/IgniteSystemProperties.java | 14 + .../maintenance/MaintenanceProcessor.java | 2 +- .../cache/IgniteCacheOffheapManagerImpl.java | 2 +- .../GridCacheDatabaseSharedManager.java | 199 ++++- .../persistence/GridCacheOffheapManager.java | 17 +- .../IgniteCacheDatabaseSharedManager.java | 2 +- .../LightweightCheckpointManager.java | 6 +- .../CachePartitionDefragmentationManager.java | 827 ++++++++++++++++++ .../DefragmentationFileUtils.java | 401 +++++++++ .../DefragmentationPageReadWriteManager.java | 37 + .../persistence/defragmentation/LinkMap.java | 276 ++++++ .../defragmentation/PageStoreMap.java | 106 +++ .../defragmentation/TreeIterator.java | 109 +++ .../DefragmentationParameters.java | 78 ++ .../DefragmentationWorkflowCallback.java | 66 ++ .../ExecuteDefragmentationAction.java | 74 ++ .../file/FilePageStoreManager.java | 9 + .../cache/persistence/tree/io/PageIO.java | 13 + .../processors/query/GridQueryIndexing.java | 24 + .../internal/util/collection/IntHashMap.java | 26 + .../internal/util/collection/IntMap.java | 6 + .../util/collection/IntRWHashMap.java | 22 + .../util/tostring/GridToStringBuilder.java | 46 + .../maintenance/MaintenanceRegistry.java | 19 + ...gnitePdsDefragmentationEncryptionTest.java | 43 + ...sDefragmentationRandomLruEvictionTest.java | 35 + .../IgnitePdsDefragmentationTest.java | 541 ++++++++++++ .../checkpoint/LightweightCheckpointTest.java | 4 +- .../defragmentation/LinkMapTest.java | 83 ++ .../processors/query/DummyQueryIndexing.java | 16 + .../testsuites/IgniteBasicTestSuite.java | 3 + .../testsuites/IgnitePdsMvccTestSuite4.java | 8 + .../testsuites/IgnitePdsTestSuite4.java | 8 + .../processors/query/h2/IgniteH2Indexing.java | 19 + .../IndexingDefragmentation.java | 430 +++++++++ .../IgniteCacheUpdateSqlQuerySelfTest.java | 4 +- .../IgnitePdsIndexingDefragmentationTest.java | 316 +++++++ .../IgnitePdsWithIndexingTestSuite.java | 4 +- 38 files changed, 3877 insertions(+), 18 
deletions(-) create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/CachePartitionDefragmentationManager.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/DefragmentationFileUtils.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/DefragmentationPageReadWriteManager.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/LinkMap.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/PageStoreMap.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/TreeIterator.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/maintenance/DefragmentationParameters.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/maintenance/DefragmentationWorkflowCallback.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/maintenance/ExecuteDefragmentationAction.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationEncryptionTest.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationRandomLruEvictionTest.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationTest.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/LinkMapTest.java create mode 100644 
modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/defragmentation/IndexingDefragmentation.java create mode 100644 modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsIndexingDefragmentationTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java b/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java index 148e86dbe4b96..aa12e541b06e2 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java @@ -81,6 +81,7 @@ import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition.DFLT_ATOMIC_CACHE_DELETE_HISTORY_SIZE; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition.DFLT_CACHE_REMOVE_ENTRIES_TTL; import static org.apache.ignite.internal.processors.cache.mvcc.MvccCachingManager.DFLT_MVCC_TX_SIZE_CACHING_THRESHOLD; +import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.DFLT_DEFRAGMENTATION_REGION_SIZE_PERCENTAGE; import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.DFLT_PDS_WAL_REBALANCE_THRESHOLD; import static org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointHistory.DFLT_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE; import static org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointWorkflow.DFLT_CHECKPOINT_PARALLEL_SORT_THRESHOLD; @@ -1948,6 +1949,19 @@ public final class IgniteSystemProperties { type = Boolean.class) public static final String IGNITE_TEST_ENV = "IGNITE_TEST_ENV"; + /** + * Defragmentation region size percentage of configured region size. + * This percentage will be calculated from largest configured region size and then proportionally subtracted + * from all configured regions. 
+ */ + @SystemProperty(value = "Defragmentation region size percentage of configured region size. " + + "This percentage will be calculated from largest configured region size and then proportionally subtracted " + + "from all configured regions", + type = Integer.class, + defaults = "" + DFLT_DEFRAGMENTATION_REGION_SIZE_PERCENTAGE) + public static final String IGNITE_DEFRAGMENTATION_REGION_SIZE_PERCENTAGE = + "IGNITE_DEFRAGMENTATION_REGION_SIZE_PERCENTAGE"; + /** * Enforces singleton. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceProcessor.java index 347b328935ae0..063bd475562ec 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/maintenance/MaintenanceProcessor.java @@ -190,7 +190,7 @@ else if (isMaintenanceMode()) { */ private void proceedWithMaintenance() { for (Map.Entry cbE : workflowCallbacks.entrySet()) { - MaintenanceAction mntcAct = cbE.getValue().automaticAction(); + MaintenanceAction mntcAct = cbE.getValue().automaticAction(); if (mntcAct != null) { try { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java index 024287f49f447..773297f23a26e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java @@ -1596,7 +1596,7 @@ void decrementSize(int cacheId) { return grp.mvccEnabled() ? 
dataTree.isEmpty() : storageSize.get() == 0; } catch (IgniteCheckedException e) { - U.error(log, "Failed to perform operation.", e); + U.error(grp.shared().logger(IgniteCacheOffheapManagerImpl.class), "Failed to perform operation.", e); return false; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java index 8f6f6835342ff..2c366ebba3f6c 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java @@ -117,7 +117,11 @@ import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointProgress; import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointStatus; import org.apache.ignite.internal.processors.cache.persistence.checkpoint.Checkpointer; +import org.apache.ignite.internal.processors.cache.persistence.checkpoint.LightweightCheckpointManager; import org.apache.ignite.internal.processors.cache.persistence.checkpoint.ReservationReason; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.CachePartitionDefragmentationManager; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationPageReadWriteManager; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.maintenance.DefragmentationWorkflowCallback; import org.apache.ignite.internal.processors.cache.persistence.file.FileIO; import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory; import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStore; @@ -145,6 +149,7 @@ import org.apache.ignite.internal.util.typedef.F; import 
org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.SB; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiPredicate; @@ -162,6 +167,7 @@ import static java.util.Objects.nonNull; import static java.util.function.Function.identity; +import static org.apache.ignite.IgniteSystemProperties.IGNITE_DEFRAGMENTATION_REGION_SIZE_PERCENTAGE; import static org.apache.ignite.IgniteSystemProperties.IGNITE_PDS_WAL_REBALANCE_THRESHOLD; import static org.apache.ignite.IgniteSystemProperties.IGNITE_PREFER_WAL_REBALANCE; import static org.apache.ignite.IgniteSystemProperties.IGNITE_RECOVERY_SEMAPHORE_PERMITS; @@ -178,7 +184,10 @@ import static org.apache.ignite.internal.processors.cache.persistence.CheckpointState.FINISHED; import static org.apache.ignite.internal.processors.cache.persistence.CheckpointState.LOCK_RELEASED; import static org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointReadWriteLock.CHECKPOINT_LOCK_HOLD_COUNT; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.CachePartitionDefragmentationManager.DEFRAGMENTATION_MNTC_TASK_NAME; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.maintenance.DefragmentationParameters.fromStore; import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.CORRUPTED_DATA_FILES_MNTC_TASK_NAME; +import static org.apache.ignite.internal.util.IgniteUtils.GB; import static org.apache.ignite.internal.util.IgniteUtils.checkpointBufferSize; /** @@ -207,6 +216,12 @@ public class GridCacheDatabaseSharedManager extends IgniteCacheDatabaseSharedMan /** Description of the system view for a {@link MetaStorage}. 
*/ public static final String METASTORE_VIEW_DESC = "Local metastorage data"; + /** */ + public static final String DEFRAGMENTATION_PART_REGION_NAME = "defragPartitionsDataRegion"; + + /** */ + public static final String DEFRAGMENTATION_MAPPING_REGION_NAME = "defragMappingDataRegion"; + /** * Threshold to calculate limit for pages list on-heap caches. *

    @@ -223,6 +238,9 @@ public class GridCacheDatabaseSharedManager extends IgniteCacheDatabaseSharedMan /** @see IgniteSystemProperties#IGNITE_PDS_WAL_REBALANCE_THRESHOLD */ public static final int DFLT_PDS_WAL_REBALANCE_THRESHOLD = 500; + /** @see IgniteSystemProperties#IGNITE_DEFRAGMENTATION_REGION_SIZE_PERCENTAGE */ + public static final int DFLT_DEFRAGMENTATION_REGION_SIZE_PERCENTAGE = 60; + /** */ private final int walRebalanceThreshold = getInteger(IGNITE_PDS_WAL_REBALANCE_THRESHOLD, DFLT_PDS_WAL_REBALANCE_THRESHOLD); @@ -234,6 +252,10 @@ public class GridCacheDatabaseSharedManager extends IgniteCacheDatabaseSharedMan private final String throttlingPolicyOverride = IgniteSystemProperties.getString( IgniteSystemProperties.IGNITE_OVERRIDE_WRITE_THROTTLING_ENABLED); + /** Defragmentation regions size percentage of configured ones. */ + private final int defragmentationRegionSizePercentageOfConfiguredSize = + getInteger(IGNITE_DEFRAGMENTATION_REGION_SIZE_PERCENTAGE, DFLT_DEFRAGMENTATION_REGION_SIZE_PERCENTAGE); + /** */ private static final String MBEAN_NAME = "DataStorageMetrics"; @@ -318,6 +340,9 @@ public class GridCacheDatabaseSharedManager extends IgniteCacheDatabaseSharedMan /** Lock for releasing history for preloading. */ private ReentrantLock releaseHistForPreloadingLock = new ReentrantLock(); + /** */ + private CachePartitionDefragmentationManager defrgMgr; + /** Data regions which should be checkpointed. 
*/ protected final Set checkpointedDataRegions = new GridConcurrentHashSet<>(); @@ -447,6 +472,32 @@ private DataRegionConfiguration createMetastoreDataRegionConfig(DataStorageConfi return cfg; } + /** */ + private DataRegionConfiguration createDefragmentationDataRegionConfig(long regionSize) { + DataRegionConfiguration cfg = new DataRegionConfiguration(); + + cfg.setName(DEFRAGMENTATION_PART_REGION_NAME); + cfg.setInitialSize(regionSize); + cfg.setMaxSize(regionSize); + cfg.setPersistenceEnabled(true); + cfg.setLazyMemoryAllocation(false); + + return cfg; + } + + /** */ + private DataRegionConfiguration createDefragmentationMappingRegionConfig(long regionSize) { + DataRegionConfiguration cfg = new DataRegionConfiguration(); + + cfg.setName(DEFRAGMENTATION_MAPPING_REGION_NAME); + cfg.setInitialSize(regionSize); + cfg.setMaxSize(regionSize); + cfg.setPersistenceEnabled(true); + cfg.setLazyMemoryAllocation(false); + + return cfg; + } + /** {@inheritDoc} */ @Override protected void start0() throws IgniteCheckedException { super.start0(); @@ -497,6 +548,99 @@ private DataRegionConfiguration createMetastoreDataRegionConfig(DataStorageConfi } } + /** {@inheritDoc} */ + @Override protected void initDataRegions(DataStorageConfiguration memCfg) throws IgniteCheckedException { + if (isDefragmentationScheduled() && !dataRegionsInitialized) { + //Region size configuration will be changed for defragmentation needs. + memCfg = configureDataRegionForDefragmentation(memCfg); + } + + super.initDataRegions(memCfg); + } + + /** + * Configure data regions: + *

    Size of configured cache data regions will be decreased in order of freeing space for

    + *

    defragmentation needs. * New defragmentation regions will be created which size would be based on freed space + * from previous step.

    + * + * @param memCfg Data storage configuration with data region configurations. + * @return New data storage configuration which contains data regions with changed size. + * @throws IgniteCheckedException If fail. + */ + private DataStorageConfiguration configureDataRegionForDefragmentation( + DataStorageConfiguration memCfg + ) throws IgniteCheckedException { + List regionConfs = new ArrayList<>(); + + DataStorageConfiguration dataConf = memCfg;//not do the changes in-place it's better to make the copy of memCfg. + + regionConfs.add(dataConf.getDefaultDataRegionConfiguration()); + + if (dataConf.getDataRegionConfigurations() != null) + regionConfs.addAll(Arrays.asList(dataConf.getDataRegionConfigurations())); + + long totalDefrRegionSize = 0; + long totalRegionsSize = 0; + + for (DataRegionConfiguration regionCfg : regionConfs) { + totalDefrRegionSize = Math.max( + totalDefrRegionSize, + (long)(regionCfg.getMaxSize() * 0.01 * defragmentationRegionSizePercentageOfConfiguredSize) + ); + + totalRegionsSize += regionCfg.getMaxSize(); + } + + double shrinkPercentage = 1d * (totalRegionsSize - totalDefrRegionSize) / totalRegionsSize; + + for (DataRegionConfiguration region : regionConfs) { + long newSize = (long)(region.getMaxSize() * shrinkPercentage); + long newInitSize = Math.min(region.getInitialSize(), newSize); + + log.info("Region size was reassigned by defragmentation reason: " + + "region = '" + region.getName() + "', " + + "oldInitialSize = '" + region.getInitialSize() + "', " + + "newInitialSize = '" + newInitSize + "', " + + "oldMaxSize = '" + region.getMaxSize() + "', " + + "newMaxSize = '" + newSize + ); + + region.setMaxSize(newSize); + region.setInitialSize(newInitSize); + region.setCheckpointPageBufferSize(0); + } + + long mappingRegionSize = Math.min(GB, (long)(totalDefrRegionSize * 0.1)); + + checkpointedDataRegions.remove( + addDataRegion( + memCfg, + createDefragmentationDataRegionConfig(totalDefrRegionSize - mappingRegionSize), + true, + new 
DefragmentationPageReadWriteManager(cctx.kernalContext(), "defrgPartitionsStore") + ) + ); + + checkpointedDataRegions.remove( + addDataRegion( + memCfg, + createDefragmentationMappingRegionConfig(mappingRegionSize), + true, + new DefragmentationPageReadWriteManager(cctx.kernalContext(), "defrgLinkMappingStore") + ) + ); + + return dataConf; + } + + /** + * @return {@code true} if maintenance mode is on and defragmentation task exists. + */ + private boolean isDefragmentationScheduled() { + return cctx.kernalContext().maintenanceRegistry().activeMaintenanceTask(DEFRAGMENTATION_MNTC_TASK_NAME) != null; + } + /** */ public Collection checkpointedDataRegions() { return checkpointedDataRegions; @@ -603,10 +747,51 @@ private void releaseFileLock() { fileLockHolder.close(); } + /** */ + private void prepareCacheDefragmentation(List cacheGroupIds) throws IgniteCheckedException { + GridKernalContext kernalCtx = cctx.kernalContext(); + DataStorageConfiguration dsCfg = kernalCtx.config().getDataStorageConfiguration(); + + assert CU.isPersistenceEnabled(dsCfg); + + List regions = Arrays.asList( + dataRegion(DEFRAGMENTATION_MAPPING_REGION_NAME), + dataRegion(DEFRAGMENTATION_PART_REGION_NAME) + ); + + LightweightCheckpointManager lightCheckpointMgr = new LightweightCheckpointManager( + kernalCtx::log, + cctx.igniteInstanceName(), + "db-checkpoint-thread-defrag", + kernalCtx.workersRegistry(), + persistenceCfg, + () -> regions, + this::getPageMemoryForCacheGroup, + resolveThrottlingPolicy(), + snapshotMgr, + persistentStoreMetricsImpl(), + kernalCtx.longJvmPauseDetector(), + kernalCtx.failure(), + kernalCtx.cache() + ); + + lightCheckpointMgr.start(); + + defrgMgr = new CachePartitionDefragmentationManager( + cacheGroupIds, + cctx, + this, + (FilePageStoreManager)cctx.pageStore(), + checkpointManager, + lightCheckpointMgr, + persistenceCfg.getPageSize() + ); + } + /** {@inheritDoc} */ @Override public DataRegion addDataRegion(DataStorageConfiguration dataStorageCfg, 
DataRegionConfiguration dataRegionCfg, - boolean trackable) throws IgniteCheckedException { - DataRegion region = super.addDataRegion(dataStorageCfg, dataRegionCfg, trackable); + boolean trackable, PageReadWriteManager pmPageMgr) throws IgniteCheckedException { + DataRegion region = super.addDataRegion(dataStorageCfg, dataRegionCfg, trackable, pmPageMgr); checkpointedDataRegions.add(region); @@ -636,6 +821,16 @@ private void readMetastore() throws IgniteCheckedException { registerSystemView(); notifyMetastorageReadyForRead(); + + cctx.kernalContext().maintenanceRegistry() + .registerWorkflowCallbackIfTaskExists( + DEFRAGMENTATION_MNTC_TASK_NAME, + task -> { + prepareCacheDefragmentation(fromStore(task).cacheGroupIds()); + + return new DefragmentationWorkflowCallback(cctx.kernalContext()::log, defrgMgr); + } + ); } finally { metaStorage = null; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java index ad062a83d6a8e..50c3039d76edd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java @@ -247,7 +247,7 @@ public IndexStorage getIndexStorage() { boolean exists = ctx.pageStore() != null && ctx.pageStore().exists(grp.groupId(), p); - return new GridCacheDataStore(grp, p, exists, busyLock, log); + return createGridCacheDataStore(grp, p, exists, log); } /** {@inheritDoc} */ @@ -1357,8 +1357,19 @@ private void saveIndexReencryptionStatus(int grpId) throws IgniteCheckedExceptio } /** */ - public GridSpinBusyLock busyLock() { - return busyLock; + public GridCacheDataStore createGridCacheDataStore( + CacheGroupContext grpCtx, + int partId, + boolean exists, + IgniteLogger log + ) { + return new 
GridCacheDataStore( + grpCtx, + partId, + exists, + busyLock, + log + ); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java index bfadeb22d0863..346b842585c5b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java @@ -147,7 +147,7 @@ public class IgniteCacheDatabaseSharedManager extends GridCacheSharedManagerAdap protected final Map memMetricsMap = new ConcurrentHashMap<>(); /** */ - private volatile boolean dataRegionsInitialized; + protected volatile boolean dataRegionsInitialized; /** */ private volatile boolean dataRegionsStarted; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/LightweightCheckpointManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/LightweightCheckpointManager.java index 9e7b3dccf283c..73bec40ec0b08 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/LightweightCheckpointManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/LightweightCheckpointManager.java @@ -34,7 +34,6 @@ import org.apache.ignite.internal.processors.cache.GridCacheProcessor; import org.apache.ignite.internal.processors.cache.persistence.DataRegion; import org.apache.ignite.internal.processors.cache.persistence.DataStorageMetricsImpl; -import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; import 
org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryImpl; import org.apache.ignite.internal.processors.cache.persistence.snapshot.IgniteCacheSnapshotManager; @@ -106,8 +105,7 @@ public LightweightCheckpointManager( DataStorageMetricsImpl persStoreMetrics, LongJVMPauseDetector longJvmPauseDetector, FailureProcessor failureProcessor, - GridCacheProcessor cacheProcessor, - FilePageStoreManager pageStoreManager + GridCacheProcessor cacheProcessor ) throws IgniteCheckedException { CheckpointReadWriteLock lock = new CheckpointReadWriteLock(logger); @@ -139,7 +137,7 @@ public LightweightCheckpointManager( logger, snapshotMgr, (pageMemEx, fullPage, buf, tag) -> - pageStoreManager.write(fullPage.groupId(), fullPage.pageId(), buf, tag, true), + pageMemEx.pageManager().write(fullPage.groupId(), fullPage.pageId(), buf, tag, true), persStoreMetrics, throttlingPolicy, threadBuf, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/CachePartitionDefragmentationManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/CachePartitionDefragmentationManager.java new file mode 100644 index 0000000000000..006fa8e90cb1e --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/CachePartitionDefragmentationManager.java @@ -0,0 +1,827 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation; + +import java.io.File; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.LongConsumer; +import java.util.stream.Collectors; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.configuration.DataPageEvictionMode; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.metric.IoStatisticsHolderNoOp; +import org.apache.ignite.internal.pagemem.PageIdAllocator; +import org.apache.ignite.internal.pagemem.PageIdUtils; +import org.apache.ignite.internal.pagemem.store.PageStore; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.cache.CacheType; +import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; +import org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManager.CacheDataStore; +import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; +import org.apache.ignite.internal.processors.cache.persistence.CheckpointState; +import org.apache.ignite.internal.processors.cache.persistence.DataRegion; +import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; +import 
org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager; +import org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager.GridCacheDataStore; +import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointManager; +import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointTimeoutLock; +import org.apache.ignite.internal.processors.cache.persistence.checkpoint.LightweightCheckpointManager; +import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreFactory; +import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; +import org.apache.ignite.internal.processors.cache.persistence.freelist.AbstractFreeList; +import org.apache.ignite.internal.processors.cache.persistence.freelist.SimpleDataRow; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PagePartitionMetaIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PagePartitionMetaIOV3; +import org.apache.ignite.internal.processors.cache.tree.AbstractDataLeafIO; +import org.apache.ignite.internal.processors.cache.tree.CacheDataTree; +import org.apache.ignite.internal.processors.cache.tree.DataRow; +import org.apache.ignite.internal.processors.cache.tree.PendingEntriesTree; +import org.apache.ignite.internal.processors.cache.tree.PendingRow; +import org.apache.ignite.internal.processors.query.GridQueryIndexing; +import org.apache.ignite.internal.processors.query.GridQueryProcessor; +import org.apache.ignite.internal.util.collection.IntHashMap; +import org.apache.ignite.internal.util.collection.IntMap; +import org.apache.ignite.internal.util.future.GridCompoundFuture; +import org.apache.ignite.internal.util.future.GridFinishedFuture; +import 
org.apache.ignite.internal.util.future.GridFutureAdapter; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteInClosure; +import org.apache.ignite.lang.IgniteOutClosure; +import org.apache.ignite.maintenance.MaintenanceRegistry; + +import static java.util.stream.StreamSupport.stream; +import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_DATA; +import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_IDX; +import static org.apache.ignite.internal.processors.cache.persistence.CheckpointState.FINISHED; +import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.DEFRAGMENTATION_MAPPING_REGION_NAME; +import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.DEFRAGMENTATION_PART_REGION_NAME; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.batchRenameDefragmentedCacheGroupPartitions; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentedIndexTmpFile; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentedPartFile; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentedPartMappingFile; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentedPartTmpFile; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.renameTempIndexFile; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.renameTempPartitionFile; +import static 
org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.skipAlreadyDefragmentedCacheGroup; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.skipAlreadyDefragmentedPartition; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.writeDefragmentationCompletionMarker; + +/** + * Defragmentation manager is the core class that contains main defragmentation procedure. + */ +public class CachePartitionDefragmentationManager { + /** */ + public static final String DEFRAGMENTATION_MNTC_TASK_NAME = "defragmentationMaintenanceTask"; + + /** */ + private final Set cacheGroupsForDefragmentation; + + /** Cache shared context. */ + private final GridCacheSharedContext sharedCtx; + + /** Maintenance registry. */ + private final MaintenanceRegistry mntcReg; + + /** Logger. */ + private final IgniteLogger log; + + /** Database shared manager. */ + private final GridCacheDatabaseSharedManager dbMgr; + + /** File page store manager. */ + private final FilePageStoreManager filePageStoreMgr; + + /** + * Checkpoint for specific defragmentation regions which would store the data to new partitions + * during the defragmentation. + */ + private final LightweightCheckpointManager defragmentationCheckpoint; + + /** Default checkpoint for current node. */ + private final CheckpointManager nodeCheckpoint; + + /** Page size. */ + private final int pageSize; + + /** */ + private final DataRegion partDataRegion; + + /** */ + private final DataRegion mappingDataRegion; + + /** + * @param cacheGrpIds + * @param sharedCtx Cache shared context. + * @param dbMgr Database manager. + * @param filePageStoreMgr File page store manager. + * @param nodeCheckpoint Default checkpoint for this node. + * @param defragmentationCheckpoint Specific checkpoint for defragmentation. + * @param pageSize Page size. 
+ */ + public CachePartitionDefragmentationManager( + List cacheGrpIds, + GridCacheSharedContext sharedCtx, + GridCacheDatabaseSharedManager dbMgr, + FilePageStoreManager filePageStoreMgr, + CheckpointManager nodeCheckpoint, + LightweightCheckpointManager defragmentationCheckpoint, + int pageSize + ) throws IgniteCheckedException { + cacheGroupsForDefragmentation = new HashSet<>(cacheGrpIds); + + this.dbMgr = dbMgr; + this.filePageStoreMgr = filePageStoreMgr; + this.pageSize = pageSize; + this.sharedCtx = sharedCtx; + + this.mntcReg = sharedCtx.kernalContext().maintenanceRegistry(); + this.log = sharedCtx.logger(getClass()); + this.defragmentationCheckpoint = defragmentationCheckpoint; + this.nodeCheckpoint = nodeCheckpoint; + + partDataRegion = dbMgr.dataRegion(DEFRAGMENTATION_PART_REGION_NAME); + mappingDataRegion = dbMgr.dataRegion(DEFRAGMENTATION_MAPPING_REGION_NAME); + } + + /** */ + public void executeDefragmentation() throws IgniteCheckedException { + log.info("Defragmentation started."); + + try { + // Checkpointer must be enabled so all pages on disk are in their latest valid state. + dbMgr.resumeWalLogging(); + + dbMgr.onStateRestored(null); + + nodeCheckpoint.forceCheckpoint("beforeDefragmentation", null).futureFor(FINISHED).get(); + + sharedCtx.wal().onDeActivate(sharedCtx.kernalContext()); + + // Now the actual process starts. 
+ TreeIterator treeIter = new TreeIterator(pageSize); + + IgniteInternalFuture idxDfrgFut = null; + DataPageEvictionMode prevPageEvictionMode = null; + + for (CacheGroupContext oldGrpCtx : sharedCtx.cache().cacheGroups()) { + if (!oldGrpCtx.userCache()) + continue; + + int grpId = oldGrpCtx.groupId(); + + if (!cacheGroupsForDefragmentation.isEmpty() && !cacheGroupsForDefragmentation.contains(grpId)) + continue; + + File workDir = filePageStoreMgr.cacheWorkDir(oldGrpCtx.sharedGroup(), oldGrpCtx.cacheOrGroupName()); + + if (skipAlreadyDefragmentedCacheGroup(workDir, grpId, log)) + continue; + + GridCacheOffheapManager offheap = (GridCacheOffheapManager)oldGrpCtx.offheap(); + + List oldCacheDataStores = stream(offheap.cacheDataStores().spliterator(), false) + .filter(store -> { + try { + return filePageStoreMgr.exists(grpId, store.partId()); + } + catch (IgniteCheckedException e) { + throw new IgniteException(e); + } + }) + .collect(Collectors.toList()); + + if (workDir != null && !oldCacheDataStores.isEmpty()) { + // We can't start defragmentation of new group on the region that has wrong eviction mode. + // So waiting of the previous cache group defragmentation is inevitable. + DataPageEvictionMode curPageEvictionMode = oldGrpCtx.dataRegion().config().getPageEvictionMode(); + + if (prevPageEvictionMode == null || prevPageEvictionMode != curPageEvictionMode) { + prevPageEvictionMode = curPageEvictionMode; + + partDataRegion.config().setPageEvictionMode(curPageEvictionMode); + + if (idxDfrgFut != null) + idxDfrgFut.get(); + } + + IntMap cacheDataStores = new IntHashMap<>(); + + for (CacheDataStore store : offheap.cacheDataStores()) { + // Tree can be null for not yet initialized partitions. + // This would mean that these partitions are empty. 
+ assert store.tree() == null || store.tree().groupId() == grpId; + + if (store.tree() != null) + cacheDataStores.put(store.partId(), store); + } + + dbMgr.checkpointedDataRegions().remove(oldGrpCtx.dataRegion()); + + // Another cheat. Ttl cleanup manager knows too much shit. + oldGrpCtx.caches().stream() + .filter(cacheCtx -> cacheCtx.groupId() == grpId) + .forEach(cacheCtx -> cacheCtx.ttl().unregister()); + + // Technically wal is already disabled, but "PageHandler.isWalDeltaRecordNeeded" doesn't care and + // WAL records will be allocated anyway just to be ignored later if we don't disable WAL for + // cache group explicitly. + oldGrpCtx.localWalEnabled(false, false); + + boolean encrypted = oldGrpCtx.config().isEncryptionEnabled(); + + FilePageStoreFactory pageStoreFactory = filePageStoreMgr.getPageStoreFactory(grpId, encrypted); + + createIndexPageStore(grpId, workDir, pageStoreFactory, partDataRegion, val -> { + }); //TODO Allocated tracker. + + GridCompoundFuture cmpFut = new GridCompoundFuture<>(); + + PageMemoryEx oldPageMem = (PageMemoryEx)oldGrpCtx.dataRegion().pageMemory(); + + CacheGroupContext newGrpCtx = new CacheGroupContext( + sharedCtx, + grpId, + oldGrpCtx.receivedFrom(), + CacheType.USER, + oldGrpCtx.config(), + oldGrpCtx.affinityNode(), + partDataRegion, + oldGrpCtx.cacheObjectContext(), + null, + null, + oldGrpCtx.localStartVersion(), + true, + false, + true + ); + + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadLock(); + + try { + // This will initialize partition meta in index partition - meta tree and reuse list. 
+ newGrpCtx.start(); + } + finally { + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadUnlock(); + } + + IntMap linkMapByPart = new IntHashMap<>(); + + for (CacheDataStore oldCacheDataStore : oldCacheDataStores) { + int partId = oldCacheDataStore.partId(); + + PartitionContext partCtx = new PartitionContext( + workDir, + grpId, + partId, + partDataRegion, + mappingDataRegion, + oldGrpCtx, + newGrpCtx, + cacheDataStores.get(partId), + pageStoreFactory + ); + + if (skipAlreadyDefragmentedPartition(workDir, grpId, partId, log)) { + partCtx.createPageStore( + () -> defragmentedPartMappingFile(workDir, partId).toPath(), + partCtx.mappingPagesAllocated, + partCtx.mappingPageMemory + ); + + linkMapByPart.put(partId, partCtx.createLinkMapTree(false)); + + continue; + } + + partCtx.createPageStore( + () -> defragmentedPartMappingFile(workDir, partId).toPath(), + partCtx.mappingPagesAllocated, + partCtx.mappingPageMemory + ); + + linkMapByPart.put(partId, partCtx.createLinkMapTree(true)); + + partCtx.createPageStore( + () -> defragmentedPartTmpFile(workDir, partId).toPath(), + partCtx.partPagesAllocated, + partCtx.partPageMemory + ); + + partCtx.createNewCacheDataStore(offheap); + + copyPartitionData(partCtx, treeIter); + + IgniteInClosure> cpLsnr = fut -> { + if (fut.error() != null) + return; + + PageStore oldPageStore = null; + + try { + oldPageStore = filePageStoreMgr.getStore(grpId, partId); + } + catch (IgniteCheckedException ignore) { + } + + if (log.isDebugEnabled()) { + log.debug(S.toString( + "Partition defragmented", + "grpId", grpId, false, + "partId", partId, false, + "oldPages", oldPageStore.pages(), false, + "newPages", partCtx.partPagesAllocated.get() + 1, false, + "mappingPages", partCtx.mappingPagesAllocated.get() + 1, false, + "pageSize", pageSize, false, + "partFile", defragmentedPartFile(workDir, partId).getName(), false, + "workDir", workDir, false + )); + } + + oldPageMem.invalidate(grpId, partId); + + 
partCtx.partPageMemory.invalidate(grpId, partId); + + DefragmentationPageReadWriteManager pageMgr = (DefragmentationPageReadWriteManager)partCtx.partPageMemory.pageManager(); + + pageMgr.pageStoreMap().removePageStore(grpId, partId); // Yes, it'll be invalid in a second. + + renameTempPartitionFile(workDir, partId); + }; + + GridFutureAdapter cpFut = defragmentationCheckpoint + .forceCheckpoint("partition defragmented", null) + .futureFor(CheckpointState.FINISHED); + + cpFut.listen(cpLsnr); + + cmpFut.add((IgniteInternalFuture)cpFut); + } + + // A bit too general for now, but I like it more then saving only the last checkpoint future. + cmpFut.markInitialized().get(); + + idxDfrgFut = new GridFinishedFuture<>(); + + if (filePageStoreMgr.hasIndexStore(grpId)) { + defragmentIndexPartition(oldGrpCtx, newGrpCtx, linkMapByPart); + + idxDfrgFut = defragmentationCheckpoint + .forceCheckpoint("index defragmented", null) + .futureFor(CheckpointState.FINISHED); + } + + idxDfrgFut.listen(fut -> { + oldPageMem.invalidate(grpId, PageIdAllocator.INDEX_PARTITION); + + PageMemoryEx partPageMem = (PageMemoryEx)partDataRegion.pageMemory(); + + partPageMem.invalidate(grpId, PageIdAllocator.INDEX_PARTITION); + + DefragmentationPageReadWriteManager pageMgr = (DefragmentationPageReadWriteManager)partPageMem.pageManager(); + + pageMgr.pageStoreMap().removePageStore(grpId, PageIdAllocator.INDEX_PARTITION); + + PageMemoryEx mappingPageMem = (PageMemoryEx)mappingDataRegion.pageMemory(); + + pageMgr = (DefragmentationPageReadWriteManager)mappingPageMem.pageManager(); + + pageMgr.pageStoreMap().clear(grpId); + + renameTempIndexFile(workDir); + + writeDefragmentationCompletionMarker(filePageStoreMgr.getPageStoreFileIoFactory(), workDir, log); + + batchRenameDefragmentedCacheGroupPartitions(workDir, log); + }); + } + + // I guess we should wait for it? 
+ if (idxDfrgFut != null) + idxDfrgFut.get(); + } + + mntcReg.unregisterMaintenanceTask(DEFRAGMENTATION_MNTC_TASK_NAME); + + log.info("Defragmentation completed. All partitions are defragmented."); + } + finally { + defragmentationCheckpoint.stop(true); + } + } + + /** */ + public void createIndexPageStore( + int grpId, + File workDir, + FilePageStoreFactory pageStoreFactory, + DataRegion partRegion, + LongConsumer allocatedTracker + ) throws IgniteCheckedException { + // Index partition file has to be deleted before we begin, otherwise there's a chance of reading corrupted file. + // There is a time period when index is already defragmented but marker file is not created yet. If node is + // failed in that time window then index will be deframented once again. That's fine, situation is rare but code + // to fix that would add unnecessary complications. + U.delete(defragmentedIndexTmpFile(workDir)); + + PageStore idxPageStore; + + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadLock(); + try { + idxPageStore = pageStoreFactory.createPageStore( + FLAG_IDX, + () -> defragmentedIndexTmpFile(workDir).toPath(), + allocatedTracker + ); + } + finally { + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadUnlock(); + } + + idxPageStore.sync(); + + PageMemoryEx partPageMem = (PageMemoryEx)partRegion.pageMemory(); + + DefragmentationPageReadWriteManager partMgr = (DefragmentationPageReadWriteManager)partPageMem.pageManager(); + + partMgr.pageStoreMap().addPageStore(grpId, PageIdAllocator.INDEX_PARTITION, idxPageStore); + } + + /** + * Defragmentate partition. + * + * @param partCtx + * @param treeIter + * @throws IgniteCheckedException If failed. 
+ */ + private void copyPartitionData( + PartitionContext partCtx, + TreeIterator treeIter + ) throws IgniteCheckedException { + CacheDataTree tree = partCtx.oldCacheDataStore.tree(); + + CacheDataTree newTree = partCtx.newCacheDataStore.tree(); + PendingEntriesTree newPendingTree = partCtx.newCacheDataStore.pendingTree(); + AbstractFreeList freeList = partCtx.newCacheDataStore.getCacheStoreFreeList(); + + long cpLockThreshold = 150L; + + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadLock(); + + try { + AtomicLong lastCpLockTs = new AtomicLong(System.currentTimeMillis()); + AtomicInteger entriesProcessed = new AtomicInteger(); + + treeIter.iterate(tree, partCtx.cachePageMemory, (tree0, io, pageAddr, idx) -> { + if (System.currentTimeMillis() - lastCpLockTs.get() >= cpLockThreshold) { + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadUnlock(); + + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadLock(); + + lastCpLockTs.set(System.currentTimeMillis()); + } + + AbstractDataLeafIO leafIo = (AbstractDataLeafIO)io; + CacheDataRow row = tree.getRow(io, pageAddr, idx); + + int cacheId = row.cacheId(); + + // Reuse row that we just read. + row.link(0); + + // "insertDataRow" will corrupt page memory if we don't do this. + if (row instanceof DataRow && !partCtx.oldGrpCtx.storeCacheIdInDataPage()) + ((DataRow)row).cacheId(CU.UNDEFINED_CACHE_ID); + + freeList.insertDataRow(row, IoStatisticsHolderNoOp.INSTANCE); + + // Put it back. 
+ if (row instanceof DataRow) + ((DataRow)row).cacheId(cacheId); + + newTree.putx(row); + + long newLink = row.link(); + + partCtx.linkMap.put(leafIo.getLink(pageAddr, idx), newLink); + + if (row.expireTime() != 0) + newPendingTree.putx(new PendingRow(cacheId, row.expireTime(), newLink)); + + entriesProcessed.incrementAndGet(); + + return true; + }); + + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadUnlock(); + + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadLock(); + + freeList.saveMetadata(IoStatisticsHolderNoOp.INSTANCE); + + copyCacheMetadata(partCtx); + } + finally { + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadUnlock(); + } + } + + /** */ + private void copyCacheMetadata( + PartitionContext partCtx + ) throws IgniteCheckedException { + // Same for all page memories. Why does it need to be in PageMemory? + long partMetaPageId = partCtx.cachePageMemory.partitionMetaPageId(partCtx.grpId, partCtx.partId); + + long oldPartMetaPage = partCtx.cachePageMemory.acquirePage(partCtx.grpId, partMetaPageId); + + try { + long oldPartMetaPageAddr = partCtx.cachePageMemory.readLock(partCtx.grpId, partMetaPageId, oldPartMetaPage); + + try { + PagePartitionMetaIO oldPartMetaIo = PageIO.getPageIO(oldPartMetaPageAddr); + + // Newer meta versions may contain new data that we don't copy during defragmentation. + assert Arrays.asList(1, 2, 3).contains(oldPartMetaIo.getVersion()) + : "IO version " + oldPartMetaIo.getVersion() + " is not supported by current defragmentation algorithm." + + " Please implement copying of all data added in new version."; + + long newPartMetaPage = partCtx.partPageMemory.acquirePage(partCtx.grpId, partMetaPageId); + + try { + long newPartMetaPageAddr = partCtx.partPageMemory.writeLock(partCtx.grpId, partMetaPageId, newPartMetaPage); + + try { + PagePartitionMetaIOV3 newPartMetaIo = PageIO.getPageIO(newPartMetaPageAddr); + + // Copy partition state. 
+ byte partState = oldPartMetaIo.getPartitionState(oldPartMetaPageAddr); + newPartMetaIo.setPartitionState(newPartMetaPageAddr, partState); + + // Copy cache size for single cache group. + long size = oldPartMetaIo.getSize(oldPartMetaPageAddr); + newPartMetaIo.setSize(newPartMetaPageAddr, size); + + // Copy update counter value. + long updateCntr = oldPartMetaIo.getUpdateCounter(oldPartMetaPageAddr); + newPartMetaIo.setUpdateCounter(newPartMetaPageAddr, updateCntr); + + // Copy global remove Id. + long rmvId = oldPartMetaIo.getGlobalRemoveId(oldPartMetaPageAddr); + newPartMetaIo.setGlobalRemoveId(newPartMetaPageAddr, rmvId); + + // Copy cache sizes for shared cache group. + long oldCountersPageId = oldPartMetaIo.getCountersPageId(oldPartMetaPageAddr); + if (oldCountersPageId != 0L) { + Map sizes = GridCacheOffheapManager.readSharedGroupCacheSizes( + partCtx.cachePageMemory, + partCtx.grpId, + oldCountersPageId + ); + + long newCountersPageId = GridCacheOffheapManager.writeSharedGroupCacheSizes( + partCtx.partPageMemory, + partCtx.grpId, + 0L, + partCtx.partId, + sizes + ); + + newPartMetaIo.setCountersPageId(newPartMetaPageAddr, newCountersPageId); + } + + // Copy counter gaps. + long oldGapsLink = oldPartMetaIo.getGapsLink(oldPartMetaPageAddr); + if (oldGapsLink != 0L) { + byte[] gapsBytes = partCtx.oldCacheDataStore.partStorage().readRow(oldGapsLink); + + SimpleDataRow gapsDataRow = new SimpleDataRow(partCtx.partId, gapsBytes); + + partCtx.newCacheDataStore.partStorage().insertDataRow(gapsDataRow, IoStatisticsHolderNoOp.INSTANCE); + + newPartMetaIo.setGapsLink(newPartMetaPageAddr, gapsDataRow.link()); + } + + // Encryption stuff. 
+ newPartMetaIo.setEncryptedPageCount(newPartMetaPageAddr, 0); + newPartMetaIo.setEncryptedPageIndex(newPartMetaPageAddr, 0); + } + finally { + partCtx.partPageMemory.writeUnlock(partCtx.grpId, partMetaPageId, newPartMetaPage, null, true); + } + } + finally { + partCtx.partPageMemory.releasePage(partCtx.grpId, partMetaPageId, newPartMetaPage); + } + } + finally { + partCtx.cachePageMemory.readUnlock(partCtx.grpId, partMetaPageId, oldPartMetaPage); + } + } + finally { + partCtx.cachePageMemory.releasePage(partCtx.grpId, partMetaPageId, oldPartMetaPage); + } + } + + /** + * Defragmentate indexing partition. + * + * @param grpCtx + * @param mappingByPartition + * + * @throws IgniteCheckedException If failed. + */ + private void defragmentIndexPartition( + CacheGroupContext grpCtx, + CacheGroupContext newCtx, + IntMap mappingByPartition + ) throws IgniteCheckedException { + GridQueryProcessor query = grpCtx.caches().get(0).kernalContext().query(); + + if (!query.moduleEnabled()) + return; + + final GridQueryIndexing idx = query.getIndexing(); + + CheckpointTimeoutLock cpLock = defragmentationCheckpoint.checkpointTimeoutLock(); + + idx.defragment( + grpCtx, + newCtx, + (PageMemoryEx)partDataRegion.pageMemory(), + mappingByPartition, + cpLock + ); + } + + /** */ + @SuppressWarnings("PublicField") + private class PartitionContext { + /** */ + public final File workDir; + + /** */ + public final int grpId; + + /** */ + public final int partId; + + /** */ + public final DataRegion cacheDataRegion; + + /** */ + public final PageMemoryEx cachePageMemory; + + /** */ + public final PageMemoryEx partPageMemory; + + /** */ + public final PageMemoryEx mappingPageMemory; + + /** */ + public final CacheGroupContext oldGrpCtx; + + /** */ + public final CacheGroupContext newGrpCtx; + + /** */ + public final CacheDataStore oldCacheDataStore; + + /** */ + private GridCacheDataStore newCacheDataStore; + + /** */ + public final FilePageStoreFactory pageStoreFactory; + + /** */ + public 
final AtomicLong partPagesAllocated = new AtomicLong(); + + /** */ + public final AtomicLong mappingPagesAllocated = new AtomicLong(); + + /** */ + private LinkMap linkMap; + + /** */ + public PartitionContext( + File workDir, + int grpId, + int partId, + DataRegion partDataRegion, + DataRegion mappingDataRegion, + CacheGroupContext oldGrpCtx, + CacheGroupContext newGrpCtx, + CacheDataStore oldCacheDataStore, + FilePageStoreFactory pageStoreFactory + ) { + this.workDir = workDir; + this.grpId = grpId; + this.partId = partId; + cacheDataRegion = oldGrpCtx.dataRegion(); + + cachePageMemory = (PageMemoryEx)cacheDataRegion.pageMemory(); + partPageMemory = (PageMemoryEx)partDataRegion.pageMemory(); + mappingPageMemory = (PageMemoryEx)mappingDataRegion.pageMemory(); + + this.oldGrpCtx = oldGrpCtx; + this.newGrpCtx = newGrpCtx; + this.oldCacheDataStore = oldCacheDataStore; + this.pageStoreFactory = pageStoreFactory; + } + + /** */ + public PageStore createPageStore(IgniteOutClosure pathProvider, AtomicLong pagesAllocated, PageMemoryEx pageMemory) throws IgniteCheckedException { + PageStore partPageStore; + + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadLock(); + try { + partPageStore = pageStoreFactory.createPageStore( + FLAG_DATA, + pathProvider, + pagesAllocated::addAndGet + ); + } + finally { + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadUnlock(); + } + + partPageStore.sync(); + + DefragmentationPageReadWriteManager pageMgr = (DefragmentationPageReadWriteManager)pageMemory.pageManager(); + + pageMgr.pageStoreMap().addPageStore(grpId, partId, partPageStore); + + return partPageStore; + } + + /** */ + public LinkMap createLinkMapTree(boolean initNew) throws IgniteCheckedException { + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadLock(); + + try { + long mappingMetaPageId = initNew + ? 
mappingPageMemory.allocatePage(grpId, partId, FLAG_DATA) + : PageIdUtils.pageId(partId, FLAG_DATA, LinkMap.META_PAGE_IDX); + + assert PageIdUtils.pageIndex(mappingMetaPageId) == LinkMap.META_PAGE_IDX + : PageIdUtils.toDetailString(mappingMetaPageId); + + linkMap = new LinkMap(newGrpCtx, mappingPageMemory, mappingMetaPageId, initNew); + } + finally { + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadUnlock(); + } + + return linkMap; + } + + /** */ + public void createNewCacheDataStore(GridCacheOffheapManager offheap) { + GridCacheDataStore newCacheDataStore = offheap.createGridCacheDataStore( + newGrpCtx, + partId, + true, + log + ); + + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadLock(); + + try { + newCacheDataStore.init(); + } + finally { + defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadUnlock(); + } + + this.newCacheDataStore = newCacheDataStore; + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/DefragmentationFileUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/DefragmentationFileUtils.java new file mode 100644 index 0000000000000..b4273cd78d15f --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/DefragmentationFileUtils.java @@ -0,0 +1,401 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.internal.processors.cache.persistence.file.FileIO; +import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; + +import static java.nio.file.StandardCopyOption.ATOMIC_MOVE; +import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; +import static java.nio.file.StandardOpenOption.CREATE_NEW; +import static java.nio.file.StandardOpenOption.WRITE; +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.FILE_SUFFIX; +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.INDEX_FILE_NAME; +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.INDEX_FILE_PREFIX; +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.PART_FILE_PREFIX; +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.PART_FILE_TEMPLATE; +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.TMP_SUFFIX; + +/** + * Everything related to file management during 
defragmentation process. + */ +public class DefragmentationFileUtils { + /** Prefix for link mapping files. */ + private static final String DFRG_LINK_MAPPING_FILE_PREFIX = PART_FILE_PREFIX + "map-"; + + /** Link mapping file template. */ + private static final String DFRG_LINK_MAPPING_FILE_TEMPLATE = DFRG_LINK_MAPPING_FILE_PREFIX + "%d" + FILE_SUFFIX; + + /** Defragmentation complation marker file name. */ + private static final String DFRG_COMPLETION_MARKER_FILE_NAME = "dfrg-completion-marker"; + + /** Name of defragmentated index partition file. */ + private static final String DFRG_INDEX_FILE_NAME = INDEX_FILE_PREFIX + "-dfrg" + FILE_SUFFIX; + + /** Name of defragmentated index partition temporary file. */ + private static final String DFRG_INDEX_TMP_FILE_NAME = DFRG_INDEX_FILE_NAME + TMP_SUFFIX; + + /** Prefix for defragmented partition files. */ + private static final String DFRG_PARTITION_FILE_PREFIX = PART_FILE_PREFIX + "dfrg-"; + + /** Defragmented partition file template. */ + private static final String DFRG_PARTITION_FILE_TEMPLATE = DFRG_PARTITION_FILE_PREFIX + "%d" + FILE_SUFFIX; + + /** Defragmented partition temp file template. */ + private static final String DFRG_PARTITION_TMP_FILE_TEMPLATE = DFRG_PARTITION_FILE_TEMPLATE + TMP_SUFFIX; + + /** + * Performs cleanup of work dir before initializing file page stores. + * Will finish batch renaming if defragmentation was completed or delete garbage if it wasn't. + * + * @param workDir Cache group working directory. + * @param log Logger to write messages. + * @throws IgniteCheckedException If {@link IOException} occurred. 
+ */ + public static void beforeInitPageStores(File workDir, IgniteLogger log) throws IgniteCheckedException { + try { + batchRenameDefragmentedCacheGroupPartitions(workDir, log); + + U.delete(defragmentationCompletionMarkerFile(workDir)); + + for (File file : workDir.listFiles()) { + String fileName = file.getName(); + + if ( + fileName.startsWith(DFRG_PARTITION_FILE_PREFIX) + || fileName.startsWith(DFRG_INDEX_FILE_NAME) + || fileName.startsWith(DFRG_LINK_MAPPING_FILE_PREFIX) + ) + U.delete(file); + } + } + catch (IgniteException e) { + throw new IgniteCheckedException(e); + } + } + + /** + * Checks whether cache group defragmentation completed or not. Completes it if all that's left is renaming. + * + * @param workDir Cache group working directory. + * @param grpId Cache group Id of cache group belonging to the given working directory. + * @param log Logger to write messages. + * @return {@code true} if given cache group is already defragmented. + * @throws IgniteException If {@link IOException} occurred. + * + * @see DefragmentationFileUtils#defragmentationCompletionMarkerFile(File) + */ + public static boolean skipAlreadyDefragmentedCacheGroup(File workDir, int grpId, IgniteLogger log) throws IgniteException { + File completionMarkerFile = defragmentationCompletionMarkerFile(workDir); + + if (completionMarkerFile.exists()) { + if (log.isInfoEnabled()) { + log.info(S.toString( + "Skipping already defragmented page group", + "grpId", grpId, false, + "markerFileName", completionMarkerFile.getName(), false, + "workDir", workDir.getAbsolutePath(), false + )); + } + + batchRenameDefragmentedCacheGroupPartitions(workDir, log); + + return true; + } + + return false; + } + + /** + * Checks whether partition has already been defragmented or not. Cleans corrupted data if previous failed + * defragmentation attempt was found. + * + * @param workDir Cache group working directory. + * @param grpId Cache group Id of cache group belonging to the given working directory. 
+ * @param partId Partition index to check. + * @param log Logger to write messages. + * @return {@code true} if given partition is already defragmented. + * @throws IgniteException If {@link IOException} occurred. + * + * @see DefragmentationFileUtils#defragmentedPartTmpFile(File, int) + * @see DefragmentationFileUtils#defragmentedPartFile(File, int) + * @see DefragmentationFileUtils#defragmentedPartMappingFile(File, int) + */ + public static boolean skipAlreadyDefragmentedPartition(File workDir, int grpId, int partId, IgniteLogger log) throws IgniteException { + File defragmentedPartFile = defragmentedPartFile(workDir, partId); + File defragmentedPartMappingFile = defragmentedPartMappingFile(workDir, partId); + + if (defragmentedPartFile.exists() && defragmentedPartMappingFile.exists()) { + if (log.isInfoEnabled()) { + log.info(S.toString( + "Skipping already defragmented partition", + "grpId", grpId, false, + "partId", partId, false, + "partFileName", defragmentedPartFile.getName(), false, + "mappingFileName", defragmentedPartMappingFile.getName(), false, + "workDir", workDir.getAbsolutePath(), false + )); + } + + return true; + } + + File defragmentedPartTmpFile = defragmentedPartTmpFile(workDir, partId); + + try { + Files.deleteIfExists(defragmentedPartTmpFile.toPath()); + + Files.deleteIfExists(defragmentedPartFile.toPath()); + + Files.deleteIfExists(defragmentedPartMappingFile.toPath()); + } + catch (IOException e) { + throw new IgniteException(e); + } + + return false; + } + + /** + * Failure-tolerant batch rename of defragmented partition files. + * + * Deletes all link mapping files, old partition and index files, renaming defragmented files in the process. Can + * be run on the same folder multiple times if failed for some reason. + * + * Does something only if completion marker is present in the folder. This marker won't be deleted in the end. 
+ * Deletion of the marker must be done outside of defragmentation mode to prevent cache groups from being defragmented + * several times in case of failures. + * + * @param workDir Cache group working directory. + * @param log Logger to write messages. + * @throws IgniteException If {@link IOException} occurred. + * + * @see DefragmentationFileUtils#writeDefragmentationCompletionMarker(FileIOFactory, File, IgniteLogger) + */ + public static void batchRenameDefragmentedCacheGroupPartitions(File workDir, IgniteLogger log) throws IgniteException { + File completionMarkerFile = defragmentationCompletionMarkerFile(workDir); + + if (!completionMarkerFile.exists()) + return; + + try { + for (File mappingFile : workDir.listFiles((dir, name) -> name.startsWith(DFRG_LINK_MAPPING_FILE_PREFIX))) + Files.delete(mappingFile.toPath()); + + for (File partFile : workDir.listFiles((dir, name) -> name.startsWith(DFRG_PARTITION_FILE_PREFIX))) { + int partId = extractPartId(partFile.getName()); + + File oldPartFile = new File(workDir, String.format(PART_FILE_TEMPLATE, partId)); + + Files.move(partFile.toPath(), oldPartFile.toPath(), ATOMIC_MOVE, REPLACE_EXISTING); + } + + File idxFile = new File(workDir, DFRG_INDEX_FILE_NAME); + + if (idxFile.exists()) { + File oldIdxFile = new File(workDir, INDEX_FILE_NAME); + + Files.move(idxFile.toPath(), oldIdxFile.toPath(), ATOMIC_MOVE, REPLACE_EXISTING); + } + } + catch (IOException e) { + throw new IgniteException(e); + } + } + + /** + * Extracts partition number from file names like {@code part-dfrg-%d.bin}. + * + * @param dfrgPartFileName Defragmented partition file name. + * @return Partition index. 
+ * + * @see DefragmentationFileUtils#defragmentedPartFile(File, int) + */ + private static int extractPartId(String dfrgPartFileName) { + assert dfrgPartFileName.startsWith(DFRG_PARTITION_FILE_PREFIX) : dfrgPartFileName; + assert dfrgPartFileName.endsWith(FILE_SUFFIX) : dfrgPartFileName; + + String partIdStr = dfrgPartFileName.substring( + DFRG_PARTITION_FILE_PREFIX.length(), + dfrgPartFileName.length() - FILE_SUFFIX.length() + ); + + return Integer.parseInt(partIdStr); + } + + /** + * Return file named {@code index-dfrg.bin.tmp} in given folder. It will be used for storing defragmented index + * partition during the process. + * + * @param workDir Cache group working directory. + * @return File. + * + * @see DefragmentationFileUtils#defragmentedIndexFile(File) + */ + public static File defragmentedIndexTmpFile(File workDir) { + return new File(workDir, DFRG_INDEX_TMP_FILE_NAME); + } + + /** + * Return file named {@code index-dfrg.bin} in given folder. It will be used for storing defragmented index + * partition when the process is over. + * + * @param workDir Cache group working directory. + * @return File. + * + * @see DefragmentationFileUtils#defragmentedIndexTmpFile(File) + */ + public static File defragmentedIndexFile(File workDir) { + return new File(workDir, DFRG_INDEX_FILE_NAME); + } + + /** + * Rename temporary index defragmentation file to a finalized one. + * + * @param workDir Cache group working directory. + * @throws IgniteException If {@link IOException} occurred. 
+ * + * @see DefragmentationFileUtils#defragmentedIndexTmpFile(File) + * @see DefragmentationFileUtils#defragmentedIndexFile(File) + */ + public static void renameTempIndexFile(File workDir) throws IgniteException { + File defragmentedIdxTmpFile = defragmentedIndexTmpFile(workDir); + File defragmentedIdxFile = defragmentedIndexFile(workDir); + + try { + Files.move(defragmentedIdxTmpFile.toPath(), defragmentedIdxFile.toPath(), ATOMIC_MOVE); + } + catch (IOException e) { + throw new IgniteException(e); + } + } + + /** + * Return file named {@code part-dfrg-%d.bin.tmp} in given folder. It will be used for storing defragmented data + * partition during the process. + * + * @param workDir Cache group working directory. + * @param partId Partition index, will be substituted into file name. + * @return File. + * + * @see DefragmentationFileUtils#defragmentedPartFile(File, int) + */ + public static File defragmentedPartTmpFile(File workDir, int partId) { + return new File(workDir, String.format(DFRG_PARTITION_TMP_FILE_TEMPLATE, partId)); + } + + /** + * Return file named {@code part-dfrg-%d.bin} in given folder. It will be used for storing defragmented data + * partition when the process is over. + * + * @param workDir Cache group working directory. + * @param partId Partition index, will be substituted into file name. + * @return File. + * + * @see DefragmentationFileUtils#defragmentedPartTmpFile(File, int) + */ + public static File defragmentedPartFile(File workDir, int partId) { + return new File(workDir, String.format(DFRG_PARTITION_FILE_TEMPLATE, partId)); + } + + /** + * Rename temporary partition defragmentation file to a finalized one. + * + * @param workDir Cache group working directory. + * @param partId Partition index. + * @throws IgniteException If {@link IOException} occurred. 
+ * + * @see DefragmentationFileUtils#defragmentedPartTmpFile(File, int) + * @see DefragmentationFileUtils#defragmentedPartFile(File, int) + */ + public static void renameTempPartitionFile(File workDir, int partId) throws IgniteException { + File defragmentedPartTmpFile = defragmentedPartTmpFile(workDir, partId); + File defragmentedPartFile = defragmentedPartFile(workDir, partId); + + assert !defragmentedPartFile.exists() : defragmentedPartFile; + + try { + Files.move(defragmentedPartTmpFile.toPath(), defragmentedPartFile.toPath(), ATOMIC_MOVE); + } + catch (IOException e) { + throw new IgniteException(e); + } + } + + /** + * Return file named {@code part-map-%d.bin} in given folder. It will be used for storing defragmentation link + * mapping for given partition during and after defragmentation process. No temporary counterpart is required here. + * + * @param workDir Cache group working directory. + * @param partId Partition index, will be substituted into file name. + * @return File. + * + * @see LinkMap + */ + public static File defragmentedPartMappingFile(File workDir, int partId) { + return new File(workDir, String.format(DFRG_LINK_MAPPING_FILE_TEMPLATE, partId)); + } + + /** + * Return defragmentation completion marker file. This file can only be created when all partitions and index are + * defragmented and renamed from their original {@code *.tmp} versions. Presence of this file signals that no data + * will be lost if original partitions are deleted and batch rename process can be safely initiated. + * + * @param workDir Cache group working directory. + * @return File. 
+ * + * @see DefragmentationFileUtils#writeDefragmentationCompletionMarker(FileIOFactory, File, IgniteLogger) + * @see DefragmentationFileUtils#batchRenameDefragmentedCacheGroupPartitions(File, IgniteLogger) + */ + public static File defragmentationCompletionMarkerFile(File workDir) { + return new File(workDir, DFRG_COMPLETION_MARKER_FILE_NAME); + } + + /** + * Creates empty completion marker file in given directory. + * + * @param ioFactory File IO factory. + * @param workDir Cache group working directory. + * @param log Logger to write messages. + * @throws IgniteException If {@link IOException} occurred. + * + * @see DefragmentationFileUtils#defragmentationCompletionMarkerFile(File) + */ + public static void writeDefragmentationCompletionMarker( + FileIOFactory ioFactory, + File workDir, + IgniteLogger log + ) throws IgniteException { + File completionMarker = defragmentationCompletionMarkerFile(workDir); + + try (FileIO io = ioFactory.create(completionMarker, CREATE_NEW, WRITE)) { + io.force(true); + } + catch (IOException e) { + throw new IgniteException(e); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/DefragmentationPageReadWriteManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/DefragmentationPageReadWriteManager.java new file mode 100644 index 0000000000000..2ed7c91893cb8 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/DefragmentationPageReadWriteManager.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation; + +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageReadWriteManagerImpl; + +/** */ +public class DefragmentationPageReadWriteManager extends PageReadWriteManagerImpl { + /** + * @param ctx Kernal context. + * @param name name. + */ + public DefragmentationPageReadWriteManager(GridKernalContext ctx, String name) { + super(ctx, new PageStoreMap(), name); + } + + /** */ + public PageStoreMap pageStoreMap() { + return (PageStoreMap)pageStores; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/LinkMap.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/LinkMap.java new file mode 100644 index 0000000000000..a796ab90d844d --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/LinkMap.java @@ -0,0 +1,276 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation; + +import java.util.concurrent.atomic.AtomicLong; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.pagemem.PageIdUtils; +import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.pagemem.PageUtils; +import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusInnerIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusLeafIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; +import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageLockListener; +import org.apache.ignite.internal.processors.failure.FailureProcessor; +import org.jetbrains.annotations.Nullable; + +import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_AUX; +import static 
org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_DATA; + +/** + * Class that holds mappings of old links to new links. + */ +public class LinkMap { + /** Tree meta page index. */ + public static final int META_PAGE_IDX = 2; + + /** */ + public static final IOVersions> LEAF_IO_VERSIONS = new IOVersions<>( + new LinkMappingLeafIO() + ); + + /** */ + public static final IOVersions> INNER_IO_VERSIONS = new IOVersions<>( + new LinkMappingInnerIO() + ); + + /** Mapping tree. */ + private final LinkTree tree; + + /** + * @param ctx Cache group context. + * @param pageMem Page memory. + * @param metaPageId Meta page id. + * @param initNew If tree should be (re)created. + */ + public LinkMap( + CacheGroupContext ctx, + PageMemory pageMem, + long metaPageId, + boolean initNew + ) throws IgniteCheckedException { + this(ctx.groupId(), ctx.name(), pageMem, metaPageId, initNew); + } + + /** + * @param grpId Cache group id. + * @param grpName Cache group name. + * @param pageMem Page memory. + * @param metaPageId Meta page id. + * @param initNew If tree should be (re)created. + */ + public LinkMap( + int grpId, + String grpName, + PageMemory pageMem, + long metaPageId, + boolean initNew + ) throws IgniteCheckedException { + tree = new LinkTree( + "link-map", + grpId, + grpName, + pageMem, + null, + new AtomicLong(), + metaPageId, + null, + (IOVersions)INNER_IO_VERSIONS, + (IOVersions)LEAF_IO_VERSIONS, + null, + null, + initNew + ); + } + + /** + * Add link mapping. + * + * @param oldLink Old link. + * @param newLink New link. + */ + public void put(long oldLink, long newLink) throws IgniteCheckedException { + tree.put(new LinkMapping(oldLink, newLink)); + } + + /** + * Get new link by old link. + * + * @param oldLink Old link. 
+ */ + public long get(long oldLink) throws IgniteCheckedException { + LinkMapping get = new LinkMapping(oldLink, 0); + LinkMapping found = tree.findOne(get); + + return found.getNewLink(); + } + + /** */ + private static class LinkTree extends BPlusTree { + /** + * @param name Tree name. + * @param cacheGrpId Cache group ID. + * @param cacheGrpName Cache group name. + * @param pageMem Page memory. + * @param wal Write ahead log manager. + * @param globalRmvId Remove ID. + * @param metaPageId Meta page ID. + * @param reuseList Reuse list. + * @param innerIos Inner IO versions. + * @param leafIos Leaf IO versions. + * @param failureProcessor if the tree is corrupted. + * @param initNew If tree should be (re)created. + * + * @throws IgniteCheckedException If failed. + */ + protected LinkTree( + String name, + int cacheGrpId, + String cacheGrpName, + PageMemory pageMem, + IgniteWriteAheadLogManager wal, + AtomicLong globalRmvId, + long metaPageId, + ReuseList reuseList, + IOVersions> innerIos, + IOVersions> leafIos, + @Nullable FailureProcessor failureProcessor, + @Nullable PageLockListener lockLsnr, + boolean initNew + ) throws IgniteCheckedException { + super(name, cacheGrpId, cacheGrpName, pageMem, wal, globalRmvId, metaPageId, reuseList, innerIos, leafIos, FLAG_AUX, failureProcessor, lockLsnr); + + PageIO.registerTest(latestInnerIO(), latestLeafIO()); + + initTree(initNew); + } + + /** {@inheritDoc} */ + @Override protected int compare(BPlusIO io, long pageAddr, int idx, LinkMapping row) throws IgniteCheckedException { + LinkMapping lookupRow = io.getLookupRow(this, pageAddr, idx); + + return Long.compare(lookupRow.getOldLink(), row.getOldLink()); + } + + /** {@inheritDoc} */ + @Override public LinkMapping getRow(BPlusIO io, long pageAddr, int idx, Object x) throws IgniteCheckedException { + return io.getLookupRow(this, pageAddr, idx); + } + + /** {@inheritDoc} */ + @Override protected long allocatePageNoReuse() throws IgniteCheckedException { + return 
pageMem.allocatePage(grpId, PageIdUtils.partId(metaPageId), FLAG_DATA); + } + } + + /** + * Class holding mapping from old link to new link. + */ + private static class LinkMapping { + /** Old link. */ + private final long oldLink; + + /** New link. */ + private final long newLink; + + /** + * @param oldLink Old link. + * @param newLink New link. + */ + public LinkMapping(long oldLink, long newLink) { + this.oldLink = oldLink; + this.newLink = newLink; + } + + /** */ + public long getOldLink() { + return oldLink; + } + + /** */ + public long getNewLink() { + return newLink; + } + } + + /** */ + private static class LinkMappingInnerIO extends BPlusInnerIO { + /** */ + protected LinkMappingInnerIO() { + super(PageIO.T_DEFRAG_LINK_MAPPING_INNER, 1, true, Long.BYTES * 2); + } + + /** {@inheritDoc} */ + @Override public void storeByOffset(long pageAddr, int off, LinkMapping row) { + PageUtils.putLong(pageAddr, off, row.getOldLink()); + PageUtils.putLong(pageAddr, off + Long.BYTES, row.getNewLink()); + } + + /** {@inheritDoc} */ + @Override public void store(long dst, int dstIdx, BPlusIO srcIo, long src, int srcIdx) + throws IgniteCheckedException { + assert srcIo == this; + + storeByOffset(dst, offset(dstIdx), srcIo.getLookupRow(null, src, srcIdx)); + } + + /** {@inheritDoc} */ + @Override public LinkMapping getLookupRow(BPlusTree tree, long pageAddr, int idx) { + long oldLink = PageUtils.getLong(pageAddr, offset(idx)); + long newLink = PageUtils.getLong(pageAddr, offset(idx) + Long.BYTES); + + return new LinkMapping(oldLink, newLink); + } + } + + /** */ + private static class LinkMappingLeafIO extends BPlusLeafIO { + /** */ + protected LinkMappingLeafIO() { + super(PageIO.T_DEFRAG_LINK_MAPPING_LEAF, 1, Long.BYTES * 2); + } + + /** {@inheritDoc} */ + @Override public void storeByOffset(long pageAddr, int off, LinkMapping row) { + PageUtils.putLong(pageAddr, off, row.getOldLink()); + PageUtils.putLong(pageAddr, off + Long.BYTES, row.getNewLink()); + } + + /** 
{@inheritDoc} */ + @Override public void store(long dst, int dstIdx, BPlusIO srcIo, long src, int srcIdx) + throws IgniteCheckedException { + assert srcIo == this; + + storeByOffset(dst, offset(dstIdx), srcIo.getLookupRow(null, src, srcIdx)); + } + + /** {@inheritDoc} */ + @Override public LinkMapping getLookupRow(BPlusTree tree, long pageAddr, int idx) { + long oldLink = PageUtils.getLong(pageAddr, offset(idx)); + long newLink = PageUtils.getLong(pageAddr, offset(idx) + Long.BYTES); + + return new LinkMapping(oldLink, newLink); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/PageStoreMap.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/PageStoreMap.java new file mode 100644 index 0000000000000..946fea16c0668 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/PageStoreMap.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation; + +import java.util.Arrays; +import java.util.Collection; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.pagemem.store.PageStore; +import org.apache.ignite.internal.pagemem.store.PageStoreCollection; +import org.apache.ignite.internal.util.collection.IntMap; +import org.apache.ignite.internal.util.collection.IntRWHashMap; +import org.apache.ignite.internal.util.typedef.internal.S; + +/** */ +class PageStoreMap implements PageStoreCollection { + /** GroupId -> PartId -> PageStore */ + private final IntMap> grpPageStoresMap = new IntRWHashMap<>(); + + /** */ + public void addPageStore( + int grpId, + int partId, + PageStore pageStore + ) { + IntMap pageStoresMap = grpPageStoresMap.get(grpId); + + //This code cannot be used concurrently. If we decide to parallel defragmentation then we should correct current class. + if (pageStoresMap == null) + grpPageStoresMap.put(grpId, pageStoresMap = new IntRWHashMap<>()); + + pageStoresMap.put(partId, pageStore); + } + + /** */ + public void removePageStore( + int grpId, + int partId + ) { + IntMap pageStoresMap = grpPageStoresMap.get(grpId); + + if (pageStoresMap != null) + pageStoresMap.remove(partId); + } + + /** */ + public void clear(int grpId) { + grpPageStoresMap.remove(grpId); + } + + /** {@inheritDoc} */ + @Override public PageStore getStore(int grpId, int partId) throws IgniteCheckedException { + IntMap partPageStoresMap = grpPageStoresMap.get(grpId); + + if (partPageStoresMap == null) { + throw new IgniteCheckedException(S.toString("Page store map not found. ", + "grpId", grpId, false, + "partId", partId, false, + "keys", Arrays.toString(grpPageStoresMap.keys()), false, + "this", hashCode(), false + )); + } + + PageStore pageStore = partPageStoresMap.get(partId); + + if (pageStore == null) { + throw new IgniteCheckedException(S.toString("Page store not found. 
", + "grpId", grpId, false, + "partId", partId, false, + "keys", Arrays.toString(partPageStoresMap.keys()), false, + "this", hashCode(), false + )); + } + + return pageStore; + } + + /** {@inheritDoc} */ + @Override public Collection getStores(int grpId) throws IgniteCheckedException { + IntMap partPageStoresMap = grpPageStoresMap.get(grpId); + + if (partPageStoresMap == null) { + throw new IgniteCheckedException(S.toString("Page store map not found. ", + "grpId", grpId, false, + "keys", Arrays.toString(grpPageStoresMap.keys()), false, + "this", hashCode(), false + )); + } + + return Arrays.asList(partPageStoresMap.values()); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/TreeIterator.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/TreeIterator.java new file mode 100644 index 0000000000000..90e47c97439a9 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/TreeIterator.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation; + +import java.nio.ByteBuffer; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; +import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusLeafIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusMetaIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; +import org.apache.ignite.internal.util.GridUnsafe; + +/** */ +public class TreeIterator { + /** Direct memory buffer with a size of one page. */ + private final ByteBuffer pageBuf; + + /** Offheap page size. */ + private final int pageSize; + + /** */ + public TreeIterator(int size) { + pageSize = size; + + pageBuf = ByteBuffer.allocateDirect(pageSize); + } + + /** */ + public void iterate( + BPlusTree tree, + PageMemoryEx pageMemory, + BPlusTree.TreeRowClosure c + ) throws IgniteCheckedException { + int grpId = tree.groupId(); + + long leafId = findFirstLeafId(grpId, tree.getMetaPageId(), pageMemory); + + long bufAddr = GridUnsafe.bufferAddress(pageBuf); + + while (leafId != 0L) { + long leafPage = pageMemory.acquirePage(grpId, leafId); + + BPlusIO io; + + try { + long leafPageAddr = pageMemory.readLock(grpId, leafId, leafPage); + + try { + io = PageIO.getBPlusIO(leafPageAddr); + + assert io instanceof BPlusLeafIO : io; + + GridUnsafe.copyMemory(leafPageAddr, bufAddr, pageSize); + } + finally { + pageMemory.readUnlock(grpId, leafId, leafPage); + } + } + finally { + pageMemory.releasePage(grpId, leafId, leafPage); + } + + int cnt = io.getCount(bufAddr); + + for (int idx = 0; idx < cnt; idx++) + c.apply(tree, io, bufAddr, idx); + + leafId = io.getForward(bufAddr); + } + } + + /** */ + private long 
findFirstLeafId(int grpId, long metaPageId, PageMemoryEx partPageMemory) throws IgniteCheckedException { + long metaPage = partPageMemory.acquirePage(grpId, metaPageId); + + try { + long metaPageAddr = partPageMemory.readLock(grpId, metaPageId, metaPage); + + try { + BPlusMetaIO metaIO = PageIO.getPageIO(metaPageAddr); + + return metaIO.getFirstPageId(metaPageAddr, 0); + } + finally { + partPageMemory.readUnlock(grpId, metaPageId, metaPage); + } + } + finally { + partPageMemory.releasePage(grpId, metaPageId, metaPage); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/maintenance/DefragmentationParameters.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/maintenance/DefragmentationParameters.java new file mode 100644 index 0000000000000..6bc3ddcd21cc1 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/maintenance/DefragmentationParameters.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation.maintenance; + +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; +import org.apache.ignite.maintenance.MaintenanceTask; + +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.CachePartitionDefragmentationManager.DEFRAGMENTATION_MNTC_TASK_NAME; + +/** + * Maintenance task for defragmentation. + */ +public class DefragmentationParameters { + /** */ + public static final String CACHE_GROUP_ID_SEPARATOR = ","; + + /** */ + private final List cacheGrpIds; + + /** + * @param cacheGrpIds Id of cache group for defragmentations. + */ + private DefragmentationParameters(List cacheGrpIds) { + this.cacheGrpIds = cacheGrpIds; + } + + /** + * Convert parameter to maintenance storage. + * + * @param cacheGroupIds Cache group ids for defragmentation. + * @return Maintenance task. + */ + public static MaintenanceTask toStore(List cacheGroupIds) { + return new MaintenanceTask( + DEFRAGMENTATION_MNTC_TASK_NAME, + "Cache group defragmentation", + cacheGroupIds.stream() + .map(String::valueOf) + .collect(Collectors.joining(CACHE_GROUP_ID_SEPARATOR)) + ); + } + + /** + * @param rawTask Task from maintenance storage. + * @return Defragmentation parameters. + */ + public static DefragmentationParameters fromStore(MaintenanceTask rawTask) { + return new DefragmentationParameters(Arrays.stream(rawTask.parameters() + .split(CACHE_GROUP_ID_SEPARATOR)) + .map(Integer::valueOf) + .collect(Collectors.toList()) + ); + } + + /** + * @return Cache groups ids. 
+ */ + public List cacheGroupIds() { + return cacheGrpIds; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/maintenance/DefragmentationWorkflowCallback.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/maintenance/DefragmentationWorkflowCallback.java new file mode 100644 index 0000000000000..a809579d14d34 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/maintenance/DefragmentationWorkflowCallback.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation.maintenance; + +import java.util.Collections; +import java.util.List; +import java.util.function.Function; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.CachePartitionDefragmentationManager; +import org.apache.ignite.maintenance.MaintenanceAction; +import org.apache.ignite.maintenance.MaintenanceWorkflowCallback; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +/** + * Defragmentation specific callback for maintenance mode. + */ +public class DefragmentationWorkflowCallback implements MaintenanceWorkflowCallback { + /** Defragmentation manager. */ + private final CachePartitionDefragmentationManager defrgMgr; + + /** Logger provider. */ + private final Function, IgniteLogger> logProvider; + + /** + * @param logProvider Logger provider. + * @param defrgMgr Defragmentation manager. + */ + public DefragmentationWorkflowCallback( + Function, IgniteLogger> logProvider, + CachePartitionDefragmentationManager defrgMgr + ) { + this.defrgMgr = defrgMgr; + this.logProvider = logProvider; + } + + /** {@inheritDoc} */ + @Override public boolean shouldProceedWithMaintenance() { + return true; + } + + /** {@inheritDoc} */ + @Override public @NotNull List> allActions() { + return Collections.singletonList(automaticAction()); + } + + /** {@inheritDoc} */ + @Override public @Nullable MaintenanceAction automaticAction() { + return new ExecuteDefragmentationAction(logProvider, defrgMgr); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/maintenance/ExecuteDefragmentationAction.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/maintenance/ExecuteDefragmentationAction.java new file mode 100644 index 0000000000000..42b2de7945bad --- /dev/null +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/maintenance/ExecuteDefragmentationAction.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation.maintenance; + +import java.util.function.Function; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.CachePartitionDefragmentationManager; +import org.apache.ignite.maintenance.MaintenanceAction; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +/** + * Action which allows to start the defragmentation process. + */ +class ExecuteDefragmentationAction implements MaintenanceAction { + /** Logger. */ + private final IgniteLogger log; + + /** Defragmentation manager. */ + private final CachePartitionDefragmentationManager defrgMgr; + + /** + * @param logFunction Logger provider. + * @param defrgMgr Defragmentation manager. 
+ */ + public ExecuteDefragmentationAction( + Function, IgniteLogger> logFunction, + CachePartitionDefragmentationManager defrgMgr + ) { + this.log = logFunction.apply(ExecuteDefragmentationAction.class); + this.defrgMgr = defrgMgr; + } + + /** {@inheritDoc} */ + @Override public Boolean execute() { + try { + defrgMgr.executeDefragmentation(); + } + catch (IgniteCheckedException | IgniteException e) { + log.error("Defragmentation is failed", e); + + return false; + } + + return true; + } + + /** {@inheritDoc} */ + @Override public @NotNull String name() { + return "execute"; + } + + /** {@inheritDoc} */ + @Override public @Nullable String description() { + return "Starting the process of defragmentation."; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java index 3f5f8a9359676..f8f28d8092502 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java @@ -75,6 +75,7 @@ import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; import org.apache.ignite.internal.processors.cache.persistence.StorageException; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils; import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFolderSettings; import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetaStorage; import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageReadWriteManager; @@ -87,6 +88,7 @@ import org.apache.ignite.internal.util.typedef.internal.U; import 
org.apache.ignite.internal.util.worker.GridWorker; import org.apache.ignite.lang.IgniteOutClosure; +import org.apache.ignite.maintenance.MaintenanceRegistry; import org.apache.ignite.maintenance.MaintenanceTask; import org.apache.ignite.marshaller.Marshaller; import org.apache.ignite.marshaller.MarshallerUtils; @@ -731,6 +733,13 @@ private CacheStoreHolder initDir(File cacheWorkDir, try { boolean dirExisted = checkAndInitCacheWorkDir(cacheWorkDir); + if (dirExisted) { + MaintenanceRegistry mntcReg = cctx.kernalContext().maintenanceRegistry(); + + if (!mntcReg.isMaintenanceMode()) + DefragmentationFileUtils.beforeInitPageStores(cacheWorkDir, log); + } + File idxFile = new File(cacheWorkDir, INDEX_FILE_NAME); if (dirExisted && !idxFile.exists()) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIO.java index 070d426813eab..eb90b2f01fbe9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIO.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIO.java @@ -30,6 +30,7 @@ import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxLogInnerIO; import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxLogLeafIO; import org.apache.ignite.internal.processors.cache.persistence.IndexStorageImpl; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.LinkMap; import org.apache.ignite.internal.processors.cache.persistence.freelist.io.PagesListMetaIO; import org.apache.ignite.internal.processors.cache.persistence.freelist.io.PagesListNodeIO; import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetastorageBPlusIO; @@ -258,6 +259,12 @@ public abstract class PageIO { /** */ public static final short T_MARKER_PAGE = 33; + /** */ + public static final short 
T_DEFRAG_LINK_MAPPING_INNER = 34; + + /** */ + public static final short T_DEFRAG_LINK_MAPPING_LEAF = 35; + /** Index for payload == 1. */ public static final short T_H2_EX_REF_LEAF_START = 10_000; @@ -799,6 +806,12 @@ public static > Q getBPlusIO(int type, int ver) throws Igni case T_DATA_REF_METASTORAGE_LEAF: return (Q)MetastorageBPlusIO.LEAF_IO_VERSIONS.forVersion(ver); + case T_DEFRAG_LINK_MAPPING_INNER: + return (Q) LinkMap.INNER_IO_VERSIONS.forVersion(ver); + + case T_DEFRAG_LINK_MAPPING_LEAF: + return (Q) LinkMap.LEAF_IO_VERSIONS.forVersion(ver); + default: // For tests. if (innerTestIO != null && innerTestIO.getType() == type && innerTestIO.getVersion() == ver) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java index d282f4d8b4d1f..236de0c4e620f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java @@ -33,16 +33,21 @@ import org.apache.ignite.internal.managers.IgniteMBeansManager; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.GridCacheContextInfo; import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot; import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.persistence.RootPage; +import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointTimeoutLock; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.LinkMap; +import 
org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcParameterMeta; import org.apache.ignite.internal.processors.query.schema.SchemaIndexCacheVisitor; import org.apache.ignite.internal.util.GridAtomicLong; import org.apache.ignite.internal.util.GridSpinBusyLock; +import org.apache.ignite.internal.util.collection.IntMap; import org.apache.ignite.internal.util.lang.GridCloseableIterator; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.lang.IgniteFuture; @@ -487,4 +492,23 @@ default long indexSize(String schemaName, String tblName, String idxName) throws default Map secondaryIndexesInlineSize() { return Collections.emptyMap(); } + + /** + * Defragment index partition. + * + * @param grpCtx Old group context. + * @param newCtx New group context. + * @param partPageMem Partition page memory. + * @param mappingByPart Mapping page memory. + * @param cpLock Defragmentation checkpoint read lock. + * + * @throws IgniteCheckedException If failed. 
+ */ + void defragment( + CacheGroupContext grpCtx, + CacheGroupContext newCtx, + PageMemoryEx partPageMem, + IntMap mappingByPart, + CheckpointTimeoutLock cpLock + ) throws IgniteCheckedException; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntHashMap.java b/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntHashMap.java index ada52760667f9..21605907fa36e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntHashMap.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntHashMap.java @@ -180,6 +180,32 @@ public IntHashMap(int cap) { return size() == 0; } + /** {@inheritDoc} */ + @Override public int[] keys() { + int[] keys = new int[size]; + + int idx = 0; + + for (Entry entry : entries) + if (entry != null) + keys[idx++] = entry.key; + + return keys; + } + + /** {@inheritDoc} */ + @Override public V[] values() { + V[] vals = (V[])new Object[size]; + + int idx = 0; + + for (Entry entry : entries) + if (entry != null) + vals[idx++] = entry.val; + + return vals; + } + /** {@inheritDoc} */ @Override public boolean containsKey(int key) { return find(key) >= 0; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntMap.java b/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntMap.java index f1bbe51e55546..c60600399c750 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntMap.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntMap.java @@ -82,4 +82,10 @@ public interface EntryConsumer { * Returns true if this map contains no key-value mappings. */ boolean isEmpty(); + + /** Returns array of keys. */ + int[] keys(); + + /** Return array of values. 
*/ + V[] values(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntRWHashMap.java b/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntRWHashMap.java index 8d379bb95eed2..52cffaa818f80 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntRWHashMap.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/util/collection/IntRWHashMap.java @@ -106,6 +106,28 @@ public IntRWHashMap() { return size() == 0; } + + /** {@inheritDoc} */ + @Override public int[] keys() { + lock.readLock().lock(); + try { + return delegate.keys(); + } + finally { + lock.readLock().unlock(); + } + } + + /** {@inheritDoc} */ + @Override public V[] values() { + lock.readLock().lock(); + try { + return delegate.values(); + } + finally { + lock.readLock().unlock(); + } + } + /** {@inheritDoc} */ @Override public boolean containsKey(int key) { lock.readLock().lock(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/tostring/GridToStringBuilder.java b/modules/core/src/main/java/org/apache/ignite/internal/util/tostring/GridToStringBuilder.java index 358313824cff9..a526764c759b0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/util/tostring/GridToStringBuilder.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/util/tostring/GridToStringBuilder.java @@ -1678,6 +1678,52 @@ public static String toString(String str, } } + + /** + * Produces uniformed output of string with context properties + * + * @param str Output prefix or {@code null} if empty. + * @param triplets Triplets {@code {name, value, sensitivity}}. + * @return String presentation. + */ + public static String toString(String str, Object... 
triplets) { + if (triplets.length % 3 != 0) + throw new IllegalArgumentException("Array length must be a multiple of 3"); + + int propCnt = triplets.length / 3; + + Object[] propNames = new Object[propCnt]; + Object[] propVals = new Object[propCnt]; + boolean[] propSens = new boolean[propCnt]; + + for (int i = 0; i < propCnt; i++) { + Object name = triplets[i * 3]; + + assert name != null; + + propNames[i] = name; + + propVals[i] = triplets[i * 3 + 1]; + + Object sens = triplets[i * 3 + 2]; + + assert sens instanceof Boolean; + + propSens[i] = (Boolean)sens; + } + + SBLimitedLength sb = threadLocSB.get(); + + boolean newStr = sb.length() == 0; + + try { + return toStringImpl(str, sb, propNames, propVals, propSens, propCnt); + } + finally { + if (newStr) + sb.reset(); + } + } + /** * Creates an uniformed string presentation for the binary-like object. * diff --git a/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceRegistry.java b/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceRegistry.java index 3ce1aea825fd7..9cebef0e09a2d 100644 --- a/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceRegistry.java +++ b/modules/core/src/main/java/org/apache/ignite/maintenance/MaintenanceRegistry.java @@ -20,6 +20,7 @@ import java.util.List; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.util.lang.IgniteThrowableFunction; import org.apache.ignite.lang.IgniteExperimental; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; @@ -153,4 +154,22 @@ public interface MaintenanceRegistry { * and their {@link MaintenanceAction maintenance actions} are not executed. */ public void prepareAndExecuteMaintenance(); + + /** + * Call the {@link #registerWorkflowCallback(String, MaintenanceWorkflowCallback)} if the active maintenance task + * with given name exists. 
+ * + * @param maintenanceTaskName name of {@link MaintenanceTask} this callback is registered for. + * @param workflowCalProvider provider of {@link MaintenanceWorkflowCallback} which construct the callback by given + * task. + */ + public default void registerWorkflowCallbackIfTaskExists( + @NotNull String maintenanceTaskName, + @NotNull IgniteThrowableFunction workflowCalProvider + ) throws IgniteCheckedException { + MaintenanceTask task = activeMaintenanceTask(maintenanceTaskName); + + if (task != null) + registerWorkflowCallback(maintenanceTaskName, workflowCalProvider.apply(task)); + } } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationEncryptionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationEncryptionTest.java new file mode 100644 index 0000000000000..f1ef929478e46 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationEncryptionTest.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence; + +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.encryption.AbstractEncryptionTest; +import org.apache.ignite.spi.encryption.keystore.KeystoreEncryptionSpi; + +/** */ +public class IgnitePdsDefragmentationEncryptionTest extends IgnitePdsDefragmentationTest { + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + KeystoreEncryptionSpi encSpi = new KeystoreEncryptionSpi(); + + encSpi.setKeyStorePath(AbstractEncryptionTest.KEYSTORE_PATH); + encSpi.setKeyStorePassword(AbstractEncryptionTest.KEYSTORE_PASSWORD.toCharArray()); + + cfg.setEncryptionSpi(encSpi); + + for (CacheConfiguration ccfg : cfg.getCacheConfiguration()) + ccfg.setEncryptionEnabled(true); + + return cfg; + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationRandomLruEvictionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationRandomLruEvictionTest.java new file mode 100644 index 0000000000000..7709d76f95266 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationRandomLruEvictionTest.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence; + +import org.apache.ignite.configuration.DataPageEvictionMode; +import org.apache.ignite.configuration.IgniteConfiguration; + +/** */ +public class IgnitePdsDefragmentationRandomLruEvictionTest extends IgnitePdsDefragmentationTest { + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + cfg.getDataStorageConfiguration() + .getDefaultDataRegionConfiguration() + .setPageEvictionMode(DataPageEvictionMode.RANDOM_LRU); + + return cfg; + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationTest.java new file mode 100644 index 0000000000000..8f06a4895cd66 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationTest.java @@ -0,0 +1,541 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence; + +import java.io.File; +import java.io.IOException; +import java.nio.file.FileVisitResult; +import java.nio.file.FileVisitor; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.Collections; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; +import java.util.function.UnaryOperator; +import java.util.stream.IntStream; +import javax.cache.configuration.Factory; +import javax.cache.expiry.Duration; +import javax.cache.expiry.ExpiryPolicy; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteDataStreamer; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.cluster.ClusterState; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.failure.FailureHandler; +import org.apache.ignite.failure.StopNodeFailureHandler; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.maintenance.MaintenanceFileStore; +import 
org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils; +import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory; +import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; +import org.apache.ignite.internal.util.lang.IgniteThrowableConsumer; +import org.apache.ignite.internal.util.typedef.G; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.maintenance.MaintenanceRegistry; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; + +import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentationCompletionMarkerFile; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentedIndexFile; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentedPartFile; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentedPartMappingFile; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.maintenance.DefragmentationParameters.toStore; +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.DFLT_STORE_DIR; + +/** */ +public class IgnitePdsDefragmentationTest extends GridCommonAbstractTest { + /** */ + public static final String CACHE_2_NAME = "cache2"; + + /** */ + public static final int PARTS = 5; + + /** */ + public static final int ADDED_KEYS_COUNT = 150; + + /** */ + protected static final String GRP_NAME = "group"; + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + 
stopAllGrids(true); + + cleanPersistenceDir(); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + super.afterTest(); + + stopAllGrids(true); + + cleanPersistenceDir(); + } + + /** {@inheritDoc} */ + @Override protected FailureHandler getFailureHandler(String igniteInstanceName) { + return new StopNodeFailureHandler(); + } + + /** */ + protected static class PolicyFactory implements Factory { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override public ExpiryPolicy create() { + return new ExpiryPolicy() { + @Override public Duration getExpiryForCreation() { + return new Duration(TimeUnit.MILLISECONDS, 13000); + } + + /** {@inheritDoc} */ + @Override public Duration getExpiryForAccess() { + return new Duration(TimeUnit.MILLISECONDS, 13000); + } + + /** {@inheritDoc} */ + @Override public Duration getExpiryForUpdate() { + return new Duration(TimeUnit.MILLISECONDS, 13000); + } + }; + } + } + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + cfg.setConsistentId(igniteInstanceName); + + DataStorageConfiguration dsCfg = new DataStorageConfiguration(); + dsCfg.setWalSegmentSize(4 * 1024 * 1024); + + dsCfg.setDefaultDataRegionConfiguration( + new DataRegionConfiguration() + .setInitialSize(100L * 1024 * 1024) + .setMaxSize(1024L * 1024 * 1024) + .setPersistenceEnabled(true) + ); + + cfg.setDataStorageConfiguration(dsCfg); + + CacheConfiguration cache1Cfg = new CacheConfiguration<>(DEFAULT_CACHE_NAME) + .setAtomicityMode(TRANSACTIONAL) + .setGroupName(GRP_NAME) + .setAffinity(new RendezvousAffinityFunction(false, PARTS)); + + CacheConfiguration cache2Cfg = new CacheConfiguration<>(CACHE_2_NAME) + .setAtomicityMode(TRANSACTIONAL) + .setGroupName(GRP_NAME) + .setExpiryPolicyFactory(new PolicyFactory()) + .setAffinity(new 
RendezvousAffinityFunction(false, PARTS)); + + cfg.setCacheConfiguration(cache1Cfg, cache2Cfg); + + return cfg; + } + + /** + * Basic test scenario. Does following steps: + * - Start node; + * - Fill cache; + * - Remove part of data; + * - Stop node; + * - Start node in defragmentation mode; + * - Stop node; + * - Start node; + * - Check that partitions became smaller; + * - Check that cache is accessible and works just fine. + * + * @throws Exception If failed. + */ + @Test + public void testSuccessfulDefragmentation() throws Exception { + IgniteEx ig = startGrid(0); + + ig.cluster().state(ClusterState.ACTIVE); + + fillCache(ig.cache(DEFAULT_CACHE_NAME)); + + forceCheckpoint(ig); + + createMaintenanceRecord(); + + stopGrid(0); + + File workDir = resolveCacheWorkDir(ig); + + long[] oldPartLen = partitionSizes(workDir); + + long oldIdxFileLen = new File(workDir, FilePageStoreManager.INDEX_FILE_NAME).length(); + + startGrid(0); + + long[] newPartLen = partitionSizes(workDir); + + for (int p = 0; p < PARTS; p++) + assertTrue(newPartLen[p] < oldPartLen[p]); //TODO Fails. + + long newIdxFileLen = new File(workDir, FilePageStoreManager.INDEX_FILE_NAME).length(); + + assertTrue(newIdxFileLen <= oldIdxFileLen); + + File completionMarkerFile = defragmentationCompletionMarkerFile(workDir); + assertTrue(completionMarkerFile.exists()); + + stopGrid(0); + + IgniteEx ig0 = startGrid(0); + + ig0.cluster().state(ClusterState.ACTIVE); + + assertFalse(completionMarkerFile.exists()); + + validateCache(grid(0).cache(DEFAULT_CACHE_NAME)); + + validateLeftovers(workDir); + } + + /** + * @return Working directory for cache group {@link IgnitePdsDefragmentationTest#GRP_NAME}. + * @throws IgniteCheckedException If failed for some reason, like if it's a file instead of directory. 
+ */ + private File resolveCacheWorkDir(IgniteEx ig) throws IgniteCheckedException { + File dbWorkDir = U.resolveWorkDirectory(U.defaultWorkDirectory(), DFLT_STORE_DIR, false); + + File nodeWorkDir = new File(dbWorkDir, U.maskForFileName(ig.name())); + + return new File(nodeWorkDir, FilePageStoreManager.CACHE_GRP_DIR_PREFIX + GRP_NAME); + } + + /** + * Force checkpoint and wait for it so all partitions will be in their final state after restart if no more data is + * uploaded. + * + * @param ig Ignite node. + * @throws IgniteCheckedException If checkpoint failed for some reason. + */ + private void forceCheckpoint(IgniteEx ig) throws IgniteCheckedException { + ig.context().cache().context().database() + .forceCheckpoint("testDefrag") + .futureFor(CheckpointState.FINISHED) + .get(); + } + + /** */ + protected void createMaintenanceRecord() throws IgniteCheckedException { + IgniteEx grid = grid(0); + MaintenanceRegistry mntcReg = grid.context().maintenanceRegistry(); + + mntcReg.registerMaintenanceTask(toStore(Collections.singletonList(groupIdForCache(grid, DEFAULT_CACHE_NAME)))); + } + + /** + * Returns array that contains sizes of partition files in the given working directory. Assumes that partitions + * {@code 0} to {@code PARTS - 1} exist in that dir. + * + * @param workDir Working directory. + * @return The array. + */ + protected long[] partitionSizes(File workDir) { + return IntStream.range(0, PARTS) + .mapToObj(p -> new File(workDir, String.format(FilePageStoreManager.PART_FILE_TEMPLATE, p))) + .mapToLong(File::length) + .toArray(); + } + + /** + * Checks that plain node start after failed defragmentation will finish batch renaming. + * + * @throws Exception If failed. 
+ */ + @Test + public void testFailoverRestartWithoutDefragmentation() throws Exception { + testFailover(workDir -> { + try { + File mntcRecFile = new File(workDir.getParent(), MaintenanceFileStore.MAINTENANCE_FILE_NAME); + + assertTrue(mntcRecFile.exists()); + + Files.delete(mntcRecFile.toPath()); + + startGrid(0); + + validateLeftovers(workDir); + } + catch (Exception e) { + throw new IgniteCheckedException(e); + } + finally { + createMaintenanceRecord(); + + stopGrid(0); + } + }); + } + + /** + * Checks that second start in defragmentation mode will finish defragmentation if no completion marker was found. + * + * @throws Exception If failed. + */ + @Test + public void testFailoverOnLastStage() throws Exception { + testFailover(workDir -> {}); + } + + /** + * Checks that second start in defragmentation mode will finish defragmentation if index was not defragmented. + * + * @throws Exception If failed. + */ + @Test + public void testFailoverIncompletedIndex() throws Exception { + testFailover(workDir -> move( + DefragmentationFileUtils.defragmentedIndexFile(workDir), + DefragmentationFileUtils.defragmentedIndexTmpFile(workDir) + )); + } + + /** + * Checks that second start in defragmentation mode will finish defragmentation if partition was not defragmented. + * + * @throws Exception If failed. + */ + @Test + public void testFailoverIncompletedPartition1() throws Exception { + testFailover(workDir -> { + DefragmentationFileUtils.defragmentedIndexFile(workDir).delete(); + + move( + DefragmentationFileUtils.defragmentedPartFile(workDir, PARTS - 1), + DefragmentationFileUtils.defragmentedPartTmpFile(workDir, PARTS - 1) + ); + }); + } + + /** + * Checks that second start in defragmentation mode will finish defragmentation if no mapping was found for partition. + * + * @throws Exception If failed. 
+ */ + @Test + public void testFailoverIncompletedPartition2() throws Exception { + testFailover(workDir -> { + DefragmentationFileUtils.defragmentedIndexFile(workDir).delete(); + + DefragmentationFileUtils.defragmentedPartMappingFile(workDir, PARTS - 1).delete(); + }); + } + + /** */ + private void move(File from, File to) throws IgniteCheckedException { + try { + Files.move(from.toPath(), to.toPath(), StandardCopyOption.REPLACE_EXISTING); + } + catch (IOException e) { + throw new IgniteCheckedException(e); + } + } + + /** */ + private void testFailover(IgniteThrowableConsumer c) throws Exception { + IgniteEx ig = startGrid(0); + + ig.cluster().state(ClusterState.ACTIVE); + + fillCache(ig.cache(DEFAULT_CACHE_NAME)); + + forceCheckpoint(ig); + + createMaintenanceRecord(); + + stopGrid(0); + + File workDir = resolveCacheWorkDir(ig); + + String errMsg = "Failed to create defragmentation completion marker."; + + AtomicBoolean errOccurred = new AtomicBoolean(); + + UnaryOperator cfgOp = cfg -> { + DataStorageConfiguration dsCfg = cfg.getDataStorageConfiguration(); + + FileIOFactory delegate = dsCfg.getFileIOFactory(); + + dsCfg.setFileIOFactory((file, modes) -> { + if (file.equals(defragmentationCompletionMarkerFile(workDir))) { + errOccurred.set(true); + + throw new IOException(errMsg); + } + + return delegate.create(file, modes); + }); + + return cfg; + }; + + try { + startGrid(0, cfgOp); + } + catch (Exception ignore) { + // No-op. + } + + // Failed node can leave interrupted status of the thread that needs to be cleared, + // otherwise following "wait" wouldn't work. + // This call can't be moved inside of "catch" block because interruption can actually be silent. + Thread.interrupted(); + + assertTrue(GridTestUtils.waitForCondition(errOccurred::get, 10_000L)); + + assertTrue(GridTestUtils.waitForCondition(() -> G.allGrids().isEmpty(), 10_000L)); + + c.accept(workDir); + + startGrid(0); + + stopGrid(0); + + // Everything must be completed. 
+ startGrid(0).cluster().state(ClusterState.ACTIVE); + + validateCache(grid(0).cache(DEFAULT_CACHE_NAME)); + + validateLeftovers(workDir); + } + + /** */ + public void validateLeftovers(File workDir) { + assertFalse(defragmentedIndexFile(workDir).exists()); + + for (int p = 0; p < PARTS; p++) { + assertFalse(defragmentedPartMappingFile(workDir, p).exists()); + + assertFalse(defragmentedPartFile(workDir, p).exists()); + } + } + + /** */ + @Test + public void testDefragmentedPartitionCreated() throws Exception { + IgniteEx ig = startGrid(0); + + ig.cluster().state(ClusterState.ACTIVE); + + fillCache(ig.cache(DEFAULT_CACHE_NAME)); + + fillCache(ig.getOrCreateCache(CACHE_2_NAME)); + + createMaintenanceRecord(); + + stopGrid(0); + + startGrid(0); + + File workDir = U.resolveWorkDirectory(U.defaultWorkDirectory(), DFLT_STORE_DIR, false); + + AtomicReference cachePartFile = new AtomicReference<>(); + AtomicReference defragCachePartFile = new AtomicReference<>(); + + Files.walkFileTree(workDir.toPath(), new FileVisitor() { + @Override public FileVisitResult preVisitDirectory(Path path, BasicFileAttributes basicFileAttributes) throws IOException { + return FileVisitResult.CONTINUE; + } + + @Override public FileVisitResult visitFile(Path path, BasicFileAttributes basicFileAttributes) throws IOException { + if (path.toString().contains("cacheGroup-group")) { + File file = path.toFile(); + + if (file.getName().contains("part-dfrg-")) + cachePartFile.set(file); + else if (file.getName().contains("part-")) + defragCachePartFile.set(file); + } + + return FileVisitResult.CONTINUE; + } + + @Override public FileVisitResult visitFileFailed(Path path, IOException e) throws IOException { + return FileVisitResult.CONTINUE; + } + + @Override public FileVisitResult postVisitDirectory(Path path, IOException e) throws IOException { + return FileVisitResult.CONTINUE; + } + }); + + assertNull(cachePartFile.get()); //TODO Fails. 
+ assertNotNull(defragCachePartFile.get()); + } + + /** + * Fill cache using integer keys. + * + * @param cache + */ + protected void fillCache(IgniteCache cache) { + fillCache(Function.identity(), cache); + } + + /** */ + protected void fillCache(Function keyMapper, IgniteCache cache) { + try (IgniteDataStreamer ds = grid(0).dataStreamer(cache.getName())) { + for (int i = 0; i < ADDED_KEYS_COUNT; i++) { + byte[] val = new byte[8192]; + new Random().nextBytes(val); + + ds.addData(keyMapper.apply(i), val); + } + } + + try (IgniteDataStreamer ds = grid(0).dataStreamer(cache.getName())) { + ds.allowOverwrite(true); + + for (int i = 0; i <= ADDED_KEYS_COUNT / 2; i++) + ds.removeData(keyMapper.apply(i * 2)); + } + } + + /** */ + public void validateCache(IgniteCache cache) { + for (int k = 0; k < ADDED_KEYS_COUNT; k++) { + Object val = cache.get(k); + + if (k % 2 == 0) + assertNull(val); + else + assertNotNull(val); + } + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/LightweightCheckpointTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/LightweightCheckpointTest.java index 85da92c122bbe..1a677165ba9bb 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/LightweightCheckpointTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/checkpoint/LightweightCheckpointTest.java @@ -37,7 +37,6 @@ import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointListener; import org.apache.ignite.internal.processors.cache.persistence.checkpoint.LightweightCheckpointManager; -import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; import 
org.apache.ignite.internal.processors.cache.persistence.metastorage.MetaStorage; import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryImpl; @@ -155,8 +154,7 @@ public void testLightCheckpointAbleToStoreOnlyGivenDataRegion() throws Exception db.persistentStoreMetricsImpl(), context.longJvmPauseDetector(), context.failure(), - context.cache(), - (FilePageStoreManager)context.cache().context().pageStore() + context.cache() ); //and: Add checkpoint listener for DEFAULT_CACHE in order of storing the meta pages. diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/LinkMapTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/LinkMapTest.java new file mode 100644 index 0000000000000..ee2d436362f87 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/LinkMapTest.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation; + +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.internal.mem.unsafe.UnsafeMemoryProvider; +import org.apache.ignite.internal.pagemem.FullPageId; +import org.apache.ignite.internal.pagemem.PageIdAllocator; +import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.pagemem.impl.PageMemoryNoStoreImpl; +import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; + +/** + * Class for LinkMap tests. + */ +public class LinkMapTest extends GridCommonAbstractTest { + /** */ + protected static final int PAGE_SIZE = 512; + + /** */ + protected static final long MB = 1024 * 1024; + + /** + * Test that LinkMap works. + * @throws Exception + */ + @Test + public void test() throws Exception { + PageMemory pageMem = createPageMemory(); + + int cacheGroupId = 1; + + String groupName = "test"; + + FullPageId pageId = new FullPageId(pageMem.allocatePage(cacheGroupId, 0, PageIdAllocator.FLAG_DATA), cacheGroupId); + + LinkMap map = new LinkMap(cacheGroupId, groupName, pageMem, pageId.pageId(), true); + + for (int i = 0; i < 10_000; i++) + map.put(i, i + 1); + + for (int i = 0; i < 10_000; i++) + assertEquals(i + 1, map.get(i)); + } + + /** + * Create page memory for LinkMap tree. 
+ */ + protected PageMemory createPageMemory() throws Exception { + DataRegionConfiguration plcCfg = new DataRegionConfiguration() + .setInitialSize(2 * MB) + .setMaxSize(2 * MB); + + PageMemory pageMem = new PageMemoryNoStoreImpl(log, + new UnsafeMemoryProvider(log), + null, + PAGE_SIZE, + plcCfg, + new LongAdderMetric("NO_OP", null), + true); + + pageMem.start(); + + return pageMem; + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/query/DummyQueryIndexing.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/query/DummyQueryIndexing.java index c5cfce11628c0..5f0b04ff9324f 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/query/DummyQueryIndexing.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/query/DummyQueryIndexing.java @@ -30,16 +30,21 @@ import org.apache.ignite.internal.managers.IgniteMBeansManager; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.GridCacheContextInfo; import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot; import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.persistence.RootPage; +import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointTimeoutLock; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.LinkMap; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcParameterMeta; import 
org.apache.ignite.internal.processors.query.schema.SchemaIndexCacheVisitor; import org.apache.ignite.internal.util.GridAtomicLong; import org.apache.ignite.internal.util.GridSpinBusyLock; +import org.apache.ignite.internal.util.collection.IntMap; import org.apache.ignite.internal.util.lang.GridCloseableIterator; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.lang.IgniteFuture; @@ -333,4 +338,15 @@ public class DummyQueryIndexing implements GridQueryIndexing { String colNamePtrn) { return null; } + + /** {@inheritDoc} */ + @Override public void defragment( + CacheGroupContext grpCtx, + CacheGroupContext newCtx, + PageMemoryEx partPageMem, + IntMap mappingByPart, + CheckpointTimeoutLock cpLock + ) throws IgniteCheckedException { + // No-op. + } } diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicTestSuite.java index d548e382687d2..93ca870a2d40b 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicTestSuite.java @@ -75,6 +75,7 @@ import org.apache.ignite.internal.processors.cache.SetTxTimeoutOnPartitionMapExchangeTest; import org.apache.ignite.internal.processors.cache.distributed.IgniteRejectConnectOnNodeStopTest; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.EvictPartitionInLogTest; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.LinkMapTest; import org.apache.ignite.internal.processors.cache.persistence.pagemem.PagePoolTest; import org.apache.ignite.internal.processors.cache.query.continuous.DiscoveryDataDeserializationFailureHanderTest; import org.apache.ignite.internal.processors.cache.transactions.AtomicOperationsInTxTest; @@ -295,6 +296,8 @@ ClusterActivationStartedEventTest.class, IgniteThreadGroupNodeRestartTest.class, + + LinkMapTest.class, }) public 
class IgniteBasicTestSuite { } diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsMvccTestSuite4.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsMvccTestSuite4.java index 9978761d887e4..23256a0b24b6c 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsMvccTestSuite4.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsMvccTestSuite4.java @@ -22,6 +22,9 @@ import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsCacheEntriesExpirationTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsContinuousRestartTestWithSharedGroupAndIndexes; +import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsDefragmentationEncryptionTest; +import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsDefragmentationRandomLruEvictionTest; +import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsDefragmentationTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsTaskCancelingTest; import org.apache.ignite.internal.processors.cache.persistence.db.IgnitePdsPartitionPreloadTest; import org.apache.ignite.internal.processors.cache.persistence.diagnostic.pagelocktracker.PageLockTrackerManagerTest; @@ -68,6 +71,11 @@ public static List> suite() { ignoredTests.add(OffHeapLockStackTest.class); ignoredTests.add(IgnitePdsCacheEntriesExpirationTest.class); + // Defragmentation. 
+ ignoredTests.add(IgnitePdsDefragmentationTest.class); + ignoredTests.add(IgnitePdsDefragmentationRandomLruEvictionTest.class); + ignoredTests.add(IgnitePdsDefragmentationEncryptionTest.class); + return IgnitePdsTestSuite4.suite(ignoredTests); } } diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite4.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite4.java index be885e00704e3..d63439587b536 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite4.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite4.java @@ -35,6 +35,9 @@ import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsCacheEntriesExpirationTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsConsistencyOnDelayedPartitionOwning; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsContinuousRestartTestWithSharedGroupAndIndexes; +import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsDefragmentationEncryptionTest; +import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsDefragmentationRandomLruEvictionTest; +import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsDefragmentationTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsRecoveryAfterFileCorruptionTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsRemoveDuringRebalancingTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsRestartAfterFailedToWriteMetaPageTest; @@ -122,6 +125,11 @@ public static List> suite(Collection ignoredTests) { GridTestUtils.addTestIfNeeded(suite, WarmUpSelfTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, LoadAllWarmUpStrategySelfTest.class, ignoredTests); + // Defragmentation. 
+ GridTestUtils.addTestIfNeeded(suite, IgnitePdsDefragmentationTest.class, ignoredTests); + GridTestUtils.addTestIfNeeded(suite, IgnitePdsDefragmentationRandomLruEvictionTest.class, ignoredTests); + GridTestUtils.addTestIfNeeded(suite, IgnitePdsDefragmentationEncryptionTest.class, ignoredTests); + GridTestUtils.addTestIfNeeded(suite, PendingTreeCorruptionTest.class, ignoredTests); return suite; diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index d06418cb0b3db..517bc69c8d90c 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -65,6 +65,7 @@ import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; import org.apache.ignite.internal.processors.cache.CacheGroupDescriptor; import org.apache.ignite.internal.processors.cache.CacheObjectValueContext; import org.apache.ignite.internal.processors.cache.CacheOperationContext; @@ -82,6 +83,9 @@ import org.apache.ignite.internal.processors.cache.mvcc.StaticMvccQueryTracker; import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.persistence.RootPage; +import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointTimeoutLock; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.LinkMap; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree; import 
org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusMetaIO; @@ -128,6 +132,7 @@ import org.apache.ignite.internal.processors.query.h2.database.io.H2LeafIO; import org.apache.ignite.internal.processors.query.h2.database.io.H2MvccInnerIO; import org.apache.ignite.internal.processors.query.h2.database.io.H2MvccLeafIO; +import org.apache.ignite.internal.processors.query.h2.defragmentation.IndexingDefragmentation; import org.apache.ignite.internal.processors.query.h2.dml.DmlDistributedPlanInfo; import org.apache.ignite.internal.processors.query.h2.dml.DmlUpdateResultsIterator; import org.apache.ignite.internal.processors.query.h2.dml.DmlUpdateSingleEntryIterator; @@ -164,6 +169,7 @@ import org.apache.ignite.internal.util.GridEmptyCloseableIterator; import org.apache.ignite.internal.util.GridSpinBusyLock; import org.apache.ignite.internal.util.IgniteUtils; +import org.apache.ignite.internal.util.collection.IntMap; import org.apache.ignite.internal.util.future.GridFutureAdapter; import org.apache.ignite.internal.util.lang.GridCloseableIterator; import org.apache.ignite.internal.util.lang.GridPlainRunnable; @@ -295,6 +301,8 @@ public class IgniteH2Indexing implements GridQueryIndexing { /** Parser. 
*/ private QueryParser parser; + private IndexingDefragmentation defragmentation = new IndexingDefragmentation(this); + /** */ private final IgniteInClosure> logger = new IgniteInClosure>() { @Override public void apply(IgniteInternalFuture fut) { @@ -3186,4 +3194,15 @@ public DistributedSqlConfiguration distributedConfiguration() { return map; } + + /** {@inheritDoc} */ + @Override public void defragment( + CacheGroupContext grpCtx, + CacheGroupContext newCtx, + PageMemoryEx partPageMem, + IntMap mappingByPart, + CheckpointTimeoutLock cpLock + ) throws IgniteCheckedException { + defragmentation.defragment(grpCtx, newCtx, partPageMem, mappingByPart, cpLock, log); + } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/defragmentation/IndexingDefragmentation.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/defragmentation/IndexingDefragmentation.java new file mode 100644 index 0000000000000..c41f587dd3c00 --- /dev/null +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/defragmentation/IndexingDefragmentation.java @@ -0,0 +1,430 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.query.h2.defragmentation; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; +import org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter; +import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointTimeoutLock; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.LinkMap; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.TreeIterator; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; +import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusInnerIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusLeafIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusMetaIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver; +import org.apache.ignite.internal.processors.cache.persistence.tree.util.InsertLast; +import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccDataRow; +import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing; +import org.apache.ignite.internal.processors.query.h2.database.H2Tree; +import 
org.apache.ignite.internal.processors.query.h2.database.H2TreeIndex; +import org.apache.ignite.internal.processors.query.h2.database.InlineIndexColumn; +import org.apache.ignite.internal.processors.query.h2.database.inlinecolumn.AbstractInlineIndexColumn; +import org.apache.ignite.internal.processors.query.h2.database.io.AbstractH2ExtrasInnerIO; +import org.apache.ignite.internal.processors.query.h2.database.io.AbstractH2ExtrasLeafIO; +import org.apache.ignite.internal.processors.query.h2.database.io.AbstractH2InnerIO; +import org.apache.ignite.internal.processors.query.h2.database.io.AbstractH2LeafIO; +import org.apache.ignite.internal.processors.query.h2.database.io.H2RowLinkIO; +import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor; +import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table; +import org.apache.ignite.internal.processors.query.h2.opt.H2CacheRow; +import org.apache.ignite.internal.processors.query.h2.opt.H2Row; +import org.apache.ignite.internal.util.collection.IntMap; +import org.h2.index.Index; +import org.h2.value.Value; + +/** + * + */ +public class IndexingDefragmentation { + /** Indexing. */ + private final IgniteH2Indexing indexing; + + /** Constructor. */ + public IndexingDefragmentation(IgniteH2Indexing indexing) { + this.indexing = indexing; + } + + /** + * Defragment index partition. + * + * @param grpCtx Old group context. + * @param newCtx New group context. + * @param partPageMem Partition page memory. + * @param mappingByPartition Mapping page memory. + * @param cpLock Defragmentation checkpoint read lock. + * @param log Log. + * + * @throws IgniteCheckedException If failed. 
+ */ + public void defragment( + CacheGroupContext grpCtx, + CacheGroupContext newCtx, + PageMemoryEx partPageMem, + IntMap mappingByPartition, + CheckpointTimeoutLock cpLock, + IgniteLogger log + ) throws IgniteCheckedException { + int pageSize = grpCtx.cacheObjectContext().kernalContext().grid().configuration().getDataStorageConfiguration().getPageSize(); + + TreeIterator treeIterator = new TreeIterator(pageSize); + + PageMemoryEx oldCachePageMem = (PageMemoryEx)grpCtx.dataRegion().pageMemory(); + + PageMemory newCachePageMemory = partPageMem; + + Collection tables = indexing.schemaManager().dataTables(); + + long cpLockThreshold = 150L; + + cpLock.checkpointReadLock(); + + try { + AtomicLong lastCpLockTs = new AtomicLong(System.currentTimeMillis()); + + for (GridH2Table table : tables) { + GridCacheContext cctx = table.cacheContext(); + + if (cctx.groupId() != grpCtx.groupId()) + continue; // Not our index. + + GridH2RowDescriptor rowDesc = table.rowDescriptor(); + + List indexes = table.getIndexes(); + H2TreeIndex oldH2Idx = (H2TreeIndex)indexes.get(2); + + int segments = oldH2Idx.segmentsCount(); + + H2Tree firstTree = oldH2Idx.treeForRead(0); + + PageIoResolver pageIoRslvr = pageAddr -> { + PageIO io = PageIoResolver.DEFAULT_PAGE_IO_RESOLVER.resolve(pageAddr); + + if (io instanceof BPlusMetaIO) + return io; + + //noinspection unchecked,rawtypes,rawtypes + return wrap((BPlusIO)io); + }; + + H2TreeIndex newIdx = H2TreeIndex.createIndex( + cctx, + null, + table, + oldH2Idx.getName(), + firstTree.getPk(), + firstTree.getAffinityKey(), + Arrays.asList(firstTree.cols()), + Arrays.asList(firstTree.cols()), + oldH2Idx.inlineSize(), + segments, + newCachePageMemory, + newCtx.offheap(), + pageIoRslvr, + log + ); + + for (int i = 0; i < segments; i++) { + H2Tree tree = oldH2Idx.treeForRead(i); + + treeIterator.iterate(tree, oldCachePageMem, (theTree, io, pageAddr, idx) -> { + if (System.currentTimeMillis() - lastCpLockTs.get() >= cpLockThreshold) { + 
cpLock.checkpointReadUnlock(); + + cpLock.checkpointReadLock(); + + lastCpLockTs.set(System.currentTimeMillis()); + } + + assert 1 == io.getVersion() + : "IO version " + io.getVersion() + " is not supported by current defragmentation algorithm." + + " Please implement copying of tree in a new format."; + + BPlusIO h2IO = wrap(io); + + H2Row row = theTree.getRow(h2IO, pageAddr, idx); + + if (row instanceof H2CacheRowWithIndex) { + H2CacheRowWithIndex h2CacheRow = (H2CacheRowWithIndex)row; + + CacheDataRow cacheDataRow = h2CacheRow.getRow(); + + int partition = cacheDataRow.partition(); + + long link = h2CacheRow.link(); + + LinkMap map = mappingByPartition.get(partition); + + long newLink = map.get(link); + + H2CacheRowWithIndex newRow = H2CacheRowWithIndex.create( + rowDesc, + newLink, + h2CacheRow, + ((H2RowLinkIO)io).storeMvccInfo() + ); + + newIdx.putx(newRow); + } + + return true; + }); + } + } + } + finally { + cpLock.checkpointReadUnlock(); + } + } + + /** */ + private static & H2RowLinkIO> H2Row lookupRow( + BPlusTree tree, + long pageAddr, + int idx, + T io + ) throws IgniteCheckedException { + long link = io.getLink(pageAddr, idx); + + List inlineIdxs = ((H2Tree) tree).inlineIndexes(); + + int off = io.offset(idx); + + List values = new ArrayList<>(); + + if (inlineIdxs != null) { + int fieldOff = 0; + + for (int i = 0; i < inlineIdxs.size(); i++) { + AbstractInlineIndexColumn inlineIndexColumn = (AbstractInlineIndexColumn) inlineIdxs.get(i); + + Value value = inlineIndexColumn.get(pageAddr, off + fieldOff, io.getPayloadSize() - fieldOff); + + fieldOff += inlineIndexColumn.inlineSizeOf(value); + + values.add(value); + } + } + + if (io.storeMvccInfo()) { + long mvccCrdVer = io.getMvccCoordinatorVersion(pageAddr, idx); + long mvccCntr = io.getMvccCounter(pageAddr, idx); + int mvccOpCntr = io.getMvccOperationCounter(pageAddr, idx); + + H2CacheRow row = (H2CacheRow) ((H2Tree) tree).createMvccRow(link, mvccCrdVer, mvccCntr, mvccOpCntr, 
CacheDataRowAdapter.RowData.LINK_ONLY); + + return new H2CacheRowWithIndex(row.getDesc(), row.getRow(), values); + } + + H2CacheRow row = (H2CacheRow) ((H2Tree) tree).createRow(link, false); + + return new H2CacheRowWithIndex(row.getDesc(), row.getRow(), values); + } + + /** */ + @SuppressWarnings({"unchecked", "rawtypes"}) + private static BPlusIO wrap(BPlusIO io) { + assert io instanceof H2RowLinkIO; + + if (io instanceof BPlusInnerIO) { + assert io instanceof AbstractH2ExtrasInnerIO + || io instanceof AbstractH2InnerIO; + + return new BPlusInnerIoDelegate((BPlusInnerIO)io); + } + else { + assert io instanceof AbstractH2ExtrasLeafIO + || io instanceof AbstractH2LeafIO; + + return new BPlusLeafIoDelegate((BPlusLeafIO)io); + } + } + + /** */ + private static class BPlusInnerIoDelegate & H2RowLinkIO> + extends BPlusInnerIO implements H2RowLinkIO { + /** */ + private final IO io; + + /** */ + public BPlusInnerIoDelegate(IO io) { + super(io.getType(), io.getVersion(), io.canGetRow(), io.getItemSize()); + this.io = io; + } + + /** {@inheritDoc} */ + @Override public void storeByOffset(long pageAddr, int off, H2Row row) throws IgniteCheckedException { + io.storeByOffset(pageAddr, off, row); + } + + /** {@inheritDoc} */ + @Override public void store(long dstPageAddr, int dstIdx, BPlusIO srcIo, long srcPageAddr, int srcIdx) + throws IgniteCheckedException + { + io.store(dstPageAddr, dstIdx, srcIo, srcPageAddr, srcIdx); + } + + /** {@inheritDoc} */ + @Override public H2Row getLookupRow(BPlusTree tree, long pageAddr, int idx) throws IgniteCheckedException { + return lookupRow(tree, pageAddr, idx, this); + } + + /** {@inheritDoc} */ + @Override public long getLink(long pageAddr, int idx) { + return io.getLink(pageAddr, idx); + } + + /** {@inheritDoc} */ + @Override public long getMvccCoordinatorVersion(long pageAddr, int idx) { + return io.getMvccCoordinatorVersion(pageAddr, idx); + } + + /** {@inheritDoc} */ + @Override public long getMvccCounter(long pageAddr, int idx) { + 
return io.getMvccCounter(pageAddr, idx); + } + + /** {@inheritDoc} */ + @Override public int getMvccOperationCounter(long pageAddr, int idx) { + return io.getMvccOperationCounter(pageAddr, idx); + } + + /** {@inheritDoc} */ + @Override public boolean storeMvccInfo() { + return io.storeMvccInfo(); + } + + /** {@inheritDoc} */ + @Override public int getPayloadSize() { + return io.getPayloadSize(); + } + } + + /** */ + private static class BPlusLeafIoDelegate & H2RowLinkIO> + extends BPlusLeafIO implements H2RowLinkIO { + /** */ + private final IO io; + + /** */ + public BPlusLeafIoDelegate(IO io) { + super(io.getType(), io.getVersion(), io.getItemSize()); + this.io = io; + } + + /** {@inheritDoc} */ + @Override public void storeByOffset(long pageAddr, int off, H2Row row) throws IgniteCheckedException { + io.storeByOffset(pageAddr, off, row); + } + + /** {@inheritDoc} */ + @Override public void store(long dstPageAddr, int dstIdx, BPlusIO srcIo, long srcPageAddr, int srcIdx) + throws IgniteCheckedException + { + io.store(dstPageAddr, dstIdx, srcIo, srcPageAddr, srcIdx); + } + + /** {@inheritDoc} */ + @Override public H2Row getLookupRow(BPlusTree tree, long pageAddr, int idx) throws IgniteCheckedException { + return lookupRow(tree, pageAddr, idx, this); + } + + /** {@inheritDoc} */ + @Override public long getLink(long pageAddr, int idx) { + return io.getLink(pageAddr, idx); + } + + /** {@inheritDoc} */ + @Override public long getMvccCoordinatorVersion(long pageAddr, int idx) { + return io.getMvccCoordinatorVersion(pageAddr, idx); + } + + /** {@inheritDoc} */ + @Override public long getMvccCounter(long pageAddr, int idx) { + return io.getMvccCounter(pageAddr, idx); + } + + /** {@inheritDoc} */ + @Override public int getMvccOperationCounter(long pageAddr, int idx) { + return io.getMvccOperationCounter(pageAddr, idx); + } + + /** {@inheritDoc} */ + @Override public boolean storeMvccInfo() { + return io.storeMvccInfo(); + } + + /** {@inheritDoc} */ + @Override public int 
getPayloadSize() { + return io.getPayloadSize(); + } + } + + /** + * H2CacheRow with stored index values + */ + private static class H2CacheRowWithIndex extends H2CacheRow implements InsertLast { + /** List of index values. */ + private final List values; + + /** Constructor. */ + public H2CacheRowWithIndex(GridH2RowDescriptor desc, CacheDataRow row, List values) { + super(desc, row); + this.values = values; + } + + public static H2CacheRowWithIndex create( + GridH2RowDescriptor desc, + long newLink, + H2CacheRowWithIndex oldValue, + boolean storeMvcc + ) { + CacheDataRow row = oldValue.getRow(); + + CacheDataRow newDataRow; + + if (storeMvcc) { + newDataRow = new MvccDataRow(newLink); + newDataRow.mvccVersion(row); + } else + newDataRow = new CacheDataRowAdapter(newLink); + + return new H2CacheRowWithIndex(desc, newDataRow, oldValue.values); + } + + /** {@inheritDoc} */ + @Override public Value getValue(int col) { + if (values.isEmpty()) + return null; + + return values.get(col); + } + } +} diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheUpdateSqlQuerySelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheUpdateSqlQuerySelfTest.java index 95925ff705ee1..3607be078eb78 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheUpdateSqlQuerySelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheUpdateSqlQuerySelfTest.java @@ -400,7 +400,7 @@ private List> execute(SqlFieldsQuery qry) { /** * */ - static final class AllTypes implements Serializable { + public static final class AllTypes implements Serializable { /** * Data Long. 
*/ @@ -602,7 +602,7 @@ private void init(Long key, String str) { } /** */ - AllTypes(Long key) { + public AllTypes(Long key) { this.init(key, Long.toString(key)); } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsIndexingDefragmentationTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsIndexingDefragmentationTest.java new file mode 100644 index 0000000000000..bbb69ae3c7670 --- /dev/null +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsIndexingDefragmentationTest.java @@ -0,0 +1,316 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence; + +import java.io.File; +import java.util.Collections; +import java.util.function.Function; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.cluster.ClusterState; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.processors.cache.IgniteCacheUpdateSqlQuerySelfTest; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils; +import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; +import org.apache.ignite.internal.processors.query.GridQueryProcessor; +import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.visor.verify.ValidateIndexesClosure; +import org.apache.ignite.internal.visor.verify.VisorValidateIndexesJobResult; +import org.apache.ignite.testframework.junits.WithSystemProperty; +import org.junit.Test; + +import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; +import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT; +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.DFLT_STORE_DIR; + +/** + * Defragmentation tests with enabled ignite-indexing. + */ +public class IgnitePdsIndexingDefragmentationTest extends IgnitePdsDefragmentationTest { + /** Use MVCC in tests. 
*/ + private static final String USE_MVCC = "USE_MVCC"; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + cfg.setConsistentId(igniteInstanceName); + + DataStorageConfiguration dsCfg = new DataStorageConfiguration(); + dsCfg.setWalSegmentSize(4 * 1024 * 1024); + + dsCfg.setDefaultDataRegionConfiguration( + new DataRegionConfiguration() + .setInitialSize(100L * 1024 * 1024) + .setMaxSize(1024L * 1024 * 1024) + .setPersistenceEnabled(true) + ); + + cfg.setDataStorageConfiguration(dsCfg); + + CacheConfiguration cache1Cfg = new CacheConfiguration<>(DEFAULT_CACHE_NAME) + .setAtomicityMode(TRANSACTIONAL) + .setGroupName(GRP_NAME) + .setIndexedTypes( + IgniteCacheUpdateSqlQuerySelfTest.AllTypes.class, byte[].class, + Integer.class, byte[].class + ) + .setAffinity(new RendezvousAffinityFunction(false, PARTS)); + + CacheConfiguration cache2Cfg = new CacheConfiguration<>(CACHE_2_NAME) + .setAtomicityMode(TRANSACTIONAL) + .setGroupName(GRP_NAME) + .setIndexedTypes( + IgniteCacheUpdateSqlQuerySelfTest.AllTypes.class, byte[].class, + Integer.class, byte[].class + ) + .setAffinity(new RendezvousAffinityFunction(false, PARTS)); + + if (Boolean.TRUE.toString().equals(System.getProperty(USE_MVCC))) { + cache1Cfg.setAtomicityMode(TRANSACTIONAL_SNAPSHOT); + cache2Cfg.setAtomicityMode(TRANSACTIONAL_SNAPSHOT); + } else + cache2Cfg.setExpiryPolicyFactory(new PolicyFactory()); + + cfg.setCacheConfiguration(cache1Cfg, cache2Cfg); + + return cfg; + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + super.afterTest(); + + GridQueryProcessor.idxCls = null; + } + + /** + * Fill cache, remove half of the entries, defragmentate PDS and check index. + * + * @param keyMapper Function that provides key based on the index of entry. + * @param Type of cache key. + * + * @throws Exception If failed. 
+ */ + private void test(Function keyMapper) throws Exception { + IgniteEx ig = startGrid(0); + + ig.cluster().state(ClusterState.ACTIVE); + + fillCache(keyMapper, ig.cache(DEFAULT_CACHE_NAME)); + + forceCheckpoint(ig); + + createMaintenanceRecord(); + + stopGrid(0); + + File dbWorkDir = U.resolveWorkDirectory(U.defaultWorkDirectory(), DFLT_STORE_DIR, false); + File nodeWorkDir = new File(dbWorkDir, U.maskForFileName(ig.name())); + File workDir = new File(nodeWorkDir, FilePageStoreManager.CACHE_GRP_DIR_PREFIX + GRP_NAME); + + long oldIdxFileLen = new File(workDir, FilePageStoreManager.INDEX_FILE_NAME).length(); + + startGrid(0); + + long newIdxFileLen = new File(workDir, FilePageStoreManager.INDEX_FILE_NAME).length(); + + assertTrue(newIdxFileLen <= oldIdxFileLen); + + File completionMarkerFile = DefragmentationFileUtils.defragmentationCompletionMarkerFile(workDir); + assertTrue(completionMarkerFile.exists()); + + stopGrid(0); + + GridQueryProcessor.idxCls = CaptureRebuildGridQueryIndexing.class; + + IgniteEx node = startGrid(0); + + awaitPartitionMapExchange(); + + CaptureRebuildGridQueryIndexing indexing = (CaptureRebuildGridQueryIndexing) node.context().query().getIndexing(); + + assertFalse(indexing.didRebuildIndexes()); + + IgniteCache cache = node.cache(DEFAULT_CACHE_NAME); + + assertFalse(completionMarkerFile.exists()); + + validateIndexes(node); + + for (int k = 0; k < ADDED_KEYS_COUNT; k++) + cache.get(keyMapper.apply(k)); + } + + /** + * Test that indexes are correct. + * + * @param node Node. + * @throws Exception If failed. + */ + private static void validateIndexes(IgniteEx node) throws Exception { + ValidateIndexesClosure clo = new ValidateIndexesClosure( + Collections.singleton(DEFAULT_CACHE_NAME), + 0, + 0, + false, + true + ); + + node.context().resource().injectGeneric(clo); + + VisorValidateIndexesJobResult call = clo.call(); + + assertFalse(call.hasIssues()); + } + + /** + * Test using integer keys. + * + * @throws Exception If failed. 
+ */ + @Test + public void testIndexingWithIntegerKey() throws Exception { + test(Function.identity()); + } + + /** + * Test using complex keys (integer and string). + * + * @throws Exception If failed. + */ + @Test + public void testIndexingWithComplexKey() throws Exception { + test(integer -> new IgniteCacheUpdateSqlQuerySelfTest.AllTypes((long)integer)); + } + + /** + * Test using integer keys. + * + * @throws Exception If failed. + */ + @Test + @WithSystemProperty(key = USE_MVCC, value = "true") + public void testIndexingWithIntegerKeyAndMVCC() throws Exception { + test(Function.identity()); + } + + /** + * Test using complex keys (integer and string). + * + * @throws Exception If failed. + */ + @Test + @WithSystemProperty(key = USE_MVCC, value = "true") + public void testIndexingWithComplexKeyAndMVCC() throws Exception { + test(integer -> new IgniteCacheUpdateSqlQuerySelfTest.AllTypes((long)integer)); + } + + /** + * @throws Exception If failed. + */ + @Test + public void testMultipleIndexes() throws Exception { + startGrid(0).cluster().state(ClusterState.ACTIVE); + + IgniteCache cache = grid(0).cache(DEFAULT_CACHE_NAME); + + cache.query(new SqlFieldsQuery("CREATE TABLE TEST (ID INT PRIMARY KEY, VAL_INT INT, VAL_OBJ LONG)")); + + cache.query(new SqlFieldsQuery("CREATE INDEX TEST_VAL_INT ON TEST(VAL_INT)")); + + cache.query(new SqlFieldsQuery("CREATE INDEX TEST_VAL_OBJ ON TEST(VAL_OBJ)")); + + for (int i = 0; i < ADDED_KEYS_COUNT; i++) + cache.query(new SqlFieldsQuery("INSERT INTO TEST VALUES (?, ?, ?)").setArgs(i, i, (long)i)); + + cache.query(new SqlFieldsQuery("DELETE FROM TEST WHERE MOD(ID, 2) = 0")); + + createMaintenanceRecord(); + + // Restart first time. + stopGrid(0); + + startGrid(0); + + // Restart second time. + stopGrid(0); + + startGrid(0); + + // Reinit cache object. 
+ cache = grid(0).cache(DEFAULT_CACHE_NAME); + + assertTrue(explainQuery(cache, "EXPLAIN SELECT * FROM TEST WHERE ID > 0").contains("_key_pk_proxy")); + + cache.query(new SqlFieldsQuery("SELECT * FROM TEST WHERE ID > 0")).getAll(); + + assertTrue(explainQuery(cache, "EXPLAIN SELECT * FROM TEST WHERE VAL_INT > 0").contains("test_val_int")); + + cache.query(new SqlFieldsQuery("SELECT * FROM TEST WHERE VAL_INT > 0")).getAll(); + + assertTrue(explainQuery(cache, "EXPLAIN SELECT * FROM TEST WHERE VAL_OBJ > 0").contains("test_val_obj")); + + cache.query(new SqlFieldsQuery("SELECT * FROM TEST WHERE VAL_OBJ > 0")).getAll(); + } + + /** */ + private static String explainQuery(IgniteCache cache, String qry) { + return cache + .query(new SqlFieldsQuery(qry)) + .getAll() + .get(0) + .get(0) + .toString() + .toLowerCase(); + } + + /** + * IgniteH2Indexing that captures index rebuild operations. + */ + public static class CaptureRebuildGridQueryIndexing extends IgniteH2Indexing { + /** + * Whether index rebuild happened. + */ + private boolean rebuiltIndexes; + + /** {@inheritDoc} */ + @Override public IgniteInternalFuture rebuildIndexesFromHash(GridCacheContext cctx) { + IgniteInternalFuture future = super.rebuildIndexesFromHash(cctx); + rebuiltIndexes = future != null; + return future; + } + + /** + * Get index rebuild flag. + * + * @return Whether index rebuild happened. 
+ */ + public boolean didRebuildIndexes() { + return rebuiltIndexes; + } + } +} diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingTestSuite.java index f4a0ac95e7a97..d018457b3abde 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingTestSuite.java +++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingTestSuite.java @@ -21,6 +21,7 @@ import org.apache.ignite.internal.processors.cache.IgnitePdsSingleNodeWithIndexingAndGroupPutGetPersistenceSelfTest; import org.apache.ignite.internal.processors.cache.IgnitePdsSingleNodeWithIndexingPutGetPersistenceTest; import org.apache.ignite.internal.processors.cache.index.ClientReconnectWithSqlTableConfiguredTest; +import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsIndexingDefragmentationTest; import org.apache.ignite.internal.processors.cache.persistence.db.IgniteTcBotInitNewPageTest; import org.apache.ignite.internal.processors.cache.persistence.db.IndexingMultithreadedLoadContinuousRestartTest; import org.apache.ignite.internal.processors.cache.persistence.db.LongDestroyDurableBackgroundTaskTest; @@ -58,7 +59,8 @@ IgniteClusterSnapshotWithIndexesTest.class, ClientReconnectWithSqlTableConfiguredTest.class, MultipleParallelCacheDeleteDeadlockTest.class, - CacheGroupReencryptionTest.class + CacheGroupReencryptionTest.class, + IgnitePdsIndexingDefragmentationTest.class }) public class IgnitePdsWithIndexingTestSuite { } From 50f43b48572ebd2cfd49578e425524bbd71f8a71 Mon Sep 17 00:00:00 2001 From: ibessonov Date: Thu, 3 Dec 2020 10:54:13 +0300 Subject: [PATCH 087/110] IGNITE-13742 INACTIVE mode is forced on nodes in Maintenance Mode - Fixes #8524. 
Signed-off-by: Sergey Chugunov --- .../apache/ignite/internal/IgniteKernal.java | 17 ++++++++++++++++- .../cluster/GridClusterStateProcessor.java | 6 ++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java index 588ae7d52ba4c..1bb3e6c6711dd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java @@ -1195,7 +1195,7 @@ public void start( // Assign discovery manager to context before other processors start so they // are able to register custom event listener. - GridManager discoMgr = new GridDiscoveryManager(ctx); + GridDiscoveryManager discoMgr = new GridDiscoveryManager(ctx); ctx.add(discoMgr, false); @@ -1210,10 +1210,25 @@ public void start( startProcessor(mntcProcessor); if (mntcProcessor.isMaintenanceMode()) { + if (log.isInfoEnabled()) { + log.info( + "Node is being started in maintenance mode. " + + "Starting IsolatedDiscoverySpi instead of configured discovery SPI." + ); + } + + cfg.setClusterStateOnStart(ClusterState.INACTIVE); + + if (log.isInfoEnabled()) + log.info("Overriding 'clusterStateOnStart' configuration to 'INACTIVE'."); + ctx.config().setDiscoverySpi(new IsolatedDiscoverySpi()); discoMgr = new GridDiscoveryManager(ctx); + // Reinitialized discovery manager won't have a valid consistentId on creation. 
+ discoMgr.consistentId(ctx.pdsFolderResolver().resolveFolders().consistentId()); + ctx.add(discoMgr, false); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridClusterStateProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridClusterStateProcessor.java index faacc71716fc5..01ded97995189 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridClusterStateProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridClusterStateProcessor.java @@ -1049,6 +1049,12 @@ public IgniteInternalFuture changeGlobalState( boolean forceChangeBaselineTopology, boolean isAutoAdjust ) { + if (ctx.maintenanceRegistry().isMaintenanceMode()) { + return new GridFinishedFuture<>( + new IgniteCheckedException("Failed to " + prettyStr(state) + " (node is in maintenance mode).") + ); + } + BaselineTopology blt = (compatibilityMode && !forceChangeBaselineTopology) ? null : calculateNewBaselineTopology(state, baselineNodes, forceChangeBaselineTopology); From 4ba240b6f41d4a2fbc4182e09b0cb497d39d5eb9 Mon Sep 17 00:00:00 2001 From: Pavel Pereslegin Date: Thu, 3 Dec 2020 11:26:30 +0300 Subject: [PATCH 088/110] IGNITE-13807 [MINOR] Fix error message in tests. 
(#8530) --- .../ignite/internal/encryption/EncryptedCacheNodeJoinTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheNodeJoinTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheNodeJoinTest.java index 2391bdb0643be..072afffa7b907 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheNodeJoinTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCacheNodeJoinTest.java @@ -284,7 +284,7 @@ private void checkNodeJoinWithStaticCacheConfig( if (client && newCfg) { String expErrMsg = "Joining node has encrypted caches which are not presented on the cluster, " + "encrypted caches configured on client node cannot be started when such node joins " + - "the cluster, these caches can be started manually (dynamically) after node is joined " + + "the cluster, these caches can be started manually (dynamically) after node joined" + "[caches=" + cacheName() + ']'; GridTestUtils.assertThrowsAnyCause(log, () -> startClientGrid(CLIENT), IgniteSpiException.class, expErrMsg); From fe81475b0e2bcf84ca1c78f3695d5b89222cc4b5 Mon Sep 17 00:00:00 2001 From: Pavel Pereslegin Date: Tue, 27 Oct 2020 15:35:59 +0300 Subject: [PATCH 089/110] IGNITE-13320 Cache encryption key rotation CLI management. 
--- .../internal/commandline/CommandList.java | 4 +- .../encryption/CacheGroupKeysCommand.java | 104 ++++++++++ .../ChangeCacheGroupKeyCommand.java | 88 +++++++++ .../encryption/ChangeMasterKeyCommand.java | 87 ++++++++ .../encryption/EncryptionCommand.java | 130 ------------ .../encryption/EncryptionCommands.java | 71 +++++++ .../encryption/EncryptionSubcommand.java | 59 ------ .../encryption/EncryptionSubcommands.java | 92 +++++++++ .../encryption/GetMasterKeyNameCommand.java | 72 +++++++ .../encryption/ReencryptionRateCommand.java | 163 +++++++++++++++ .../encryption/ReencryptionStartCommand.java | 100 ++++++++++ .../encryption/ReencryptionStatusCommand.java | 104 ++++++++++ .../encryption/ReencryptionStopCommand.java | 100 ++++++++++ .../util/GridCommandHandlerAbstractTest.java | 30 ++- .../ignite/util/GridCommandHandlerTest.java | 186 +++++++++++++++++- .../encryption/CacheGroupPageScanner.java | 60 +++++- .../encryption/GridEncryptionManager.java | 31 +++ .../encryption/VisorCacheGroupKeyIdsTask.java | 81 ++++++++ .../VisorChangeCacheGroupKeyTask.java | 63 ++++++ .../encryption/VisorEncryptionStatusTask.java | 119 +++++++++++ .../encryption/VisorReencryptionRateTask.java | 77 ++++++++ .../VisorStartReencryptionTask.java | 83 ++++++++ .../encryption/VisorStopReencryptionTask.java | 78 ++++++++ ...mmandHandlerClusterByClassTest_help.output | 21 ++ ...ndlerClusterByClassWithSSLTest_help.output | 21 ++ 25 files changed, 1824 insertions(+), 200 deletions(-) create mode 100644 modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupKeysCommand.java create mode 100644 modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeCacheGroupKeyCommand.java create mode 100644 modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeMasterKeyCommand.java delete mode 100644 
modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommand.java create mode 100644 modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommands.java delete mode 100644 modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommand.java create mode 100644 modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommands.java create mode 100644 modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GetMasterKeyNameCommand.java create mode 100644 modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java create mode 100644 modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStartCommand.java create mode 100644 modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStatusCommand.java create mode 100644 modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStopCommand.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupKeyIdsTask.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorChangeCacheGroupKeyTask.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorEncryptionStatusTask.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionRateTask.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorStartReencryptionTask.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorStopReencryptionTask.java diff --git 
a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandList.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandList.java index e16acaa97a32e..f00a4c0606067 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandList.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/CommandList.java @@ -19,7 +19,7 @@ import org.apache.ignite.internal.commandline.cache.CacheCommands; import org.apache.ignite.internal.commandline.diagnostic.DiagnosticCommand; -import org.apache.ignite.internal.commandline.encryption.EncryptionCommand; +import org.apache.ignite.internal.commandline.encryption.EncryptionCommands; import org.apache.ignite.internal.commandline.meta.MetadataCommand; import org.apache.ignite.internal.commandline.metric.MetricCommand; import org.apache.ignite.internal.commandline.property.PropertyCommand; @@ -59,7 +59,7 @@ public enum CommandList { DIAGNOSTIC("--diagnostic", new DiagnosticCommand()), /** Encryption features command. */ - ENCRYPTION("--encryption", new EncryptionCommand()), + ENCRYPTION("--encryption", new EncryptionCommands()), /** Kill command. */ KILL("--kill", new KillCommand()), diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupKeysCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupKeysCommand.java new file mode 100644 index 0000000000000..4d74e1ad1ad30 --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupKeysCommand.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.encryption; + +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.CommandLogger; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.visor.encryption.VisorCacheGroupKeyIdsTask; + +import static org.apache.ignite.internal.commandline.CommandList.ENCRYPTION; +import static org.apache.ignite.internal.commandline.CommandLogger.DOUBLE_INDENT; +import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; +import static org.apache.ignite.internal.commandline.TaskExecutor.BROADCAST_UUID; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.CACHE_GROUP_KEY_IDS; + +/** + * View cache group encryption key identifiers subcommand. 
+ */ +public class CacheGroupKeysCommand implements Command { + /** Cache group name, */ + private String argCacheGrpName; + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + Map> keyIdsMap = executeTaskByNameOnNode( + client, + VisorCacheGroupKeyIdsTask.class.getName(), + argCacheGrpName, + BROADCAST_UUID, + clientCfg + ); + + log.info("Encryption key identifiers for cache: " + argCacheGrpName); + + for (Map.Entry> entry : keyIdsMap.entrySet()) { + log.info(INDENT + "Node: " + entry.getKey()); + + List keyIds = entry.getValue(); + + if (F.isEmpty(keyIds)) { + log.info(DOUBLE_INDENT + "---"); + + continue; + } + + for (int i = 0; i < keyIds.size(); i++) + log.info(DOUBLE_INDENT + keyIds.get(i) + (i == 0 ? " (active)" : "")); + } + + return keyIdsMap; + } + catch (Throwable e) { + log.severe("Failed to perform operation."); + log.severe(CommandLogger.errorMessage(e)); + + throw e; + } + } + + /** {@inheritDoc} */ + @Override public String arg() { + return argCacheGrpName; + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + argCacheGrpName = argIter.nextArg("Expected cache group name."); + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + Command.usage(log, "View encryption key identifiers of the cache group:", ENCRYPTION, + CACHE_GROUP_KEY_IDS.toString(), "cacheGroupName"); + } + + /** {@inheritDoc} */ + @Override public String name() { + return CACHE_GROUP_KEY_IDS.name(); + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeCacheGroupKeyCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeCacheGroupKeyCommand.java new file mode 100644 index 0000000000000..8e1f17b0bf2b4 --- /dev/null +++ 
b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeCacheGroupKeyCommand.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.encryption; + +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.CommandLogger; +import org.apache.ignite.internal.visor.encryption.VisorChangeCacheGroupKeyTask; + +import static org.apache.ignite.internal.commandline.CommandList.ENCRYPTION; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.CHANGE_CACHE_GROUP_KEY; + +/** + * Change cache group key encryption subcommand. + */ +public class ChangeCacheGroupKeyCommand implements Command { + /** Cache group name. 
*/ + private String argCacheGrpName; + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + executeTaskByNameOnNode( + client, + VisorChangeCacheGroupKeyTask.class.getName(), + argCacheGrpName, + null, + clientCfg + ); + + log.info("The encryption key has been changed for cache group \"" + argCacheGrpName + "\"."); + + return null; + } + catch (Throwable e) { + log.severe("Failed to perform operation."); + log.severe(CommandLogger.errorMessage(e)); + + throw e; + } + } + + /** {@inheritDoc} */ + @Override public String confirmationPrompt() { + return "Warning: the command will change the encryption key of the cache group. Joining a node during " + + "the key change process is prohibited and will be rejected."; + } + + /** {@inheritDoc} */ + @Override public String arg() { + return argCacheGrpName; + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + argCacheGrpName = argIter.nextArg("Expected cache group name."); + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + Command.usage(log, "Change the encryption key of the cache group:", ENCRYPTION, + CHANGE_CACHE_GROUP_KEY.toString(), "cacheGroupName"); + } + + /** {@inheritDoc} */ + @Override public String name() { + return CHANGE_CACHE_GROUP_KEY.name(); + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeMasterKeyCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeMasterKeyCommand.java new file mode 100644 index 0000000000000..a1f5dd483ce01 --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeMasterKeyCommand.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license 
agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.encryption; + +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.CommandLogger; +import org.apache.ignite.internal.visor.encryption.VisorChangeMasterKeyTask; + +import static org.apache.ignite.internal.commandline.CommandList.ENCRYPTION; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.CHANGE_MASTER_KEY; + +/** + * Change master key encryption subcommand. + */ +public class ChangeMasterKeyCommand implements Command { + /** New master key name. 
*/ + private String argMasterKeyName; + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + String resMsg = executeTaskByNameOnNode( + client, + VisorChangeMasterKeyTask.class.getName(), + argMasterKeyName, + null, + clientCfg + ); + + log.info(resMsg); + + return resMsg; + } + catch (Throwable e) { + log.severe("Failed to perform operation."); + log.severe(CommandLogger.errorMessage(e)); + + throw e; + } + } + + /** {@inheritDoc} */ + @Override public String confirmationPrompt() { + return "Warning: the command will change the master key. Cache start and node join during the key change " + + "process is prohibited and will be rejected."; + } + + /** {@inheritDoc} */ + @Override public String arg() { + return argMasterKeyName; + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + argMasterKeyName = argIter.nextArg("Expected master key name."); + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + Command.usage(log, "Change the master key:", ENCRYPTION, CHANGE_MASTER_KEY.toString(), "newMasterKeyName"); + } + + /** {@inheritDoc} */ + @Override public String name() { + return CHANGE_MASTER_KEY.name(); + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommand.java deleted file mode 100644 index 5cbd723ddf73d..0000000000000 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommand.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ignite.internal.commandline.encryption; - -import java.util.logging.Logger; -import org.apache.ignite.internal.client.GridClient; -import org.apache.ignite.internal.client.GridClientConfiguration; -import org.apache.ignite.internal.commandline.AbstractCommand; -import org.apache.ignite.internal.commandline.Command; -import org.apache.ignite.internal.commandline.CommandArgIterator; -import org.apache.ignite.internal.commandline.CommandLogger; -import org.apache.ignite.internal.visor.encryption.VisorChangeMasterKeyTask; -import org.apache.ignite.internal.visor.encryption.VisorGetMasterKeyNameTask; - -import static org.apache.ignite.internal.commandline.CommandList.ENCRYPTION; -import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; -import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommand.CHANGE_MASTER_KEY; -import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommand.GET_MASTER_KEY_NAME; -import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommand.of; - -/** - * Commands assosiated with encryption features. - * - * @see EncryptionSubcommand - */ -public class EncryptionCommand extends AbstractCommand { - /** Subcommand. */ - EncryptionSubcommand cmd; - - /** The task name. */ - String taskName; - - /** The task arguments. 
*/ - Object taskArgs; - - /** {@inheritDoc} */ - @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { - try (GridClient client = Command.startClient(clientCfg)) { - String res = executeTaskByNameOnNode( - client, - taskName, - taskArgs, - null, - clientCfg - ); - - logger.info(res); - - return res; - } - catch (Throwable e) { - logger.severe("Failed to perform operation."); - logger.severe(CommandLogger.errorMessage(e)); - - throw e; - } - } - - /** {@inheritDoc} */ - @Override public String confirmationPrompt() { - if (CHANGE_MASTER_KEY == cmd) { - return "Warning: the command will change the master key. Cache start and node join during the key change " + - "process is prohibited and will be rejected."; - } - - return null; - } - - /** {@inheritDoc} */ - @Override public void parseArguments(CommandArgIterator argIter) { - EncryptionSubcommand cmd = of(argIter.nextArg("Expected encryption action.")); - - if (cmd == null) - throw new IllegalArgumentException("Expected correct encryption action."); - - switch (cmd) { - case GET_MASTER_KEY_NAME: - taskName = VisorGetMasterKeyNameTask.class.getName(); - - taskArgs = null; - - break; - - case CHANGE_MASTER_KEY: - String masterKeyName = argIter.nextArg("Expected master key name."); - - taskName = VisorChangeMasterKeyTask.class.getName(); - - taskArgs = masterKeyName; - - break; - - default: - throw new IllegalArgumentException("Unknown encryption subcommand: " + cmd); - } - - this.cmd = cmd; - } - - /** {@inheritDoc} */ - @Override public Object arg() { - return taskArgs; - } - - /** {@inheritDoc} */ - @Override public void printUsage(Logger logger) { - Command.usage(logger, "Print the current master key name:", ENCRYPTION, GET_MASTER_KEY_NAME.toString()); - Command.usage(logger, "Change the master key:", ENCRYPTION, CHANGE_MASTER_KEY.toString(), "newMasterKeyName"); - } - - /** {@inheritDoc} */ - @Override public String name() { - return ENCRYPTION.toCommandName(); - } -} 
diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommands.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommands.java new file mode 100644 index 0000000000000..3c96d9777eb25 --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommands.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.encryption; + +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.CommandList; + +/** + * Commands assosiated with encryption features. + * + * @see EncryptionSubcommands + */ +public class EncryptionCommands extends AbstractCommand { + /** Subcommand. 
*/ + private EncryptionSubcommands cmd; + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { + return cmd.subcommand().execute(clientCfg, logger); + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + EncryptionSubcommands cmd = EncryptionSubcommands.of(argIter.nextArg("Expected encryption action.")); + + if (cmd == null) + throw new IllegalArgumentException("Expected correct encryption action."); + + cmd.subcommand().parseArguments(argIter); + + if (argIter.hasNextSubArg()) + throw new IllegalArgumentException("Unexpected argument of --encryptiopn subcommand: " + argIter.peekNextArg()); + + this.cmd = cmd; + } + + /** {@inheritDoc} */ + @Override public Object arg() { + return null; + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger logger) { + for (EncryptionSubcommands cmd : EncryptionSubcommands.values()) + cmd.subcommand().printUsage(logger); + } + + /** {@inheritDoc} */ + @Override public String name() { + return CommandList.ENCRYPTION.toCommandName(); + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommand.java deleted file mode 100644 index 3c47c024ea137..0000000000000 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommand.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ignite.internal.commandline.encryption; - -import org.jetbrains.annotations.Nullable; - -/** - * Set of encryption subcommands. - * - * @see EncryptionCommand - */ -public enum EncryptionSubcommand { - /** Subcommand to get the current master key name. */ - GET_MASTER_KEY_NAME("get_master_key_name"), - - /** Subcommand to change the master key. */ - CHANGE_MASTER_KEY("change_master_key"); - - /** Subcommand name. */ - private final String name; - - /** @param name Encryption subcommand name. */ - EncryptionSubcommand(String name) { - this.name = name; - } - - /** - * @param text Command text (case insensitive). - * @return Command for the text. {@code Null} if there is no such command. 
- */ - @Nullable public static EncryptionSubcommand of(String text) { - for (EncryptionSubcommand cmd : EncryptionSubcommand.values()) { - if (cmd.name.equalsIgnoreCase(text)) - return cmd; - } - - return null; - } - - /** {@inheritDoc} */ - @Override public String toString() { - return name; - } -} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommands.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommands.java new file mode 100644 index 0000000000000..2c0d3ea7fe308 --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommands.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.encryption; + +import org.apache.ignite.internal.commandline.Command; +import org.jetbrains.annotations.Nullable; + +/** + * Set of encryption subcommands. + * + * @see EncryptionCommands + */ +public enum EncryptionSubcommands { + /** Subcommand to get the current master key name. 
*/ + GET_MASTER_KEY_NAME("get_master_key_name", new GetMasterKeyNameCommand()), + + /** Subcommand to change the master key. */ + CHANGE_MASTER_KEY("change_master_key", new ChangeMasterKeyCommand()), + + /** Subcommand to change the current encryption key for specified cache group. */ + CHANGE_CACHE_GROUP_KEY("change_cache_key", new ChangeCacheGroupKeyCommand()), + + /** Subcommand to view current encryption key IDs for specified cache group. */ + CACHE_GROUP_KEY_IDS("cache_key_ids", new CacheGroupKeysCommand()), + + /** Subcommand to view re-encryption status of cache group. */ + REENCRYPTION_STATUS("reencryption_status", new ReencryptionStatusCommand()), + + /** Subcommand to stop cache group reencryption. */ + REENCRYPTION_STOP("reencryption_stop", new ReencryptionStopCommand()), + + /** Subcommand to start cache group reencryption. */ + REENCRYPTION_START("reencryption_start", new ReencryptionStartCommand()), + + /** Subcommand to view/change cache group re-encryption rate limit. */ + REENCRYPTION_RATE("reencryption_rate", new ReencryptionRateCommand()); + + /** Subcommand name. */ + private final String name; + + /** Command. */ + private final Command command; + + /** + * @param name Encryption subcommand name. + * @param command Command implementation. + */ + EncryptionSubcommands(String name, Command command) { + this.name = name; + this.command = command; + } + + /** + * @return Cache subcommand implementation. + */ + public Command subcommand() { + return command; + } + + /** + * @param text Command text (case insensitive). + * @return Command for the text. {@code Null} if there is no such command. 
+ */ + @Nullable public static EncryptionSubcommands of(String text) { + for (EncryptionSubcommands cmd : values()) { + if (cmd.name.equalsIgnoreCase(text)) + return cmd; + } + + return null; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return name; + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GetMasterKeyNameCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GetMasterKeyNameCommand.java new file mode 100644 index 0000000000000..58a466201c8f0 --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GetMasterKeyNameCommand.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.commandline.encryption; + +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandLogger; +import org.apache.ignite.internal.visor.encryption.VisorGetMasterKeyNameTask; + +import static org.apache.ignite.internal.commandline.CommandList.ENCRYPTION; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.GET_MASTER_KEY_NAME; + +/** + * Get master key name encryption subcommand. + */ +public class GetMasterKeyNameCommand implements Command { + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + String masterKeyName = executeTaskByNameOnNode( + client, + VisorGetMasterKeyNameTask.class.getName(), + null, + null, + clientCfg + ); + + log.info(masterKeyName); + + return masterKeyName; + } + catch (Throwable e) { + log.severe("Failed to perform operation."); + log.severe(CommandLogger.errorMessage(e)); + + throw e; + } + } + + /** {@inheritDoc} */ + @Override public Void arg() { + return null; + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + Command.usage(log, "Print the current master key name:", ENCRYPTION, GET_MASTER_KEY_NAME.toString()); + } + + /** {@inheritDoc} */ + @Override public String name() { + return GET_MASTER_KEY_NAME.name(); + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java new file mode 100644 index 
0000000000000..58432409113e5 --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java @@ -0,0 +1,163 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.encryption; + +import java.util.Map; +import java.util.UUID; +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.CommandLogger; +import org.apache.ignite.internal.commandline.argument.CommandArg; +import org.apache.ignite.internal.commandline.argument.CommandArgUtils; +import org.apache.ignite.internal.visor.encryption.VisorReencryptionRateTask; + +import static java.util.Collections.singletonMap; +import static org.apache.ignite.internal.commandline.CommandList.ENCRYPTION; +import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; +import static org.apache.ignite.internal.commandline.CommandLogger.optional; +import static 
org.apache.ignite.internal.commandline.TaskExecutor.BROADCAST_UUID; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_RATE; + +/** + * View/change cache group re-encryption rate limit subcommand. + */ +public class ReencryptionRateCommand implements Command { + /** Re-encryption rate limit in megabytes per second. */ + private Double rateLimit; + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + Map results = executeTaskByNameOnNode( + client, + VisorReencryptionRateTask.class.getName(), + rateLimit, + BROADCAST_UUID, + clientCfg + ); + + for (Map.Entry entry : results.entrySet()) { + boolean read = rateLimit == null; + + String msg; + + if (entry.getValue() instanceof Throwable) { + msg = " failed to " + (read ? "get" : "limit") + " reencryption rate (" + + ((Throwable)entry.getValue()).getMessage() + ")."; + } + else { + double prevRate = (double)entry.getValue(); + boolean unlimited = read ? prevRate == 0 : rateLimit == 0; + + if (unlimited) + msg = "reencryption rate is not limited."; + else { + msg = "reencryption rate " + (read ? 
+ "is limited to " + prevRate : + "has been limited to " + rateLimit) + " MB/s."; + } + } + + log.info(INDENT + "Node " + entry.getKey() + ": " + msg); + } + + return null; + } + catch (Throwable e) { + log.severe("Failed to perform operation."); + log.severe(CommandLogger.errorMessage(e)); + + throw e; + } + } + + /** {@inheritDoc} */ + @Override public Double arg() { + return rateLimit; + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + rateLimit = null; + + while (argIter.hasNextSubArg()) { + String arg = argIter.nextArg("Failed to read command argument."); + + ReencryptionRateCommandArg cmdArg = CommandArgUtils.of(arg, ReencryptionRateCommandArg.class); + + if (cmdArg == ReencryptionRateCommandArg.LIMIT) { + String rateLimitArg = argIter.nextArg("Expected decimal value for reencryption rate."); + + try { + rateLimit = Double.parseDouble(rateLimitArg); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("Failed to parse " + ReencryptionRateCommandArg.LIMIT + + " command argument. Decimal value expected.", e); + } + } + else + throw new IllegalArgumentException("Unexpected command argument: " + arg); + } + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + Command.usage(log, "View/change re-encryption rate limit:", ENCRYPTION, + singletonMap("limit", "decimal value to change rate limit (MB/s)"), + REENCRYPTION_RATE.toString(), optional(ReencryptionRateCommandArg.LIMIT, "limit")); + } + + /** {@inheritDoc} */ + @Override public String name() { + return REENCRYPTION_RATE.name(); + } + + /** + * Warm-up command arguments name. + */ + private enum ReencryptionRateCommandArg implements CommandArg { + /** Re-encryption rate limit argument. */ + LIMIT("--limit"); + + /** Option name. */ + private final String name; + + /** + * Constructor. + * + * @param name Argument name. 
+ */ + ReencryptionRateCommandArg(String name) { + this.name = name; + } + + /** {@inheritDoc} */ + @Override public String argName() { + return name; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return name; + } + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStartCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStartCommand.java new file mode 100644 index 0000000000000..7cb437e71ec01 --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStartCommand.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.commandline.encryption; + +import java.util.Map; +import java.util.UUID; +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.CommandLogger; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.internal.visor.encryption.VisorStartReencryptionTask; + +import static org.apache.ignite.internal.commandline.CommandList.ENCRYPTION; +import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; +import static org.apache.ignite.internal.commandline.TaskExecutor.BROADCAST_UUID; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_START; + +/** + * Start cache group reencryption subcommand. + */ +public class ReencryptionStartCommand implements Command { + /** Cache group name. */ + private String grpName; + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + Map resErrs = executeTaskByNameOnNode( + client, + VisorStartReencryptionTask.class.getName(), + CU.cacheId(grpName), + BROADCAST_UUID, + clientCfg + ); + + for (Map.Entry entry : resErrs.entrySet()) { + String msg; + + if (entry.getValue() instanceof Throwable) { + msg = "failed to start re-encryption of the cache group \"" + grpName + + "\" (" + ((Throwable)entry.getValue()).getMessage() + ")."; + } + else { + msg = "re-encryption of the cache group \"" + grpName + "\" has " + + (((boolean)entry.getValue()) ? 
"" : "already ") + "been started."; + } + + log.info(INDENT + "Node " + entry.getKey() + ": " + msg); + } + + return null; + } + catch (Throwable e) { + log.severe("Failed to perform operation."); + log.severe(CommandLogger.errorMessage(e)); + + throw e; + } + } + + /** {@inheritDoc} */ + @Override public String arg() { + return grpName; + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + grpName = argIter.nextArg("Expected cache group name."); + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + Command.usage(log, "Start cache group re-encryption:", ENCRYPTION, + REENCRYPTION_START.toString(), "cacheGroupName"); + } + + /** {@inheritDoc} */ + @Override public String name() { + return REENCRYPTION_START.name(); + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStatusCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStatusCommand.java new file mode 100644 index 0000000000000..bde1569692004 --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStatusCommand.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.encryption; + +import java.util.Map; +import java.util.UUID; +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.CommandLogger; +import org.apache.ignite.internal.util.typedef.T2; +import org.apache.ignite.internal.visor.encryption.VisorEncryptionStatusTask; + +import static org.apache.ignite.internal.commandline.CommandList.ENCRYPTION; +import static org.apache.ignite.internal.commandline.CommandLogger.DOUBLE_INDENT; +import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; +import static org.apache.ignite.internal.commandline.TaskExecutor.BROADCAST_UUID; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_STATUS; + +/** + * View cache group re-encryption status. 
+ */ +public class ReencryptionStatusCommand implements Command { + /** Cache group name, */ + private String argCacheGrpName; + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + Map> reencryptStates = executeTaskByNameOnNode( + client, + VisorEncryptionStatusTask.class.getName(), + argCacheGrpName, + BROADCAST_UUID, + clientCfg + ); + + log.info("Encryption status for cache group: " + argCacheGrpName); + + for (Map.Entry> entry : reencryptStates.entrySet()) { + log.info(INDENT + "Node: " + entry.getKey()); + + long pagesEncrypted = entry.getValue().get1(); + long pagesTotal = entry.getValue().get2(); + + if (pagesTotal == 0) { + log.info(DOUBLE_INDENT + "Re-encryption completed or not required"); + + continue; + } + + log.info(DOUBLE_INDENT + String.format("left=%d total=%d (memory pages), completed=%.1f%%", + (pagesTotal - pagesEncrypted), pagesTotal, pagesEncrypted * 100 / (double)pagesTotal)); + } + + return reencryptStates; + } + catch (Throwable e) { + log.severe("Failed to perform operation."); + log.severe(CommandLogger.errorMessage(e)); + + throw e; + } + } + + /** {@inheritDoc} */ + @Override public String arg() { + return argCacheGrpName; + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + argCacheGrpName = argIter.nextArg("Expected cache group name."); + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + Command.usage(log, "View cache group encryption status:", ENCRYPTION, + REENCRYPTION_STATUS.toString(), "cacheGroupName"); + } + + /** {@inheritDoc} */ + @Override public String name() { + return REENCRYPTION_STATUS.name(); + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStopCommand.java 
b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStopCommand.java new file mode 100644 index 0000000000000..d0355e87c93cd --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStopCommand.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.commandline.encryption; + +import java.util.Map; +import java.util.UUID; +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.CommandLogger; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.internal.visor.encryption.VisorStopReencryptionTask; + +import static org.apache.ignite.internal.commandline.CommandList.ENCRYPTION; +import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; +import static org.apache.ignite.internal.commandline.TaskExecutor.BROADCAST_UUID; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_STOP; + +/** + * Stop cache group reencryption subcommand. + */ +public class ReencryptionStopCommand implements Command { + /** Cache group name. */ + private String grpName; + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + Map resErrs = executeTaskByNameOnNode( + client, + VisorStopReencryptionTask.class.getName(), + CU.cacheId(grpName), + BROADCAST_UUID, + clientCfg + ); + + for (Map.Entry entry : resErrs.entrySet()) { + String msg; + + if (entry.getValue() instanceof Throwable) { + msg = "failed to stop re-encryption of the cache group \"" + grpName + + "\" (" + ((Throwable)entry.getValue()).getMessage() + ")."; + } + else { + msg = "re-encryption of the cache group \"" + grpName + "\" has " + + (((boolean)entry.getValue()) ? 
"" : "already ") + "been stopped."; + } + + log.info(INDENT + "Node " + entry.getKey() + ": " + msg); + } + + return null; + } + catch (Throwable e) { + log.severe("Failed to perform operation."); + log.severe(CommandLogger.errorMessage(e)); + + throw e; + } + } + + /** {@inheritDoc} */ + @Override public String arg() { + return grpName; + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + grpName = argIter.nextArg("Expected cache group name."); + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + Command.usage(log, "Stop cache group re-encryption:", ENCRYPTION, + REENCRYPTION_STOP.toString(), "cacheGroupName"); + } + + /** {@inheritDoc} */ + @Override public String name() { + return REENCRYPTION_STOP.name(); + } +} diff --git a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerAbstractTest.java b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerAbstractTest.java index bc31ce71304a6..cf31ea1e6124e 100644 --- a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerAbstractTest.java +++ b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerAbstractTest.java @@ -40,6 +40,7 @@ import org.apache.ignite.configuration.ConnectorConfiguration; import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.EncryptionConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.IgniteEx; @@ -68,6 +69,8 @@ import static java.util.Objects.nonNull; import static org.apache.ignite.IgniteSystemProperties.IGNITE_ENABLE_EXPERIMENTAL_COMMAND; import static org.apache.ignite.configuration.DataStorageConfiguration.DFLT_CHECKPOINT_FREQ; +import static 
org.apache.ignite.configuration.EncryptionConfiguration.DFLT_REENCRYPTION_BATCH_SIZE; +import static org.apache.ignite.configuration.EncryptionConfiguration.DFLT_REENCRYPTION_RATE_MBPS; import static org.apache.ignite.internal.encryption.AbstractEncryptionTest.KEYSTORE_PASSWORD; import static org.apache.ignite.internal.encryption.AbstractEncryptionTest.KEYSTORE_PATH; import static org.apache.ignite.internal.processors.cache.verify.VerifyBackupPartitionsDumpTask.IDLE_DUMP_FILE_PREFIX; @@ -85,6 +88,9 @@ public abstract class GridCommandHandlerAbstractTest extends GridCommonAbstractT /** */ protected static final String CLIENT_NODE_NAME_PREFIX = "client"; + /** */ + protected static final String DAEMON_NODE_NAME_PREFIX = "daemon"; + /** Option is used for auto confirmation. */ protected static final String CMD_AUTO_CONFIRMATION = "--yes"; @@ -113,7 +119,13 @@ public abstract class GridCommandHandlerAbstractTest extends GridCommonAbstractT protected boolean autoConfirmation = true; /** {@code True} if encription is enabled. */ - protected boolean encriptionEnabled; + protected boolean encryptionEnabled; + + /** Re-encryption rate limit in megabytes per second. */ + protected double reencryptSpeed = DFLT_REENCRYPTION_RATE_MBPS; + + /** The number of pages that is scanned during re-encryption under checkpoint lock. */ + protected int reencryptBatchSize = DFLT_REENCRYPTION_BATCH_SIZE; /** Last operation result. 
*/ protected Object lastOperationResult; @@ -171,7 +183,7 @@ protected boolean persistenceEnable() { testOut.reset(); - encriptionEnabled = false; + encryptionEnabled = false; GridClientFactory.stopAll(false); } @@ -233,13 +245,22 @@ protected boolean idleVerifyRes(Path p) { cfg.setClientMode(igniteInstanceName.startsWith(CLIENT_NODE_NAME_PREFIX)); - if (encriptionEnabled) { + cfg.setDaemon(igniteInstanceName.startsWith(DAEMON_NODE_NAME_PREFIX)); + + if (encryptionEnabled) { KeystoreEncryptionSpi encSpi = new KeystoreEncryptionSpi(); encSpi.setKeyStorePath(KEYSTORE_PATH); encSpi.setKeyStorePassword(KEYSTORE_PASSWORD.toCharArray()); cfg.setEncryptionSpi(encSpi); + + EncryptionConfiguration encrCfg = new EncryptionConfiguration(); + + encrCfg.setReencryptionRateLimit(reencryptSpeed); + encrCfg.setReencryptionBatchSize(reencryptBatchSize); + + dsCfg.setEncryptionConfiguration(encrCfg); } return cfg; @@ -418,7 +439,8 @@ protected void createCacheAndPreload( CacheConfiguration ccfg = new CacheConfiguration<>(DEFAULT_CACHE_NAME) .setAffinity(new RendezvousAffinityFunction(false, partitions)) - .setBackups(1); + .setBackups(1) + .setEncryptionEnabled(encryptionEnabled); if (filter != null) ccfg.setNodeFilter(filter); diff --git a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java index 5557b5e5eb165..2daca126b3d2f 100644 --- a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java +++ b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java @@ -103,6 +103,7 @@ import org.apache.ignite.internal.util.lang.GridAbsPredicate; import org.apache.ignite.internal.util.typedef.G; import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.U; import 
org.apache.ignite.internal.visor.cache.VisorFindAndDeleteGarbageInPersistenceTaskResult; import org.apache.ignite.internal.visor.tx.VisorTxInfo; @@ -139,6 +140,12 @@ import static org.apache.ignite.internal.commandline.CommandHandler.EXIT_CODE_OK; import static org.apache.ignite.internal.commandline.CommandHandler.EXIT_CODE_UNEXPECTED_ERROR; import static org.apache.ignite.internal.commandline.CommandList.DEACTIVATE; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_STATUS; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.CACHE_GROUP_KEY_IDS; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.CHANGE_CACHE_GROUP_KEY; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_RATE; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_START; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_STOP; import static org.apache.ignite.internal.encryption.AbstractEncryptionTest.MASTER_KEY_NAME_2; import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.IGNITE_PDS_SKIP_CHECKPOINT_ON_NODE_STOP; import static org.apache.ignite.internal.processors.cache.persistence.snapshot.AbstractSnapshotSelfTest.doSnapshotCancellationTest; @@ -2626,7 +2633,7 @@ public void testCacheIdleVerifyPrintLostPartitions() throws Exception { /** @throws Exception If failed. */ @Test public void testMasterKeyChange() throws Exception { - encriptionEnabled = true; + encryptionEnabled = true; injectTestSystemOut(); @@ -2661,10 +2668,171 @@ public void testMasterKeyChange() throws Exception { "Master key change was rejected. Unable to get the master key digest."); } + /** @throws Exception If failed. 
*/ + @Test + public void testCacheGroupKeyChange() throws Exception { + encryptionEnabled = true; + + injectTestSystemOut(); + + int srvNodes = 2; + + IgniteEx ignite = startGrids(srvNodes); + + startGrid(CLIENT_NODE_NAME_PREFIX); + startGrid(DAEMON_NODE_NAME_PREFIX); + + ignite.cluster().state(ACTIVE); + + createCacheAndPreload(ignite, 10); + + int ret = execute("--encryption", CACHE_GROUP_KEY_IDS.toString(), DEFAULT_CACHE_NAME); + + assertEquals(EXIT_CODE_OK, ret); + assertContains(log, testOut.toString(), "Encryption key identifiers for cache: " + DEFAULT_CACHE_NAME); + assertEquals(srvNodes, countSubstrs(testOut.toString(), "0 (active)")); + + ret = execute("--encryption", CHANGE_CACHE_GROUP_KEY.toString(), DEFAULT_CACHE_NAME); + + assertEquals(EXIT_CODE_OK, ret); + assertContains(log, testOut.toString(), + "The encryption key has been changed for cache group \"" + DEFAULT_CACHE_NAME + '"'); + + ret = execute("--encryption", CACHE_GROUP_KEY_IDS.toString(), DEFAULT_CACHE_NAME); + + assertEquals(testOut.toString(), EXIT_CODE_OK, ret); + assertContains(log, testOut.toString(), "Encryption key identifiers for cache: " + DEFAULT_CACHE_NAME); + assertEquals(srvNodes, countSubstrs(testOut.toString(), "1 (active)")); + } + + /** @throws Exception If failed. 
*/ + @Test + public void testCHangeReencryptionRate() throws Exception { + int srvNodes = 2; + + IgniteEx ignite = startGrids(srvNodes); + + ignite.cluster().state(ACTIVE); + + injectTestSystemOut(); + + int ret = execute("--encryption", REENCRYPTION_RATE.toString()); + + assertEquals(EXIT_CODE_OK, ret); + + assertEquals(srvNodes, countSubstrs(testOut.toString(), "reencryption rate is not limited.")); + + ret = execute("--encryption", REENCRYPTION_RATE.toString(), "--limit", "0.01"); + + assertEquals(EXIT_CODE_OK, ret); + + assertEquals(srvNodes, countSubstrs(testOut.toString(), "reencryption rate has been limited to 0.01 MB/s.")); + + ret = execute("--encryption", REENCRYPTION_RATE.toString()); + + assertEquals(EXIT_CODE_OK, ret); + + assertEquals(srvNodes, countSubstrs(testOut.toString(), "reencryption rate is limited to 0.01 MB/s.")); + + ret = execute("--encryption", REENCRYPTION_RATE.toString(), "--limit", "0"); + + assertEquals(EXIT_CODE_OK, ret); + + assertEquals(srvNodes, countSubstrs(testOut.toString(), "reencryption rate is not limited.")); + } + + /** @throws Exception If failed. 
*/ + @Test + public void testReencryptionInterruptAndResume() throws Exception { + encryptionEnabled = true; + reencryptSpeed = 0.01; + reencryptBatchSize = 1; + + int srvNodes = 2; + + IgniteEx ignite = startGrids(srvNodes); + + ignite.cluster().state(ACTIVE); + + injectTestSystemOut(); + + createCacheAndPreload(ignite, 10_000); + + ignite.encryption().changeCacheGroupKey(Collections.singleton(DEFAULT_CACHE_NAME)).get(); + + assertTrue(isReencryptionStarted(DEFAULT_CACHE_NAME)); + + int ret = execute("--encryption", REENCRYPTION_STATUS.toString(), DEFAULT_CACHE_NAME); + + assertEquals(EXIT_CODE_OK, ret); + + Pattern ptrn = Pattern.compile("(?m)Node: [-0-9a-f]{36}\n\\s+left=(?\\d+) total=(?\\d+).+"); + Matcher matcher = ptrn.matcher(testOut.toString()); + int matchesCnt = 0; + + while (matcher.find()) { + assertEquals(2, matcher.groupCount()); + + int pagesLeft = Integer.parseInt(matcher.group("left")); + int pagesTotal = Integer.parseInt(matcher.group("total")); + + assertTrue(pagesLeft > 0); + assertTrue(pagesLeft < pagesTotal); + + matchesCnt++; + } + + assertEquals(srvNodes, matchesCnt); + + ret = execute("--encryption", REENCRYPTION_STOP.toString(), DEFAULT_CACHE_NAME); + + assertEquals(EXIT_CODE_OK, ret); + assertEquals(srvNodes, countSubstrs(testOut.toString(), + "re-encryption of the cache group \"" + DEFAULT_CACHE_NAME + "\" has been stopped.")); + assertFalse(isReencryptionStarted(DEFAULT_CACHE_NAME)); + + ret = execute("--encryption", REENCRYPTION_STOP.toString(), DEFAULT_CACHE_NAME); + + assertEquals(EXIT_CODE_OK, ret); + assertEquals(srvNodes, countSubstrs(testOut.toString(), + "re-encryption of the cache group \"" + DEFAULT_CACHE_NAME + "\" has already been stopped.")); + + ret = execute("--encryption", REENCRYPTION_START.toString(), DEFAULT_CACHE_NAME); + + assertEquals(EXIT_CODE_OK, ret); + assertEquals(srvNodes, countSubstrs(testOut.toString(), + "re-encryption of the cache group \"" + DEFAULT_CACHE_NAME + "\" has been started.")); + 
assertTrue(isReencryptionStarted(DEFAULT_CACHE_NAME)); + + ret = execute("--encryption", REENCRYPTION_START.toString(), DEFAULT_CACHE_NAME); + + assertEquals(EXIT_CODE_OK, ret); + assertEquals(srvNodes, countSubstrs(testOut.toString(), + "re-encryption of the cache group \"" + DEFAULT_CACHE_NAME + "\" has already been started.")); + } + + /** + * @param cacheName Cache name. + * @return {@code True} if re-encryption of the specified cache is started on all server nodes. + */ + private boolean isReencryptionStarted(String cacheName) { + for (Ignite grid : G.allGrids()) { + ClusterNode locNode = grid.cluster().localNode(); + + if (locNode.isClient() || locNode.isDaemon()) + continue; + + if (((IgniteEx)grid).context().encryption().reencryptionFuture(CU.cacheId(cacheName)).isDone()) + return false; + } + + return true; + } + /** @throws Exception If failed. */ @Test public void testMasterKeyChangeOnInactiveCluster() throws Exception { - encriptionEnabled = true; + encryptionEnabled = true; injectTestSystemOut(); @@ -2834,4 +3002,18 @@ private VisorFindAndDeleteGarbageInPersistenceTaskResult executeTaskViaControlCo return hnd.getLastOperationResult(); } + + /** + * @param str String. + * @param substr Substring to find in the specified string. + * @return The number of substrings found in the specified string. 
+ */ + private int countSubstrs(String str, String substr) { + int cnt = 0; + + for (int off = 0; (off = str.indexOf(substr, off)) != -1; off++) + ++cnt; + + return cnt; + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupPageScanner.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupPageScanner.java index dc0a29b05eb30..12e86834ea73f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupPageScanner.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupPageScanner.java @@ -77,15 +77,15 @@ public class CacheGroupPageScanner implements CheckpointListener { /** Collection of groups waiting for a checkpoint. */ private final Collection cpWaitGrps = new ConcurrentLinkedQueue<>(); - /** Page scanning speed limiter. */ - private final BasicRateLimiter limiter; - /** Single-threaded executor to run cache group scan task. */ private final ThreadPoolExecutor singleExecSvc; /** Number of pages that is scanned during reencryption under checkpoint lock. */ private final int batchSize; + /** Page scanning speed limiter. */ + private volatile BasicRateLimiter limiter; + /** Stop flag. */ private boolean stopped; @@ -313,6 +313,60 @@ public long[] pagesCount(CacheGroupContext grp) throws IgniteCheckedException { return partStates; } + /** + * @return Re-encryption rate limit in megabytes per second ({@code 0} - unlimited). + */ + public double getRate() { + DataStorageConfiguration dsCfg = ctx.config().getDataStorageConfiguration(); + + if (CU.isPersistenceEnabled(dsCfg)) { + BasicRateLimiter limiter0 = limiter; + + if (limiter0 != null) + return dsCfg.getPageSize() * limiter0.getRate() / MB; + } + + return 0; + } + + /** + * @param rate Re-encryption rate limit in megabytes per second ({@code 0} - unlimited). 
+ */ + public void setRate(double rate) { + DataStorageConfiguration dsCfg = ctx.config().getDataStorageConfiguration(); + + if (!CU.isPersistenceEnabled(dsCfg)) + return; + + BasicRateLimiter limiter0 = limiter; + + if (rate == 0 && limiter0 != null) { + limiter = null; + + return; + } + + double permits = calcPermits(rate, dsCfg); + + if (limiter0 != null) { + limiter0.setRate(permits); + + return; + } + + limiter = new BasicRateLimiter(permits); + } + + /** + * @param rate Maximum scan speed in megabytes per second + * @param dsCfg Datastorage configuration. + * @return The number of permits allowed per second. + */ + private double calcPermits(double rate, DataStorageConfiguration dsCfg) { + return rate * MB / + (dsCfg.getPageSize() == 0 ? DataStorageConfiguration.DFLT_PAGE_SIZE : dsCfg.getPageSize()); + } + /** * @param grp Cache group. * @param hnd Partition handler. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java index 2c31dcdf7f34c..9c1f4fc969ad1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java @@ -823,6 +823,20 @@ public boolean reencryptionInProgress(int grpId) { return reencryptGroups.containsKey(grpId); } + /** + * @return Re-encryption rate limit in megabytes per second ({@code 0} - unlimited). + */ + public double getReencryptionRate() { + return pageScanner.getRate(); + } + + /** + * @param rate Re-encryption rate limit in megabytes per second ({@code 0} - unlimited). + */ + public void setReencryptionRate(double rate) { + pageScanner.setRate(rate); + } + /** * Removes encryption key(s). 
* @@ -1159,6 +1173,23 @@ private void sendGenerateEncryptionKeyRequest(GenerateEncryptionKeyFuture fut) t ctx.io().sendToGridTopic(rndNode.id(), TOPIC_GEN_ENC_KEY, req, SYSTEM_POOL); } + /** + * Forces re-encryption of the cache group. + * + * @param grpId Cache group ID. + */ + public void resumeReencryption(int grpId) throws IgniteCheckedException { + if (grpKeyChangeProc.inProgress()) + throw new IgniteCheckedException("Cannot force start reencryption during cache group key change."); + + if (!reencryptionInProgress(grpId)) + throw new IgniteCheckedException("Re-encryption completed or not required [grpId=" + grpId + "]"); + + + + startReencryption(Collections.singleton(grpId)); + } + /** * @param grpIds Cache group IDs. * @throws IgniteCheckedException If failed. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupKeyIdsTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupKeyIdsTask.java new file mode 100644 index 0000000000000..ccd4238f8c97a --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupKeyIdsTask.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.visor.encryption; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import org.apache.ignite.IgniteException; +import org.apache.ignite.compute.ComputeJobResult; +import org.apache.ignite.internal.processors.cache.IgniteInternalCache; +import org.apache.ignite.internal.visor.VisorJob; +import org.apache.ignite.internal.visor.VisorMultiNodeTask; +import org.jetbrains.annotations.Nullable; + +/** + * The task for getting encryption key identifiers of the cache group. + */ +public class VisorCacheGroupKeyIdsTask extends VisorMultiNodeTask>, List> { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override protected VisorJob> job(String arg) { + return new VisorGetCacheGroupKeysJob(arg, debug); + } + + /** {@inheritDoc} */ + @Nullable @Override protected Map> reduce0(List results) { + Map> resMap = new HashMap<>(); + + for (ComputeJobResult res : results) { + List keyIds = res.getData(); + + resMap.put(res.getNode().id(), keyIds); + } + + return resMap; + } + + /** The job for getting the master key name. */ + private static class VisorGetCacheGroupKeysJob extends VisorJob> { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** + * Create job with specified argument. + * + * @param arg Job argument. + * @param debug Flag indicating whether debug information should be printed into node log. 
+ */ + protected VisorGetCacheGroupKeysJob(String arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override protected List run(String arg) throws IgniteException { + IgniteInternalCache cache = ignite.context().cache().cache(arg); + + if (cache == null) + throw new IgniteException("Cache " + arg + " not found."); + + return ignite.context().encryption().groupKeyIds(cache.context().group().groupId()); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorChangeCacheGroupKeyTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorChangeCacheGroupKeyTask.java new file mode 100644 index 0000000000000..7317852395b5f --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorChangeCacheGroupKeyTask.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.visor.encryption; + +import java.util.Collection; +import java.util.Collections; +import org.apache.ignite.IgniteEncryption; +import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.visor.VisorJob; +import org.apache.ignite.internal.visor.VisorOneNodeTask; + +/** + * The task for changing the encryption key for the cache group. + * + * @see IgniteEncryption#changeCacheGroupKey(Collection) + */ +public class VisorChangeCacheGroupKeyTask extends VisorOneNodeTask { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override protected VisorJob job(String arg) { + return new VisorChangeCacheGroupKeyJob(arg, debug); + } + + /** The job for changing the encryption key of the cache group. */ + private static class VisorChangeCacheGroupKeyJob extends VisorJob { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** + * Create job with specified argument. + * + * @param arg Job argument. + * @param debug Flag indicating whether debug information should be printed into node log. + */ + protected VisorChangeCacheGroupKeyJob(String arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override protected Void run(String grpName) throws IgniteException { + ignite.encryption().changeCacheGroupKey(Collections.singleton(grpName)).get(); + + return null; + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorEncryptionStatusTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorEncryptionStatusTask.java new file mode 100644 index 0000000000000..dfd0c7a2d706d --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorEncryptionStatusTask.java @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.encryption; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.compute.ComputeJobResult; +import org.apache.ignite.internal.managers.encryption.GridEncryptionManager; +import org.apache.ignite.internal.managers.encryption.ReencryptStateUtils; +import org.apache.ignite.internal.pagemem.PageIdAllocator; +import org.apache.ignite.internal.pagemem.store.PageStore; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.cache.IgniteInternalCache; +import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; +import org.apache.ignite.internal.util.typedef.T2; +import org.apache.ignite.internal.visor.VisorJob; +import org.apache.ignite.internal.visor.VisorMultiNodeTask; +import org.jetbrains.annotations.Nullable; + +/** + * The task for getting encryption status of the cache group. + */ +public class VisorEncryptionStatusTask extends VisorMultiNodeTask>, T2> { + /** Serial version uid. 
*/ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override protected VisorJob> job(String arg) { + return new VisorGetCacheGroupKeysJob(arg, debug); + } + + /** {@inheritDoc} */ + @Nullable @Override protected Map> reduce0(List results) { + Map> resMap = new HashMap<>(); + + for (ComputeJobResult res : results) + resMap.put(res.getNode().id(), res.getData()); + + return resMap; + } + + /** The job for getting the master key name. */ + private static class VisorGetCacheGroupKeysJob extends VisorJob> { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** + * Create job with specified argument. + * + * @param arg Job argument. + * @param debug Flag indicating whether debug information should be printed into node log. + */ + protected VisorGetCacheGroupKeysJob(String arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override protected T2 run(String arg) throws IgniteException { + IgniteInternalCache cache = ignite.context().cache().cache(arg); + + if (cache == null) + throw new IgniteException("Cache " + arg + " not found."); + + CacheGroupContext grp = cache.context().group(); + GridEncryptionManager encMgr = grp.shared().kernalContext().encryption(); + + if (!encMgr.reencryptionInProgress(grp.groupId())) + return new T2<>(0L, 0L); + + FilePageStoreManager mgr = (FilePageStoreManager)grp.shared().pageStore(); + + long completePages = 0; + long totalPages = 0; + + try { + for (int p = 0; p < grp.affinity().partitions(); p++) { + PageStore pageStore = mgr.getStore(grp.groupId(), p); + + if (!pageStore.exists()) + continue; + + long state = encMgr.getEncryptionState(grp.groupId(), p); + + totalPages += ReencryptStateUtils.pageCount(state); + completePages += ReencryptStateUtils.pageIndex(state); + } + + long state = encMgr.getEncryptionState(grp.groupId(), PageIdAllocator.INDEX_PARTITION); + + totalPages += ReencryptStateUtils.pageCount(state); + completePages += 
ReencryptStateUtils.pageIndex(state); + } + catch (IgniteCheckedException e) { + throw new IgniteException(e); + } + + return new T2<>(completePages, totalPages); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionRateTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionRateTask.java new file mode 100644 index 0000000000000..32b12bca4f062 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionRateTask.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.encryption; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import org.apache.ignite.IgniteException; +import org.apache.ignite.compute.ComputeJobResult; +import org.apache.ignite.internal.visor.VisorJob; +import org.apache.ignite.internal.visor.VisorMultiNodeTask; +import org.jetbrains.annotations.Nullable; + +/** + * View/change cache group re-encryption rate limit. + */ +public class VisorReencryptionRateTask extends VisorMultiNodeTask, Double> { + /** Serial version uid. 
*/ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override protected VisorJob job(Double arg) { + return new VisorStartReencryptionJob(arg, debug); + } + + /** {@inheritDoc} */ + @Nullable @Override protected Map reduce0(List results) { + Map errs = new HashMap<>(); + + for (ComputeJobResult res : results) + errs.put(res.getNode().id(), res.getException() != null ? res.getException() : res.getData()); + + return errs; + } + + /** The job for viewing/changing the cache group re-encryption rate limit. */ + private static class VisorStartReencryptionJob extends VisorJob { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** + * Create job with specified argument. + * + * @param arg Job argument. + * @param debug Flag indicating whether debug information should be printed into node log. + */ + protected VisorStartReencryptionJob(Double arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override protected Double run(Double rate) throws IgniteException { + double prevRate = ignite.context().encryption().getReencryptionRate(); + + if (rate != null) + ignite.context().encryption().setReencryptionRate(rate); + + return prevRate; + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorStartReencryptionTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorStartReencryptionTask.java new file mode 100644 index 0000000000000..b62bfb25c85b6 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorStartReencryptionTask.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.encryption; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.compute.ComputeJobResult; +import org.apache.ignite.internal.visor.VisorJob; +import org.apache.ignite.internal.visor.VisorMultiNodeTask; +import org.jetbrains.annotations.Nullable; + +/** + * The task to start re-encryption of the specified cache group. + */ +public class VisorStartReencryptionTask extends VisorMultiNodeTask, Boolean> { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override protected VisorJob job(Integer arg) { + return new VisorStartReencryptionJob(arg, debug); + } + + /** {@inheritDoc} */ + @Nullable @Override protected Map reduce0(List results) { + Map errs = new HashMap<>(); + + for (ComputeJobResult res : results) + errs.put(res.getNode().id(), res.getException() != null ? res.getException() : res.getData()); + + return errs; + } + + /** The job for getting the master key name. */ + private static class VisorStartReencryptionJob extends VisorJob { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** + * Create job with specified argument. + * + * @param arg Job argument. 
+ * @param debug Flag indicating whether debug information should be printed into node log. + */ + protected VisorStartReencryptionJob(Integer arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override protected Boolean run(Integer grpId) throws IgniteException { + try { + if (!ignite.context().encryption().reencryptionFuture(grpId).isDone()) + return false; + + ignite.context().encryption().resumeReencryption(grpId); + + return true; + } + catch (IgniteCheckedException e) { + throw new IgniteException(e); + } + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorStopReencryptionTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorStopReencryptionTask.java new file mode 100644 index 0000000000000..362e63050583b --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorStopReencryptionTask.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ + +package org.apache.ignite.internal.visor.encryption; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.compute.ComputeJobResult; +import org.apache.ignite.internal.visor.VisorJob; +import org.apache.ignite.internal.visor.VisorMultiNodeTask; +import org.jetbrains.annotations.Nullable; + +/** + * The task to stop re-encryption of the specified cache group. + */ +public class VisorStopReencryptionTask extends VisorMultiNodeTask, Boolean> { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override protected VisorJob job(Integer arg) { + return new VisorStartReencryptionJob(arg, debug); + } + + /** {@inheritDoc} */ + @Nullable @Override protected Map reduce0(List results) { + Map errs = new HashMap<>(); + + for (ComputeJobResult res : results) + errs.put(res.getNode().id(), res.getException() != null ? res.getException() : res.getData()); + + return errs; + } + + /** The job to stop re-encryption of the specified cache group. */ + private static class VisorStartReencryptionJob extends VisorJob { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** + * Create job with specified argument. + * + * @param arg Job argument. + * @param debug Flag indicating whether debug information should be printed into node log. 
+ */ + protected VisorStartReencryptionJob(Integer arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override protected Boolean run(Integer grpId) throws IgniteException { + try { + return ignite.context().encryption().reencryptionFuture(grpId).cancel(); + } + catch (IgniteCheckedException e) { + throw new IgniteException(e); + } + } + } +} diff --git a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output index 5950ed1f56943..9dad4eab5c912 100644 --- a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output +++ b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output @@ -71,6 +71,27 @@ This utility can do the following commands: Change the master key: control.(sh|bat) --encryption change_master_key newMasterKeyName + Change the encryption key of the cache group: + control.(sh|bat) --encryption change_cache_key cacheGroupName + + View encryption key identifiers of the cache group: + control.(sh|bat) --encryption cache_key_ids cacheGroupName + + View cache group encryption status: + control.(sh|bat) --encryption reencryption_status cacheGroupName + + Stop cache group re-encryption: + control.(sh|bat) --encryption reencryption_stop cacheGroupName + + Start cache group re-encryption: + control.(sh|bat) --encryption reencryption_start cacheGroupName + + View/change re-encryption rate limit: + control.(sh|bat) --encryption reencryption_rate [--limit limit] + + Parameters: + limit - decimal value to change rate limit (MB/s) + Kill compute task by session id: control.(sh|bat) --kill COMPUTE session_id diff --git a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output 
b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output index 5950ed1f56943..9dad4eab5c912 100644 --- a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output +++ b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output @@ -71,6 +71,27 @@ This utility can do the following commands: Change the master key: control.(sh|bat) --encryption change_master_key newMasterKeyName + Change the encryption key of the cache group: + control.(sh|bat) --encryption change_cache_key cacheGroupName + + View encryption key identifiers of the cache group: + control.(sh|bat) --encryption cache_key_ids cacheGroupName + + View cache group encryption status: + control.(sh|bat) --encryption reencryption_status cacheGroupName + + Stop cache group re-encryption: + control.(sh|bat) --encryption reencryption_stop cacheGroupName + + Start cache group re-encryption: + control.(sh|bat) --encryption reencryption_start cacheGroupName + + View/change re-encryption rate limit: + control.(sh|bat) --encryption reencryption_rate [--limit limit] + + Parameters: + limit - decimal value to change rate limit (MB/s) + Kill compute task by session id: control.(sh|bat) --kill COMPUTE session_id From c567fb6e19d63daeb8abc459d626d2050470e3f0 Mon Sep 17 00:00:00 2001 From: Pavel Pereslegin Date: Wed, 28 Oct 2020 13:08:17 +0300 Subject: [PATCH 090/110] IGNITE-13320 Control reencryption using single command. 
--- .../encryption/EncryptionSubcommands.java | 10 +- .../encryption/GroupReencryptionCommand.java | 208 ++++++++++++++++++ .../encryption/ReencryptionRateCommand.java | 4 +- .../encryption/ReencryptionStartCommand.java | 100 --------- .../encryption/ReencryptionStatusCommand.java | 104 --------- .../encryption/ReencryptionStopCommand.java | 100 --------- .../ignite/util/GridCommandHandlerTest.java | 30 ++- .../encryption/GridEncryptionManager.java | 2 - .../encryption/VisorEncryptionStatusTask.java | 119 ---------- .../VisorGroupReencryptionActionType.java | 45 ++++ .../VisorGroupReencryptionTask.java | 157 +++++++++++++ .../VisorGroupReencryptionTaskArg.java | 80 +++++++ .../VisorStartReencryptionTask.java | 83 ------- .../encryption/VisorStopReencryptionTask.java | 78 ------- ...mmandHandlerClusterByClassTest_help.output | 15 +- ...ndlerClusterByClassWithSSLTest_help.output | 15 +- 16 files changed, 521 insertions(+), 629 deletions(-) create mode 100644 modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GroupReencryptionCommand.java delete mode 100644 modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStartCommand.java delete mode 100644 modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStatusCommand.java delete mode 100644 modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStopCommand.java delete mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorEncryptionStatusTask.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionActionType.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionTask.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionTaskArg.java 
delete mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorStartReencryptionTask.java delete mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorStopReencryptionTask.java diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommands.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommands.java index 2c0d3ea7fe308..40d0551c08d33 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommands.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommands.java @@ -38,14 +38,8 @@ public enum EncryptionSubcommands { /** Subcommand to view current encryption key IDs for specified cache group. */ CACHE_GROUP_KEY_IDS("cache_key_ids", new CacheGroupKeysCommand()), - /** Subcommand to view re-encryption status of cache group. */ - REENCRYPTION_STATUS("reencryption_status", new ReencryptionStatusCommand()), - - /** Subcommand to stop cache group reencryption. */ - REENCRYPTION_STOP("reencryption_stop", new ReencryptionStopCommand()), - - /** Subcommand to start cache group reencryption. */ - REENCRYPTION_START("reencryption_start", new ReencryptionStartCommand()), + /** Subcommand to control the process of re-encryption of the cache group. */ + GROUP_REENCRYPTION("group_reencryption", new GroupReencryptionCommand()), /** Subcommand to view/change cache group re-encryption rate limit. 
*/ REENCRYPTION_RATE("reencryption_rate", new ReencryptionRateCommand()); diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GroupReencryptionCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GroupReencryptionCommand.java new file mode 100644 index 0000000000000..1d37951a7ba79 --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GroupReencryptionCommand.java @@ -0,0 +1,208 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.commandline.encryption; + +import java.util.Map; +import java.util.UUID; +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.CommandList; +import org.apache.ignite.internal.commandline.CommandLogger; +import org.apache.ignite.internal.commandline.argument.CommandArg; +import org.apache.ignite.internal.commandline.argument.CommandArgUtils; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.visor.encryption.VisorGroupReencryptionActionType; +import org.apache.ignite.internal.visor.encryption.VisorGroupReencryptionTask; +import org.apache.ignite.internal.visor.encryption.VisorGroupReencryptionTaskArg; + +import static org.apache.ignite.internal.commandline.CommandLogger.DOUBLE_INDENT; +import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; +import static org.apache.ignite.internal.commandline.CommandLogger.optional; +import static org.apache.ignite.internal.commandline.TaskExecutor.BROADCAST_UUID; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; +import static org.apache.ignite.internal.visor.encryption.VisorGroupReencryptionActionType.SUSPEND; + +/** + * Subcommand to control the process of re-encryption of the cache group. + */ +public class GroupReencryptionCommand implements Command { + /** Cache group reencryption task argument. 
*/ + private VisorGroupReencryptionTaskArg taskArg; + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + Map res = executeTaskByNameOnNode( + client, + VisorGroupReencryptionTask.class.getName(), + taskArg, + BROADCAST_UUID, + clientCfg + ); + + switch (taskArg.type()) { + case STATUS: + printStatusResult(taskArg.groupName(), res, log); + + break; + case SUSPEND: + case RESUME: + printSuspendResumeResult(taskArg.groupName(), taskArg.type() == SUSPEND, res, log); + + break; + default: + assert false : "Unknown type: " + taskArg.type(); + } + + return res; + } + catch (Throwable e) { + log.severe("Failed to perform operation."); + log.severe(CommandLogger.errorMessage(e)); + + throw e; + } + } + + /** + * @param grpName Cache group name. + * @param suspend Suspend flag. + * @param res Response. + * @param log Logger. + */ + private void printSuspendResumeResult(String grpName, boolean suspend, Map res, Logger log) { + for (Map.Entry entry : res.entrySet()) { + String msg; + + if (entry.getValue() instanceof Throwable) { + msg = String.format("failed to %s re-encryption of the cache group \"%s\": %s.", + (suspend ? "suspend" : "resume"), grpName, ((Throwable)entry.getValue()).getMessage()); + } + else { + msg = String.format("re-encryption of the cache group \"%s\" has %sbeen %s.", + grpName, (((boolean)entry.getValue()) ? "" : "already "), suspend ? "suspended" : "resumed"); + } + + log.info(INDENT + "Node " + entry.getKey() + ": " + msg); + } + } + + /** + * @param grpName Cache group name. + * @param nodeStates Node ID(s) with number of bytes left for reencryption. + * @param log Logger. 
+ */ + private void printStatusResult(String grpName, Map nodeStates, Logger log) { + log.info("Re-encryption status for the cache group: " + grpName); + + for (Map.Entry entry : nodeStates.entrySet()) { + log.info(INDENT + "Node: " + entry.getKey()); + + if (entry.getValue() instanceof Throwable) { + log.info(String.format("%sfailed to get re-encryption status of the cache group \"%s\": %s.", + DOUBLE_INDENT, grpName, ((Throwable)entry.getValue()).getMessage())); + + continue; + } + + long bytesLeft = (Long)entry.getValue(); + + if (bytesLeft == 0) { + log.info(DOUBLE_INDENT + "re-encryption completed or not required"); + + continue; + } + + log.info(String.format("%s%d KB of data left for re-encryption", DOUBLE_INDENT, bytesLeft / 1024)); + } + } + + /** {@inheritDoc} */ + @Override public VisorGroupReencryptionTaskArg arg() { + return taskArg; + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + ReencryptionCommandArg cmdArg = ReencryptionCommandArg.STATUS; + String grpName = argIter.nextArg("Expected cache group name."); + + while (argIter.hasNextSubArg()) { + String arg = argIter.nextArg("Failed to read command argument."); + + cmdArg = CommandArgUtils.of(arg, ReencryptionCommandArg.class); + + if (cmdArg == null) + throw new IllegalArgumentException("Unexpected command argument: " + arg); + } + + taskArg = new VisorGroupReencryptionTaskArg(grpName, VisorGroupReencryptionActionType.valueOf(cmdArg.name())); + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + Command.usage(log, "Control the process of re-encryption of the cache group:", CommandList.ENCRYPTION, + U.map("--status", "Display re-encryption status.", + "--suspend", "Suspend re-encryption.", + "--resume", "Resume re-encryption."), + EncryptionSubcommands.GROUP_REENCRYPTION.toString(), "cacheGroupName", + optional(ReencryptionCommandArg.STATUS, ReencryptionCommandArg.SUSPEND, ReencryptionCommandArg.RESUME)); + } + + /** 
{@inheritDoc} */ + @Override public String name() { + return EncryptionSubcommands.GROUP_REENCRYPTION.name(); + } + + /** + * Reencryption management command arguments name. + */ + private enum ReencryptionCommandArg implements CommandArg { + /** Suspend reencryption argument. */ + SUSPEND("--suspend"), + + /** Resume reencryption argument. */ + RESUME("--resume"), + + /** Reencryption status argument. */ + STATUS("--status"); + + /** Option name. */ + private final String name; + + /** + * @param name Argument name. + */ + ReencryptionCommandArg(String name) { + this.name = name; + } + + /** {@inheritDoc} */ + @Override public String argName() { + return name; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return name; + } + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java index 58432409113e5..fefce0767afce 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java @@ -122,7 +122,7 @@ public class ReencryptionRateCommand implements Command { /** {@inheritDoc} */ @Override public void printUsage(Logger log) { Command.usage(log, "View/change re-encryption rate limit:", ENCRYPTION, - singletonMap("limit", "decimal value to change rate limit (MB/s)"), + singletonMap("limit", "Decimal value to change re-encryption rate limit (MB/s)."), REENCRYPTION_RATE.toString(), optional(ReencryptionRateCommandArg.LIMIT, "limit")); } @@ -132,7 +132,7 @@ public class ReencryptionRateCommand implements Command { } /** - * Warm-up command arguments name. + * Reencryption rate command arguments name. 
*/ private enum ReencryptionRateCommandArg implements CommandArg { /** Re-encryption rate limit argument. */ diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStartCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStartCommand.java deleted file mode 100644 index 7cb437e71ec01..0000000000000 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStartCommand.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.ignite.internal.commandline.encryption; - -import java.util.Map; -import java.util.UUID; -import java.util.logging.Logger; -import org.apache.ignite.internal.client.GridClient; -import org.apache.ignite.internal.client.GridClientConfiguration; -import org.apache.ignite.internal.commandline.Command; -import org.apache.ignite.internal.commandline.CommandArgIterator; -import org.apache.ignite.internal.commandline.CommandLogger; -import org.apache.ignite.internal.util.typedef.internal.CU; -import org.apache.ignite.internal.visor.encryption.VisorStartReencryptionTask; - -import static org.apache.ignite.internal.commandline.CommandList.ENCRYPTION; -import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; -import static org.apache.ignite.internal.commandline.TaskExecutor.BROADCAST_UUID; -import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; -import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_START; - -/** - * Start cache group reencryption subcommand. - */ -public class ReencryptionStartCommand implements Command { - /** Cache group name. */ - private String grpName; - - /** {@inheritDoc} */ - @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { - try (GridClient client = Command.startClient(clientCfg)) { - Map resErrs = executeTaskByNameOnNode( - client, - VisorStartReencryptionTask.class.getName(), - CU.cacheId(grpName), - BROADCAST_UUID, - clientCfg - ); - - for (Map.Entry entry : resErrs.entrySet()) { - String msg; - - if (entry.getValue() instanceof Throwable) { - msg = "failed to start re-encryption of the cache group \"" + grpName + - "\" (" + ((Throwable)entry.getValue()).getMessage() + ")."; - } - else { - msg = "re-encryption of the cache group \"" + grpName + "\" has " + - (((boolean)entry.getValue()) ? 
"" : "already ") + "been started."; - } - - log.info(INDENT + "Node " + entry.getKey() + ": " + msg); - } - - return null; - } - catch (Throwable e) { - log.severe("Failed to perform operation."); - log.severe(CommandLogger.errorMessage(e)); - - throw e; - } - } - - /** {@inheritDoc} */ - @Override public String arg() { - return grpName; - } - - /** {@inheritDoc} */ - @Override public void parseArguments(CommandArgIterator argIter) { - grpName = argIter.nextArg("Expected cache group name."); - } - - /** {@inheritDoc} */ - @Override public void printUsage(Logger log) { - Command.usage(log, "Start cache group re-encryption:", ENCRYPTION, - REENCRYPTION_START.toString(), "cacheGroupName"); - } - - /** {@inheritDoc} */ - @Override public String name() { - return REENCRYPTION_START.name(); - } -} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStatusCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStatusCommand.java deleted file mode 100644 index bde1569692004..0000000000000 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStatusCommand.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ignite.internal.commandline.encryption; - -import java.util.Map; -import java.util.UUID; -import java.util.logging.Logger; -import org.apache.ignite.internal.client.GridClient; -import org.apache.ignite.internal.client.GridClientConfiguration; -import org.apache.ignite.internal.commandline.Command; -import org.apache.ignite.internal.commandline.CommandArgIterator; -import org.apache.ignite.internal.commandline.CommandLogger; -import org.apache.ignite.internal.util.typedef.T2; -import org.apache.ignite.internal.visor.encryption.VisorEncryptionStatusTask; - -import static org.apache.ignite.internal.commandline.CommandList.ENCRYPTION; -import static org.apache.ignite.internal.commandline.CommandLogger.DOUBLE_INDENT; -import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; -import static org.apache.ignite.internal.commandline.TaskExecutor.BROADCAST_UUID; -import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; -import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_STATUS; - -/** - * View cache group re-encryption status. 
- */ -public class ReencryptionStatusCommand implements Command { - /** Cache group name, */ - private String argCacheGrpName; - - /** {@inheritDoc} */ - @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { - try (GridClient client = Command.startClient(clientCfg)) { - Map> reencryptStates = executeTaskByNameOnNode( - client, - VisorEncryptionStatusTask.class.getName(), - argCacheGrpName, - BROADCAST_UUID, - clientCfg - ); - - log.info("Encryption status for cache group: " + argCacheGrpName); - - for (Map.Entry> entry : reencryptStates.entrySet()) { - log.info(INDENT + "Node: " + entry.getKey()); - - long pagesEncrypted = entry.getValue().get1(); - long pagesTotal = entry.getValue().get2(); - - if (pagesTotal == 0) { - log.info(DOUBLE_INDENT + "Re-encryption completed or not required"); - - continue; - } - - log.info(DOUBLE_INDENT + String.format("left=%d total=%d (memory pages), completed=%.1f%%", - (pagesTotal - pagesEncrypted), pagesTotal, pagesEncrypted * 100 / (double)pagesTotal)); - } - - return reencryptStates; - } - catch (Throwable e) { - log.severe("Failed to perform operation."); - log.severe(CommandLogger.errorMessage(e)); - - throw e; - } - } - - /** {@inheritDoc} */ - @Override public String arg() { - return argCacheGrpName; - } - - /** {@inheritDoc} */ - @Override public void parseArguments(CommandArgIterator argIter) { - argCacheGrpName = argIter.nextArg("Expected cache group name."); - } - - /** {@inheritDoc} */ - @Override public void printUsage(Logger log) { - Command.usage(log, "View cache group encryption status:", ENCRYPTION, - REENCRYPTION_STATUS.toString(), "cacheGroupName"); - } - - /** {@inheritDoc} */ - @Override public String name() { - return REENCRYPTION_STATUS.name(); - } -} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStopCommand.java 
b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStopCommand.java deleted file mode 100644 index d0355e87c93cd..0000000000000 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionStopCommand.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.ignite.internal.commandline.encryption; - -import java.util.Map; -import java.util.UUID; -import java.util.logging.Logger; -import org.apache.ignite.internal.client.GridClient; -import org.apache.ignite.internal.client.GridClientConfiguration; -import org.apache.ignite.internal.commandline.Command; -import org.apache.ignite.internal.commandline.CommandArgIterator; -import org.apache.ignite.internal.commandline.CommandLogger; -import org.apache.ignite.internal.util.typedef.internal.CU; -import org.apache.ignite.internal.visor.encryption.VisorStopReencryptionTask; - -import static org.apache.ignite.internal.commandline.CommandList.ENCRYPTION; -import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; -import static org.apache.ignite.internal.commandline.TaskExecutor.BROADCAST_UUID; -import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; -import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_STOP; - -/** - * Stop cache group reencryption subcommand. - */ -public class ReencryptionStopCommand implements Command { - /** Cache group name. */ - private String grpName; - - /** {@inheritDoc} */ - @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { - try (GridClient client = Command.startClient(clientCfg)) { - Map resErrs = executeTaskByNameOnNode( - client, - VisorStopReencryptionTask.class.getName(), - CU.cacheId(grpName), - BROADCAST_UUID, - clientCfg - ); - - for (Map.Entry entry : resErrs.entrySet()) { - String msg; - - if (entry.getValue() instanceof Throwable) { - msg = "failed to stop re-encryption of the cache group \"" + grpName + - "\" (" + ((Throwable)entry.getValue()).getMessage() + ")."; - } - else { - msg = "re-encryption of the cache group \"" + grpName + "\" has " + - (((boolean)entry.getValue()) ? 
"" : "already ") + "been stopped."; - } - - log.info(INDENT + "Node " + entry.getKey() + ": " + msg); - } - - return null; - } - catch (Throwable e) { - log.severe("Failed to perform operation."); - log.severe(CommandLogger.errorMessage(e)); - - throw e; - } - } - - /** {@inheritDoc} */ - @Override public String arg() { - return grpName; - } - - /** {@inheritDoc} */ - @Override public void parseArguments(CommandArgIterator argIter) { - grpName = argIter.nextArg("Expected cache group name."); - } - - /** {@inheritDoc} */ - @Override public void printUsage(Logger log) { - Command.usage(log, "Stop cache group re-encryption:", ENCRYPTION, - REENCRYPTION_STOP.toString(), "cacheGroupName"); - } - - /** {@inheritDoc} */ - @Override public String name() { - return REENCRYPTION_STOP.name(); - } -} diff --git a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java index 2daca126b3d2f..9da5866dcaa1b 100644 --- a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java +++ b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java @@ -140,12 +140,10 @@ import static org.apache.ignite.internal.commandline.CommandHandler.EXIT_CODE_OK; import static org.apache.ignite.internal.commandline.CommandHandler.EXIT_CODE_UNEXPECTED_ERROR; import static org.apache.ignite.internal.commandline.CommandList.DEACTIVATE; -import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_STATUS; import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.CACHE_GROUP_KEY_IDS; import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.CHANGE_CACHE_GROUP_KEY; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.GROUP_REENCRYPTION; import static 
org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_RATE; -import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_START; -import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_STOP; import static org.apache.ignite.internal.encryption.AbstractEncryptionTest.MASTER_KEY_NAME_2; import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.IGNITE_PDS_SKIP_CHECKPOINT_ON_NODE_STOP; import static org.apache.ignite.internal.processors.cache.persistence.snapshot.AbstractSnapshotSelfTest.doSnapshotCancellationTest; @@ -2743,7 +2741,7 @@ public void testCHangeReencryptionRate() throws Exception { /** @throws Exception If failed. */ @Test - public void testReencryptionInterruptAndResume() throws Exception { + public void testReencryptionSuspendAndResume() throws Exception { encryptionEnabled = true; reencryptSpeed = 0.01; reencryptBatchSize = 1; @@ -2762,53 +2760,51 @@ public void testReencryptionInterruptAndResume() throws Exception { assertTrue(isReencryptionStarted(DEFAULT_CACHE_NAME)); - int ret = execute("--encryption", REENCRYPTION_STATUS.toString(), DEFAULT_CACHE_NAME); + int ret = execute("--encryption", GROUP_REENCRYPTION.toString(), DEFAULT_CACHE_NAME); assertEquals(EXIT_CODE_OK, ret); - Pattern ptrn = Pattern.compile("(?m)Node: [-0-9a-f]{36}\n\\s+left=(?\\d+) total=(?\\d+).+"); + Pattern ptrn = Pattern.compile("(?m)Node: [-0-9a-f]{36}\n\\s+(?\\d+) KB of data.+"); Matcher matcher = ptrn.matcher(testOut.toString()); int matchesCnt = 0; while (matcher.find()) { - assertEquals(2, matcher.groupCount()); + assertEquals(1, matcher.groupCount()); int pagesLeft = Integer.parseInt(matcher.group("left")); - int pagesTotal = Integer.parseInt(matcher.group("total")); assertTrue(pagesLeft > 0); - assertTrue(pagesLeft < pagesTotal); matchesCnt++; } assertEquals(srvNodes, matchesCnt); - ret = execute("--encryption", 
REENCRYPTION_STOP.toString(), DEFAULT_CACHE_NAME); + ret = execute("--encryption", GROUP_REENCRYPTION.toString(), DEFAULT_CACHE_NAME, "--suspend"); assertEquals(EXIT_CODE_OK, ret); assertEquals(srvNodes, countSubstrs(testOut.toString(), - "re-encryption of the cache group \"" + DEFAULT_CACHE_NAME + "\" has been stopped.")); + "re-encryption of the cache group \"" + DEFAULT_CACHE_NAME + "\" has been suspended.")); assertFalse(isReencryptionStarted(DEFAULT_CACHE_NAME)); - ret = execute("--encryption", REENCRYPTION_STOP.toString(), DEFAULT_CACHE_NAME); + ret = execute("--encryption", GROUP_REENCRYPTION.toString(), DEFAULT_CACHE_NAME, "--suspend"); assertEquals(EXIT_CODE_OK, ret); assertEquals(srvNodes, countSubstrs(testOut.toString(), - "re-encryption of the cache group \"" + DEFAULT_CACHE_NAME + "\" has already been stopped.")); + "re-encryption of the cache group \"" + DEFAULT_CACHE_NAME + "\" has already been suspended.")); - ret = execute("--encryption", REENCRYPTION_START.toString(), DEFAULT_CACHE_NAME); + ret = execute("--encryption", GROUP_REENCRYPTION.toString(), DEFAULT_CACHE_NAME, "--resume"); assertEquals(EXIT_CODE_OK, ret); assertEquals(srvNodes, countSubstrs(testOut.toString(), - "re-encryption of the cache group \"" + DEFAULT_CACHE_NAME + "\" has been started.")); + "re-encryption of the cache group \"" + DEFAULT_CACHE_NAME + "\" has been resumed.")); assertTrue(isReencryptionStarted(DEFAULT_CACHE_NAME)); - ret = execute("--encryption", REENCRYPTION_START.toString(), DEFAULT_CACHE_NAME); + ret = execute("--encryption", GROUP_REENCRYPTION.toString(), DEFAULT_CACHE_NAME, "--resume"); assertEquals(EXIT_CODE_OK, ret); assertEquals(srvNodes, countSubstrs(testOut.toString(), - "re-encryption of the cache group \"" + DEFAULT_CACHE_NAME + "\" has already been started.")); + "re-encryption of the cache group \"" + DEFAULT_CACHE_NAME + "\" has already been resumed.")); } /** diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java index 9c1f4fc969ad1..b9b17cf0db36a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java @@ -1185,8 +1185,6 @@ public void resumeReencryption(int grpId) throws IgniteCheckedException { if (!reencryptionInProgress(grpId)) throw new IgniteCheckedException("Re-encryption completed or not required [grpId=" + grpId + "]"); - - startReencryption(Collections.singleton(grpId)); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorEncryptionStatusTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorEncryptionStatusTask.java deleted file mode 100644 index dfd0c7a2d706d..0000000000000 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorEncryptionStatusTask.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.ignite.internal.visor.encryption; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import org.apache.ignite.IgniteCheckedException; -import org.apache.ignite.IgniteException; -import org.apache.ignite.compute.ComputeJobResult; -import org.apache.ignite.internal.managers.encryption.GridEncryptionManager; -import org.apache.ignite.internal.managers.encryption.ReencryptStateUtils; -import org.apache.ignite.internal.pagemem.PageIdAllocator; -import org.apache.ignite.internal.pagemem.store.PageStore; -import org.apache.ignite.internal.processors.cache.CacheGroupContext; -import org.apache.ignite.internal.processors.cache.IgniteInternalCache; -import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; -import org.apache.ignite.internal.util.typedef.T2; -import org.apache.ignite.internal.visor.VisorJob; -import org.apache.ignite.internal.visor.VisorMultiNodeTask; -import org.jetbrains.annotations.Nullable; - -/** - * The task for getting encryption status of the cache group. - */ -public class VisorEncryptionStatusTask extends VisorMultiNodeTask>, T2> { - /** Serial version uid. */ - private static final long serialVersionUID = 0L; - - /** {@inheritDoc} */ - @Override protected VisorJob> job(String arg) { - return new VisorGetCacheGroupKeysJob(arg, debug); - } - - /** {@inheritDoc} */ - @Nullable @Override protected Map> reduce0(List results) { - Map> resMap = new HashMap<>(); - - for (ComputeJobResult res : results) - resMap.put(res.getNode().id(), res.getData()); - - return resMap; - } - - /** The job for getting the master key name. */ - private static class VisorGetCacheGroupKeysJob extends VisorJob> { - /** Serial version uid. */ - private static final long serialVersionUID = 0L; - - /** - * Create job with specified argument. - * - * @param arg Job argument. - * @param debug Flag indicating whether debug information should be printed into node log. 
- */ - protected VisorGetCacheGroupKeysJob(String arg, boolean debug) { - super(arg, debug); - } - - /** {@inheritDoc} */ - @Override protected T2 run(String arg) throws IgniteException { - IgniteInternalCache cache = ignite.context().cache().cache(arg); - - if (cache == null) - throw new IgniteException("Cache " + arg + " not found."); - - CacheGroupContext grp = cache.context().group(); - GridEncryptionManager encMgr = grp.shared().kernalContext().encryption(); - - if (!encMgr.reencryptionInProgress(grp.groupId())) - return new T2<>(0L, 0L); - - FilePageStoreManager mgr = (FilePageStoreManager)grp.shared().pageStore(); - - long completePages = 0; - long totalPages = 0; - - try { - for (int p = 0; p < grp.affinity().partitions(); p++) { - PageStore pageStore = mgr.getStore(grp.groupId(), p); - - if (!pageStore.exists()) - continue; - - long state = encMgr.getEncryptionState(grp.groupId(), p); - - totalPages += ReencryptStateUtils.pageCount(state); - completePages += ReencryptStateUtils.pageIndex(state); - } - - long state = encMgr.getEncryptionState(grp.groupId(), PageIdAllocator.INDEX_PARTITION); - - totalPages += ReencryptStateUtils.pageCount(state); - completePages += ReencryptStateUtils.pageIndex(state); - } - catch (IgniteCheckedException e) { - throw new IgniteException(e); - } - - return new T2<>(completePages, totalPages); - } - } -} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionActionType.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionActionType.java new file mode 100644 index 0000000000000..bc2f77c28f410 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionActionType.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.encryption; + +import org.jetbrains.annotations.Nullable; + +/** + * Cache group reencryption task action type. + */ +public enum VisorGroupReencryptionActionType { + /** Get reencryption status. */ + STATUS, + + /** Suspend reencryption. */ + SUSPEND, + + /** Resume reencryption. */ + RESUME; + + /** Enumerated values. */ + private static final VisorGroupReencryptionActionType[] VALS = values(); + + /** + * @param ord Ordinal value. + * @return Enumerated value or {@code null} if ordinal out of range. + */ + @Nullable public static VisorGroupReencryptionActionType fromOrdinal(int ord) { + return ord >= 0 && ord < VALS.length ? VALS[ord] : null; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionTask.java new file mode 100644 index 0000000000000..82c730209d659 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionTask.java @@ -0,0 +1,157 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.encryption; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.compute.ComputeJobResult; +import org.apache.ignite.internal.managers.encryption.GridEncryptionManager; +import org.apache.ignite.internal.managers.encryption.ReencryptStateUtils; +import org.apache.ignite.internal.pagemem.PageIdAllocator; +import org.apache.ignite.internal.pagemem.store.PageStore; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.cache.IgniteInternalCache; +import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.internal.visor.VisorJob; +import org.apache.ignite.internal.visor.VisorMultiNodeTask; +import org.jetbrains.annotations.Nullable; + +/** + * Task to control the process of re-encryption of the cache group. + */ +public class VisorGroupReencryptionTask + extends VisorMultiNodeTask, Object> +{ + /** Serial version uid. 
*/ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override protected VisorJob job(VisorGroupReencryptionTaskArg arg) { + return new VisorStartReencryptionJob(arg, debug); + } + + /** {@inheritDoc} */ + @Nullable @Override protected Map reduce0(List results) { + Map resMap = new HashMap<>(); + + for (ComputeJobResult res : results) + resMap.put(res.getNode().id(), res.getException() != null ? res.getException() : res.getData()); + + return resMap; + } + + /** The job for getting the master key name. */ + private static class VisorStartReencryptionJob extends VisorJob { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** + * Create job with specified argument. + * + * @param arg Job argument. + * @param debug Flag indicating whether debug information should be printed into node log. + */ + protected VisorStartReencryptionJob(VisorGroupReencryptionTaskArg arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override protected Object run(VisorGroupReencryptionTaskArg arg) throws IgniteException { + String grpName = arg.groupName(); + CacheGroupContext grp = ignite.context().cache().cacheGroup(CU.cacheId(grpName)); + + if (grp == null) { + IgniteInternalCache cache = ignite.context().cache().cache(grpName); + + if (cache == null) + throw new IgniteException("Cache group " + grpName + " not found."); + + grp = cache.context().group(); + + if (grp.sharedGroup()) { + throw new IgniteException("Cache or group \"" + grpName + "\" is a part of group \"" + + grp.name() + "\". 
Provide group name instead of cache name for shared groups."); + } + } + + try { + switch (arg.type()) { + case STATUS: + return bytesLeftForReencryption(grp); + + case SUSPEND: + return ignite.context().encryption().reencryptionFuture(grp.groupId()).cancel(); + + case RESUME: + if (!ignite.context().encryption().reencryptionFuture(grp.groupId()).isDone()) + return false; + + ignite.context().encryption().resumeReencryption(grp.groupId()); + + return true; + + default: + throw new UnsupportedOperationException("Not implemented task action: " + arg.type()); + } + } + catch (IgniteCheckedException e) { + throw new IgniteException(e); + } + } + + /** + * @param grp Cache group context. + * @return Count of bytes left for reencryption. + */ + private Long bytesLeftForReencryption(CacheGroupContext grp) throws IgniteCheckedException { + GridEncryptionManager encMgr = grp.shared().kernalContext().encryption(); + + if (!grp.config().isEncryptionEnabled() || !encMgr.reencryptionInProgress(grp.groupId())) + return 0L; + + FilePageStoreManager mgr = (FilePageStoreManager)grp.shared().pageStore(); + + long completePages = 0; + long totalPages = 0; + + for (int p = 0; p < grp.affinity().partitions(); p++) { + PageStore pageStore = mgr.getStore(grp.groupId(), p); + + if (!pageStore.exists()) + continue; + + long state = encMgr.getEncryptionState(grp.groupId(), p); + + totalPages += ReencryptStateUtils.pageCount(state); + completePages += ReencryptStateUtils.pageIndex(state); + } + + long state = encMgr.getEncryptionState(grp.groupId(), PageIdAllocator.INDEX_PARTITION); + + totalPages += ReencryptStateUtils.pageCount(state); + completePages += ReencryptStateUtils.pageIndex(state); + + return (totalPages - completePages) * grp.dataRegion().pageMemory().pageSize(); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionTaskArg.java 
b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionTaskArg.java new file mode 100644 index 0000000000000..a000609257a5f --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionTaskArg.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.encryption; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.apache.ignite.internal.dto.IgniteDataTransferObject; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; + +/** + * Cache group reencryption task argument. + */ +public class VisorGroupReencryptionTaskArg extends IgniteDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** Cache group name, */ + private String grpName; + + /** Task action type. */ + private VisorGroupReencryptionActionType type; + + /** Default constructor. */ + public VisorGroupReencryptionTaskArg() { + // No-op. + } + + /** + * @param grpName Cache group name. 
+ * @param type Task action type + */ + public VisorGroupReencryptionTaskArg(String grpName, VisorGroupReencryptionActionType type) { + this.grpName = grpName; + this.type = type; + } + + /** @return Cache group name, */ + public String groupName() { + return grpName; + } + + /** @return Task action type. */ + public VisorGroupReencryptionActionType type() { + return type; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + U.writeString(out, grpName); + U.writeEnum(out, type); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte protoVer, ObjectInput in) throws IOException, ClassNotFoundException { + grpName = U.readString(in); + type = VisorGroupReencryptionActionType.fromOrdinal(in.readByte()); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(VisorGroupReencryptionTaskArg.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorStartReencryptionTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorStartReencryptionTask.java deleted file mode 100644 index b62bfb25c85b6..0000000000000 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorStartReencryptionTask.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ignite.internal.visor.encryption; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import org.apache.ignite.IgniteCheckedException; -import org.apache.ignite.IgniteException; -import org.apache.ignite.compute.ComputeJobResult; -import org.apache.ignite.internal.visor.VisorJob; -import org.apache.ignite.internal.visor.VisorMultiNodeTask; -import org.jetbrains.annotations.Nullable; - -/** - * The task to start re-encryption of the specified cache group. - */ -public class VisorStartReencryptionTask extends VisorMultiNodeTask, Boolean> { - /** Serial version uid. */ - private static final long serialVersionUID = 0L; - - /** {@inheritDoc} */ - @Override protected VisorJob job(Integer arg) { - return new VisorStartReencryptionJob(arg, debug); - } - - /** {@inheritDoc} */ - @Nullable @Override protected Map reduce0(List results) { - Map errs = new HashMap<>(); - - for (ComputeJobResult res : results) - errs.put(res.getNode().id(), res.getException() != null ? res.getException() : res.getData()); - - return errs; - } - - /** The job for getting the master key name. */ - private static class VisorStartReencryptionJob extends VisorJob { - /** Serial version uid. */ - private static final long serialVersionUID = 0L; - - /** - * Create job with specified argument. - * - * @param arg Job argument. - * @param debug Flag indicating whether debug information should be printed into node log. 
- */ - protected VisorStartReencryptionJob(Integer arg, boolean debug) { - super(arg, debug); - } - - /** {@inheritDoc} */ - @Override protected Boolean run(Integer grpId) throws IgniteException { - try { - if (!ignite.context().encryption().reencryptionFuture(grpId).isDone()) - return false; - - ignite.context().encryption().resumeReencryption(grpId); - - return true; - } - catch (IgniteCheckedException e) { - throw new IgniteException(e); - } - } - } -} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorStopReencryptionTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorStopReencryptionTask.java deleted file mode 100644 index 362e63050583b..0000000000000 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorStopReencryptionTask.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.ignite.internal.visor.encryption; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import org.apache.ignite.IgniteCheckedException; -import org.apache.ignite.IgniteException; -import org.apache.ignite.compute.ComputeJobResult; -import org.apache.ignite.internal.visor.VisorJob; -import org.apache.ignite.internal.visor.VisorMultiNodeTask; -import org.jetbrains.annotations.Nullable; - -/** - * The task to stop re-encryption of the specified cache group. - */ -public class VisorStopReencryptionTask extends VisorMultiNodeTask, Boolean> { - /** Serial version uid. */ - private static final long serialVersionUID = 0L; - - /** {@inheritDoc} */ - @Override protected VisorJob job(Integer arg) { - return new VisorStartReencryptionJob(arg, debug); - } - - /** {@inheritDoc} */ - @Nullable @Override protected Map reduce0(List results) { - Map errs = new HashMap<>(); - - for (ComputeJobResult res : results) - errs.put(res.getNode().id(), res.getException() != null ? res.getException() : res.getData()); - - return errs; - } - - /** The job for getting the master key name. */ - private static class VisorStartReencryptionJob extends VisorJob { - /** Serial version uid. */ - private static final long serialVersionUID = 0L; - - /** - * Create job with specified argument. - * - * @param arg Job argument. - * @param debug Flag indicating whether debug information should be printed into node log. 
- */ - protected VisorStartReencryptionJob(Integer arg, boolean debug) { - super(arg, debug); - } - - /** {@inheritDoc} */ - @Override protected Boolean run(Integer grpId) throws IgniteException { - try { - return ignite.context().encryption().reencryptionFuture(grpId).cancel(); - } - catch (IgniteCheckedException e) { - throw new IgniteException(e); - } - } - } -} diff --git a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output index 9dad4eab5c912..d0988228b2a34 100644 --- a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output +++ b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output @@ -77,20 +77,19 @@ This utility can do the following commands: View encryption key identifiers of the cache group: control.(sh|bat) --encryption cache_key_ids cacheGroupName - View cache group encryption status: - control.(sh|bat) --encryption reencryption_status cacheGroupName + Control the process of re-encryption of the cache group: + control.(sh|bat) --encryption group_reencryption cacheGroupName [--status --suspend --resume] - Stop cache group re-encryption: - control.(sh|bat) --encryption reencryption_stop cacheGroupName - - Start cache group re-encryption: - control.(sh|bat) --encryption reencryption_start cacheGroupName + Parameters: + --status - Display re-encryption status. + --suspend - Suspend re-encryption. + --resume - Resume re-encryption. View/change re-encryption rate limit: control.(sh|bat) --encryption reencryption_rate [--limit limit] Parameters: - limit - decimal value to change rate limit (MB/s) + limit - Decimal value to change re-encryption rate limit (MB/s). 
Kill compute task by session id: control.(sh|bat) --kill COMPUTE session_id diff --git a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output index 9dad4eab5c912..d0988228b2a34 100644 --- a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output +++ b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output @@ -77,20 +77,19 @@ This utility can do the following commands: View encryption key identifiers of the cache group: control.(sh|bat) --encryption cache_key_ids cacheGroupName - View cache group encryption status: - control.(sh|bat) --encryption reencryption_status cacheGroupName + Control the process of re-encryption of the cache group: + control.(sh|bat) --encryption group_reencryption cacheGroupName [--status --suspend --resume] - Stop cache group re-encryption: - control.(sh|bat) --encryption reencryption_stop cacheGroupName - - Start cache group re-encryption: - control.(sh|bat) --encryption reencryption_start cacheGroupName + Parameters: + --status - Display re-encryption status. + --suspend - Suspend re-encryption. + --resume - Resume re-encryption. View/change re-encryption rate limit: control.(sh|bat) --encryption reencryption_rate [--limit limit] Parameters: - limit - decimal value to change rate limit (MB/s) + limit - Decimal value to change re-encryption rate limit (MB/s). Kill compute task by session id: control.(sh|bat) --kill COMPUTE session_id From a407eec9256cf7d399bccce72e536531dfe57f52 Mon Sep 17 00:00:00 2001 From: Pavel Pereslegin Date: Thu, 29 Oct 2020 12:05:29 +0300 Subject: [PATCH 091/110] IGNITE_13320 Optimize pagesLeft metric calculation. 
--- .../encryption/GridEncryptionManager.java | 25 ++++++++++ .../cache/CacheGroupMetricsImpl.java | 46 ++----------------- 2 files changed, 28 insertions(+), 43 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java index b9b17cf0db36a..c94e05d589e32 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java @@ -40,6 +40,7 @@ import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.failure.FailureContext; import org.apache.ignite.internal.GridKernalContext; @@ -1129,6 +1130,30 @@ public long getEncryptionState(int grpId, int partId) { return states[Math.min(partId, states.length - 1)]; } + /** + * @param grpId Cache group ID. + * @return The number of bytes left for re-ecryption. + */ + public long getBytesLeftForReencryption(int grpId) { + long[] states = reencryptGroups.get(grpId); + + if (states == null) + return 0; + + long pagesCnt = 0; + + for (int i = 0; i < states.length; i++) { + long state = states[i]; + + if (state == 0) + continue; + + pagesCnt += ReencryptStateUtils.pageCount(state) - ReencryptStateUtils.pageIndex(state); + } + + return pagesCnt * ctx.config().getDataStorageConfiguration().getPageSize(); + } + /** * @param keyCnt Count of keys to generate. * @return Future that will contain results of generation. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsImpl.java index 75fdd15c85d96..b7c0300b8defb 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsImpl.java @@ -25,15 +25,10 @@ import java.util.Map; import java.util.Set; import java.util.UUID; -import org.apache.ignite.IgniteCheckedException; -import org.apache.ignite.IgniteException; import org.apache.ignite.cache.CacheMode; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.DataStorageConfiguration; -import org.apache.ignite.internal.managers.encryption.GridEncryptionManager; -import org.apache.ignite.internal.managers.encryption.ReencryptStateUtils; -import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.store.PageStore; import org.apache.ignite.internal.processors.affinity.AffinityAssignment; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; @@ -44,7 +39,6 @@ import org.apache.ignite.internal.processors.cache.persistence.DataRegion; import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; -import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; import org.apache.ignite.internal.processors.metric.MetricRegistry; import org.apache.ignite.internal.processors.metric.impl.AtomicLongMetric; import org.apache.ignite.internal.processors.metric.impl.LongAdderMetric; @@ -187,9 +181,9 @@ public void onTopologyInitialized() { () -> 
!ctx.shared().kernalContext().encryption().reencryptionInProgress(ctx.groupId()), "The flag indicates whether reencryption is finished or not."); - mreg.register("ReencryptionPagesLeft", - this::getPagesLeftForReencryption, - "Number of pages left for reencryption."); + mreg.register("ReencryptionBytesLeft", + () -> ctx.shared().kernalContext().encryption().getBytesLeftForReencryption(ctx.groupId()), + "The number of bytes left for re-ecryption."); } } @@ -502,40 +496,6 @@ public long getSparseStorageSize() { return sparseStorageSize == null ? 0 : sparseStorageSize.value(); } - /** */ - public long getPagesLeftForReencryption() { - if (!ctx.shared().kernalContext().encryption().reencryptionInProgress(ctx.groupId())) - return 0; - - long pagesLeft = 0; - - FilePageStoreManager mgr = (FilePageStoreManager)ctx.shared().pageStore(); - - GridEncryptionManager encMgr = ctx.shared().kernalContext().encryption(); - - try { - for (int p = 0; p < ctx.affinity().partitions(); p++) { - PageStore pageStore = mgr.getStore(ctx.groupId(), p); - - if (!pageStore.exists()) - continue; - - long state = encMgr.getEncryptionState(ctx.groupId(), p); - - pagesLeft += ReencryptStateUtils.pageCount(state) - ReencryptStateUtils.pageIndex(state); - } - - long state = encMgr.getEncryptionState(ctx.groupId(), PageIdAllocator.INDEX_PARTITION); - - pagesLeft += ReencryptStateUtils.pageCount(state) - ReencryptStateUtils.pageIndex(state); - } - catch (IgniteCheckedException e) { - throw new IgniteException(e); - } - - return pagesLeft; - } - /** Removes all metric for cache group. */ public void remove() { if (ctx.shared().kernalContext().isStopping()) From d1fcd546eed6dbefbf664abf5091a9f11011ebf0 Mon Sep 17 00:00:00 2001 From: Pavel Pereslegin Date: Thu, 29 Oct 2020 12:30:20 +0300 Subject: [PATCH 092/110] IGNITE-13320 Use common method to calculate number of bytes remaining. 
--- .../encryption/GridEncryptionManager.java | 2 +- .../VisorGroupReencryptionTask.java | 50 +++---------------- 2 files changed, 8 insertions(+), 44 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java index c94e05d589e32..23e9aa502ec09 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java @@ -1205,7 +1205,7 @@ private void sendGenerateEncryptionKeyRequest(GenerateEncryptionKeyFuture fut) t */ public void resumeReencryption(int grpId) throws IgniteCheckedException { if (grpKeyChangeProc.inProgress()) - throw new IgniteCheckedException("Cannot force start reencryption during cache group key change."); + throw new IgniteCheckedException("Cannot resume re-encryption during cache group key change."); if (!reencryptionInProgress(grpId)) throw new IgniteCheckedException("Re-encryption completed or not required [grpId=" + grpId + "]"); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionTask.java index 82c730209d659..f4b710c0cb15c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionTask.java @@ -25,12 +25,8 @@ import org.apache.ignite.IgniteException; import org.apache.ignite.compute.ComputeJobResult; import org.apache.ignite.internal.managers.encryption.GridEncryptionManager; -import org.apache.ignite.internal.managers.encryption.ReencryptStateUtils; -import org.apache.ignite.internal.pagemem.PageIdAllocator; 
-import org.apache.ignite.internal.pagemem.store.PageStore; import org.apache.ignite.internal.processors.cache.CacheGroupContext; import org.apache.ignite.internal.processors.cache.IgniteInternalCache; -import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.visor.VisorJob; import org.apache.ignite.internal.visor.VisorMultiNodeTask; @@ -94,19 +90,22 @@ protected VisorStartReencryptionJob(VisorGroupReencryptionTaskArg arg, boolean d } } + GridEncryptionManager encMgr = ignite.context().encryption(); + int grpId = grp.groupId(); + try { switch (arg.type()) { case STATUS: - return bytesLeftForReencryption(grp); + return encMgr.getBytesLeftForReencryption(grpId); case SUSPEND: - return ignite.context().encryption().reencryptionFuture(grp.groupId()).cancel(); + return encMgr.reencryptionFuture(grpId).cancel(); case RESUME: - if (!ignite.context().encryption().reencryptionFuture(grp.groupId()).isDone()) + if (!encMgr.reencryptionFuture(grpId).isDone()) return false; - ignite.context().encryption().resumeReencryption(grp.groupId()); + encMgr.resumeReencryption(grpId); return true; @@ -118,40 +117,5 @@ protected VisorStartReencryptionJob(VisorGroupReencryptionTaskArg arg, boolean d throw new IgniteException(e); } } - - /** - * @param grp Cache group context. - * @return Count of bytes left for reencryption. 
- */ - private Long bytesLeftForReencryption(CacheGroupContext grp) throws IgniteCheckedException { - GridEncryptionManager encMgr = grp.shared().kernalContext().encryption(); - - if (!grp.config().isEncryptionEnabled() || !encMgr.reencryptionInProgress(grp.groupId())) - return 0L; - - FilePageStoreManager mgr = (FilePageStoreManager)grp.shared().pageStore(); - - long completePages = 0; - long totalPages = 0; - - for (int p = 0; p < grp.affinity().partitions(); p++) { - PageStore pageStore = mgr.getStore(grp.groupId(), p); - - if (!pageStore.exists()) - continue; - - long state = encMgr.getEncryptionState(grp.groupId(), p); - - totalPages += ReencryptStateUtils.pageCount(state); - completePages += ReencryptStateUtils.pageIndex(state); - } - - long state = encMgr.getEncryptionState(grp.groupId(), PageIdAllocator.INDEX_PARTITION); - - totalPages += ReencryptStateUtils.pageCount(state); - completePages += ReencryptStateUtils.pageIndex(state); - - return (totalPages - completePages) * grp.dataRegion().pageMemory().pageSize(); - } } } From 3df4094a6bb15004f4cc403c102e49247c64178c Mon Sep 17 00:00:00 2001 From: Pavel Pereslegin Date: Thu, 29 Oct 2020 13:35:25 +0300 Subject: [PATCH 093/110] IGNITE-13320 Use precalculated value for metric. 
--- .../encryption/CacheGroupPageScanner.java | 42 +++++++++++++++++-- .../encryption/GridEncryptionManager.java | 19 +-------- 2 files changed, 40 insertions(+), 21 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupPageScanner.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupPageScanner.java index 12e86834ea73f..38bb2eb557e3c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupPageScanner.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupPageScanner.java @@ -25,6 +25,7 @@ import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantLock; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; @@ -210,10 +211,13 @@ public IgniteInternalFuture schedule(int grpId) throws IgniteCheckedExcept } Set parts = new HashSet<>(); + long[] pagesLeft = new long[1]; forEachPageStore(grp, new IgniteInClosureX() { @Override public void applyx(Integer partId) { - if (ctx.encryption().getEncryptionState(grpId, partId) == 0) { + long encState = ctx.encryption().getEncryptionState(grpId, partId); + + if (encState == 0) { if (log.isDebugEnabled()) log.debug("Skipping partition reencryption [grp=" + grpId + ", p=" + partId + "]"); @@ -221,10 +225,12 @@ public IgniteInternalFuture schedule(int grpId) throws IgniteCheckedExcept } parts.add(partId); + + pagesLeft[0] += (ReencryptStateUtils.pageCount(encState) - ReencryptStateUtils.pageIndex(encState)); } }); - GroupScanTask grpScan = new GroupScanTask(grp, parts); + GroupScanTask grpScan = new GroupScanTask(grp, parts, pagesLeft[0]); singleExecSvc.submit(grpScan); @@ -313,6 +319,19 @@ public long[] pagesCount(CacheGroupContext grp) throws 
IgniteCheckedException { return partStates; } + /** + * @param grpId Cache group ID. + * @return Number of remaining memory pages to scan. + */ + public long remainingPagesCount(int grpId) { + GroupScanTask grpScanTask = grps.get(grpId); + + if (grpScanTask != null) + return grpScanTask.remainingPagesCount(); + + return 0; + } + /** * @return Re-encryption rate limit in megabytes per second ({@code 0} - unlimited). */ @@ -399,13 +418,17 @@ private class GroupScanTask extends GridFutureAdapter implements Runnable /** Page memory. */ private final PageMemoryEx pageMem; + /** Total memory pages left for reencryption. */ + private final AtomicLong remainingPagesCntr; + /** * @param grp Cache group. */ - public GroupScanTask(CacheGroupContext grp, Set parts) { + public GroupScanTask(CacheGroupContext grp, Set parts, long remainingPagesCnt) { this.grp = grp; this.parts = new GridConcurrentHashSet<>(parts); + remainingPagesCntr = new AtomicLong(remainingPagesCnt); pageMem = (PageMemoryEx)grp.dataRegion().pageMemory(); } @@ -421,6 +444,10 @@ public GroupScanTask(CacheGroupContext grp, Set parts) { * @return {@code True} if reencryption was cancelled. */ public synchronized boolean excludePartition(int partId) { + long state = ctx.encryption().getEncryptionState(groupId(), partId); + + remainingPagesCntr.addAndGet(ReencryptStateUtils.pageIndex(state) - ReencryptStateUtils.pageCount(state)); + return parts.remove(partId); } @@ -431,6 +458,13 @@ public int groupId() { return grp.groupId(); } + /** + * @return Number of remaining memory pages to scan. 
+ */ + public long remainingPagesCount() { + return remainingPagesCntr.get(); + } + /** {@inheritDoc} */ @Override public void run() { try { @@ -489,6 +523,8 @@ private void scanPartition(int partId, int off, int cnt) throws IgniteCheckedExc } } + remainingPagesCntr.addAndGet(-pagesCnt); + ctx.encryption().setEncryptionState(grp, partId, off, cnt); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java index 23e9aa502ec09..9fc3805af71da 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java @@ -40,7 +40,6 @@ import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.cluster.ClusterNode; -import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.failure.FailureContext; import org.apache.ignite.internal.GridKernalContext; @@ -1135,23 +1134,7 @@ public long getEncryptionState(int grpId, int partId) { * @return The number of bytes left for re-ecryption. 
*/ public long getBytesLeftForReencryption(int grpId) { - long[] states = reencryptGroups.get(grpId); - - if (states == null) - return 0; - - long pagesCnt = 0; - - for (int i = 0; i < states.length; i++) { - long state = states[i]; - - if (state == 0) - continue; - - pagesCnt += ReencryptStateUtils.pageCount(state) - ReencryptStateUtils.pageIndex(state); - } - - return pagesCnt * ctx.config().getDataStorageConfiguration().getPageSize(); + return pageScanner.remainingPagesCount(grpId) * ctx.config().getDataStorageConfiguration().getPageSize(); } /** From 3a3400474ca6e825b556f19969afbe971cc3dc23 Mon Sep 17 00:00:00 2001 From: Pavel Pereslegin Date: Thu, 29 Oct 2020 14:11:10 +0300 Subject: [PATCH 094/110] IGNITE-13320 Code cleanup. --- .../ChangeCacheGroupKeyCommand.java | 2 +- .../encryption/EncryptionCommands.java | 2 +- .../encryption/ReencryptionRateCommand.java | 14 +++++------ .../util/GridCommandHandlerAbstractTest.java | 8 +++--- .../ignite/util/GridCommandHandlerTest.java | 16 +++++------- .../EncryptionConfiguration.java | 2 +- .../encryption/CacheGroupPageScanner.java | 6 +++-- .../encryption/GridEncryptionManager.java | 17 ++++++++++--- .../encryption/VisorCacheGroupKeyIdsTask.java | 10 +++----- .../VisorChangeCacheGroupKeyTask.java | 6 ++--- .../VisorGroupReencryptionTask.java | 25 ++++++------------- .../encryption/VisorReencryptionRateTask.java | 16 ++++++------ 12 files changed, 57 insertions(+), 67 deletions(-) diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeCacheGroupKeyCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeCacheGroupKeyCommand.java index 8e1f17b0bf2b4..cd6f1668016ec 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeCacheGroupKeyCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeCacheGroupKeyCommand.java 
@@ -47,7 +47,7 @@ public class ChangeCacheGroupKeyCommand implements Command { clientCfg ); - log.info("The encryption key has been changed for cache group \"" + argCacheGrpName + "\"."); + log.info("The encryption key has been changed for the cache group \"" + argCacheGrpName + "\"."); return null; } diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommands.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommands.java index 3c96d9777eb25..05fcd846bc8ab 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommands.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommands.java @@ -25,7 +25,7 @@ import org.apache.ignite.internal.commandline.CommandList; /** - * Commands assosiated with encryption features. + * Commands related to encryption functions. * * @see EncryptionSubcommands */ diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java index fefce0767afce..4cef118060f31 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java @@ -61,7 +61,7 @@ public class ReencryptionRateCommand implements Command { String msg; if (entry.getValue() instanceof Throwable) { - msg = " failed to " + (read ? "get" : "limit") + " reencryption rate (" + + msg = " failed to " + (read ? 
"get" : "limit") + " re-encryption rate (" + ((Throwable)entry.getValue()).getMessage() + ")."; } else { @@ -69,9 +69,9 @@ public class ReencryptionRateCommand implements Command { boolean unlimited = read ? prevRate == 0 : rateLimit == 0; if (unlimited) - msg = "reencryption rate is not limited."; + msg = "re-encryption rate is not limited."; else { - msg = "reencryption rate " + (read ? + msg = "re-encryption rate " + (read ? "is limited to " + prevRate : "has been limited to " + rateLimit) + " MB/s."; } @@ -105,7 +105,7 @@ public class ReencryptionRateCommand implements Command { ReencryptionRateCommandArg cmdArg = CommandArgUtils.of(arg, ReencryptionRateCommandArg.class); if (cmdArg == ReencryptionRateCommandArg.LIMIT) { - String rateLimitArg = argIter.nextArg("Expected decimal value for reencryption rate."); + String rateLimitArg = argIter.nextArg("Expected decimal value for re-encryption rate."); try { rateLimit = Double.parseDouble(rateLimitArg); @@ -132,18 +132,16 @@ public class ReencryptionRateCommand implements Command { } /** - * Reencryption rate command arguments name. + * Re-encryption rate command arguments name. */ private enum ReencryptionRateCommandArg implements CommandArg { /** Re-encryption rate limit argument. */ LIMIT("--limit"); - /** Option name. */ + /** Argument name. */ private final String name; /** - * Constructor. - * * @param name Argument name. 
*/ ReencryptionRateCommandArg(String name) { diff --git a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerAbstractTest.java b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerAbstractTest.java index cf31ea1e6124e..8dfbb371df405 100644 --- a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerAbstractTest.java +++ b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerAbstractTest.java @@ -255,12 +255,12 @@ protected boolean idleVerifyRes(Path p) { cfg.setEncryptionSpi(encSpi); - EncryptionConfiguration encrCfg = new EncryptionConfiguration(); + EncryptionConfiguration encCfg = new EncryptionConfiguration(); - encrCfg.setReencryptionRateLimit(reencryptSpeed); - encrCfg.setReencryptionBatchSize(reencryptBatchSize); + encCfg.setReencryptionRateLimit(reencryptSpeed); + encCfg.setReencryptionBatchSize(reencryptBatchSize); - dsCfg.setEncryptionConfiguration(encrCfg); + dsCfg.setEncryptionConfiguration(encCfg); } return cfg; diff --git a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java index 9da5866dcaa1b..5c052465317cc 100644 --- a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java +++ b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java @@ -2694,7 +2694,7 @@ public void testCacheGroupKeyChange() throws Exception { assertEquals(EXIT_CODE_OK, ret); assertContains(log, testOut.toString(), - "The encryption key has been changed for cache group \"" + DEFAULT_CACHE_NAME + '"'); + "The encryption key has been changed for the cache group \"" + DEFAULT_CACHE_NAME + '"'); ret = execute("--encryption", CACHE_GROUP_KEY_IDS.toString(), DEFAULT_CACHE_NAME); @@ -2705,7 +2705,7 @@ public void testCacheGroupKeyChange() throws Exception { /** @throws Exception If failed. 
*/ @Test - public void testCHangeReencryptionRate() throws Exception { + public void testChangeReencryptionRate() throws Exception { int srvNodes = 2; IgniteEx ignite = startGrids(srvNodes); @@ -2717,26 +2717,22 @@ public void testCHangeReencryptionRate() throws Exception { int ret = execute("--encryption", REENCRYPTION_RATE.toString()); assertEquals(EXIT_CODE_OK, ret); - - assertEquals(srvNodes, countSubstrs(testOut.toString(), "reencryption rate is not limited.")); + assertEquals(srvNodes, countSubstrs(testOut.toString(), "re-encryption rate is not limited.")); ret = execute("--encryption", REENCRYPTION_RATE.toString(), "--limit", "0.01"); assertEquals(EXIT_CODE_OK, ret); - - assertEquals(srvNodes, countSubstrs(testOut.toString(), "reencryption rate has been limited to 0.01 MB/s.")); + assertEquals(srvNodes, countSubstrs(testOut.toString(), "re-encryption rate has been limited to 0.01 MB/s.")); ret = execute("--encryption", REENCRYPTION_RATE.toString()); assertEquals(EXIT_CODE_OK, ret); - - assertEquals(srvNodes, countSubstrs(testOut.toString(), "reencryption rate is limited to 0.01 MB/s.")); + assertEquals(srvNodes, countSubstrs(testOut.toString(), "re-encryption rate is limited to 0.01 MB/s.")); ret = execute("--encryption", REENCRYPTION_RATE.toString(), "--limit", "0"); assertEquals(EXIT_CODE_OK, ret); - - assertEquals(srvNodes, countSubstrs(testOut.toString(), "reencryption rate is not limited.")); + assertEquals(srvNodes, countSubstrs(testOut.toString(), "re-encryption rate is not limited.")); } /** @throws Exception If failed. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/EncryptionConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/EncryptionConfiguration.java index 79e205eb5386f..6b9345d4c7550 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/EncryptionConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/EncryptionConfiguration.java @@ -75,7 +75,7 @@ public double getReencryptionRateLimit() { */ public EncryptionConfiguration setReencryptionRateLimit(double reencryptionRateLimit) { A.ensure(reencryptionRateLimit >= 0, - "Reencryption rate limit (" + reencryptionRateLimit + ") must be non-negative."); + "Re-encryption rate limit (" + reencryptionRateLimit + ") must be non-negative."); this.reencryptionRateLimit = reencryptionRateLimit; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupPageScanner.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupPageScanner.java index 38bb2eb557e3c..d512a4ec2d524 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupPageScanner.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupPageScanner.java @@ -506,8 +506,10 @@ private void scanPartition(int partId, int off, int cnt) throws IgniteCheckedExc while (off < cnt) { int pagesCnt = Math.min(batchSize, cnt - off); - if (limiter != null) - limiter.acquire(pagesCnt); + BasicRateLimiter limiter0 = limiter; + + if (limiter0 != null) + limiter0.acquire(pagesCnt); synchronized (this) { if (isDone() || !parts.contains(partId)) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java index 9fc3805af71da..d0d467fefb123 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/GridEncryptionManager.java @@ -1181,19 +1181,30 @@ private void sendGenerateEncryptionKeyRequest(GenerateEncryptionKeyFuture fut) t ctx.io().sendToGridTopic(rndNode.id(), TOPIC_GEN_ENC_KEY, req, SYSTEM_POOL); } + /** + * Suspend re-encryption of the cache group. + * + * @param grpId Cache group ID. + */ + public boolean suspendReencryption(int grpId) throws IgniteCheckedException { + return reencryptionFuture(grpId).cancel(); + } + /** * Forces re-encryption of the cache group. * * @param grpId Cache group ID. */ - public void resumeReencryption(int grpId) throws IgniteCheckedException { - if (grpKeyChangeProc.inProgress()) - throw new IgniteCheckedException("Cannot resume re-encryption during cache group key change."); + public boolean resumeReencryption(int grpId) throws IgniteCheckedException { + if (!reencryptionFuture(grpId).isDone()) + return false; if (!reencryptionInProgress(grpId)) throw new IgniteCheckedException("Re-encryption completed or not required [grpId=" + grpId + "]"); startReencryption(Collections.singleton(grpId)); + + return true; } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupKeyIdsTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupKeyIdsTask.java index ccd4238f8c97a..531a86d35dcc9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupKeyIdsTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupKeyIdsTask.java @@ -37,7 +37,7 @@ public class VisorCacheGroupKeyIdsTask extends VisorMultiNodeTask> job(String arg) { - return new VisorGetCacheGroupKeysJob(arg, debug); + return new VisorCacheGroupKeyIdsJob(arg, debug); } /** {@inheritDoc} */ @@ -53,18 +53,16 @@ public class 
VisorCacheGroupKeyIdsTask extends VisorMultiNodeTask> { + /** The job for getting encryption key identifiers of the cache group. */ + private static class VisorCacheGroupKeyIdsJob extends VisorJob> { /** Serial version uid. */ private static final long serialVersionUID = 0L; /** - * Create job with specified argument. - * * @param arg Job argument. * @param debug Flag indicating whether debug information should be printed into node log. */ - protected VisorGetCacheGroupKeysJob(String arg, boolean debug) { + protected VisorCacheGroupKeyIdsJob(String arg, boolean debug) { super(arg, debug); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorChangeCacheGroupKeyTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorChangeCacheGroupKeyTask.java index 7317852395b5f..1b8cca2027cc7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorChangeCacheGroupKeyTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorChangeCacheGroupKeyTask.java @@ -25,7 +25,7 @@ import org.apache.ignite.internal.visor.VisorOneNodeTask; /** - * The task for changing the encryption key for the cache group. + * The task for changing the encryption key of the cache group. * * @see IgniteEncryption#changeCacheGroupKey(Collection) */ @@ -38,14 +38,12 @@ public class VisorChangeCacheGroupKeyTask extends VisorOneNodeTask return new VisorChangeCacheGroupKeyJob(arg, debug); } - /** The job for getting the master key name. */ + /** The job for changing the encryption key of the cache group. */ private static class VisorChangeCacheGroupKeyJob extends VisorJob { /** Serial version uid. */ private static final long serialVersionUID = 0L; /** - * Create job with specified argument. - * * @param arg Job argument. * @param debug Flag indicating whether debug information should be printed into node log. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionTask.java index f4b710c0cb15c..06c6b8edea996 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionTask.java @@ -24,7 +24,6 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.compute.ComputeJobResult; -import org.apache.ignite.internal.managers.encryption.GridEncryptionManager; import org.apache.ignite.internal.processors.cache.CacheGroupContext; import org.apache.ignite.internal.processors.cache.IgniteInternalCache; import org.apache.ignite.internal.util.typedef.internal.CU; @@ -43,7 +42,7 @@ public class VisorGroupReencryptionTask /** {@inheritDoc} */ @Override protected VisorJob job(VisorGroupReencryptionTaskArg arg) { - return new VisorStartReencryptionJob(arg, debug); + return new VisorGroupReencryptionJob(arg, debug); } /** {@inheritDoc} */ @@ -56,18 +55,16 @@ public class VisorGroupReencryptionTask return resMap; } - /** The job for getting the master key name. */ - private static class VisorStartReencryptionJob extends VisorJob { + /** The job to control the process of re-encryption of the cache group. */ + private static class VisorGroupReencryptionJob extends VisorJob { /** Serial version uid. */ private static final long serialVersionUID = 0L; /** - * Create job with specified argument. - * * @param arg Job argument. * @param debug Flag indicating whether debug information should be printed into node log. 
*/ - protected VisorStartReencryptionJob(VisorGroupReencryptionTaskArg arg, boolean debug) { + protected VisorGroupReencryptionJob(VisorGroupReencryptionTaskArg arg, boolean debug) { super(arg, debug); } @@ -90,24 +87,16 @@ protected VisorStartReencryptionJob(VisorGroupReencryptionTaskArg arg, boolean d } } - GridEncryptionManager encMgr = ignite.context().encryption(); - int grpId = grp.groupId(); - try { switch (arg.type()) { case STATUS: - return encMgr.getBytesLeftForReencryption(grpId); + return ignite.context().encryption().getBytesLeftForReencryption(grp.groupId()); case SUSPEND: - return encMgr.reencryptionFuture(grpId).cancel(); + return ignite.context().encryption().suspendReencryption(grp.groupId()); case RESUME: - if (!encMgr.reencryptionFuture(grpId).isDone()) - return false; - - encMgr.resumeReencryption(grpId); - - return true; + return ignite.context().encryption().resumeReencryption(grp.groupId()); default: throw new UnsupportedOperationException("Not implemented task action: " + arg.type()); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionRateTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionRateTask.java index 32b12bca4f062..b3d316343f3fa 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionRateTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionRateTask.java @@ -36,31 +36,29 @@ public class VisorReencryptionRateTask extends VisorMultiNodeTask job(Double arg) { - return new VisorStartReencryptionJob(arg, debug); + return new VisorReencryptionRateJob(arg, debug); } /** {@inheritDoc} */ @Nullable @Override protected Map reduce0(List results) { - Map errs = new HashMap<>(); + Map resMap = new HashMap<>(); for (ComputeJobResult res : results) - errs.put(res.getNode().id(), res.getException() != null ? 
res.getException() : res.getData()); + resMap.put(res.getNode().id(), res.getException() != null ? res.getException() : res.getData()); - return errs; + return resMap; } - /** The job for getting the master key name. */ - private static class VisorStartReencryptionJob extends VisorJob { + /** The job for view/change cache group re-encryption rate limit. */ + private static class VisorReencryptionRateJob extends VisorJob { /** Serial version uid. */ private static final long serialVersionUID = 0L; /** - * Create job with specified argument. - * * @param arg Job argument. * @param debug Flag indicating whether debug information should be printed into node log. */ - protected VisorStartReencryptionJob(Double arg, boolean debug) { + protected VisorReencryptionRateJob(Double arg, boolean debug) { super(arg, debug); } From 60b4da2fae43591217680fa200b73c5db7a95d9e Mon Sep 17 00:00:00 2001 From: Pavel Pereslegin Date: Thu, 29 Oct 2020 17:22:18 +0300 Subject: [PATCH 095/110] IGNITE-13320 Code cleanup (Unlimited rate flag). --- .../encryption/CacheGroupPageScanner.java | 40 ++++--------------- .../internal/util/BasicRateLimiter.java | 20 ++++++++-- .../CacheGroupReencryptionTest.java | 6 +-- 3 files changed, 27 insertions(+), 39 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupPageScanner.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupPageScanner.java index d512a4ec2d524..c2a68950425d3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupPageScanner.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/encryption/CacheGroupPageScanner.java @@ -85,7 +85,7 @@ public class CacheGroupPageScanner implements CheckpointListener { private final int batchSize; /** Page scanning speed limiter. */ - private volatile BasicRateLimiter limiter; + private final BasicRateLimiter limiter; /** Stop flag. 
*/ private boolean stopped; @@ -110,8 +110,7 @@ public CacheGroupPageScanner(GridKernalContext ctx) { double rateLimit = dsCfg.getEncryptionConfiguration().getReencryptionRateLimit(); - limiter = rateLimit > 0 ? new BasicRateLimiter(rateLimit * MB / - (dsCfg.getPageSize() == 0 ? DataStorageConfiguration.DFLT_PAGE_SIZE : dsCfg.getPageSize())) : null; + limiter = new BasicRateLimiter(calcPermits(rateLimit, dsCfg)); batchSize = dsCfg.getEncryptionConfiguration().getReencryptionBatchSize(); @@ -338,12 +337,8 @@ public long remainingPagesCount(int grpId) { public double getRate() { DataStorageConfiguration dsCfg = ctx.config().getDataStorageConfiguration(); - if (CU.isPersistenceEnabled(dsCfg)) { - BasicRateLimiter limiter0 = limiter; - - if (limiter0 != null) - return dsCfg.getPageSize() * limiter0.getRate() / MB; - } + if (CU.isPersistenceEnabled(dsCfg)) + return dsCfg.getPageSize() * limiter.getRate() / MB; return 0; } @@ -354,26 +349,8 @@ public double getRate() { public void setRate(double rate) { DataStorageConfiguration dsCfg = ctx.config().getDataStorageConfiguration(); - if (!CU.isPersistenceEnabled(dsCfg)) - return; - - BasicRateLimiter limiter0 = limiter; - - if (rate == 0 && limiter0 != null) { - limiter = null; - - return; - } - - double permits = calcPermits(rate, dsCfg); - - if (limiter0 != null) { - limiter0.setRate(permits); - - return; - } - - limiter = new BasicRateLimiter(permits); + if (CU.isPersistenceEnabled(dsCfg)) + limiter.setRate(calcPermits(rate, dsCfg)); } /** @@ -506,10 +483,7 @@ private void scanPartition(int partId, int off, int cnt) throws IgniteCheckedExc while (off < cnt) { int pagesCnt = Math.min(batchSize, cnt - off); - BasicRateLimiter limiter0 = limiter; - - if (limiter0 != null) - limiter0.acquire(pagesCnt); + limiter.acquire(pagesCnt); synchronized (this) { if (isDone() || !parts.contains(partId)) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/BasicRateLimiter.java 
b/modules/core/src/main/java/org/apache/ignite/internal/util/BasicRateLimiter.java index 8429ccbbcb346..195264e6b1b71 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/util/BasicRateLimiter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/util/BasicRateLimiter.java @@ -64,6 +64,11 @@ public class BasicRateLimiter { */ private long nextFreeTicketMicros; + /** + * The flag indicates that the rate is not limited. + */ + private volatile boolean unlimited; + /** * @param permitsPerSecond Estimated number of permits per second. */ @@ -74,11 +79,14 @@ public BasicRateLimiter(double permitsPerSecond) { /** * Updates the stable rate. * - * @param permitsPerSecond The new stable rate of this {@code RateLimiter}. + * @param permitsPerSecond The new stable rate of this {@code RateLimiter}, set {@code 0} for unlimited rate. * @throws IllegalArgumentException If {@code permitsPerSecond} is negative or zero. */ public void setRate(double permitsPerSecond) { - A.ensure(permitsPerSecond > 0, "Requested permits (" + permitsPerSecond + ") must be positive"); + A.ensure(permitsPerSecond >= 0, "Requested permits (" + permitsPerSecond + ") must be non-negative."); + + if (unlimited = (permitsPerSecond == 0)) + return; synchronized (mux) { resync(); @@ -88,9 +96,12 @@ public void setRate(double permitsPerSecond) { } /** - * @return The stable rate (as {@code permits per seconds}). + * @return The stable rate as {@code permits per seconds} ({@code 0} means that the rate is unlimited). */ public double getRate() { + if (unlimited) + return 0; + synchronized (mux) { return SECONDS.toMicros(1L) / stableIntervalMicros; } @@ -104,6 +115,9 @@ public double getRate() { * @throws IllegalArgumentException If the requested number of permits is negative or zero. 
*/ public void acquire(int permits) throws IgniteInterruptedCheckedException { + if (unlimited) + return; + long microsToWait = reserve(permits); try { diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/CacheGroupReencryptionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/CacheGroupReencryptionTest.java index 1f7cf36aaedab..1980982cb1091 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/encryption/CacheGroupReencryptionTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/CacheGroupReencryptionTest.java @@ -758,12 +758,12 @@ private void validateMetrics(IgniteEx node, boolean finished) { MetricRegistry registry = node.context().metric().registry(metricName(CacheGroupMetricsImpl.CACHE_GROUP_METRICS_PREFIX, cacheName())); - LongMetric pagesLeft = registry.findMetric("ReencryptionPagesLeft"); + LongMetric bytesLeft = registry.findMetric("ReencryptionBytesLeft"); if (finished) - assertEquals(0, pagesLeft.value()); + assertEquals(0, bytesLeft.value()); else - assertTrue(pagesLeft.value() > 0); + assertTrue(bytesLeft.value() > 0); BooleanMetric reencryptionFinished = registry.findMetric("ReencryptionFinished"); From bf9b5bdb865b363b1a093412f2e15d5e267040ca Mon Sep 17 00:00:00 2001 From: Pavel Pereslegin Date: Thu, 29 Oct 2020 17:34:28 +0300 Subject: [PATCH 096/110] IGNITE-13320 Test for unlimited rate. 
--- .../internal/util/BasicRateLimiterTest.java | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/modules/core/src/test/java/org/apache/ignite/internal/util/BasicRateLimiterTest.java b/modules/core/src/test/java/org/apache/ignite/internal/util/BasicRateLimiterTest.java index c136cb8f9db5b..9f4bca931df50 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/util/BasicRateLimiterTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/util/BasicRateLimiterTest.java @@ -67,6 +67,21 @@ private void checkRate(BasicRateLimiter limiter, int totalOps) throws IgniteInte assertEquals(1, Math.round((double)timeSpent / 1000 / totalOps * permitsPerSec)); } + /** + * Check that the rate can be set as unlimited. + */ + @Test + public void testUnlimitedRate() throws IgniteInterruptedCheckedException { + BasicRateLimiter limiter = new BasicRateLimiter(0); + limiter.acquire(Integer.MAX_VALUE); + + limiter.setRate(1); + limiter.acquire(1); + + limiter.setRate(0); + limiter.acquire(Integer.MAX_VALUE); + } + /** * Check rate limit with multiple threads. */ From c777f13d9a3f9e65a3a5a8b9dfd9664935b713d3 Mon Sep 17 00:00:00 2001 From: Pavel Pereslegin Date: Fri, 30 Oct 2020 16:02:43 +0300 Subject: [PATCH 097/110] IGNITE-12843 Code cleanup (review notes). 
--- .../encryption/CacheGroupKeysCommand.java | 14 ++++++-------- .../ChangeCacheGroupKeyCommand.java | 4 ++-- .../encryption/ChangeMasterKeyCommand.java | 2 +- .../encryption/EncryptionCommands.java | 2 +- .../encryption/EncryptionSubcommands.java | 19 +++++++++++++------ .../encryption/GetMasterKeyNameCommand.java | 2 +- .../encryption/GroupReencryptionCommand.java | 4 ++-- .../encryption/ReencryptionRateCommand.java | 2 +- 8 files changed, 27 insertions(+), 22 deletions(-) diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupKeysCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupKeysCommand.java index 4d74e1ad1ad30..ccfba92647b2a 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupKeysCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupKeysCommand.java @@ -56,20 +56,18 @@ public class CacheGroupKeysCommand implements Command { log.info("Encryption key identifiers for cache: " + argCacheGrpName); - for (Map.Entry> entry : keyIdsMap.entrySet()) { - log.info(INDENT + "Node: " + entry.getKey()); - - List keyIds = entry.getValue(); + keyIdsMap.forEach((nodeId, keyIds) -> { + log.info(INDENT + "Node: " + nodeId); if (F.isEmpty(keyIds)) { log.info(DOUBLE_INDENT + "---"); - continue; + return; } for (int i = 0; i < keyIds.size(); i++) log.info(DOUBLE_INDENT + keyIds.get(i) + (i == 0 ? 
" (active)" : "")); - } + }); return keyIdsMap; } @@ -88,7 +86,7 @@ public class CacheGroupKeysCommand implements Command { /** {@inheritDoc} */ @Override public void parseArguments(CommandArgIterator argIter) { - argCacheGrpName = argIter.nextArg("Expected cache group name."); + argCacheGrpName = argIter.nextArg("Сache group name is expected."); } /** {@inheritDoc} */ @@ -99,6 +97,6 @@ public class CacheGroupKeysCommand implements Command { /** {@inheritDoc} */ @Override public String name() { - return CACHE_GROUP_KEY_IDS.name(); + return CACHE_GROUP_KEY_IDS.text().toUpperCase(); } } diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeCacheGroupKeyCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeCacheGroupKeyCommand.java index cd6f1668016ec..c9e9ff4c1d85f 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeCacheGroupKeyCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeCacheGroupKeyCommand.java @@ -72,7 +72,7 @@ public class ChangeCacheGroupKeyCommand implements Command { /** {@inheritDoc} */ @Override public void parseArguments(CommandArgIterator argIter) { - argCacheGrpName = argIter.nextArg("Expected cache group name."); + argCacheGrpName = argIter.nextArg("Сache group name is expected."); } /** {@inheritDoc} */ @@ -83,6 +83,6 @@ public class ChangeCacheGroupKeyCommand implements Command { /** {@inheritDoc} */ @Override public String name() { - return CHANGE_CACHE_GROUP_KEY.name(); + return CHANGE_CACHE_GROUP_KEY.text().toUpperCase(); } } diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeMasterKeyCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeMasterKeyCommand.java index a1f5dd483ce01..b7f5f0b216211 100644 --- 
a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeMasterKeyCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeMasterKeyCommand.java @@ -82,6 +82,6 @@ public class ChangeMasterKeyCommand implements Command { /** {@inheritDoc} */ @Override public String name() { - return CHANGE_MASTER_KEY.name(); + return CHANGE_MASTER_KEY.text().toUpperCase(); } } diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommands.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommands.java index 05fcd846bc8ab..0022ed6e50b0a 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommands.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommands.java @@ -48,7 +48,7 @@ public class EncryptionCommands extends AbstractCommand { cmd.subcommand().parseArguments(argIter); if (argIter.hasNextSubArg()) - throw new IllegalArgumentException("Unexpected argument of --encryptiopn subcommand: " + argIter.peekNextArg()); + throw new IllegalArgumentException("Unexpected argument of --encryption subcommand: " + argIter.peekNextArg()); this.cmd = cmd; } diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommands.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommands.java index 40d0551c08d33..9c474055e5a80 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommands.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommands.java @@ -48,22 +48,29 @@ public enum EncryptionSubcommands { private final String name; /** Command. 
*/ - private final Command command; + private final Command cmd; /** * @param name Encryption subcommand name. - * @param command Command implementation. + * @param cmd Command implementation. */ - EncryptionSubcommands(String name, Command command) { + EncryptionSubcommands(String name, Command cmd) { this.name = name; - this.command = command; + this.cmd = cmd; + } + + /** + * @return Name. + */ + public String text() { + return name; } /** * @return Cache subcommand implementation. */ - public Command subcommand() { - return command; + public Command subcommand() { + return cmd; } /** diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GetMasterKeyNameCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GetMasterKeyNameCommand.java index 58a466201c8f0..029afa5b98ef5 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GetMasterKeyNameCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GetMasterKeyNameCommand.java @@ -67,6 +67,6 @@ public class GetMasterKeyNameCommand implements Command { /** {@inheritDoc} */ @Override public String name() { - return GET_MASTER_KEY_NAME.name(); + return GET_MASTER_KEY_NAME.text().toUpperCase(); } } diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GroupReencryptionCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GroupReencryptionCommand.java index 1d37951a7ba79..8791d71933d06 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GroupReencryptionCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GroupReencryptionCommand.java @@ -143,7 +143,7 @@ private void printStatusResult(String grpName, Map nodeStates, Log /** {@inheritDoc} 
*/ @Override public void parseArguments(CommandArgIterator argIter) { ReencryptionCommandArg cmdArg = ReencryptionCommandArg.STATUS; - String grpName = argIter.nextArg("Expected cache group name."); + String grpName = argIter.nextArg("Сache group name is expected."); while (argIter.hasNextSubArg()) { String arg = argIter.nextArg("Failed to read command argument."); @@ -169,7 +169,7 @@ private void printStatusResult(String grpName, Map nodeStates, Log /** {@inheritDoc} */ @Override public String name() { - return EncryptionSubcommands.GROUP_REENCRYPTION.name(); + return EncryptionSubcommands.GROUP_REENCRYPTION.text().toUpperCase(); } /** diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java index 4cef118060f31..07ee8242a13c3 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java @@ -128,7 +128,7 @@ public class ReencryptionRateCommand implements Command { /** {@inheritDoc} */ @Override public String name() { - return REENCRYPTION_RATE.name(); + return REENCRYPTION_RATE.text().toUpperCase(); } /** From d66717168e9468337a9b243576e014a0758df2e9 Mon Sep 17 00:00:00 2001 From: Pavel Pereslegin Date: Fri, 30 Oct 2020 17:08:55 +0300 Subject: [PATCH 098/110] IGNITE-13320 Better arguments check in GroupReencryptionCommand. 
--- .../encryption/GroupReencryptionCommand.java | 15 +++++++++++---- ...idCommandHandlerClusterByClassTest_help.output | 2 +- ...ndHandlerClusterByClassWithSSLTest_help.output | 2 +- 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GroupReencryptionCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GroupReencryptionCommand.java index 8791d71933d06..bf63a5edc75d0 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GroupReencryptionCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GroupReencryptionCommand.java @@ -17,6 +17,7 @@ package org.apache.ignite.internal.commandline.encryption; +import java.util.Arrays; import java.util.Map; import java.util.UUID; import java.util.logging.Logger; @@ -28,6 +29,7 @@ import org.apache.ignite.internal.commandline.CommandLogger; import org.apache.ignite.internal.commandline.argument.CommandArg; import org.apache.ignite.internal.commandline.argument.CommandArgUtils; +import org.apache.ignite.internal.util.lang.GridFunc; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.internal.visor.encryption.VisorGroupReencryptionActionType; import org.apache.ignite.internal.visor.encryption.VisorGroupReencryptionTask; @@ -145,13 +147,18 @@ private void printStatusResult(String grpName, Map nodeStates, Log ReencryptionCommandArg cmdArg = ReencryptionCommandArg.STATUS; String grpName = argIter.nextArg("Сache group name is expected."); - while (argIter.hasNextSubArg()) { + if (argIter.hasNextSubArg()) { String arg = argIter.nextArg("Failed to read command argument."); cmdArg = CommandArgUtils.of(arg, ReencryptionCommandArg.class); if (cmdArg == null) throw new IllegalArgumentException("Unexpected command argument: " + arg); + + if (argIter.hasNextSubArg()) { 
+ throw new IllegalArgumentException("Only one of the following options is expected: " + + GridFunc.concat(Arrays.asList(ReencryptionCommandArg.values()), ", ")); + } } taskArg = new VisorGroupReencryptionTaskArg(grpName, VisorGroupReencryptionActionType.valueOf(cmdArg.name())); @@ -160,9 +167,9 @@ private void printStatusResult(String grpName, Map nodeStates, Log /** {@inheritDoc} */ @Override public void printUsage(Logger log) { Command.usage(log, "Control the process of re-encryption of the cache group:", CommandList.ENCRYPTION, - U.map("--status", "Display re-encryption status.", - "--suspend", "Suspend re-encryption.", - "--resume", "Resume re-encryption."), + U.map(ReencryptionCommandArg.STATUS.argName(), "Display re-encryption status (default action).", + ReencryptionCommandArg.SUSPEND.argName(), "Suspend re-encryption.", + ReencryptionCommandArg.RESUME.argName(), "Resume re-encryption."), EncryptionSubcommands.GROUP_REENCRYPTION.toString(), "cacheGroupName", optional(ReencryptionCommandArg.STATUS, ReencryptionCommandArg.SUSPEND, ReencryptionCommandArg.RESUME)); } diff --git a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output index d0988228b2a34..171303dcd9604 100644 --- a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output +++ b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output @@ -81,7 +81,7 @@ This utility can do the following commands: control.(sh|bat) --encryption group_reencryption cacheGroupName [--status --suspend --resume] Parameters: - --status - Display re-encryption status. + --status - Display re-encryption status (default action). --suspend - Suspend re-encryption. --resume - Resume re-encryption. 
diff --git a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output index d0988228b2a34..171303dcd9604 100644 --- a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output +++ b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output @@ -81,7 +81,7 @@ This utility can do the following commands: control.(sh|bat) --encryption group_reencryption cacheGroupName [--status --suspend --resume] Parameters: - --status - Display re-encryption status. + --status - Display re-encryption status (default action). --suspend - Suspend re-encryption. --resume - Resume re-encryption. From aa9830d2cde02176149fdcef434e1433d34ca23f Mon Sep 17 00:00:00 2001 From: Pavel Pereslegin Date: Mon, 2 Nov 2020 23:53:10 +0300 Subject: [PATCH 099/110] IGNITE-13320 Wrap to DTO + rafactoring. --- .../CacheGroupEncryptionCommand.java | 257 ++++++++++++++++++ .../encryption/CacheGroupKeysCommand.java | 102 ------- .../ChangeCacheGroupKeyCommand.java | 22 +- .../encryption/EncryptionSubcommands.java | 14 +- .../encryption/GroupReencryptionCommand.java | 215 --------------- .../encryption/ReencryptionRateCommand.java | 60 ++-- .../ignite/util/GridCommandHandlerTest.java | 39 ++- .../VisorCacheGroupEncryptionTask.java | 135 +++++++++ ... 
=> VisorCacheGroupEncryptionTaskArg.java} | 28 +- .../VisorCacheGroupEncryptionTaskResult.java | 96 +++++++ .../encryption/VisorCacheGroupKeyIdsTask.java | 79 ------ .../VisorChangeCacheGroupKeyTask.java | 12 +- .../encryption/VisorEncryptionKeyIdsTask.java | 84 ++++++ .../VisorGroupReencryptionActionType.java | 45 --- .../VisorGroupReencryptionTask.java | 110 -------- .../encryption/VisorReencryptionRateTask.java | 86 +++++- .../VisorReencryptionRateTaskArg.java | 74 +++++ .../VisorReencryptionResumeTask.java | 61 +++++ .../VisorReencryptionStatusTask.java | 89 ++++++ .../VisorReencryptionSuspendTask.java | 84 ++++++ .../resources/META-INF/classnames.properties | 1 + ...mmandHandlerClusterByClassTest_help.output | 13 +- ...ndlerClusterByClassWithSSLTest_help.output | 13 +- 23 files changed, 1069 insertions(+), 650 deletions(-) create mode 100644 modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupEncryptionCommand.java delete mode 100644 modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupKeysCommand.java delete mode 100644 modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GroupReencryptionCommand.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTask.java rename modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/{VisorGroupReencryptionTaskArg.java => VisorCacheGroupEncryptionTaskArg.java} (66%) create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTaskResult.java delete mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupKeyIdsTask.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorEncryptionKeyIdsTask.java delete mode 100644 
modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionActionType.java delete mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionTask.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionRateTaskArg.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionResumeTask.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionStatusTask.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionSuspendTask.java diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupEncryptionCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupEncryptionCommand.java new file mode 100644 index 0000000000000..6cea495127613 --- /dev/null +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupEncryptionCommand.java @@ -0,0 +1,257 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.encryption; + +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.logging.Logger; +import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.CommandList; +import org.apache.ignite.internal.commandline.CommandLogger; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.visor.encryption.VisorCacheGroupEncryptionTaskArg; +import org.apache.ignite.internal.visor.encryption.VisorCacheGroupEncryptionTaskResult; +import org.apache.ignite.internal.visor.encryption.VisorEncryptionKeyIdsTask; +import org.apache.ignite.internal.visor.encryption.VisorReencryptionResumeTask; +import org.apache.ignite.internal.visor.encryption.VisorReencryptionStatusTask; +import org.apache.ignite.internal.visor.encryption.VisorReencryptionSuspendTask; + +import static org.apache.ignite.internal.commandline.CommandList.ENCRYPTION; +import static org.apache.ignite.internal.commandline.CommandLogger.DOUBLE_INDENT; +import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; +import static org.apache.ignite.internal.commandline.TaskExecutor.BROADCAST_UUID; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.CACHE_GROUP_KEY_IDS; + +/** + * Base cache group encryption multinode subcommand. + * + * @param Command result type. + * @param Multinode task result. 
+ */ +public abstract class CacheGroupEncryptionCommand> + implements Command { + /** Cache group reencryption task argument. */ + private VisorCacheGroupEncryptionTaskArg taskArg; + + /** {@inheritDoc} */ + @Override public VisorCacheGroupEncryptionTaskArg arg() { + return taskArg; + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + String grpName = argIter.nextArg("Сache group name is expected."); + + if (argIter.hasNextSubArg()) + throw new IllegalArgumentException("Unexpected command argument: " + argIter.peekNextArg()); + + taskArg = new VisorCacheGroupEncryptionTaskArg(grpName); + } + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + S res = executeTaskByNameOnNode( + client, + visorTaskName(), + taskArg, + BROADCAST_UUID, + clientCfg + ); + + printResults(res, taskArg.groupName(), log); + + return res; + } + catch (Throwable e) { + log.severe("Failed to perform operation."); + log.severe(CommandLogger.errorMessage(e)); + + throw e; + } + } + + /** + * @param res Response. + * @param grpName Cache group name. + * @param log Logger. + */ + protected void printResults(S res, String grpName, Logger log) { + Map exceptions = res.exceptions(); + + for (Map.Entry entry : exceptions.entrySet()) { + log.info(INDENT + "Node " + entry.getKey() + ":"); + + log.info(String.format("%sfailed to execute command for the cache group \"%s\": %s.", + DOUBLE_INDENT, grpName, entry.getValue().getMessage())); + } + + Map results = res.results(); + + for (Map.Entry entry : results.entrySet()) { + log.info(INDENT + "Node " + entry.getKey() + ":"); + + printNodeResult(entry.getValue(), grpName, log); + } + } + + /** + * @param res Response. + * @param grpName Cache group name. + * @param log Logger. 
+ */ + protected abstract void printNodeResult(T res, String grpName, Logger log); + + /** + * @return Visor task name. + */ + protected abstract String visorTaskName(); + + /** Subcommand to Display re-encryption status of the cache group. */ + protected static class ReencryptionStatus extends + CacheGroupEncryptionCommand> { + /** {@inheritDoc} */ + @Override protected void printNodeResult(Long bytesLeft, String grpName, Logger log) { + if (bytesLeft == -1) + log.info(DOUBLE_INDENT + "re-encryption completed or not required"); + else + if (bytesLeft == 0) + log.info(DOUBLE_INDENT + "re-encryption will be completed after the next checkpoint"); + else + log.info(String.format("%s%d KB of data left for re-encryption", DOUBLE_INDENT, bytesLeft / 1024)); + } + + /** {@inheritDoc} */ + @Override protected String visorTaskName() { + return VisorReencryptionStatusTask.class.getName(); + } + + /** {@inheritDoc} */ + @Override public String name() { + return EncryptionSubcommands.REENCRYPTION_STATUS.text().toUpperCase(); + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + Command.usage(log, "Display re-encryption status of the cache group:", CommandList.ENCRYPTION, + EncryptionSubcommands.REENCRYPTION_STATUS.toString(), "cacheGroupName"); + } + } + + /** Subcommand to view current encryption key IDs of the cache group. 
*/ + protected static class CacheKeyIds extends + CacheGroupEncryptionCommand, VisorCacheGroupEncryptionTaskResult>> { + + /** {@inheritDoc} */ + @Override protected void printResults( + VisorCacheGroupEncryptionTaskResult> res, + String grpName, + Logger log + ) { + log.info("Encryption key identifiers for cache: " + grpName); + + super.printResults(res, grpName, log); + } + + /** {@inheritDoc} */ + @Override protected void printNodeResult(List keyIds, String grpName, Logger log) { + if (F.isEmpty(keyIds)) { + log.info(DOUBLE_INDENT + "---"); + + return; + } + + for (int i = 0; i < keyIds.size(); i++) + log.info(DOUBLE_INDENT + keyIds.get(i) + (i == 0 ? " (active)" : "")); + } + + /** {@inheritDoc} */ + @Override protected String visorTaskName() { + return VisorEncryptionKeyIdsTask.class.getName(); + } + + /** {@inheritDoc} */ + @Override public String name() { + return CACHE_GROUP_KEY_IDS.text().toUpperCase(); + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + Command.usage(log, "View encryption key identifiers of the cache group:", ENCRYPTION, + CACHE_GROUP_KEY_IDS.toString(), "cacheGroupName"); + } + } + + /** Subcommand to suspend re-encryption of the cache group. 
*/ + protected static class SuspendReencryption extends + CacheGroupEncryptionCommand> { + /** {@inheritDoc} */ + @Override protected String visorTaskName() { + return VisorReencryptionSuspendTask.class.getName(); + } + + /** {@inheritDoc} */ + @Override public String name() { + return EncryptionSubcommands.REENCRYPTION_SUSPEND.text().toUpperCase(); + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + Command.usage(log, "Suspend re-encryption of the cache group:", CommandList.ENCRYPTION, + EncryptionSubcommands.REENCRYPTION_SUSPEND.toString(), "cacheGroupName"); + } + + /** {@inheritDoc} */ + @Override protected void printNodeResult(Boolean success, String grpName, Logger log) { + log.info(String.format("%sre-encryption of the cache group \"%s\" has %sbeen suspended.", + DOUBLE_INDENT, grpName, (success ? "" : "already "))); + } + } + + /** Subcommand to resume re-encryption of the cache group. */ + protected static class ResumeReencryption extends + CacheGroupEncryptionCommand> { + /** {@inheritDoc} */ + @Override protected String visorTaskName() { + return VisorReencryptionResumeTask.class.getName(); + } + + /** {@inheritDoc} */ + @Override public String name() { + return EncryptionSubcommands.REENCRYPTION_RESUME.text().toUpperCase(); + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + Command.usage(log, "Resume re-encryption of the cache group:", CommandList.ENCRYPTION, + EncryptionSubcommands.REENCRYPTION_RESUME.toString(), "cacheGroupName"); + } + + /** {@inheritDoc} */ + @Override protected void printNodeResult(Boolean success, String grpName, Logger log) { + log.info(String.format("%sre-encryption of the cache group \"%s\" has %sbeen resumed.", + DOUBLE_INDENT, grpName, (success ? 
"" : "already "))); + } + } +} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupKeysCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupKeysCommand.java deleted file mode 100644 index ccfba92647b2a..0000000000000 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupKeysCommand.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.ignite.internal.commandline.encryption; - -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.logging.Logger; -import org.apache.ignite.internal.client.GridClient; -import org.apache.ignite.internal.client.GridClientConfiguration; -import org.apache.ignite.internal.commandline.Command; -import org.apache.ignite.internal.commandline.CommandArgIterator; -import org.apache.ignite.internal.commandline.CommandLogger; -import org.apache.ignite.internal.util.typedef.F; -import org.apache.ignite.internal.visor.encryption.VisorCacheGroupKeyIdsTask; - -import static org.apache.ignite.internal.commandline.CommandList.ENCRYPTION; -import static org.apache.ignite.internal.commandline.CommandLogger.DOUBLE_INDENT; -import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; -import static org.apache.ignite.internal.commandline.TaskExecutor.BROADCAST_UUID; -import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; -import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.CACHE_GROUP_KEY_IDS; - -/** - * View cache group encryption key identifiers subcommand. - */ -public class CacheGroupKeysCommand implements Command { - /** Cache group name, */ - private String argCacheGrpName; - - /** {@inheritDoc} */ - @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { - try (GridClient client = Command.startClient(clientCfg)) { - Map> keyIdsMap = executeTaskByNameOnNode( - client, - VisorCacheGroupKeyIdsTask.class.getName(), - argCacheGrpName, - BROADCAST_UUID, - clientCfg - ); - - log.info("Encryption key identifiers for cache: " + argCacheGrpName); - - keyIdsMap.forEach((nodeId, keyIds) -> { - log.info(INDENT + "Node: " + nodeId); - - if (F.isEmpty(keyIds)) { - log.info(DOUBLE_INDENT + "---"); - - return; - } - - for (int i = 0; i < keyIds.size(); i++) - log.info(DOUBLE_INDENT + keyIds.get(i) + (i == 0 ? 
" (active)" : "")); - }); - - return keyIdsMap; - } - catch (Throwable e) { - log.severe("Failed to perform operation."); - log.severe(CommandLogger.errorMessage(e)); - - throw e; - } - } - - /** {@inheritDoc} */ - @Override public String arg() { - return argCacheGrpName; - } - - /** {@inheritDoc} */ - @Override public void parseArguments(CommandArgIterator argIter) { - argCacheGrpName = argIter.nextArg("Сache group name is expected."); - } - - /** {@inheritDoc} */ - @Override public void printUsage(Logger log) { - Command.usage(log, "View encryption key identifiers of the cache group:", ENCRYPTION, - CACHE_GROUP_KEY_IDS.toString(), "cacheGroupName"); - } - - /** {@inheritDoc} */ - @Override public String name() { - return CACHE_GROUP_KEY_IDS.text().toUpperCase(); - } -} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeCacheGroupKeyCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeCacheGroupKeyCommand.java index c9e9ff4c1d85f..dcc19e750314d 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeCacheGroupKeyCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeCacheGroupKeyCommand.java @@ -23,6 +23,7 @@ import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; +import org.apache.ignite.internal.visor.encryption.VisorCacheGroupEncryptionTaskArg; import org.apache.ignite.internal.visor.encryption.VisorChangeCacheGroupKeyTask; import static org.apache.ignite.internal.commandline.CommandList.ENCRYPTION; @@ -32,9 +33,9 @@ /** * Change cache group key encryption subcommand. */ -public class ChangeCacheGroupKeyCommand implements Command { - /** Cache group name. 
*/ - private String argCacheGrpName; +public class ChangeCacheGroupKeyCommand implements Command { + /** Change cache group key task argument. */ + private VisorCacheGroupEncryptionTaskArg taskArg; /** {@inheritDoc} */ @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { @@ -42,12 +43,12 @@ public class ChangeCacheGroupKeyCommand implements Command { executeTaskByNameOnNode( client, VisorChangeCacheGroupKeyTask.class.getName(), - argCacheGrpName, + taskArg, null, clientCfg ); - log.info("The encryption key has been changed for the cache group \"" + argCacheGrpName + "\"."); + log.info("The encryption key has been changed for the cache group \"" + taskArg.groupName() + "\"."); return null; } @@ -66,13 +67,18 @@ public class ChangeCacheGroupKeyCommand implements Command { } /** {@inheritDoc} */ - @Override public String arg() { - return argCacheGrpName; + @Override public VisorCacheGroupEncryptionTaskArg arg() { + return taskArg; } /** {@inheritDoc} */ @Override public void parseArguments(CommandArgIterator argIter) { - argCacheGrpName = argIter.nextArg("Сache group name is expected."); + String argCacheGrpName = argIter.nextArg("Сache group name is expected."); + + taskArg = new VisorCacheGroupEncryptionTaskArg(argCacheGrpName); + + if (argIter.hasNextSubArg()) + throw new IllegalArgumentException("Unexpected command argument: " + argIter.peekNextArg()); } /** {@inheritDoc} */ diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommands.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommands.java index 9c474055e5a80..0187a66c5da16 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommands.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommands.java @@ -35,11 +35,17 @@ public 
enum EncryptionSubcommands { /** Subcommand to change the current encryption key for specified cache group. */ CHANGE_CACHE_GROUP_KEY("change_cache_key", new ChangeCacheGroupKeyCommand()), - /** Subcommand to view current encryption key IDs for specified cache group. */ - CACHE_GROUP_KEY_IDS("cache_key_ids", new CacheGroupKeysCommand()), + /** Subcommand to view current encryption key IDs of the cache group. */ + CACHE_GROUP_KEY_IDS("cache_key_ids", new CacheGroupEncryptionCommand.CacheKeyIds()), - /** Subcommand to control the process of re-encryption of the cache group. */ - GROUP_REENCRYPTION("group_reencryption", new GroupReencryptionCommand()), + /** Subcommand to display re-encryption status of the cache group. */ + REENCRYPTION_STATUS("reencryption_status", new CacheGroupEncryptionCommand.ReencryptionStatus()), + + /** Subcommand to suspend re-encryption of the cache group. */ + REENCRYPTION_SUSPEND("suspend_reencryption", new CacheGroupEncryptionCommand.SuspendReencryption()), + + /** Subcommand to resume re-encryption of the cache group. */ + REENCRYPTION_RESUME("resume_reencryption", new CacheGroupEncryptionCommand.ResumeReencryption()), /** Subcommand to view/change cache group re-encryption rate limit. */ REENCRYPTION_RATE("reencryption_rate", new ReencryptionRateCommand()); diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GroupReencryptionCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GroupReencryptionCommand.java deleted file mode 100644 index bf63a5edc75d0..0000000000000 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GroupReencryptionCommand.java +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ignite.internal.commandline.encryption; - -import java.util.Arrays; -import java.util.Map; -import java.util.UUID; -import java.util.logging.Logger; -import org.apache.ignite.internal.client.GridClient; -import org.apache.ignite.internal.client.GridClientConfiguration; -import org.apache.ignite.internal.commandline.Command; -import org.apache.ignite.internal.commandline.CommandArgIterator; -import org.apache.ignite.internal.commandline.CommandList; -import org.apache.ignite.internal.commandline.CommandLogger; -import org.apache.ignite.internal.commandline.argument.CommandArg; -import org.apache.ignite.internal.commandline.argument.CommandArgUtils; -import org.apache.ignite.internal.util.lang.GridFunc; -import org.apache.ignite.internal.util.typedef.internal.U; -import org.apache.ignite.internal.visor.encryption.VisorGroupReencryptionActionType; -import org.apache.ignite.internal.visor.encryption.VisorGroupReencryptionTask; -import org.apache.ignite.internal.visor.encryption.VisorGroupReencryptionTaskArg; - -import static org.apache.ignite.internal.commandline.CommandLogger.DOUBLE_INDENT; -import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; -import static org.apache.ignite.internal.commandline.CommandLogger.optional; -import static org.apache.ignite.internal.commandline.TaskExecutor.BROADCAST_UUID; -import static 
org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; -import static org.apache.ignite.internal.visor.encryption.VisorGroupReencryptionActionType.SUSPEND; - -/** - * Subcommand to control the process of re-encryption of the cache group. - */ -public class GroupReencryptionCommand implements Command { - /** Cache group reencryption task argument. */ - private VisorGroupReencryptionTaskArg taskArg; - - /** {@inheritDoc} */ - @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { - try (GridClient client = Command.startClient(clientCfg)) { - Map res = executeTaskByNameOnNode( - client, - VisorGroupReencryptionTask.class.getName(), - taskArg, - BROADCAST_UUID, - clientCfg - ); - - switch (taskArg.type()) { - case STATUS: - printStatusResult(taskArg.groupName(), res, log); - - break; - case SUSPEND: - case RESUME: - printSuspendResumeResult(taskArg.groupName(), taskArg.type() == SUSPEND, res, log); - - break; - default: - assert false : "Unknown type: " + taskArg.type(); - } - - return res; - } - catch (Throwable e) { - log.severe("Failed to perform operation."); - log.severe(CommandLogger.errorMessage(e)); - - throw e; - } - } - - /** - * @param grpName Cache group name. - * @param suspend Suspend flag. - * @param res Response. - * @param log Logger. - */ - private void printSuspendResumeResult(String grpName, boolean suspend, Map res, Logger log) { - for (Map.Entry entry : res.entrySet()) { - String msg; - - if (entry.getValue() instanceof Throwable) { - msg = String.format("failed to %s re-encryption of the cache group \"%s\": %s.", - (suspend ? "suspend" : "resume"), grpName, ((Throwable)entry.getValue()).getMessage()); - } - else { - msg = String.format("re-encryption of the cache group \"%s\" has %sbeen %s.", - grpName, (((boolean)entry.getValue()) ? "" : "already "), suspend ? 
"suspended" : "resumed"); - } - - log.info(INDENT + "Node " + entry.getKey() + ": " + msg); - } - } - - /** - * @param grpName Cache group name. - * @param nodeStates Node ID(s) with number of bytes left for reencryption. - * @param log Logger. - */ - private void printStatusResult(String grpName, Map nodeStates, Logger log) { - log.info("Re-encryption status for the cache group: " + grpName); - - for (Map.Entry entry : nodeStates.entrySet()) { - log.info(INDENT + "Node: " + entry.getKey()); - - if (entry.getValue() instanceof Throwable) { - log.info(String.format("%sfailed to get re-encryption status of the cache group \"%s\": %s.", - DOUBLE_INDENT, grpName, ((Throwable)entry.getValue()).getMessage())); - - continue; - } - - long bytesLeft = (Long)entry.getValue(); - - if (bytesLeft == 0) { - log.info(DOUBLE_INDENT + "re-encryption completed or not required"); - - continue; - } - - log.info(String.format("%s%d KB of data left for re-encryption", DOUBLE_INDENT, bytesLeft / 1024)); - } - } - - /** {@inheritDoc} */ - @Override public VisorGroupReencryptionTaskArg arg() { - return taskArg; - } - - /** {@inheritDoc} */ - @Override public void parseArguments(CommandArgIterator argIter) { - ReencryptionCommandArg cmdArg = ReencryptionCommandArg.STATUS; - String grpName = argIter.nextArg("Сache group name is expected."); - - if (argIter.hasNextSubArg()) { - String arg = argIter.nextArg("Failed to read command argument."); - - cmdArg = CommandArgUtils.of(arg, ReencryptionCommandArg.class); - - if (cmdArg == null) - throw new IllegalArgumentException("Unexpected command argument: " + arg); - - if (argIter.hasNextSubArg()) { - throw new IllegalArgumentException("Only one of the following options is expected: " + - GridFunc.concat(Arrays.asList(ReencryptionCommandArg.values()), ", ")); - } - } - - taskArg = new VisorGroupReencryptionTaskArg(grpName, VisorGroupReencryptionActionType.valueOf(cmdArg.name())); - } - - /** {@inheritDoc} */ - @Override public void printUsage(Logger 
log) { - Command.usage(log, "Control the process of re-encryption of the cache group:", CommandList.ENCRYPTION, - U.map(ReencryptionCommandArg.STATUS.argName(), "Display re-encryption status (default action).", - ReencryptionCommandArg.SUSPEND.argName(), "Suspend re-encryption.", - ReencryptionCommandArg.RESUME.argName(), "Resume re-encryption."), - EncryptionSubcommands.GROUP_REENCRYPTION.toString(), "cacheGroupName", - optional(ReencryptionCommandArg.STATUS, ReencryptionCommandArg.SUSPEND, ReencryptionCommandArg.RESUME)); - } - - /** {@inheritDoc} */ - @Override public String name() { - return EncryptionSubcommands.GROUP_REENCRYPTION.text().toUpperCase(); - } - - /** - * Reencryption management command arguments name. - */ - private enum ReencryptionCommandArg implements CommandArg { - /** Suspend reencryption argument. */ - SUSPEND("--suspend"), - - /** Resume reencryption argument. */ - RESUME("--resume"), - - /** Reencryption status argument. */ - STATUS("--status"); - - /** Option name. */ - private final String name; - - /** - * @param name Argument name. 
- */ - ReencryptionCommandArg(String name) { - this.name = name; - } - - /** {@inheritDoc} */ - @Override public String argName() { - return name; - } - - /** {@inheritDoc} */ - @Override public String toString() { - return name; - } - } -} diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java index 07ee8242a13c3..bccdc02969d3c 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java @@ -20,6 +20,7 @@ import java.util.Map; import java.util.UUID; import java.util.logging.Logger; +import org.apache.ignite.IgniteException; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; import org.apache.ignite.internal.commandline.Command; @@ -27,10 +28,13 @@ import org.apache.ignite.internal.commandline.CommandLogger; import org.apache.ignite.internal.commandline.argument.CommandArg; import org.apache.ignite.internal.commandline.argument.CommandArgUtils; +import org.apache.ignite.internal.visor.encryption.VisorCacheGroupEncryptionTaskResult; import org.apache.ignite.internal.visor.encryption.VisorReencryptionRateTask; +import org.apache.ignite.internal.visor.encryption.VisorReencryptionRateTaskArg; import static java.util.Collections.singletonMap; import static org.apache.ignite.internal.commandline.CommandList.ENCRYPTION; +import static org.apache.ignite.internal.commandline.CommandLogger.DOUBLE_INDENT; import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; import static org.apache.ignite.internal.commandline.CommandLogger.optional; import static 
org.apache.ignite.internal.commandline.TaskExecutor.BROADCAST_UUID; @@ -40,44 +44,44 @@ /** * View/change cache group re-encryption rate limit subcommand. */ -public class ReencryptionRateCommand implements Command { - /** Re-encryption rate limit in megabytes per second. */ - private Double rateLimit; +public class ReencryptionRateCommand implements Command { + /** Re-encryption rate task argument. */ + private VisorReencryptionRateTaskArg taskArg; /** {@inheritDoc} */ @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { try (GridClient client = Command.startClient(clientCfg)) { - Map results = executeTaskByNameOnNode( + VisorCacheGroupEncryptionTaskResult res = executeTaskByNameOnNode( client, VisorReencryptionRateTask.class.getName(), - rateLimit, + taskArg, BROADCAST_UUID, clientCfg ); - for (Map.Entry entry : results.entrySet()) { - boolean read = rateLimit == null; + boolean read = taskArg.rate() == null; - String msg; + Map exceptions = res.exceptions(); - if (entry.getValue() instanceof Throwable) { - msg = " failed to " + (read ? "get" : "limit") + " re-encryption rate (" + - ((Throwable)entry.getValue()).getMessage() + ")."; - } + for (Map.Entry entry : exceptions.entrySet()) { + log.info(INDENT + "Node " + entry.getKey() + ":"); + log.info(String.format("%sfailed to %s re-encryption rate: %s.", + DOUBLE_INDENT, (read ? "get" : "limit"), entry.getValue().getMessage())); + } + + Map results = res.results(); + + for (Map.Entry entry : results.entrySet()) { + log.info(INDENT + "Node " + entry.getKey() + ":"); + + double rateLimit = read ? entry.getValue() : taskArg.rate(); + + if (rateLimit == 0) + log.info(DOUBLE_INDENT + "re-encryption rate is not limited."); else { - double prevRate = (double)entry.getValue(); - boolean unlimited = read ? prevRate == 0 : rateLimit == 0; - - if (unlimited) - msg = "re-encryption rate is not limited."; - else { - msg = "re-encryption rate " + (read ? 
- "is limited to " + prevRate : - "has been limited to " + rateLimit) + " MB/s."; - } + log.info(String.format("%sre-encryption rate %s limited to %.2f MB/s.", + DOUBLE_INDENT, (read ? "is" : "has been"), rateLimit)); } - - log.info(INDENT + "Node " + entry.getKey() + ": " + msg); } return null; @@ -91,13 +95,13 @@ public class ReencryptionRateCommand implements Command { } /** {@inheritDoc} */ - @Override public Double arg() { - return rateLimit; + @Override public VisorReencryptionRateTaskArg arg() { + return taskArg; } /** {@inheritDoc} */ @Override public void parseArguments(CommandArgIterator argIter) { - rateLimit = null; + Double rateLimit = null; while (argIter.hasNextSubArg()) { String arg = argIter.nextArg("Failed to read command argument."); @@ -117,6 +121,8 @@ public class ReencryptionRateCommand implements Command { else throw new IllegalArgumentException("Unexpected command argument: " + arg); } + + taskArg = new VisorReencryptionRateTaskArg(rateLimit); } /** {@inheritDoc} */ diff --git a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java index 5c052465317cc..057fe4aeeb0c6 100644 --- a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java +++ b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java @@ -101,6 +101,7 @@ import org.apache.ignite.internal.processors.cache.warmup.WarmUpTestPluginProvider; import org.apache.ignite.internal.processors.cluster.GridClusterStateProcessor; import org.apache.ignite.internal.util.lang.GridAbsPredicate; +import org.apache.ignite.internal.util.lang.GridFunc; import org.apache.ignite.internal.util.typedef.G; import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.CU; @@ -142,8 +143,10 @@ import static org.apache.ignite.internal.commandline.CommandList.DEACTIVATE; import 
static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.CACHE_GROUP_KEY_IDS; import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.CHANGE_CACHE_GROUP_KEY; -import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.GROUP_REENCRYPTION; import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_RATE; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_RESUME; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_STATUS; +import static org.apache.ignite.internal.commandline.encryption.EncryptionSubcommands.REENCRYPTION_SUSPEND; import static org.apache.ignite.internal.encryption.AbstractEncryptionTest.MASTER_KEY_NAME_2; import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.IGNITE_PDS_SKIP_CHECKPOINT_ON_NODE_STOP; import static org.apache.ignite.internal.processors.cache.persistence.snapshot.AbstractSnapshotSelfTest.doSnapshotCancellationTest; @@ -2682,7 +2685,11 @@ public void testCacheGroupKeyChange() throws Exception { ignite.cluster().state(ACTIVE); - createCacheAndPreload(ignite, 10); + List srvGrids = GridFunc.asList(grid(0), grid(1)); + + enableCheckpoints(srvGrids, false); + + createCacheAndPreload(ignite, 1000); int ret = execute("--encryption", CACHE_GROUP_KEY_IDS.toString(), DEFAULT_CACHE_NAME); @@ -2701,6 +2708,22 @@ public void testCacheGroupKeyChange() throws Exception { assertEquals(testOut.toString(), EXIT_CODE_OK, ret); assertContains(log, testOut.toString(), "Encryption key identifiers for cache: " + DEFAULT_CACHE_NAME); assertEquals(srvNodes, countSubstrs(testOut.toString(), "1 (active)")); + + GridTestUtils.waitForCondition(() -> { + execute("--encryption", REENCRYPTION_STATUS.toString(), DEFAULT_CACHE_NAME); + + return srvNodes == countSubstrs(testOut.toString(), + "re-encryption will be 
completed after the next checkpoint"); + }, getTestTimeout()); + + enableCheckpoints(srvGrids, true); + forceCheckpoint(srvGrids); + + GridTestUtils.waitForCondition(() -> { + execute("--encryption", REENCRYPTION_STATUS.toString(), DEFAULT_CACHE_NAME); + + return srvNodes == countSubstrs(testOut.toString(), "re-encryption completed or not required"); + }, getTestTimeout()); } /** @throws Exception If failed. */ @@ -2756,11 +2779,11 @@ public void testReencryptionSuspendAndResume() throws Exception { assertTrue(isReencryptionStarted(DEFAULT_CACHE_NAME)); - int ret = execute("--encryption", GROUP_REENCRYPTION.toString(), DEFAULT_CACHE_NAME); + int ret = execute("--encryption", REENCRYPTION_STATUS.toString(), DEFAULT_CACHE_NAME); assertEquals(EXIT_CODE_OK, ret); - Pattern ptrn = Pattern.compile("(?m)Node: [-0-9a-f]{36}\n\\s+(?\\d+) KB of data.+"); + Pattern ptrn = Pattern.compile("(?m)Node [-0-9a-f]{36}:\n\\s+(?\\d+) KB of data.+"); Matcher matcher = ptrn.matcher(testOut.toString()); int matchesCnt = 0; @@ -2776,27 +2799,27 @@ public void testReencryptionSuspendAndResume() throws Exception { assertEquals(srvNodes, matchesCnt); - ret = execute("--encryption", GROUP_REENCRYPTION.toString(), DEFAULT_CACHE_NAME, "--suspend"); + ret = execute("--encryption", REENCRYPTION_SUSPEND.toString(), DEFAULT_CACHE_NAME); assertEquals(EXIT_CODE_OK, ret); assertEquals(srvNodes, countSubstrs(testOut.toString(), "re-encryption of the cache group \"" + DEFAULT_CACHE_NAME + "\" has been suspended.")); assertFalse(isReencryptionStarted(DEFAULT_CACHE_NAME)); - ret = execute("--encryption", GROUP_REENCRYPTION.toString(), DEFAULT_CACHE_NAME, "--suspend"); + ret = execute("--encryption", REENCRYPTION_SUSPEND.toString(), DEFAULT_CACHE_NAME); assertEquals(EXIT_CODE_OK, ret); assertEquals(srvNodes, countSubstrs(testOut.toString(), "re-encryption of the cache group \"" + DEFAULT_CACHE_NAME + "\" has already been suspended.")); - ret = execute("--encryption", GROUP_REENCRYPTION.toString(), 
DEFAULT_CACHE_NAME, "--resume"); + ret = execute("--encryption", REENCRYPTION_RESUME.toString(), DEFAULT_CACHE_NAME); assertEquals(EXIT_CODE_OK, ret); assertEquals(srvNodes, countSubstrs(testOut.toString(), "re-encryption of the cache group \"" + DEFAULT_CACHE_NAME + "\" has been resumed.")); assertTrue(isReencryptionStarted(DEFAULT_CACHE_NAME)); - ret = execute("--encryption", GROUP_REENCRYPTION.toString(), DEFAULT_CACHE_NAME, "--resume"); + ret = execute("--encryption", REENCRYPTION_RESUME.toString(), DEFAULT_CACHE_NAME); assertEquals(EXIT_CODE_OK, ret); assertEquals(srvNodes, countSubstrs(testOut.toString(), diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTask.java new file mode 100644 index 0000000000000..6118d0c29546d --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTask.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.visor.encryption; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.compute.ComputeJobResult; +import org.apache.ignite.internal.dto.IgniteDataTransferObject; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.cache.IgniteInternalCache; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.internal.visor.VisorJob; +import org.apache.ignite.internal.visor.VisorMultiNodeTask; +import org.jetbrains.annotations.Nullable; + +/** + * Visor encrypted cache group multinode task. + */ +public abstract class VisorCacheGroupEncryptionTask + extends VisorMultiNodeTask, R> +{ + /** {@inheritDoc} */ + @Nullable @Override protected VisorCacheGroupEncryptionTaskResult reduce0(List results) { + Map jobResults = new HashMap<>(); + Map exceptions = new HashMap<>(); + + for (ComputeJobResult res : results) { + UUID nodeId = res.getNode().id(); + + if (res.getException() != null) { + exceptions.put(nodeId, res.getException()); + + continue; + } + + VisorSingleFieldDto dtoRes = res.getData(); + + jobResults.put(nodeId, dtoRes.value()); + } + + return new VisorCacheGroupEncryptionTaskResult<>(jobResults, exceptions); + } + + /** */ + protected abstract static class VisorSingleFieldDto extends IgniteDataTransferObject { + /** Object value. */ + private T val; + + /** + * @return Object value. + */ + protected T value() { + return val; + } + + /** + * @param val Data object. + * @return {@code this} for chaining. + */ + protected VisorSingleFieldDto value(T val) { + this.val = val; + + return this; + } + } + + /** + * @param Type of job result. + */ + protected abstract static class VisorReencryptionBaseJob + extends VisorJob> { + /** + * @param arg Job argument. 
+ * @param debug Flag indicating whether debug information should be printed into node log. + */ + protected VisorReencryptionBaseJob(@Nullable VisorCacheGroupEncryptionTaskArg arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override protected VisorSingleFieldDto run(VisorCacheGroupEncryptionTaskArg arg) throws IgniteException { + try { + String grpName = arg.groupName(); + CacheGroupContext grp = ignite.context().cache().cacheGroup(CU.cacheId(grpName)); + + if (grp == null) { + IgniteInternalCache cache = ignite.context().cache().cache(grpName); + + if (cache == null) + throw new IgniteException("Cache group " + grpName + " not found."); + + grp = cache.context().group(); + + if (grp.sharedGroup()) { + throw new IgniteException("Cache or group \"" + grpName + "\" is a part of group \"" + + grp.name() + "\". Provide group name instead of cache name for shared groups."); + } + } + + return run0(grp); + } + catch (IgniteCheckedException e) { + throw new IgniteException(e); + } + } + + /** + * Executes internal logic of the job. + * + * @param grp Cache group. + * @return Result. + * @throws IgniteCheckedException In case of error. 
+ */ + protected abstract VisorSingleFieldDto run0(CacheGroupContext grp) throws IgniteCheckedException; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionTaskArg.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTaskArg.java similarity index 66% rename from modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionTaskArg.java rename to modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTaskArg.java index a000609257a5f..ecea9ed090709 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionTaskArg.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTaskArg.java @@ -25,56 +25,44 @@ import org.apache.ignite.internal.util.typedef.internal.U; /** - * Cache group reencryption task argument. + * Cache group aware task argument. */ -public class VisorGroupReencryptionTaskArg extends IgniteDataTransferObject { +public class VisorCacheGroupEncryptionTaskArg extends IgniteDataTransferObject { /** */ private static final long serialVersionUID = 0L; - /** Cache group name, */ + /** Cache group name. */ private String grpName; - /** Task action type. */ - private VisorGroupReencryptionActionType type; - /** Default constructor. */ - public VisorGroupReencryptionTaskArg() { + public VisorCacheGroupEncryptionTaskArg() { // No-op. } /** * @param grpName Cache group name. - * @param type Task action type */ - public VisorGroupReencryptionTaskArg(String grpName, VisorGroupReencryptionActionType type) { + public VisorCacheGroupEncryptionTaskArg(String grpName) { this.grpName = grpName; - this.type = type; } - /** @return Cache group name, */ + /** @return Cache group name. */ public String groupName() { return grpName; } - /** @return Task action type. 
*/ - public VisorGroupReencryptionActionType type() { - return type; - } - /** {@inheritDoc} */ @Override protected void writeExternalData(ObjectOutput out) throws IOException { U.writeString(out, grpName); - U.writeEnum(out, type); } /** {@inheritDoc} */ - @Override protected void readExternalData(byte protoVer, ObjectInput in) throws IOException, ClassNotFoundException { + @Override protected void readExternalData(byte ver, ObjectInput in) throws IOException, ClassNotFoundException { grpName = U.readString(in); - type = VisorGroupReencryptionActionType.fromOrdinal(in.readByte()); } /** {@inheritDoc} */ @Override public String toString() { - return S.toString(VisorGroupReencryptionTaskArg.class, this); + return S.toString(VisorCacheGroupEncryptionTaskArg.class, this); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTaskResult.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTaskResult.java new file mode 100644 index 0000000000000..70b18053f08c4 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTaskResult.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.encryption; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; +import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.dto.IgniteDataTransferObject; +import org.apache.ignite.internal.util.tostring.GridToStringInclude; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; + +/** + * Multinode cache group encryption task result. + * + * @param Job result type. + */ +@SuppressWarnings("AssignmentOrReturnOfFieldWithMutableType") +public class VisorCacheGroupEncryptionTaskResult extends IgniteDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** Per node job result. */ + @GridToStringInclude + private Map results; + + /** Per node execution problems. */ + @GridToStringInclude + private Map exceptions; + + /** + * @param results Per node job result. + * @param exceptions Per node execution problems. + */ + public VisorCacheGroupEncryptionTaskResult(Map results, Map exceptions) { + this.results = results; + this.exceptions = exceptions; + } + + /** */ + public VisorCacheGroupEncryptionTaskResult() { + // No-op. + } + + /** @return Per node job result. */ + public Map results() { + if (results == null) + results = new HashMap<>(); + + return results; + } + + /** @return Per node execution problems. 
*/ + public Map exceptions() { + if (exceptions == null) + exceptions = new HashMap<>(); + + return exceptions; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + U.writeMap(out, results); + U.writeMap(out, exceptions); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte ver, ObjectInput in) throws IOException, ClassNotFoundException { + results = U.readMap(in); + exceptions = U.readMap(in); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(VisorCacheGroupEncryptionTaskResult.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupKeyIdsTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupKeyIdsTask.java deleted file mode 100644 index 531a86d35dcc9..0000000000000 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupKeyIdsTask.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.ignite.internal.visor.encryption; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import org.apache.ignite.IgniteException; -import org.apache.ignite.compute.ComputeJobResult; -import org.apache.ignite.internal.processors.cache.IgniteInternalCache; -import org.apache.ignite.internal.visor.VisorJob; -import org.apache.ignite.internal.visor.VisorMultiNodeTask; -import org.jetbrains.annotations.Nullable; - -/** - * The task for getting encryption key identifiers of the cache group. - */ -public class VisorCacheGroupKeyIdsTask extends VisorMultiNodeTask>, List> { - /** Serial version uid. */ - private static final long serialVersionUID = 0L; - - /** {@inheritDoc} */ - @Override protected VisorJob> job(String arg) { - return new VisorCacheGroupKeyIdsJob(arg, debug); - } - - /** {@inheritDoc} */ - @Nullable @Override protected Map> reduce0(List results) { - Map> resMap = new HashMap<>(); - - for (ComputeJobResult res : results) { - List keyIds = res.getData(); - - resMap.put(res.getNode().id(), keyIds); - } - - return resMap; - } - - /** The job for getting encryption key identifiers of the cache group. */ - private static class VisorCacheGroupKeyIdsJob extends VisorJob> { - /** Serial version uid. */ - private static final long serialVersionUID = 0L; - - /** - * @param arg Job argument. - * @param debug Flag indicating whether debug information should be printed into node log. 
- */ - protected VisorCacheGroupKeyIdsJob(String arg, boolean debug) { - super(arg, debug); - } - - /** {@inheritDoc} */ - @Override protected List run(String arg) throws IgniteException { - IgniteInternalCache cache = ignite.context().cache().cache(arg); - - if (cache == null) - throw new IgniteException("Cache " + arg + " not found."); - - return ignite.context().encryption().groupKeyIds(cache.context().group().groupId()); - } - } -} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorChangeCacheGroupKeyTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorChangeCacheGroupKeyTask.java index 1b8cca2027cc7..d6659412f6427 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorChangeCacheGroupKeyTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorChangeCacheGroupKeyTask.java @@ -29,17 +29,17 @@ * * @see IgniteEncryption#changeCacheGroupKey(Collection) */ -public class VisorChangeCacheGroupKeyTask extends VisorOneNodeTask { +public class VisorChangeCacheGroupKeyTask extends VisorOneNodeTask { /** Serial version uid. */ private static final long serialVersionUID = 0L; /** {@inheritDoc} */ - @Override protected VisorJob job(String arg) { + @Override protected VisorJob job(VisorCacheGroupEncryptionTaskArg arg) { return new VisorChangeCacheGroupKeyJob(arg, debug); } /** The job for changing the encryption key of the cache group. */ - private static class VisorChangeCacheGroupKeyJob extends VisorJob { + private static class VisorChangeCacheGroupKeyJob extends VisorJob { /** Serial version uid. */ private static final long serialVersionUID = 0L; @@ -47,13 +47,13 @@ private static class VisorChangeCacheGroupKeyJob extends VisorJob * @param arg Job argument. * @param debug Flag indicating whether debug information should be printed into node log. 
*/ - protected VisorChangeCacheGroupKeyJob(String arg, boolean debug) { + protected VisorChangeCacheGroupKeyJob(VisorCacheGroupEncryptionTaskArg arg, boolean debug) { super(arg, debug); } /** {@inheritDoc} */ - @Override protected Void run(String grpName) throws IgniteException { - ignite.encryption().changeCacheGroupKey(Collections.singleton(grpName)).get(); + @Override protected Void run(VisorCacheGroupEncryptionTaskArg taskArg) throws IgniteException { + ignite.encryption().changeCacheGroupKey(Collections.singleton(taskArg.groupName())).get(); return null; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorEncryptionKeyIdsTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorEncryptionKeyIdsTask.java new file mode 100644 index 0000000000000..311c139111689 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorEncryptionKeyIdsTask.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.visor.encryption; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.List; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.task.GridInternal; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.visor.VisorJob; +import org.apache.ignite.internal.visor.encryption.VisorCacheGroupEncryptionTask.VisorSingleFieldDto; +import org.jetbrains.annotations.Nullable; + +/** + * Get current encryption key IDs of the cache group. + */ +@GridInternal +public class VisorEncryptionKeyIdsTask extends VisorCacheGroupEncryptionTask>> { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override protected VisorJob>> job( + VisorCacheGroupEncryptionTaskArg arg) { + return new VisorEncryptionKeyIdsJob(arg, debug); + } + + /** The job for get current encryption key IDs of the cache group. */ + private static class VisorEncryptionKeyIdsJob extends VisorReencryptionBaseJob> { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** + * @param arg Job argument. + * @param debug Flag indicating whether debug information should be printed into node log. + */ + protected VisorEncryptionKeyIdsJob(@Nullable VisorCacheGroupEncryptionTaskArg arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override protected VisorSingleFieldDto> run0(CacheGroupContext grp) { + return new IntArrayResult().value(ignite.context().encryption().groupKeyIds(grp.groupId())); + } + } + + /** */ + protected static class IntArrayResult extends VisorSingleFieldDto> { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** */ + public IntArrayResult() { + // No-op. 
+ } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + U.writeCollection(out, value()); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte ver, ObjectInput in) throws IOException, ClassNotFoundException { + value(U.readList(in)); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionActionType.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionActionType.java deleted file mode 100644 index bc2f77c28f410..0000000000000 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionActionType.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ignite.internal.visor.encryption; - -import org.jetbrains.annotations.Nullable; - -/** - * Cache group reencryption task action type. - */ -public enum VisorGroupReencryptionActionType { - /** Get reencryption status. */ - STATUS, - - /** Suspend reencryption. */ - SUSPEND, - - /** Resume reencryption. */ - RESUME; - - /** Enumerated values. 
*/ - private static final VisorGroupReencryptionActionType[] VALS = values(); - - /** - * @param ord Ordinal value. - * @return Enumerated value or {@code null} if ordinal out of range. - */ - @Nullable public static VisorGroupReencryptionActionType fromOrdinal(int ord) { - return ord >= 0 && ord < VALS.length ? VALS[ord] : null; - } -} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionTask.java deleted file mode 100644 index 06c6b8edea996..0000000000000 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorGroupReencryptionTask.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.ignite.internal.visor.encryption; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import org.apache.ignite.IgniteCheckedException; -import org.apache.ignite.IgniteException; -import org.apache.ignite.compute.ComputeJobResult; -import org.apache.ignite.internal.processors.cache.CacheGroupContext; -import org.apache.ignite.internal.processors.cache.IgniteInternalCache; -import org.apache.ignite.internal.util.typedef.internal.CU; -import org.apache.ignite.internal.visor.VisorJob; -import org.apache.ignite.internal.visor.VisorMultiNodeTask; -import org.jetbrains.annotations.Nullable; - -/** - * Task to control the process of re-encryption of the cache group. - */ -public class VisorGroupReencryptionTask - extends VisorMultiNodeTask, Object> -{ - /** Serial version uid. */ - private static final long serialVersionUID = 0L; - - /** {@inheritDoc} */ - @Override protected VisorJob job(VisorGroupReencryptionTaskArg arg) { - return new VisorGroupReencryptionJob(arg, debug); - } - - /** {@inheritDoc} */ - @Nullable @Override protected Map reduce0(List results) { - Map resMap = new HashMap<>(); - - for (ComputeJobResult res : results) - resMap.put(res.getNode().id(), res.getException() != null ? res.getException() : res.getData()); - - return resMap; - } - - /** The job to control the process of re-encryption of the cache group. */ - private static class VisorGroupReencryptionJob extends VisorJob { - /** Serial version uid. */ - private static final long serialVersionUID = 0L; - - /** - * @param arg Job argument. - * @param debug Flag indicating whether debug information should be printed into node log. 
- */ - protected VisorGroupReencryptionJob(VisorGroupReencryptionTaskArg arg, boolean debug) { - super(arg, debug); - } - - /** {@inheritDoc} */ - @Override protected Object run(VisorGroupReencryptionTaskArg arg) throws IgniteException { - String grpName = arg.groupName(); - CacheGroupContext grp = ignite.context().cache().cacheGroup(CU.cacheId(grpName)); - - if (grp == null) { - IgniteInternalCache cache = ignite.context().cache().cache(grpName); - - if (cache == null) - throw new IgniteException("Cache group " + grpName + " not found."); - - grp = cache.context().group(); - - if (grp.sharedGroup()) { - throw new IgniteException("Cache or group \"" + grpName + "\" is a part of group \"" + - grp.name() + "\". Provide group name instead of cache name for shared groups."); - } - } - - try { - switch (arg.type()) { - case STATUS: - return ignite.context().encryption().getBytesLeftForReencryption(grp.groupId()); - - case SUSPEND: - return ignite.context().encryption().suspendReencryption(grp.groupId()); - - case RESUME: - return ignite.context().encryption().resumeReencryption(grp.groupId()); - - default: - throw new UnsupportedOperationException("Not implemented task action: " + arg.type()); - } - } - catch (IgniteCheckedException e) { - throw new IgniteException(e); - } - } - } -} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionRateTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionRateTask.java index b3d316343f3fa..425fab52eb2e2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionRateTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionRateTask.java @@ -17,40 +17,62 @@ package org.apache.ignite.internal.visor.encryption; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; import java.util.HashMap; import java.util.List; import 
java.util.Map; import java.util.UUID; import org.apache.ignite.IgniteException; import org.apache.ignite.compute.ComputeJobResult; +import org.apache.ignite.internal.dto.IgniteDataTransferObject; +import org.apache.ignite.internal.processors.task.GridInternal; import org.apache.ignite.internal.visor.VisorJob; import org.apache.ignite.internal.visor.VisorMultiNodeTask; import org.jetbrains.annotations.Nullable; /** - * View/change cache group re-encryption rate limit. + * View/change cache group re-encryption rate limit . */ -public class VisorReencryptionRateTask extends VisorMultiNodeTask, Double> { +@GridInternal +public class VisorReencryptionRateTask extends VisorMultiNodeTask, VisorReencryptionRateTask.ReencryptionRateJobResult> +{ /** Serial version uid. */ private static final long serialVersionUID = 0L; /** {@inheritDoc} */ - @Override protected VisorJob job(Double arg) { + @Override protected VisorJob job( + VisorReencryptionRateTaskArg arg) { return new VisorReencryptionRateJob(arg, debug); } /** {@inheritDoc} */ - @Nullable @Override protected Map reduce0(List results) { - Map resMap = new HashMap<>(); + @Nullable @Override protected VisorCacheGroupEncryptionTaskResult reduce0(List results) { + Map jobResults = new HashMap<>(); + Map exceptions = new HashMap<>(); - for (ComputeJobResult res : results) - resMap.put(res.getNode().id(), res.getException() != null ? res.getException() : res.getData()); + for (ComputeJobResult res : results) { + UUID nodeId = res.getNode().id(); - return resMap; + if (res.getException() != null) { + exceptions.put(nodeId, res.getException()); + + continue; + } + + ReencryptionRateJobResult dtoRes = res.getData(); + + jobResults.put(nodeId, dtoRes.limit()); + } + + return new VisorCacheGroupEncryptionTaskResult<>(jobResults, exceptions); } /** The job for view/change cache group re-encryption rate limit. 
*/ - private static class VisorReencryptionRateJob extends VisorJob { + private static class VisorReencryptionRateJob + extends VisorJob { /** Serial version uid. */ private static final long serialVersionUID = 0L; @@ -58,18 +80,54 @@ private static class VisorReencryptionRateJob extends VisorJob { * @param arg Job argument. * @param debug Flag indicating whether debug information should be printed into node log. */ - protected VisorReencryptionRateJob(Double arg, boolean debug) { + protected VisorReencryptionRateJob(VisorReencryptionRateTaskArg arg, boolean debug) { super(arg, debug); } /** {@inheritDoc} */ - @Override protected Double run(Double rate) throws IgniteException { + @Override protected ReencryptionRateJobResult run(VisorReencryptionRateTaskArg arg) throws IgniteException { double prevRate = ignite.context().encryption().getReencryptionRate(); - if (rate != null) - ignite.context().encryption().setReencryptionRate(rate); + if (arg.rate() != null) + ignite.context().encryption().setReencryptionRate(arg.rate()); + + return new ReencryptionRateJobResult(prevRate); + } + } + + /** */ + protected static class ReencryptionRateJobResult extends IgniteDataTransferObject { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** Re-encryption rate limit. */ + private Double limit; - return prevRate; + /** */ + public ReencryptionRateJobResult() { + // No-op. + } + + /** */ + public ReencryptionRateJobResult(Double limit) { + this.limit = limit; + } + + /** + * @return Re-encryption rate limit. 
+ */ + public Double limit() { + return limit; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + out.writeDouble(limit); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte ver, ObjectInput in) throws IOException, ClassNotFoundException { + limit = in.readDouble(); } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionRateTaskArg.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionRateTaskArg.java new file mode 100644 index 0000000000000..2471296fcc65a --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionRateTaskArg.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.encryption; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.apache.ignite.internal.dto.IgniteDataTransferObject; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.jetbrains.annotations.Nullable; + +/** + * Re-encryption rate task argument. 
+ */ +public class VisorReencryptionRateTaskArg extends IgniteDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** Re-encryption rate limit in megabytes per second. */ + private Double rate; + + /** Default constructor. */ + public VisorReencryptionRateTaskArg() { + // No-op. + } + + /** + * @param rate Re-encryption rate limit in megabytes per second. + */ + public VisorReencryptionRateTaskArg(Double rate) { + this.rate = rate; + } + + /** + * @return Re-encryption rate limit in megabytes per second. + */ + public @Nullable Double rate() { + return rate; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + out.writeBoolean(rate != null); + + if (rate != null) + out.writeDouble(rate); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte ver, ObjectInput in) throws IOException, ClassNotFoundException { + if (in.readBoolean()) + rate = in.readDouble(); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(VisorReencryptionRateTaskArg.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionResumeTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionResumeTask.java new file mode 100644 index 0000000000000..e7ddff23f0b3b --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionResumeTask.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.encryption; + +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.task.GridInternal; +import org.apache.ignite.internal.visor.VisorJob; +import org.apache.ignite.internal.visor.encryption.VisorCacheGroupEncryptionTask.VisorSingleFieldDto; +import org.apache.ignite.internal.visor.encryption.VisorReencryptionSuspendTask.VisorReencryptionSuspendResumeJobResult; +import org.jetbrains.annotations.Nullable; + +/** + * Resume re-encryption of the cache group. + */ +@GridInternal +public class VisorReencryptionResumeTask extends VisorCacheGroupEncryptionTask> { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override protected VisorJob> job( + VisorCacheGroupEncryptionTaskArg arg) { + return new VisorReencryptionResumeJob(arg, debug); + } + + /** The job to resume re-encryption of the cache group. */ + private static class VisorReencryptionResumeJob extends VisorReencryptionBaseJob { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** + * @param arg Job argument. + * @param debug Flag indicating whether debug information should be printed into node log. 
+ */ + protected VisorReencryptionResumeJob(@Nullable VisorCacheGroupEncryptionTaskArg arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override protected VisorSingleFieldDto run0(CacheGroupContext grp) throws IgniteCheckedException { + return new VisorReencryptionSuspendResumeJobResult().value( + ignite.context().encryption().resumeReencryption(grp.groupId())); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionStatusTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionStatusTask.java new file mode 100644 index 0000000000000..9282d7a4c371f --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionStatusTask.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.visor.encryption; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.task.GridInternal; +import org.apache.ignite.internal.visor.VisorJob; +import org.apache.ignite.internal.visor.encryption.VisorCacheGroupEncryptionTask.VisorSingleFieldDto; +import org.jetbrains.annotations.Nullable; + +/** + * Get re-encryption status of the cache group. + */ +@GridInternal +public class VisorReencryptionStatusTask extends VisorCacheGroupEncryptionTask> { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override protected VisorJob> job( + VisorCacheGroupEncryptionTaskArg arg) { + return new VisorReencryptionStatusJob(arg, debug); + } + + /** The job to get re-encryption status of the cache group. */ + private static class VisorReencryptionStatusJob extends VisorReencryptionBaseJob { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** + * @param arg Job argument. + * @param debug Flag indicating whether debug information should be printed into node log. + */ + protected VisorReencryptionStatusJob(@Nullable VisorCacheGroupEncryptionTaskArg arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override protected VisorSingleFieldDto run0(CacheGroupContext grp) { + long res; + + if (!ignite.context().encryption().reencryptionInProgress(grp.groupId())) + res = -1; + else + res = ignite.context().encryption().getBytesLeftForReencryption(grp.groupId()); + + return new VisorReencryptionStatusResult().value(res); + } + } + + /** */ + protected static class VisorReencryptionStatusResult extends VisorSingleFieldDto { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** */ + public VisorReencryptionStatusResult() { + // No-op. 
+ } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + out.writeLong(value()); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte ver, ObjectInput in) throws IOException, ClassNotFoundException { + value(in.readLong()); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionSuspendTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionSuspendTask.java new file mode 100644 index 0000000000000..c3d8093f067af --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionSuspendTask.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.visor.encryption; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.task.GridInternal; +import org.apache.ignite.internal.visor.VisorJob; +import org.apache.ignite.internal.visor.encryption.VisorCacheGroupEncryptionTask.VisorSingleFieldDto; +import org.jetbrains.annotations.Nullable; + +/** + * Suspend re-encryption of the cache group. + */ +@GridInternal +public class VisorReencryptionSuspendTask extends VisorCacheGroupEncryptionTask> { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override protected VisorJob> job( + VisorCacheGroupEncryptionTaskArg arg) { + return new VisorReencryptionSuspendJob(arg, debug); + } + + /** The job to suspend re-encryption of the cache group. */ + private static class VisorReencryptionSuspendJob extends VisorReencryptionBaseJob { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** + * @param arg Job argument. + * @param debug Flag indicating whether debug information should be printed into node log. + */ + protected VisorReencryptionSuspendJob(@Nullable VisorCacheGroupEncryptionTaskArg arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override protected VisorSingleFieldDto run0(CacheGroupContext grp) throws IgniteCheckedException { + return new VisorReencryptionSuspendResumeJobResult().value( + ignite.context().encryption().suspendReencryption(grp.groupId())); + } + } + + /** */ + protected static class VisorReencryptionSuspendResumeJobResult extends VisorSingleFieldDto { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** */ + public VisorReencryptionSuspendResumeJobResult() { + // No-op. 
+ } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + out.writeBoolean(value()); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte ver, ObjectInput in) throws IOException, ClassNotFoundException { + value(in.readBoolean()); + } + } +} diff --git a/modules/core/src/main/resources/META-INF/classnames.properties b/modules/core/src/main/resources/META-INF/classnames.properties index 92d01dda9d92d..2148755a9d96d 100644 --- a/modules/core/src/main/resources/META-INF/classnames.properties +++ b/modules/core/src/main/resources/META-INF/classnames.properties @@ -2124,6 +2124,7 @@ org.apache.ignite.internal.visor.debug.VisorThreadDumpTaskResult org.apache.ignite.internal.visor.debug.VisorThreadInfo org.apache.ignite.internal.visor.debug.VisorThreadLockInfo org.apache.ignite.internal.visor.debug.VisorThreadMonitorInfo +org.apache.ignite.internal.visor.encryption.VisorCacheGroupEncryptionTaskResult org.apache.ignite.internal.visor.event.VisorGridDeploymentEvent org.apache.ignite.internal.visor.event.VisorGridDiscoveryEvent org.apache.ignite.internal.visor.event.VisorGridEvent diff --git a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output index 171303dcd9604..f001165ffdaea 100644 --- a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output +++ b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output @@ -77,13 +77,14 @@ This utility can do the following commands: View encryption key identifiers of the cache group: control.(sh|bat) --encryption cache_key_ids cacheGroupName - Control the process of re-encryption of the cache group: - control.(sh|bat) --encryption group_reencryption cacheGroupName [--status --suspend --resume] + Display 
re-encryption status of the cache group: + control.(sh|bat) --encryption reencryption_status cacheGroupName - Parameters: - --status - Display re-encryption status (default action). - --suspend - Suspend re-encryption. - --resume - Resume re-encryption. + Suspend re-encryption of the cache group: + control.(sh|bat) --encryption suspend_reencryption cacheGroupName + + Resume re-encryption of the cache group: + control.(sh|bat) --encryption resume_reencryption cacheGroupName View/change re-encryption rate limit: control.(sh|bat) --encryption reencryption_rate [--limit limit] diff --git a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output index 171303dcd9604..f001165ffdaea 100644 --- a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output +++ b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output @@ -77,13 +77,14 @@ This utility can do the following commands: View encryption key identifiers of the cache group: control.(sh|bat) --encryption cache_key_ids cacheGroupName - Control the process of re-encryption of the cache group: - control.(sh|bat) --encryption group_reencryption cacheGroupName [--status --suspend --resume] + Display re-encryption status of the cache group: + control.(sh|bat) --encryption reencryption_status cacheGroupName - Parameters: - --status - Display re-encryption status (default action). - --suspend - Suspend re-encryption. - --resume - Resume re-encryption. 
+ Suspend re-encryption of the cache group: + control.(sh|bat) --encryption suspend_reencryption cacheGroupName + + Resume re-encryption of the cache group: + control.(sh|bat) --encryption resume_reencryption cacheGroupName View/change re-encryption rate limit: control.(sh|bat) --encryption reencryption_rate [--limit limit] From 2b60814f6605418b743a35dc6aee74132107a2f8 Mon Sep 17 00:00:00 2001 From: Pavel Pereslegin Date: Wed, 4 Nov 2020 10:51:06 +0300 Subject: [PATCH 100/110] IGNITE-13320 Fix after rebase. --- .../encryption/CacheGroupEncryptionCommand.java | 3 ++- .../commandline/encryption/ChangeCacheGroupKeyCommand.java | 3 ++- .../commandline/encryption/ChangeMasterKeyCommand.java | 3 ++- .../commandline/encryption/EncryptionCommands.java | 7 +++---- .../commandline/encryption/GetMasterKeyNameCommand.java | 3 ++- .../commandline/encryption/ReencryptionRateCommand.java | 3 ++- 6 files changed, 13 insertions(+), 9 deletions(-) diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupEncryptionCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupEncryptionCommand.java index 6cea495127613..8870ca3fdcc34 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupEncryptionCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupEncryptionCommand.java @@ -24,6 +24,7 @@ import org.apache.ignite.IgniteException; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandList; @@ -50,7 +51,7 @@ * @param Multinode task result. 
*/ public abstract class CacheGroupEncryptionCommand> - implements Command { + extends AbstractCommand { /** Cache group reencryption task argument. */ private VisorCacheGroupEncryptionTaskArg taskArg; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeCacheGroupKeyCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeCacheGroupKeyCommand.java index dcc19e750314d..8518e5d629019 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeCacheGroupKeyCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeCacheGroupKeyCommand.java @@ -20,6 +20,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -33,7 +34,7 @@ /** * Change cache group key encryption subcommand. */ -public class ChangeCacheGroupKeyCommand implements Command { +public class ChangeCacheGroupKeyCommand extends AbstractCommand { /** Change cache group key task argument. 
*/ private VisorCacheGroupEncryptionTaskArg taskArg; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeMasterKeyCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeMasterKeyCommand.java index b7f5f0b216211..a48dc4bcbb4d3 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeMasterKeyCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ChangeMasterKeyCommand.java @@ -20,6 +20,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -32,7 +33,7 @@ /** * Change master key encryption subcommand. */ -public class ChangeMasterKeyCommand implements Command { +public class ChangeMasterKeyCommand extends AbstractCommand { /** New master key name. 
*/ private String argMasterKeyName; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommands.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommands.java index 0022ed6e50b0a..fbae770bdc2ed 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommands.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionCommands.java @@ -20,7 +20,6 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClientConfiguration; import org.apache.ignite.internal.commandline.AbstractCommand; -import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandList; @@ -29,7 +28,7 @@ * * @see EncryptionSubcommands */ -public class EncryptionCommands extends AbstractCommand { +public class EncryptionCommands extends AbstractCommand { /** Subcommand. 
*/ private EncryptionSubcommands cmd; @@ -54,8 +53,8 @@ public class EncryptionCommands extends AbstractCommand { } /** {@inheritDoc} */ - @Override public Object arg() { - return null; + @Override public EncryptionSubcommands arg() { + return cmd; } /** {@inheritDoc} */ diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GetMasterKeyNameCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GetMasterKeyNameCommand.java index 029afa5b98ef5..02bb8ed9750ef 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GetMasterKeyNameCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/GetMasterKeyNameCommand.java @@ -20,6 +20,7 @@ import java.util.logging.Logger; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandLogger; import org.apache.ignite.internal.visor.encryption.VisorGetMasterKeyNameTask; @@ -31,7 +32,7 @@ /** * Get master key name encryption subcommand. 
*/ -public class GetMasterKeyNameCommand implements Command { +public class GetMasterKeyNameCommand extends AbstractCommand { /** {@inheritDoc} */ @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { try (GridClient client = Command.startClient(clientCfg)) { diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java index bccdc02969d3c..f13ede9490cba 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java @@ -23,6 +23,7 @@ import org.apache.ignite.IgniteException; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.AbstractCommand; import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; @@ -44,7 +45,7 @@ /** * View/change cache group re-encryption rate limit subcommand. */ -public class ReencryptionRateCommand implements Command { +public class ReencryptionRateCommand extends AbstractCommand { /** Re-encryption rate task argument. */ private VisorReencryptionRateTaskArg taskArg; From 8f26cc8ef6f728b2ef2c1b2d02771d8b49d2f786 Mon Sep 17 00:00:00 2001 From: Pavel Pereslegin Date: Thu, 5 Nov 2020 11:12:43 +0300 Subject: [PATCH 101/110] IGNITE-13320 Simplified generics args for VisorCacheGroupEncryptionTask. 
--- .../visor/encryption/VisorCacheGroupEncryptionTask.java | 6 ++++-- .../visor/encryption/VisorEncryptionKeyIdsTask.java | 3 +-- .../visor/encryption/VisorReencryptionResumeTask.java | 3 +-- .../visor/encryption/VisorReencryptionStatusTask.java | 3 +-- .../visor/encryption/VisorReencryptionSuspendTask.java | 3 +-- 5 files changed, 8 insertions(+), 10 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTask.java index 6118d0c29546d..37d5096ef1c7e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTask.java @@ -34,9 +34,11 @@ /** * Visor encrypted cache group multinode task. + * + * @param The type of the task result. */ -public abstract class VisorCacheGroupEncryptionTask - extends VisorMultiNodeTask, R> +public abstract class VisorCacheGroupEncryptionTask extends VisorMultiNodeTask, VisorCacheGroupEncryptionTask.VisorSingleFieldDto> { /** {@inheritDoc} */ @Nullable @Override protected VisorCacheGroupEncryptionTaskResult reduce0(List results) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorEncryptionKeyIdsTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorEncryptionKeyIdsTask.java index 311c139111689..9b8d7e6a796b6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorEncryptionKeyIdsTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorEncryptionKeyIdsTask.java @@ -25,14 +25,13 @@ import org.apache.ignite.internal.processors.task.GridInternal; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.internal.visor.VisorJob; -import 
org.apache.ignite.internal.visor.encryption.VisorCacheGroupEncryptionTask.VisorSingleFieldDto; import org.jetbrains.annotations.Nullable; /** * Get current encryption key IDs of the cache group. */ @GridInternal -public class VisorEncryptionKeyIdsTask extends VisorCacheGroupEncryptionTask>> { +public class VisorEncryptionKeyIdsTask extends VisorCacheGroupEncryptionTask> { /** Serial version uid. */ private static final long serialVersionUID = 0L; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionResumeTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionResumeTask.java index e7ddff23f0b3b..171130c26d6df 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionResumeTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionResumeTask.java @@ -21,7 +21,6 @@ import org.apache.ignite.internal.processors.cache.CacheGroupContext; import org.apache.ignite.internal.processors.task.GridInternal; import org.apache.ignite.internal.visor.VisorJob; -import org.apache.ignite.internal.visor.encryption.VisorCacheGroupEncryptionTask.VisorSingleFieldDto; import org.apache.ignite.internal.visor.encryption.VisorReencryptionSuspendTask.VisorReencryptionSuspendResumeJobResult; import org.jetbrains.annotations.Nullable; @@ -29,7 +28,7 @@ * Resume re-encryption of the cache group. */ @GridInternal -public class VisorReencryptionResumeTask extends VisorCacheGroupEncryptionTask> { +public class VisorReencryptionResumeTask extends VisorCacheGroupEncryptionTask { /** Serial version uid. 
*/ private static final long serialVersionUID = 0L; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionStatusTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionStatusTask.java index 9282d7a4c371f..df6004747382b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionStatusTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionStatusTask.java @@ -23,14 +23,13 @@ import org.apache.ignite.internal.processors.cache.CacheGroupContext; import org.apache.ignite.internal.processors.task.GridInternal; import org.apache.ignite.internal.visor.VisorJob; -import org.apache.ignite.internal.visor.encryption.VisorCacheGroupEncryptionTask.VisorSingleFieldDto; import org.jetbrains.annotations.Nullable; /** * Get re-encryption status of the cache group. */ @GridInternal -public class VisorReencryptionStatusTask extends VisorCacheGroupEncryptionTask> { +public class VisorReencryptionStatusTask extends VisorCacheGroupEncryptionTask { /** Serial version uid. 
*/ private static final long serialVersionUID = 0L; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionSuspendTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionSuspendTask.java index c3d8093f067af..edbfd464893c9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionSuspendTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorReencryptionSuspendTask.java @@ -24,14 +24,13 @@ import org.apache.ignite.internal.processors.cache.CacheGroupContext; import org.apache.ignite.internal.processors.task.GridInternal; import org.apache.ignite.internal.visor.VisorJob; -import org.apache.ignite.internal.visor.encryption.VisorCacheGroupEncryptionTask.VisorSingleFieldDto; import org.jetbrains.annotations.Nullable; /** * Suspend re-encryption of the cache group. */ @GridInternal -public class VisorReencryptionSuspendTask extends VisorCacheGroupEncryptionTask> { +public class VisorReencryptionSuspendTask extends VisorCacheGroupEncryptionTask { /** Serial version uid. */ private static final long serialVersionUID = 0L; From 6cb60691678963f3f20bdb38331a30c953836549 Mon Sep 17 00:00:00 2001 From: Pavel Pereslegin Date: Thu, 5 Nov 2020 12:57:17 +0300 Subject: [PATCH 102/110] IGNITE-13320 (minor) Code cleanup. 
--- .../commandline/encryption/CacheGroupEncryptionCommand.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupEncryptionCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupEncryptionCommand.java index 8870ca3fdcc34..abc0e35fc66f9 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupEncryptionCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupEncryptionCommand.java @@ -136,8 +136,7 @@ protected static class ReencryptionStatus extends @Override protected void printNodeResult(Long bytesLeft, String grpName, Logger log) { if (bytesLeft == -1) log.info(DOUBLE_INDENT + "re-encryption completed or not required"); - else - if (bytesLeft == 0) + else if (bytesLeft == 0) log.info(DOUBLE_INDENT + "re-encryption will be completed after the next checkpoint"); else log.info(String.format("%s%d KB of data left for re-encryption", DOUBLE_INDENT, bytesLeft / 1024)); From fba0ccd6cc2b3f3d95682d3b293d6bd49cfd6b56 Mon Sep 17 00:00:00 2001 From: Pavel Pereslegin Date: Thu, 5 Nov 2020 14:53:50 +0300 Subject: [PATCH 103/110] IGNITE-13320 Update classname.properties --- .../encryption/VisorEncryptionKeyIdsTask.java | 6 +++--- .../resources/META-INF/classnames.properties | 20 ++++++++++++++++++- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorEncryptionKeyIdsTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorEncryptionKeyIdsTask.java index 9b8d7e6a796b6..ca5a25475fa90 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorEncryptionKeyIdsTask.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorEncryptionKeyIdsTask.java @@ -56,17 +56,17 @@ protected VisorEncryptionKeyIdsJob(@Nullable VisorCacheGroupEncryptionTaskArg ar /** {@inheritDoc} */ @Override protected VisorSingleFieldDto> run0(CacheGroupContext grp) { - return new IntArrayResult().value(ignite.context().encryption().groupKeyIds(grp.groupId())); + return new VisorEncryptionKeyIdsResult().value(ignite.context().encryption().groupKeyIds(grp.groupId())); } } /** */ - protected static class IntArrayResult extends VisorSingleFieldDto> { + protected static class VisorEncryptionKeyIdsResult extends VisorSingleFieldDto> { /** Serial version uid. */ private static final long serialVersionUID = 0L; /** */ - public IntArrayResult() { + public VisorEncryptionKeyIdsResult() { // No-op. } diff --git a/modules/core/src/main/resources/META-INF/classnames.properties b/modules/core/src/main/resources/META-INF/classnames.properties index 2148755a9d96d..3ec37dd7bd8ff 100644 --- a/modules/core/src/main/resources/META-INF/classnames.properties +++ b/modules/core/src/main/resources/META-INF/classnames.properties @@ -2124,7 +2124,6 @@ org.apache.ignite.internal.visor.debug.VisorThreadDumpTaskResult org.apache.ignite.internal.visor.debug.VisorThreadInfo org.apache.ignite.internal.visor.debug.VisorThreadLockInfo org.apache.ignite.internal.visor.debug.VisorThreadMonitorInfo -org.apache.ignite.internal.visor.encryption.VisorCacheGroupEncryptionTaskResult org.apache.ignite.internal.visor.event.VisorGridDeploymentEvent org.apache.ignite.internal.visor.event.VisorGridDiscoveryEvent org.apache.ignite.internal.visor.event.VisorGridEvent @@ -2310,6 +2309,25 @@ org.apache.ignite.internal.visor.encryption.VisorGetMasterKeyNameTask org.apache.ignite.internal.visor.encryption.VisorGetMasterKeyNameTask$VisorGetMasterKeyNameJob org.apache.ignite.internal.visor.encryption.VisorChangeMasterKeyTask 
org.apache.ignite.internal.visor.encryption.VisorChangeMasterKeyTask$VisorChangeMasterKeyJob +org.apache.ignite.internal.visor.encryption.VisorCacheGroupEncryptionTaskArg +org.apache.ignite.internal.visor.encryption.VisorCacheGroupEncryptionTaskResult +org.apache.ignite.internal.visor.encryption.VisorChangeCacheGroupKeyTask +org.apache.ignite.internal.visor.encryption.VisorChangeCacheGroupKeyTask$VisorChangeCacheGroupKeyJob +org.apache.ignite.internal.visor.encryption.VisorEncryptionKeyIdsTask +org.apache.ignite.internal.visor.encryption.VisorEncryptionKeyIdsTask$VisorEncryptionKeyIdsJob +org.apache.ignite.internal.visor.encryption.VisorEncryptionKeyIdsTask$VisorEncryptionKeyIdsResult +org.apache.ignite.internal.visor.encryption.VisorReencryptionRateTask +org.apache.ignite.internal.visor.encryption.VisorReencryptionRateTask$VisorReencryptionRateJob +org.apache.ignite.internal.visor.encryption.VisorReencryptionRateTask$ReencryptionRateJobResult +org.apache.ignite.internal.visor.encryption.VisorReencryptionRateTaskArg +org.apache.ignite.internal.visor.encryption.VisorReencryptionResumeTask +org.apache.ignite.internal.visor.encryption.VisorReencryptionResumeTask$VisorReencryptionResumeJob +org.apache.ignite.internal.visor.encryption.VisorReencryptionStatusTask +org.apache.ignite.internal.visor.encryption.VisorReencryptionStatusTask$VisorReencryptionStatusJob +org.apache.ignite.internal.visor.encryption.VisorReencryptionStatusTask$VisorReencryptionStatusResult +org.apache.ignite.internal.visor.encryption.VisorReencryptionSuspendTask +org.apache.ignite.internal.visor.encryption.VisorReencryptionSuspendTask$VisorReencryptionSuspendJob +org.apache.ignite.internal.visor.encryption.VisorReencryptionSuspendTask$VisorReencryptionSuspendResumeJobResult org.apache.ignite.internal.visor.util.VisorClusterGroupEmptyException org.apache.ignite.internal.visor.util.VisorEventMapper org.apache.ignite.internal.visor.util.VisorExceptionWrapper From 
1b2b8d553452334766413d2f54b5216d8d0c119c Mon Sep 17 00:00:00 2001 From: Pavel Pereslegin Date: Thu, 5 Nov 2020 19:17:06 +0300 Subject: [PATCH 104/110] IGNITE-13320 Simplify generics args. --- .../CacheGroupEncryptionCommand.java | 21 +++++++------------ 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupEncryptionCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupEncryptionCommand.java index abc0e35fc66f9..e131febc443e2 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupEncryptionCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupEncryptionCommand.java @@ -48,10 +48,8 @@ * Base cache group encryption multinode subcommand. * * @param Command result type. - * @param Multinode task result. */ -public abstract class CacheGroupEncryptionCommand> - extends AbstractCommand { +public abstract class CacheGroupEncryptionCommand extends AbstractCommand { /** Cache group reencryption task argument. */ private VisorCacheGroupEncryptionTaskArg taskArg; @@ -73,7 +71,7 @@ public abstract class CacheGroupEncryptionCommand res = executeTaskByNameOnNode( client, visorTaskName(), taskArg, @@ -98,7 +96,7 @@ public abstract class CacheGroupEncryptionCommand res, String grpName, Logger log) { Map exceptions = res.exceptions(); for (Map.Entry entry : exceptions.entrySet()) { @@ -130,8 +128,7 @@ protected void printResults(S res, String grpName, Logger log) { protected abstract String visorTaskName(); /** Subcommand to Display re-encryption status of the cache group. 
*/ - protected static class ReencryptionStatus extends - CacheGroupEncryptionCommand> { + protected static class ReencryptionStatus extends CacheGroupEncryptionCommand { /** {@inheritDoc} */ @Override protected void printNodeResult(Long bytesLeft, String grpName, Logger log) { if (bytesLeft == -1) @@ -160,9 +157,7 @@ else if (bytesLeft == 0) } /** Subcommand to view current encryption key IDs of the cache group. */ - protected static class CacheKeyIds extends - CacheGroupEncryptionCommand, VisorCacheGroupEncryptionTaskResult>> { - + protected static class CacheKeyIds extends CacheGroupEncryptionCommand> { /** {@inheritDoc} */ @Override protected void printResults( VisorCacheGroupEncryptionTaskResult> res, @@ -204,8 +199,7 @@ protected static class CacheKeyIds extends } /** Subcommand to suspend re-encryption of the cache group. */ - protected static class SuspendReencryption extends - CacheGroupEncryptionCommand> { + protected static class SuspendReencryption extends CacheGroupEncryptionCommand { /** {@inheritDoc} */ @Override protected String visorTaskName() { return VisorReencryptionSuspendTask.class.getName(); @@ -230,8 +224,7 @@ protected static class SuspendReencryption extends } /** Subcommand to resume re-encryption of the cache group. */ - protected static class ResumeReencryption extends - CacheGroupEncryptionCommand> { + protected static class ResumeReencryption extends CacheGroupEncryptionCommand { /** {@inheritDoc} */ @Override protected String visorTaskName() { return VisorReencryptionResumeTask.class.getName(); From 56a6012ff5460c4093edfd660e18d5c27190809c Mon Sep 17 00:00:00 2001 From: Pavel Pereslegin Date: Mon, 30 Nov 2020 10:54:43 +0300 Subject: [PATCH 105/110] IGNITE-13320 Review notes. 
--- .../encryption/EncryptionSubcommands.java | 2 +- .../encryption/ReencryptionRateCommand.java | 7 +++---- .../VisorCacheGroupEncryptionTaskResult.java | 12 +++--------- .../GridCommandHandlerClusterByClassTest_help.output | 2 +- ...mmandHandlerClusterByClassWithSSLTest_help.output | 2 +- 5 files changed, 9 insertions(+), 16 deletions(-) diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommands.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommands.java index 0187a66c5da16..c8d09419e39ea 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommands.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/EncryptionSubcommands.java @@ -48,7 +48,7 @@ public enum EncryptionSubcommands { REENCRYPTION_RESUME("resume_reencryption", new CacheGroupEncryptionCommand.ResumeReencryption()), /** Subcommand to view/change cache group re-encryption rate limit. */ - REENCRYPTION_RATE("reencryption_rate", new ReencryptionRateCommand()); + REENCRYPTION_RATE("reencryption_rate_limit", new ReencryptionRateCommand()); /** Subcommand name. 
*/ private final String name; diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java index f13ede9490cba..5443f8602a6a3 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java @@ -60,17 +60,16 @@ public class ReencryptionRateCommand extends AbstractCommand exceptions = res.exceptions(); for (Map.Entry entry : exceptions.entrySet()) { log.info(INDENT + "Node " + entry.getKey() + ":"); - log.info(String.format("%sfailed to %s re-encryption rate: %s.", - DOUBLE_INDENT, (read ? "get" : "limit"), entry.getValue().getMessage())); + log.info(DOUBLE_INDENT + + "failed to get/set re-encryption rate limit: " + entry.getValue().getMessage()); } Map results = res.results(); + boolean read = taskArg.rate() == null; for (Map.Entry entry : results.entrySet()) { log.info(INDENT + "Node " + entry.getKey() + ":"); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTaskResult.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTaskResult.java index 70b18053f08c4..f29a99b295742 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTaskResult.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/encryption/VisorCacheGroupEncryptionTaskResult.java @@ -20,7 +20,7 @@ import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; -import java.util.HashMap; +import java.util.Collections; import java.util.Map; import java.util.UUID; import org.apache.ignite.IgniteException; @@ -63,18 +63,12 @@ public 
VisorCacheGroupEncryptionTaskResult() { /** @return Per node job result. */ public Map results() { - if (results == null) - results = new HashMap<>(); - - return results; + return results == null ? Collections.emptyMap() : results; } /** @return Per node execution problems. */ public Map exceptions() { - if (exceptions == null) - exceptions = new HashMap<>(); - - return exceptions; + return exceptions == null ? Collections.emptyMap() : exceptions; } /** {@inheritDoc} */ diff --git a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output index f001165ffdaea..e5df88aa64d62 100644 --- a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output +++ b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassTest_help.output @@ -87,7 +87,7 @@ This utility can do the following commands: control.(sh|bat) --encryption resume_reencryption cacheGroupName View/change re-encryption rate limit: - control.(sh|bat) --encryption reencryption_rate [--limit limit] + control.(sh|bat) --encryption reencryption_rate_limit [--limit limit] Parameters: limit - Decimal value to change re-encryption rate limit (MB/s). 
diff --git a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output index f001165ffdaea..e5df88aa64d62 100644 --- a/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output +++ b/modules/core/src/test/resources/org.apache.ignite.util/GridCommandHandlerClusterByClassWithSSLTest_help.output @@ -87,7 +87,7 @@ This utility can do the following commands: control.(sh|bat) --encryption resume_reencryption cacheGroupName View/change re-encryption rate limit: - control.(sh|bat) --encryption reencryption_rate [--limit limit] + control.(sh|bat) --encryption reencryption_rate_limit [--limit limit] Parameters: limit - Decimal value to change re-encryption rate limit (MB/s). From 1d47d548dd1b84a26215b552e3445e531b6fc9b5 Mon Sep 17 00:00:00 2001 From: Pavel Pereslegin Date: Tue, 1 Dec 2020 18:49:07 +0300 Subject: [PATCH 106/110] IGNITE-13320 Simplify reencryption_rate_limit command syntax. 
--- .../encryption/ReencryptionRateCommand.java | 53 +++---------------- .../ignite/util/GridCommandHandlerTest.java | 4 +- ...mmandHandlerClusterByClassTest_help.output | 4 +- ...ndlerClusterByClassWithSSLTest_help.output | 4 +- 4 files changed, 13 insertions(+), 52 deletions(-) diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java index 5443f8602a6a3..2d67c65723129 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java @@ -27,8 +27,6 @@ import org.apache.ignite.internal.commandline.Command; import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; -import org.apache.ignite.internal.commandline.argument.CommandArg; -import org.apache.ignite.internal.commandline.argument.CommandArgUtils; import org.apache.ignite.internal.visor.encryption.VisorCacheGroupEncryptionTaskResult; import org.apache.ignite.internal.visor.encryption.VisorReencryptionRateTask; import org.apache.ignite.internal.visor.encryption.VisorReencryptionRateTaskArg; @@ -104,22 +102,13 @@ public class ReencryptionRateCommand extends AbstractCommand Date: Wed, 2 Dec 2020 12:48:39 +0300 Subject: [PATCH 107/110] IGNITE-13320 Notice user about unexpected behaviour of suspend/rate_limit command. 
--- .../encryption/CacheGroupEncryptionCommand.java | 14 ++++++++++++++ .../encryption/ReencryptionRateCommand.java | 8 ++++++++ 2 files changed, 22 insertions(+) diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupEncryptionCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupEncryptionCommand.java index e131febc443e2..d4c09b896f96f 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupEncryptionCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/CacheGroupEncryptionCommand.java @@ -221,6 +221,20 @@ protected static class SuspendReencryption extends CacheGroupEncryptionCommand res, + String grpName, + Logger log + ) { + super.printResults(res, grpName, log); + + log.info(""); + log.info("Note: the re-encryption suspend status is not persisted, re-encryption will be started " + + "automatically after the node is restarted."); + log.info(""); + } } /** Subcommand to resume re-encryption of the cache group. */ diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java index 2d67c65723129..a75342721181c 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java @@ -82,6 +82,14 @@ public class ReencryptionRateCommand extends AbstractCommand Date: Fri, 4 Dec 2020 11:02:34 +0300 Subject: [PATCH 108/110] IGNITE-13320 (minor) Codestyle. 
--- .../commandline/encryption/ReencryptionRateCommand.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java index a75342721181c..7eb9f79304984 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/encryption/ReencryptionRateCommand.java @@ -114,7 +114,8 @@ public class ReencryptionRateCommand extends AbstractCommand Date: Fri, 4 Dec 2020 13:38:43 +0300 Subject: [PATCH 109/110] IGNITE-13320 (minor) Test improvement. --- .../internal/encryption/CacheGroupReencryptionTest.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/modules/core/src/test/java/org/apache/ignite/internal/encryption/CacheGroupReencryptionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/encryption/CacheGroupReencryptionTest.java index 1980982cb1091..19c8351184175 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/encryption/CacheGroupReencryptionTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/encryption/CacheGroupReencryptionTest.java @@ -385,7 +385,7 @@ public void testPartitionEvictionDuringReencryption() throws Exception { @Test public void testPartitionFileDestroy() throws Exception { backups = 1; - pageScanRate = 1; + pageScanRate = 0.2; pageScanBatchSize = 10; T2 nodes = startTestGrids(true); @@ -409,6 +409,10 @@ public void testPartitionFileDestroy() throws Exception { assertTrue(isReencryptionInProgress(Collections.singleton(cacheName()))); + // Set unlimited re-encryption rate. 
+ nodes.get1().context().encryption().setReencryptionRate(0); + nodes.get2().context().encryption().setReencryptionRate(0); + checkGroupKey(CU.cacheId(cacheName()), INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS); } From 2675dcaeb1780cb099e35fcec3c7f9b2c955ece6 Mon Sep 17 00:00:00 2001 From: Pavel Pereslegin Date: Mon, 7 Dec 2020 17:49:29 +0300 Subject: [PATCH 110/110] IGNITE-13320 Fix test (double format mismatch). --- .../org/apache/ignite/util/GridCommandHandlerTest.java | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java index 5501eb078c0db..f95d79a535185 100644 --- a/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java +++ b/modules/control-utility/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java @@ -2742,15 +2742,19 @@ public void testChangeReencryptionRate() throws Exception { assertEquals(EXIT_CODE_OK, ret); assertEquals(srvNodes, countSubstrs(testOut.toString(), "re-encryption rate is not limited.")); - ret = execute("--encryption", REENCRYPTION_RATE.toString(), "0.01"); + double newRate = 0.01; + + ret = execute("--encryption", REENCRYPTION_RATE.toString(), Double.toString(newRate)); assertEquals(EXIT_CODE_OK, ret); - assertEquals(srvNodes, countSubstrs(testOut.toString(), "re-encryption rate has been limited to 0.01 MB/s.")); + assertEquals(srvNodes, countSubstrs(testOut.toString(), + String.format("re-encryption rate has been limited to %.2f MB/s.", newRate))); ret = execute("--encryption", REENCRYPTION_RATE.toString()); assertEquals(EXIT_CODE_OK, ret); - assertEquals(srvNodes, countSubstrs(testOut.toString(), "re-encryption rate is limited to 0.01 MB/s.")); + assertEquals(srvNodes, countSubstrs(testOut.toString(), + String.format("re-encryption rate is limited to %.2f MB/s.", newRate))); ret = 
execute("--encryption", REENCRYPTION_RATE.toString(), "0");