From 4153f0dc6bf91dd64f5893079fd43f00b6c00df5 Mon Sep 17 00:00:00 2001 From: pereslegin-pa Date: Fri, 15 Mar 2019 17:19:06 +0300 Subject: [PATCH 1/7] IGNITE-7935 fake InvokeAll implementation. --- .../jmh/pagemem/JmhBatchUpdatesBenchmark.java | 424 ++++++++++++++++ .../apache/ignite/IgniteSystemProperties.java | 3 + .../ignite/internal/GridKernalContext.java | 4 + .../internal/GridKernalContextImpl.java | 11 + .../apache/ignite/internal/IgniteKernal.java | 3 + .../processors/cache/BatchedCacheEntries.java | 472 ++++++++++++++++++ .../processors/cache/CacheMetricsImpl.java | 9 + .../processors/cache/GridCacheEntryEx.java | 12 + .../processors/cache/GridCacheMapEntry.java | 85 ++++ .../cache/IgniteCacheOffheapManager.java | 33 ++ .../cache/IgniteCacheOffheapManagerImpl.java | 98 +++- .../preloader/GridDhtPartitionDemander.java | 223 ++++++++- .../preloader/GridDhtPartitionSupplier.java | 10 + .../dht/preloader/GridDhtPreloader.java | 5 + .../cache/persistence/DataStructure.java | 21 + .../persistence/GridCacheOffheapManager.java | 21 +- .../IgniteCacheDatabaseSharedManager.java | 5 +- .../cache/persistence/RowStore.java | 27 +- .../freelist/AbstractFreeList.java | 418 +++++++++++++++- .../freelist/CacheFreeListImpl.java | 5 +- .../cache/persistence/freelist/FreeList.java | 10 + .../persistence/metastorage/MetaStorage.java | 2 +- .../cache/persistence/tree/BPlusTree.java | 11 + .../tree/io/AbstractDataPageIO.java | 73 ++- .../persistence/tree/util/PageHandler.java | 95 ++++ .../reader/StandaloneGridKernalContext.java | 6 + .../cache/query/GridCacheQueryManager.java | 10 + .../processors/cache/tree/CacheDataTree.java | 133 +++++ .../datastreamer/DataStreamerImpl.java | 207 +++++++- .../processors/diag/DiagnosticProcessor.java | 166 ++++++ .../processors/diag/DiagnosticTopics.java | 77 +++ .../ignite/internal/util/IgniteTree.java | 31 ++ .../tcp/TcpCommunicationSpi.java | 15 + .../cache/GridCacheTestEntryEx.java | 6 + .../database/CacheFreeListImplSelfTest.java | 2 +- .../FreeListPreloadWithBatchUpdatesTest.java | 389 +++++++++++++++ 36 files changed, 3080 insertions(+), 42 deletions(-) create mode 100644 modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/pagemem/JmhBatchUpdatesBenchmark.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/BatchedCacheEntries.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/diag/DiagnosticProcessor.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/diag/DiagnosticTopics.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/database/FreeListPreloadWithBatchUpdatesTest.java diff --git a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/pagemem/JmhBatchUpdatesBenchmark.java b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/pagemem/JmhBatchUpdatesBenchmark.java new file mode 100644 index 0000000000000..522cb1c9ac6b4 --- /dev/null +++ b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/pagemem/JmhBatchUpdatesBenchmark.java @@ -0,0 +1,424 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.benchmarks.jmh.pagemem; + +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.CacheObject; +import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.processors.cache.GridCacheEntryInfo; +import org.apache.ignite.internal.processors.cache.KeyCacheObject; +import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionDemander; +import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPreloader; +import org.apache.ignite.logger.NullLogger; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.TearDown; +import org.openjdk.jmh.annotations.Threads; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.runner.Runner; +import org.openjdk.jmh.runner.RunnerException; +import org.openjdk.jmh.runner.options.Options; +import org.openjdk.jmh.runner.options.OptionsBuilder; + +/** + * Batch updates in pagemem through preloader. + * + * todo benchmark for internal testing purposes. 
+ */ +@BenchmarkMode(Mode.AverageTime) +@Fork(value = 1, jvmArgsAppend = {"-Xms3g", "-Xmx3g", "-server", "-XX:+AggressiveOpts", "-XX:MaxMetaspaceSize=256m"}) +@Measurement(iterations = 11) +@OutputTimeUnit(TimeUnit.MICROSECONDS) +@State(Scope.Benchmark) +@Threads(1) +@Warmup(iterations = 15) +public class JmhBatchUpdatesBenchmark { + /** */ + private static final long DEF_REG_SIZE = 3 * 1024 * 1024 * 1024L; + + /** */ + private static final int BATCH_SIZE = 500; + + /** */ + private static final String REG_BATCH = "batch-region"; + + /** */ + private static final String REG_SINGLE = "single-region"; + + /** */ + private static final String CACHE_BATCH = "batch"; + + /** */ + private static final String CACHE_SINGLE = "single"; + + /** */ + private static final String NODE_NAME = "srv0"; + + /** */ + private static int iteration = 0; + + /** */ + public enum RANGE { + /** */ + r0_4(0, 4), + + /** */ + r4_16(4, 16), + + /** */ + r16_64(16, 64), + + /** */ + r100_200(100, 200), + + /** */ + r200_500(200, 500), + + /** */ + r500_800(500, 800), + + /** */ + r800_1200(800, 1200), + + /** */ + r2000_3000(2_000, 3_000), + + /** */ + r1000_8000(1_000, 8_000), + + /** Large objects only. */ + r4000_16000(4_000, 16_000), + + /** Mixed objects, mostly large objects. */ + r0_32000(100, 32_000); + + /** */ + private final int min; + + /** */ + private final int max; + + /** */ + RANGE(int min, int max) { + this.min = min; + this.max = max; + } + } + + + /** + * Create Ignite configuration. + * + * @return Ignite configuration. + */ + private IgniteConfiguration getConfiguration(String cfgName) { + IgniteConfiguration cfg = new IgniteConfiguration(); + + cfg.setGridLogger(new NullLogger()); + + cfg.setIgniteInstanceName(cfgName); + + DataRegionConfiguration reg1 = new DataRegionConfiguration(); + reg1.setInitialSize(DEF_REG_SIZE); + reg1.setMaxSize(DEF_REG_SIZE); + reg1.setName(REG_BATCH); + + DataRegionConfiguration reg2 = new DataRegionConfiguration(); + reg2.setInitialSize(DEF_REG_SIZE); + reg2.setMaxSize(DEF_REG_SIZE); + reg2.setName(REG_SINGLE); + + DataStorageConfiguration storeCfg = new DataStorageConfiguration(); + + storeCfg.setDataRegionConfigurations(reg1, reg2); + + cfg.setDataStorageConfiguration(storeCfg); + + cfg.setCacheConfiguration(ccfg(false), ccfg(true)); + + return cfg; + } + + /** + * @return Cache configuration. + */ + private CacheConfiguration ccfg(boolean batch) { + return new CacheConfiguration(batch ? CACHE_BATCH : CACHE_SINGLE) + .setAffinity(new RendezvousAffinityFunction(false, 1)) + .setCacheMode(CacheMode.REPLICATED) + .setAtomicityMode(CacheAtomicityMode.ATOMIC) + .setDataRegionName(batch ? REG_BATCH : REG_SINGLE); + } + + + /** + * Test single updates. + * + * @param data Data that will be preloaded. + * @param preloader Data preloader. + */ + @Benchmark + public void checkSingle(Data data, Preloader preloader) throws IgniteCheckedException { + preloader.demanderSingle.preloadEntriesSingle(null, 0, data.singleData, data.cctxSingle.topology().readyTopologyVersion()); + } + + /** + * Test batch updates. + * + * @param data Data that will be preloaded. + * @param preloader Data preloader. + */ + @Benchmark + public void checkBatch(Data data, Preloader preloader) throws IgniteCheckedException { + preloader.demanderBatch.preloadEntriesBatch(null, 0, data.batchData, data.cctxBatch.topology().readyTopologyVersion()); + } + + + /** + * Start 2 servers and 1 client. 
+ */ + @Setup(Level.Trial) + public void setup() { + Ignition.start(getConfiguration(NODE_NAME)); + } + + /** + * Stop all grids after tests. + */ + @TearDown(Level.Trial) + public void tearDown() { + Ignition.stopAll(true); + } + + /** + * Create streamer on client cache. + */ + @State(Scope.Benchmark) + public static class Preloader { + /** */ + final GridDhtPartitionDemander demanderBatch = demander(CACHE_BATCH); + + /** */ + final GridDhtPartitionDemander demanderSingle = demander(CACHE_SINGLE); + + /** */ + GridDhtPartitionDemander demander(String name) { + GridCacheContext cctx = ((IgniteEx)Ignition.ignite(NODE_NAME)).cachex(name).context(); + + GridDhtPreloader preloader = (GridDhtPreloader)cctx.group().preloader(); + + return getFieldValue(preloader, "demander"); + } + + /** + * Get object field value via reflection. + * + * @param obj Object or class to get field value from. + * @param fieldNames Field names to get value for: obj->field1->field2->...->fieldN. + * @param Expected field class. + * @return Field value. + * @throws IgniteException In case of error. + */ + public static T getFieldValue(Object obj, String... fieldNames) throws IgniteException { + assert obj != null; + assert fieldNames != null; + assert fieldNames.length >= 1; + + try { + for (String fieldName : fieldNames) { + Class cls = obj instanceof Class ? (Class)obj : obj.getClass(); + + try { + obj = findField(cls, obj, fieldName); + } + catch (NoSuchFieldException e) { + throw new RuntimeException(e); + } + } + + return (T)obj; + } + catch (IllegalAccessException e) { + throw new IgniteException("Failed to get object field [obj=" + obj + + ", fieldNames=" + Arrays.toString(fieldNames) + ']', e); + } + } + + /** + * @param cls Class for searching. + * @param obj Target object. + * @param fieldName Field name for search. + * @return Field from object if it was found. + */ + private static Object findField(Class cls, Object obj, + String fieldName) throws NoSuchFieldException, IllegalAccessException { + // Resolve inner field. + Field field = cls.getDeclaredField(fieldName); + + boolean accessible = field.isAccessible(); + + if (!accessible) + field.setAccessible(true); + + return field.get(obj); + } + } + + /** + * Prepare and clean collection with streaming data. + */ + @State(Scope.Thread) + public static class Data { + /** */ + @Param + private RANGE range; + + /** */ + private int[] sizes; + + /** */ + Collection batchData = new ArrayList<>(BATCH_SIZE); + + /** */ + Collection singleData = new ArrayList<>(BATCH_SIZE); + + /** */ + GridCacheContext cctxBatch = ((IgniteEx)Ignition.ignite(NODE_NAME)).cachex(CACHE_BATCH).context(); + + /** */ + GridCacheContext cctxSingle = ((IgniteEx)Ignition.ignite(NODE_NAME)).cachex(CACHE_SINGLE).context(); + + /** */ + @Setup(Level.Trial) + public void setup() { + sizes = sizes(range.min, range.max, BATCH_SIZE); + } + + /** + * Prepare collection. + */ + @Setup(Level.Iteration) + public void prepare() { + int iter = iteration++; + + int off = iter * BATCH_SIZE; + + batchData = prepareBatch(cctxBatch, off, BATCH_SIZE, sizes); + singleData = prepareBatch(cctxSingle, off, BATCH_SIZE, sizes); + } + + /** + * Clean collection after each test. 
+ */ + @TearDown(Level.Iteration) + public void cleanCollection() { + batchData = null; + singleData = null; + } + + /** */ + int[] sizes(int minObjSize, int maxObjSize, int batchSize) { + int sizes[] = new int[batchSize]; + int minSize = maxObjSize; + int maxSize = minObjSize; + + int delta = maxObjSize - minObjSize; + + for (int i = 0; i < batchSize; i++) { + int size = sizes[i] = minObjSize + (delta > 0 ? ThreadLocalRandom.current().nextInt(delta) : 0); + + if (size < minSize) + minSize = size; + + if (size > maxSize) + maxSize = size; + } + + return sizes; + } + + /** + * Generates rebalance info objects. + * + * @param cctx Cache context. + * @param off Offset. + * @param cnt Count. + * @param sizes Object sizes. + * @return List of generated objects. + */ + private List prepareBatch(GridCacheContext cctx, int off, int cnt, int[] sizes) { + List infos = new ArrayList<>(); + + for (int i = off; i < off + cnt; i++) { + int size = sizes[i - off]; + + KeyCacheObject key = cctx.toCacheKeyObject(i); + CacheObject val = cctx.toCacheObject(new byte[size]); + + GridCacheEntryInfo info = new GridCacheEntryInfo(); + info.key(key); + info.value(val); + info.cacheId(cctx.cacheId()); + info.version(cctx.shared().versions().startVersion()); + + infos.add(info); + } + + return infos; + } + } + + /** + * Run benchmark. + * + * @param args Args. + */ + public static void main(String[] args) throws RunnerException { + final Options options = new OptionsBuilder() + .include(JmhBatchUpdatesBenchmark.class.getSimpleName()) + .build(); + + new Runner(options).run(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java b/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java index 9f1e063a90d02..6f9536948a4e3 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java @@ -1095,6 +1095,9 @@ public final class IgniteSystemProperties { */ public static final String IGNITE_DISCOVERY_DISABLE_CACHE_METRICS_UPDATE = "IGNITE_DISCOVERY_DISABLE_CACHE_METRICS_UPDATE"; + /** */ + public static final String IGNITE_DATA_STORAGE_BATCH_PAGE_WRITE = "IGNITE_DATA_STORAGE_BATCH_PAGE_WRITE"; + /** * Maximum number of different partitions to be extracted from between expression within sql query. * In case of limit exceeding all partitions will be used. 
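Note on the flag added above: IGNITE_DATA_STORAGE_BATCH_PAGE_WRITE is read once by GridDhtPartitionDemander (see that file's changes further down in this patch) and defaults to false. The standalone sketch below is not part of the patch; it only illustrates how the flag is expected to be set and how the demander gates the batch path on it. The BatchPageWriteExample class name and the hard-coded chunk size are illustrative only; the threshold of 5 mirrors BATCH_PRELOAD_THRESHOLD from the demander.

import org.apache.ignite.IgniteSystemProperties;

/** Illustrative sketch only; not part of the patch. */
public class BatchPageWriteExample {
    public static void main(String[] args) {
        // The property must be set before node start, e.g. -DIGNITE_DATA_STORAGE_BATCH_PAGE_WRITE=true.
        System.setProperty(IgniteSystemProperties.IGNITE_DATA_STORAGE_BATCH_PAGE_WRITE, "true");

        // GridDhtPartitionDemander reads the flag the same way and defaults it to false.
        boolean batchPageWriteEnabled = IgniteSystemProperties.getBoolean(
            IgniteSystemProperties.IGNITE_DATA_STORAGE_BATCH_PAGE_WRITE, false);

        // The batch path is taken only for sufficiently large chunks
        // (BATCH_PRELOAD_THRESHOLD == 5 in this patch).
        int entriesInChunk = 500;

        boolean useBatch = batchPageWriteEnabled && entriesInChunk > 5;

        System.out.println("Batch preloading enabled: " + useBatch);
    }
}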
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java index 744f85857a203..ead2402741614 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java @@ -46,6 +46,7 @@ import org.apache.ignite.internal.processors.continuous.GridContinuousProcessor; import org.apache.ignite.internal.processors.datastreamer.DataStreamProcessor; import org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor; +import org.apache.ignite.internal.processors.diag.DiagnosticProcessor; import org.apache.ignite.internal.processors.failure.FailureProcessor; import org.apache.ignite.internal.processors.hadoop.HadoopHelper; import org.apache.ignite.internal.processors.hadoop.HadoopProcessorAdapter; @@ -483,6 +484,9 @@ public interface GridKernalContext extends Iterable { */ public FailureProcessor failure(); + /** */ + public DiagnosticProcessor diagnostic(); + /** * Print grid kernal memory stats (sizes of internal structures, etc.). * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java index 85e02f93ce057..148b700a9227c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java @@ -48,6 +48,7 @@ import org.apache.ignite.internal.managers.failover.GridFailoverManager; import org.apache.ignite.internal.managers.indexing.GridIndexingManager; import org.apache.ignite.internal.managers.loadbalancer.GridLoadBalancerManager; +import org.apache.ignite.internal.processors.diag.DiagnosticProcessor; import org.apache.ignite.internal.processors.service.ServiceProcessorAdapter; import org.apache.ignite.internal.processors.affinity.GridAffinityProcessor; import org.apache.ignite.internal.processors.authentication.IgniteAuthenticationProcessor; @@ -433,6 +434,9 @@ public class GridKernalContextImpl implements GridKernalContext, Externalizable /** Failure processor. */ private FailureProcessor failureProc; + /** */ + private DiagnosticProcessor diagProc; + /** Recovery mode flag. Flag is set to {@code false} when discovery manager started. 
*/ private boolean recoveryMode = true; @@ -598,6 +602,8 @@ else if (comp instanceof GridEncryptionManager) else if (comp instanceof FailureProcessor) failureProc = (FailureProcessor)comp; + else if (comp instanceof DiagnosticProcessor) + diagProc = (DiagnosticProcessor)comp; else if (comp instanceof GridTaskProcessor) taskProc = (GridTaskProcessor)comp; else if (comp instanceof GridJobProcessor) @@ -1220,6 +1226,11 @@ void disconnected(boolean disconnected) { return failureProc; } + /** {@inheritDoc} */ + @Override public DiagnosticProcessor diagnostic() { + return diagProc; + } + /** {@inheritDoc} */ @Override public Thread.UncaughtExceptionHandler uncaughtExceptionHandler() { return hnd; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java index 9462c50194bf7..cb6ba9aa81e69 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java @@ -145,6 +145,7 @@ import org.apache.ignite.internal.processors.continuous.GridContinuousProcessor; import org.apache.ignite.internal.processors.datastreamer.DataStreamProcessor; import org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor; +import org.apache.ignite.internal.processors.diag.DiagnosticProcessor; import org.apache.ignite.internal.processors.failure.FailureProcessor; import org.apache.ignite.internal.processors.hadoop.Hadoop; import org.apache.ignite.internal.processors.hadoop.HadoopProcessorAdapter; @@ -976,6 +977,8 @@ public void start( startProcessor(new FailureProcessor(ctx)); + startProcessor(new DiagnosticProcessor(ctx)); + startProcessor(new PoolProcessor(ctx)); // Closure processor should be started before all others diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/BatchedCacheEntries.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/BatchedCacheEntries.java new file mode 100644 index 0000000000000..e0e5aad22ef6e --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/BatchedCacheEntries.java @@ -0,0 +1,472 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Set; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; +import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition; +import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; +import org.apache.ignite.internal.processors.cache.persistence.CacheSearchRow; +import org.apache.ignite.internal.processors.cache.tree.DataRow; +import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; +import org.apache.ignite.internal.processors.dr.GridDrType; +import org.apache.ignite.internal.util.IgniteTree; +import org.apache.ignite.internal.util.typedef.T2; +import org.apache.ignite.internal.util.typedef.T3; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.jetbrains.annotations.Nullable; + +import static org.apache.ignite.internal.processors.cache.GridCacheMapEntry.ATOMIC_VER_COMPARATOR; +import static org.apache.ignite.internal.util.IgniteTree.OperationType.NOOP; +import static org.apache.ignite.internal.util.IgniteTree.OperationType.PUT; +import static org.apache.ignite.internal.util.IgniteTree.OperationType.REMOVE; +//import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.PRELOAD_TREE_FINISH_UPDATE1; + +/** + * Batch of cache entries to optimize page memory processing. 
+ */ +public class BatchedCacheEntries { + /** */ +// private final int partId; + + /** */ + private final GridDhtLocalPartition part; + + /** */ + private final GridCacheContext cctx; + + /** */ + private final LinkedHashMap infos = new LinkedHashMap<>(); + + /** */ + private final AffinityTopologyVersion topVer; + + /** */ + private final boolean preload; + + /** */ + private List entries; + + /** */ + private int skipped; + + /** */ + public BatchedCacheEntries(AffinityTopologyVersion topVer, int partId, GridCacheContext cctx, boolean preload) { + this.topVer = topVer; + this.cctx = cctx; + this.preload = preload; + this.part = cctx.topology().localPartition(partId, topVer, true, true); + } + + /** */ + public void addEntry(KeyCacheObject key, CacheObject val, long expTime, long ttl, GridCacheVersion ver, GridDrType drType) { + // todo remove `key` duplication (Map keys() { + return infos.keySet(); + } + + /** */ + public Collection values() { + return infos.values(); + } + + /** */ +// public int part() { +// return partId; +// } + + /** */ + public GridDhtLocalPartition part() { + return part; + } + + /** */ + public GridCacheContext context() { + return cctx; + } + + /** */ + public BatchedCacheMapEntryInfo get(KeyCacheObject key) { + return infos.get(key); + } + + /** */ + public boolean preload() { + return preload; + } + + /** */ + public boolean needUpdate(KeyCacheObject key, CacheDataRow row) throws GridCacheEntryRemovedException { + BatchedCacheMapEntryInfo info = infos.get(key); + + return info.needUpdate(row); + } + + public void onRemove(KeyCacheObject key) { + // todo - remove from original collection + ++skipped; + } + + public void onError(KeyCacheObject key, IgniteCheckedException e) { + // todo - remove from original collection + ++skipped; + } + + public boolean skip(KeyCacheObject key) { + // todo + return false; + } + + public List lock() { + entries = lockEntries(infos.values(), topVer); + + return entries; + } + + public void unlock() { + unlockEntries(infos.values(), topVer); + } + + public int size() { + return infos.size() - skipped; + } + + private List lockEntries(Collection list, AffinityTopologyVersion topVer) + throws GridDhtInvalidPartitionException { +// if (req.size() == 1) { +// KeyCacheObject key = req.key(0); +// +// while (true) { +// GridDhtCacheEntry entry = entryExx(key, topVer); +// +// entry.lockEntry(); +// +// if (entry.obsolete()) +// entry.unlockEntry(); +// else +// return Collections.singletonList(entry); +// } +// } +// else { + List locked = new ArrayList<>(list.size()); + + while (true) { + for (BatchedCacheMapEntryInfo info : list) { + GridDhtCacheEntry entry = (GridDhtCacheEntry)cctx.cache().entryEx(info.key(), topVer); + + locked.add(entry); + + info.cacheEntry(entry); + } + + boolean retry = false; + + for (int i = 0; i < locked.size(); i++) { + GridCacheMapEntry entry = locked.get(i); + + if (entry == null) + continue; + + // todo ensure free space + // todo check obsolete + + entry.lockEntry(); + + if (entry.obsolete()) { + // Unlock all locked. + for (int j = 0; j <= i; j++) { + if (locked.get(j) != null) + locked.get(j).unlockEntry(); + } + + // Clear entries. + locked.clear(); + + // Retry. + retry = true; + + break; + } + } + + if (!retry) + return locked; + } +// } + } + + /** + * Releases java-level locks on cache entries + * todo carefully think about possible reorderings in locking/unlocking. + * + * @param locked Locked entries. + * @param topVer Topology version. 
+ */ + private void unlockEntries(Collection locked, AffinityTopologyVersion topVer) { + // Process deleted entries before locks release. + assert cctx.deferredDelete() : this; + + // Entries to skip eviction manager notification for. + // Enqueue entries while holding locks. + // todo Common skip list. + Collection skip = null; + + int size = locked.size(); + + try { + for (BatchedCacheMapEntryInfo info : locked) { + GridCacheMapEntry entry = info.cacheEntry(); + + if (entry != null && entry.deleted()) { + if (skip == null) + skip = U.newHashSet(locked.size()); + + skip.add(entry.key()); + } + + try { + info.updateCacheEntry(); + } catch (IgniteCheckedException e) { + skip.add(entry.key()); + } + } + } + finally { + // At least RuntimeException can be thrown by the code above when GridCacheContext is cleaned and there is + // an attempt to use cleaned resources. + // That's why releasing locks in the finally block.. + for (BatchedCacheMapEntryInfo info : locked) { + GridCacheMapEntry entry = info.cacheEntry(); + if (entry != null) + entry.unlockEntry(); + } + } + + // Try evict partitions. + for (BatchedCacheMapEntryInfo info : locked) { + GridDhtCacheEntry entry = info.cacheEntry(); + if (entry != null) + entry.onUnlock(); + } + + if (skip != null && skip.size() == size) + // Optimization. + return; + + // Must touch all entries since update may have deleted entries. + // Eviction manager will remove empty entries. + for (BatchedCacheMapEntryInfo info : locked) { + GridCacheMapEntry entry = info.cacheEntry(); + if (entry != null && (skip == null || !skip.contains(entry.key()))) + entry.touch(); + } + } + + /** */ + public class UpdateClosure implements IgniteCacheOffheapManager.OffheapInvokeAllClosure { + /** */ + private final List> resBatch = new ArrayList<>(entries.size()); + + /** */ + private final int cacheId = context().group().storeCacheIdInDataPage() ? cctx.cacheId() : CU.UNDEFINED_CACHE_ID; + + /** */ + private final int partId = part().id(); + + /** {@inheritDoc} */ + @Override public void call(@Nullable Collection> rows) throws IgniteCheckedException { + List newRows = new ArrayList<>(16); + + for (T2 t2 : rows) { + CacheDataRow oldRow = t2.get1(); + + KeyCacheObject key = t2.get2().key(); + + BatchedCacheMapEntryInfo newRowInfo = get(key); + + try { + if (newRowInfo.needUpdate(oldRow)) { + CacheDataRow newRow; + + CacheObject val = newRowInfo.value(); + + if (val != null) { + if (oldRow != null) { + // todo think about batch updates + newRow = context().offheap().dataStore(part()).createRow( + cctx, + key, + newRowInfo.value(), + newRowInfo.version(), + newRowInfo.expireTime(), + oldRow); + } + else { + CacheObjectContext coCtx = cctx.cacheObjectContext(); + // todo why we need this + val.valueBytes(coCtx); + key.valueBytes(coCtx); + + if (key.partition() == -1) + key.partition(partId); + + newRow = new DataRow(key, val, newRowInfo.version(), partId, newRowInfo.expireTime(), cacheId); + + newRows.add(newRow); + } + + IgniteTree.OperationType treeOp = oldRow != null && oldRow.link() == newRow.link() ? + NOOP : PUT; + + resBatch.add(new T3<>(treeOp, oldRow, newRow)); + } + else { + // todo we should pass key somehow to remove old row (because in particular case oldRow should not contain key) + newRow = new DataRow(key, null, null, 0, 0, 0); + + resBatch.add(new T3<>(oldRow != null ? 
REMOVE : NOOP, oldRow, newRow)); + } + } + } + catch (GridCacheEntryRemovedException e) { + onRemove(key); + } + } + + if (!newRows.isEmpty()) + context().offheap().dataStore(part()).rowStore().addRows(newRows, cctx.group().statisticsHolderData()); + } + + @Override public Collection> result() { + return resBatch; + } + + @Override public boolean apply(CacheDataRow row) { + return false; + } + } + + public static class BatchedCacheMapEntryInfo { + // todo think about remove + private final BatchedCacheEntries batch; + private final KeyCacheObject key; + private final CacheObject val; + private final long expTime; + private final long ttl; + private final GridCacheVersion ver; + private final GridDrType drType; + + private GridDhtCacheEntry entry; + + private boolean update; + + public BatchedCacheMapEntryInfo( + BatchedCacheEntries batch, + KeyCacheObject key, + CacheObject val, + long expTime, + long ttl, + GridCacheVersion ver, + GridDrType drType + ) { + this.batch = batch; + this.key = key; + this.val = val; + this.expTime = expTime; + this.ver = ver; + this.drType = drType; + this.ttl = ttl; + } + + public KeyCacheObject key() { + return key; + } + + public GridCacheVersion version() { + return ver; + } + + public CacheObject value() { + return val; + } + + public long expireTime() { + return expTime; + } + + public GridDhtCacheEntry cacheEntry() { + return entry; + } + + public void cacheEntry(GridDhtCacheEntry entry) { + this.entry = entry; + } + + public void updateCacheEntry() throws IgniteCheckedException { + if (!update) + return; + + entry.finishPreload(val, expTime, ttl, ver, batch.topVer, drType, null, batch.preload); + } + +// public void update(boolean update) { +// this.update = update; +// } + + public boolean needUpdate(CacheDataRow row) throws GridCacheEntryRemovedException { + GridCacheVersion currVer = row != null ? row.version() : entry.version(); + + GridCacheContext cctx = batch.context(); + + boolean isStartVer = cctx.versions().isStartVersion(currVer); + + boolean update0; + + if (cctx.group().persistenceEnabled()) { + if (!isStartVer) { + if (cctx.atomic()) + update0 = ATOMIC_VER_COMPARATOR.compare(currVer, version()) < 0; + else + update0 = currVer.compareTo(version()) < 0; + } + else + update0 = true; + } + else + update0 = (isStartVer && row == null); + + // todo update0 |= (!preload && deletedUnlocked()); + + update = update0; + + return update0; + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheMetricsImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheMetricsImpl.java index 8ce21c59de3d1..aa88b2e1bb04e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheMetricsImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheMetricsImpl.java @@ -1163,6 +1163,15 @@ public void onRebalanceKeyReceived() { rebalancingKeysRate.onHit(); } + /** + * Rebalance entry store callback. + */ + public void onRebalanceKeysReceived(long batchSize) { + rebalancedKeys.addAndGet(batchSize); + + rebalancingKeysRate.onHits(batchSize); + } + /** * Rebalance supply message callback. 
* diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryEx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryEx.java index 9aec3996c3204..cf8eef32c3f5a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryEx.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryEx.java @@ -805,6 +805,18 @@ public boolean initialValue(CacheObject val, GridDrType drType, boolean fromStore) throws IgniteCheckedException, GridCacheEntryRemovedException; + + public void finishPreload( + @Nullable CacheObject val, + long expTime, + long ttl, + GridCacheVersion ver, + AffinityTopologyVersion topVer, + GridDrType drType, + MvccVersion mvccVer, + boolean preload + ) throws IgniteCheckedException; + /** * Create versioned entry for this cache entry. * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java index a3eda189b8518..87d31502a188a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java @@ -3504,6 +3504,91 @@ else if (deletedUnlocked()) } } + /** {@inheritDoc} */ + @Override public void finishPreload( + @Nullable CacheObject val, + long expTime, + long ttl, + GridCacheVersion ver, + AffinityTopologyVersion topVer, + GridDrType drType, + MvccVersion mvccVer, + boolean preload + ) throws IgniteCheckedException { + boolean fromStore = false; + boolean walEnabled = !cctx.isNear() && cctx.group().persistenceEnabled() && cctx.group().walEnabled(); + + update(val, expTime, ttl, ver, true); + + boolean skipQryNtf = false; + + if (val == null) { + skipQryNtf = true; + + if (cctx.deferredDelete() && !deletedUnlocked() && !isInternal()) + deletedUnlocked(true); + } + else if (deletedUnlocked()) + deletedUnlocked(false); + + long updateCntr = 0; + + if (!preload) + updateCntr = nextPartitionCounter(topVer, true, null); + + if (walEnabled) { + if (cctx.mvccEnabled()) { + cctx.shared().wal().log(new MvccDataRecord(new MvccDataEntry( + cctx.cacheId(), + key, + val, + val == null ? DELETE : GridCacheOperation.CREATE, + null, + ver, + expTime, + partition(), + updateCntr, + mvccVer == null ? MvccUtils.INITIAL_VERSION : mvccVer + ))); + } else { + cctx.shared().wal().log(new DataRecord(new DataEntry( + cctx.cacheId(), + key, + val, + val == null ? DELETE : GridCacheOperation.CREATE, + null, + ver, + expTime, + partition(), + updateCntr + ))); + } + } + + drReplicate(drType, val, ver, topVer); + + if (!skipQryNtf) { + cctx.continuousQueries().onEntryUpdated( + key, + val, + null, + this.isInternal() || !this.context().userCache(), + this.partition(), + true, + true, + updateCntr, + null, + topVer); + } + + onUpdateFinished(updateCntr); + + if (!fromStore && cctx.store().isLocal()) { + if (val != null) + cctx.store().put(null, key, val, ver); + } + } + /** * @param cntr Updated partition counter. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManager.java index b7e8ec717fc38..ac529633edb1d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManager.java @@ -17,6 +17,7 @@ package org.apache.ignite.internal.processors.cache; +import java.util.Collection; import java.util.List; import java.util.Map; import javax.cache.Cache; @@ -34,6 +35,7 @@ import org.apache.ignite.internal.processors.cache.persistence.partstate.GroupPartitionId; import org.apache.ignite.internal.processors.cache.persistence.partstate.PartitionRecoverState; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; +import org.apache.ignite.internal.processors.cache.tree.DataRow; import org.apache.ignite.internal.processors.cache.tree.PendingEntriesTree; import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccUpdateResult; import org.apache.ignite.internal.processors.cache.tree.mvcc.search.MvccLinkAwareSearchRow; @@ -47,6 +49,8 @@ import org.apache.ignite.internal.util.lang.GridIterator; import org.apache.ignite.internal.util.lang.IgniteInClosure2X; import org.apache.ignite.lang.IgniteBiTuple; +import org.apache.ignite.lang.IgnitePredicate; +import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; /** @@ -188,6 +192,20 @@ public boolean expire(GridCacheContext cctx, IgniteInClosure2X keys, + GridDhtLocalPartition part, + OffheapInvokeAllClosure c + ) throws IgniteCheckedException; + /** * @param cctx Cache context. * @param key Key. @@ -579,6 +597,13 @@ interface OffheapInvokeClosure extends IgniteTree.InvokeClosure { @Nullable public CacheDataRow oldRow(); } + /** + * + */ + interface OffheapInvokeAllClosure extends IgniteTree.InvokeAllClosure, IgnitePredicate { +// boolean preload(); + } + /** * */ @@ -861,6 +886,14 @@ MvccUpdateResult mvccLock( */ public void invoke(GridCacheContext cctx, KeyCacheObject key, OffheapInvokeClosure c) throws IgniteCheckedException; + /** + * @param cctx Cache context. + * @param keys Keys. + * @param c Closure. + * @throws IgniteCheckedException If failed. + */ + public void invokeAll(GridCacheContext cctx, Collection keys, OffheapInvokeAllClosure c) throws IgniteCheckedException; + /** * * @param cctx Cache context. 
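For orientation, the invokeAll()/OffheapInvokeAllClosure contract introduced above is driven from GridDhtPartitionDemander.preloadEntriesBatch() later in this patch. The sketch below is not part of the patch and only restates that calling pattern in isolation; the InvokeAllUsageSketch class name is illustrative, and the checkpoint read lock (plus the cache group listener read lock) is assumed to be held by the caller, as it is in the demander.

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.processors.cache.BatchedCacheEntries;
import org.apache.ignite.internal.processors.cache.GridCacheContext;

/** Illustrative sketch only; not part of the patch. */
class InvokeAllUsageSketch {
    /**
     * Mirrors GridDhtPartitionDemander.preloadEntriesBatch() further down in this patch.
     * Assumes the checkpoint read lock and the group listener read lock are already held.
     */
    static void applyBatch(BatchedCacheEntries batch) throws IgniteCheckedException {
        GridCacheContext cctx = batch.context();

        batch.lock();   // Acquire java-level locks on every entry of the batch.

        try {
            // One data tree traversal for the whole key set; UpdateClosure decides per key
            // whether the outcome is PUT, REMOVE or NOOP and allocates new data rows in bulk.
            cctx.offheap().invokeAll(cctx, batch.keys(), batch.part(), batch.new UpdateClosure());
        }
        finally {
            batch.unlock();
        }
    }
}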
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java index 30fcb7c7631ab..59708b9b53cef 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java @@ -18,10 +18,12 @@ package org.apache.ignite.internal.processors.cache; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.NoSuchElementException; @@ -103,6 +105,7 @@ import org.apache.ignite.internal.util.lang.GridIterator; import org.apache.ignite.internal.util.lang.IgniteInClosure2X; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.T3; import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.U; @@ -447,6 +450,16 @@ private Iterator cacheData(boolean primary, boolean backup, Affi dataStore(part).invoke(cctx, key, c); } + /** {@inheritDoc} */ + @Override public void invokeAll( + GridCacheContext cctx, + Collection keys, + GridDhtLocalPartition part, + OffheapInvokeAllClosure c) + throws IgniteCheckedException { + dataStore(part).invokeAll(cctx, keys, c); + } + /** {@inheritDoc} */ @Override public void update( GridCacheContext cctx, @@ -1589,6 +1602,8 @@ void decrementSize(int cacheId) { * @param dataRow New row. * @return {@code True} if it is possible to update old row data. * @throws IgniteCheckedException If failed. + * + * todo think about this meth */ private boolean canUpdateOldRow(GridCacheContext cctx, @Nullable CacheDataRow oldRow, DataRow dataRow) throws IgniteCheckedException { @@ -1599,7 +1614,7 @@ private boolean canUpdateOldRow(GridCacheContext cctx, @Nullable CacheDataRow ol return false; // Use grp.sharedGroup() flag since it is possible cacheId is not yet set here. - boolean sizeWithCacheId = grp.sharedGroup(); +// boolean sizeWithCacheId = grp.sharedGroup(); int oldLen = oldRow.size(); @@ -1627,6 +1642,20 @@ private boolean canUpdateOldRow(GridCacheContext cctx, @Nullable CacheDataRow ol } } + + /** {@inheritDoc} */ + @Override public void invokeAll(GridCacheContext cctx, Collection keys, OffheapInvokeAllClosure c) + throws IgniteCheckedException { + int cacheId = grp.sharedGroup() ? cctx.cacheId() : CU.UNDEFINED_CACHE_ID; + + List searchRows = new ArrayList<>(keys.size()); + + for (KeyCacheObject key : keys) + searchRows.add(new SearchRow(cacheId, key)); + + invokeAll0(cctx, searchRows, c); + } + /** * @param cctx Cache context. * @param row Search row. @@ -1666,6 +1695,62 @@ private void invoke0(GridCacheContext cctx, CacheSearchRow row, OffheapInvokeClo } } + /** + * @param cctx Cache context. + * @param rows Search rows. + * @param c Closure. + * @throws IgniteCheckedException If failed. 
+ */ + private void invokeAll0(GridCacheContext cctx, List rows, OffheapInvokeAllClosure c) + throws IgniteCheckedException { + if (!busyLock.enterBusy()) + throw new NodeStoppingException("Operation has been cancelled (node is stopping)."); + + try { + assert cctx.shared().database().checkpointLockIsHeldByThread(); + +// ctx.kernalContext().diagnostic().beginTrack(PRELOAD_TREE_INVOKE); + + dataTree.invokeAll(rows, CacheDataRowAdapter.RowData.NO_KEY, c); + +// ctx.kernalContext().diagnostic().endTrack(PRELOAD_TREE_INVOKE); + + for (T3 tuple : c.result()) { + IgniteTree.OperationType opType = tuple.get1(); + + CacheDataRow oldRow = tuple.get2(); + + CacheDataRow newRow = tuple.get3(); + + switch (opType) { + case PUT: { + assert newRow != null : tuple; + + finishUpdate(cctx, newRow, oldRow); + break; + } + + case REMOVE: { + finishRemove(cctx, newRow.key(), oldRow); + + break; + } + + case NOOP: + break; + + default: + assert false : opType; + } + } + + + } + finally { + busyLock.leaveBusy(); + } + } + /** {@inheritDoc} */ @Override public CacheDataRow createRow( GridCacheContext cctx, @@ -1705,7 +1790,7 @@ private void invoke0(GridCacheContext cctx, CacheSearchRow row, OffheapInvokeClo * @param cacheId Cache id. * @return Made data row. */ - @NotNull private DataRow makeDataRow(KeyCacheObject key, CacheObject val, GridCacheVersion ver, long expireTime, + @NotNull public DataRow makeDataRow(KeyCacheObject key, CacheObject val, GridCacheVersion ver, long expireTime, int cacheId) { if (key.partition() == -1) key.partition(partId); @@ -2533,13 +2618,17 @@ private void updatePendingEntries(GridCacheContext cctx, CacheDataRow newRow, @N if (oldRow != null) { assert oldRow.link() != 0 : oldRow; - if (pendingTree() != null && oldRow.expireTime() != 0) + if (pendingTree() != null && oldRow.expireTime() != 0) { +// cctx.kernalContext().diagnostic().beginTrack(PRELOAD_PENDING_TREE_REMOVE); pendingTree().removex(new PendingRow(cacheId, oldRow.expireTime(), oldRow.link())); +// cctx.kernalContext().diagnostic().endTrack(PRELOAD_PENDING_TREE_REMOVE); + } } if (pendingTree() != null && expireTime != 0) { +// cctx.kernalContext().diagnostic().beginTrack(PRELOAD_PENDING_TREE_PUT); pendingTree().putx(new PendingRow(cacheId, expireTime, newRow.link())); - +// cctx.kernalContext().diagnostic().endTrack(PRELOAD_PENDING_TREE_PUT); hasPendingEntries = true; } } @@ -2810,6 +2899,7 @@ private void afterRowFound(@Nullable CacheDataRow row, KeyCacheObject key) throw dataTree.destroy(new IgniteInClosure() { @Override public void apply(CacheSearchRow row) { try { +// log.info("Remove row: " + row.key().hashCode() + " link " + row.link()); rowStore.removeRow(row.link(), grp.statisticsHolderData()); } catch (IgniteCheckedException e) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java index 61f1e06f9dccb..3b6b24b6f1f97 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java @@ -30,6 +30,7 @@ import java.util.concurrent.atomic.AtomicReference; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; +import org.apache.ignite.IgniteSystemProperties; import 
org.apache.ignite.cache.CacheRebalanceMode; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; @@ -40,6 +41,7 @@ import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException; import org.apache.ignite.internal.processors.affinity.AffinityAssignment; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; +import org.apache.ignite.internal.processors.cache.BatchedCacheEntries; import org.apache.ignite.internal.processors.cache.CacheEntryInfoCollection; import org.apache.ignite.internal.processors.cache.CacheGroupContext; import org.apache.ignite.internal.processors.cache.CacheMetricsImpl; @@ -79,6 +81,10 @@ import static org.apache.ignite.events.EventType.EVT_CACHE_REBALANCE_STARTED; import static org.apache.ignite.events.EventType.EVT_CACHE_REBALANCE_STOPPED; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.MOVING; +import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.DEMANDER_PROCESS_MSG; +import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.DEMANDER_PROCESS_MSG_BATCH; +import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.DEMANDER_PROCESS_MSG_SINGLE; +import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.TOTAL; import static org.apache.ignite.internal.processors.dr.GridDrType.DR_NONE; import static org.apache.ignite.internal.processors.dr.GridDrType.DR_PRELOAD; @@ -86,6 +92,16 @@ * Thread pool for requesting partitions from other nodes and populating local cache. */ public class GridDhtPartitionDemander { + /** todo explain the origin */ + private static final int BATCH_PRELOAD_THRESHOLD = 5; + + /** */ + private static final int CHECKPOINT_THRESHOLD = 200; + + /** */ + private static final boolean batchPageWriteEnabled = + IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_DATA_STORAGE_BATCH_PAGE_WRITE, false); + /** */ private final GridCacheSharedContext ctx; @@ -462,8 +478,8 @@ private void requestPartitions(final RebalanceFuture fut, GridDhtPreloaderAssign parts = fut.remaining.get(node.id()); U.log(log, "Prepared rebalancing [grp=" + grp.cacheOrGroupName() - + ", mode=" + cfg.getRebalanceMode() + ", supplier=" + node.id() + ", partitionsCount=" + parts.size() - + ", topVer=" + fut.topologyVersion() + ", parallelism=" + totalStripes + "]"); + + ", mode=" + cfg.getRebalanceMode() + ", supplier=" + node.id() + ", partitionsCount=" + parts.size() + + ", topVer=" + fut.topologyVersion() + ", parallelism=" + totalStripes + "]"); } int stripes = totalStripes; @@ -472,6 +488,8 @@ private void requestPartitions(final RebalanceFuture fut, GridDhtPreloaderAssign for (int i = 0; i < stripes; i++) stripePartitions.add(new IgniteDhtDemandedPartitionsMap()); + ctx.kernalContext().diagnostic().beginTrack(TOTAL); + // Reserve one stripe for historical partitions. 
if (parts.hasHistorical()) { stripePartitions.set(stripes - 1, new IgniteDhtDemandedPartitionsMap(parts.historicalMap(), null)); @@ -655,6 +673,8 @@ public void handleSupplyMessage( final UUID nodeId, final GridDhtPartitionSupplyMessage supplyMsg ) { + ctx.kernalContext().diagnostic().beginTrack(DEMANDER_PROCESS_MSG); + AffinityTopologyVersion topVer = supplyMsg.topologyVersion(); final RebalanceFuture fut = rebalanceFut; @@ -766,10 +786,10 @@ public void handleSupplyMessage( part.lock(); try { - Iterator infos = e.getValue().infos().iterator(); + Collection infos = e.getValue().infos(); if (grp.mvccEnabled()) - mvccPreloadEntries(topVer, node, p, infos); + mvccPreloadEntries(topVer, node, p, infos.iterator()); else preloadEntries(topVer, node, p, infos); @@ -848,6 +868,130 @@ public void handleSupplyMessage( catch (IgniteSpiException | IgniteCheckedException e) { LT.error(log, e, "Error during rebalancing [" + demandRoutineInfo(topicId, nodeId, supplyMsg) + ", err=" + e + ']'); + } finally { + ctx.kernalContext().diagnostic().endTrack(DEMANDER_PROCESS_MSG); + } + } + + /** + * todo should be removed (kept for benchamrking) + */ + public void preloadEntriesSingle(ClusterNode from, + int p, + Collection entries, + AffinityTopologyVersion topVer + ) throws IgniteCheckedException { + GridCacheContext cctx = null; + + ctx.kernalContext().diagnostic().beginTrack(DEMANDER_PROCESS_MSG_SINGLE); + try { + // Loop through all received entries and try to preload them. + for (GridCacheEntryInfo entry : entries) { + + if (cctx == null || (grp.sharedGroup() && entry.cacheId() != cctx.cacheId())) { + cctx = grp.sharedGroup() ? grp.shared().cacheContext(entry.cacheId()) : grp.singleCacheContext(); + + if (cctx == null) + continue; + else if (cctx.isNear()) + cctx = cctx.dhtCache().context(); + } + + if (!preloadEntry(from, p, entry, topVer, cctx)) { + if (log.isTraceEnabled()) + log.trace("Got entries for invalid partition during " + + "preloading (will skip) [p=" + p + ", entry=" + entry + ']'); + + break; + } + + for (GridCacheContext cctx0 : grp.caches()) { + if (cctx0.statisticsEnabled()) + cctx0.cache().metrics0().onRebalanceKeyReceived(); + } + } + } finally { + ctx.kernalContext().diagnostic().endTrack(DEMANDER_PROCESS_MSG_SINGLE); + } + } + + /** + * @param from Node which sent entry. + * @param p Partition id. + * @param entries Preloaded entries. + * @param topVer Topology version. + * + * @throws IgniteCheckedException If failed. + */ + public void preloadEntriesBatch(ClusterNode from, + int p, + Collection entries, + AffinityTopologyVersion topVer + ) throws IgniteCheckedException { + ctx.kernalContext().diagnostic().beginTrack(DEMANDER_PROCESS_MSG_BATCH); + + try { + if (entries.isEmpty()) + return; + + Map cctxMap = new HashMap<>(); + + // Map by context. + for (GridCacheEntryInfo info : entries) { + try { + GridCacheContext cctx0 = grp.sharedGroup() ? 
ctx.cacheContext(info.cacheId()) : grp.singleCacheContext(); + + if (cctx0 == null) + return; + + if (cctx0.isNear()) + cctx0 = cctx0.dhtCache().context(); + + final GridCacheContext cctx = cctx0; + + if (log.isTraceEnabled()) + log.trace("Rebalancing key [key=" + info.key() + ", part=" + p + ", node=" + from.id() + ']'); + + BatchedCacheEntries batch = cctxMap.get(cctx.cacheId()); + + if (batch == null) { + // todo lock should be called for ALL group + cctx.group().listenerLock().readLock().lock(); + + cctxMap.put(cctx.cacheId(), batch = new BatchedCacheEntries(topVer, p, cctx, true)); + } + + batch.addEntry(info.key(), info.value(), info.expireTime(), info.ttl(), info.version(), DR_PRELOAD); + } + catch (GridDhtInvalidPartitionException ignored) { + if (log.isDebugEnabled()) + log.debug("Partition became invalid during rebalancing (will ignore): " + p); + } + } + + for (BatchedCacheEntries batch : cctxMap.values()) { + assert batch.size() > BATCH_PRELOAD_THRESHOLD : batch.size(); + + GridCacheContext cctx = batch.context(); + + batch.lock(); + + try { + cctx.offheap().invokeAll(cctx, batch.keys(), batch.part(), batch.new UpdateClosure()); + } + finally { + batch.unlock(); + + cctx.group().listenerLock().readLock().unlock(); + + for (GridCacheContext cctx0 : grp.caches()) { + if (cctx0.statisticsEnabled()) + cctx0.cache().metrics0().onRebalanceKeysReceived(batch.size()); + } + } + } + } finally { + ctx.kernalContext().diagnostic().endTrack(DEMANDER_PROCESS_MSG_BATCH); } } @@ -942,14 +1086,52 @@ private void mvccPreloadEntries(AffinityTopologyVersion topVer, ClusterNode node * * @param node Node which sent entry. * @param p Partition id. - * @param infos Entries info for preload. + * @param infosCol Entries info for preload. * @param topVer Topology version. * @throws IgniteInterruptedCheckedException If interrupted. */ private void preloadEntries(AffinityTopologyVersion topVer, ClusterNode node, int p, - Iterator infos) throws IgniteCheckedException { + Collection infosCol) throws IgniteCheckedException { GridCacheContext cctx = null; + int size = infosCol.size(); + + boolean batchEnabled = + batchPageWriteEnabled && size > BATCH_PRELOAD_THRESHOLD; + + int nBatch = 0; + int total = size / CHECKPOINT_THRESHOLD; + + Iterator infos = infosCol.iterator(); + + // Loop through all received entries and try to preload them. + while (infos.hasNext()) { + ctx.database().checkpointReadLock(); + + boolean tail = (nBatch++ >= (total - 1)); + + try { + List infosBatch = new ArrayList<>(CHECKPOINT_THRESHOLD); + + for (int i = 0; i < (tail ? CHECKPOINT_THRESHOLD + (size % CHECKPOINT_THRESHOLD) : CHECKPOINT_THRESHOLD); i++) { + if (!infos.hasNext()) + break; + + GridCacheEntryInfo entry = infos.next(); + + infosBatch.add(entry); + } + + if (batchEnabled && infosBatch.size() > BATCH_PRELOAD_THRESHOLD) + preloadEntriesBatch(node, p, infosBatch, topVer); + else + preloadEntriesSingle(node, p, infosBatch, topVer); + } + finally { + ctx.database().checkpointReadUnlock(); + } + } + // Loop through all received entries and try to preload them. 
while (infos.hasNext()) { ctx.database().checkpointReadLock(); @@ -1011,6 +1193,8 @@ private boolean preloadEntry( ) throws IgniteCheckedException { assert ctx.database().checkpointLockIsHeldByThread(); +// ctx.kernalContext().diagnostic().beginTrack(PRELOAD_ENTRY); + try { GridCacheEntryEx cached = null; @@ -1227,8 +1411,20 @@ public static class RebalanceFuture extends GridFutureAdapter { this.rebalanceId = rebalanceId; ctx = grp.shared(); + +// ctx.kernalContext().diagnostic().beginTrack(TOTAL); } +// @Override protected boolean onDone(@Nullable Boolean res, @Nullable Throwable err, boolean cancel) { +// if (ctx != null) { // can be dummy +// ctx.kernalContext().diagnostic().endTrack(TOTAL); +// +// ctx.kernalContext().diagnostic().printStats(); +// } +// +// return super.onDone(res, err, cancel); +// } + /** * Dummy future. Will be done by real one. */ @@ -1387,12 +1583,18 @@ private void partitionDone(UUID nodeId, int p, boolean updateState) { int remainingRoutines = remaining.size() - 1; U.log(log, "Completed " + ((remainingRoutines == 0 ? "(final) " : "") + - "rebalancing [grp=" + grp.cacheOrGroupName() + - ", supplier=" + nodeId + - ", topVer=" + topologyVersion() + - ", progress=" + (routines - remainingRoutines) + "/" + routines + "]")); + "rebalancing [grp=" + grp.cacheOrGroupName() + + ", supplier=" + nodeId + + ", topVer=" + topologyVersion() + + ", progress=" + (routines - remainingRoutines) + "/" + routines + "," + + ", batch=" + batchPageWriteEnabled + "]")); remaining.remove(nodeId); + + ctx.kernalContext().diagnostic().endTrack(DEMANDER_PROCESS_MSG); + + ctx.kernalContext().diagnostic().endTrack(TOTAL); + ctx.kernalContext().diagnostic().printStats(); } checkIsDone(); @@ -1494,3 +1696,4 @@ private void sendRebalanceFinishedEvent() { } } } + diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java index 514f8fd6f90e3..18f55e7cb5091 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java @@ -55,6 +55,8 @@ import org.apache.ignite.spi.IgniteSpiException; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.OWNING; +import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.SUPPLIER_PROCESS_MSG; +import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.TOTAL; /** * Class for supplying partitions to demanding nodes. 
@@ -255,6 +257,10 @@ public void handleDemandMessage(int topicId, UUID nodeId, GridDhtPartitionDemand long maxBatchesCnt = grp.config().getRebalanceBatchesPrefetchCount(); if (sctx == null) { + grp.shared().kernalContext().diagnostic().beginTrack(TOTAL); + + grp.shared().kernalContext().diagnostic().beginTrack(SUPPLIER_PROCESS_MSG); + if (log.isDebugEnabled()) log.debug("Starting supplying rebalancing [" + supplyRoutineInfo(topicId, nodeId, demandMsg) + ", fullPartitions=" + S.compact(demandMsg.partitions().fullSet()) + @@ -429,6 +435,10 @@ else if (iter.isPartitionMissing(p)) { if (log.isInfoEnabled()) log.info("Finished supplying rebalancing [" + supplyRoutineInfo(topicId, nodeId, demandMsg) + "]"); + + grp.shared().kernalContext().diagnostic().endTrack(SUPPLIER_PROCESS_MSG); + grp.shared().kernalContext().diagnostic().endTrack(TOTAL); + grp.shared().kernalContext().diagnostic().printStats(); } catch (Throwable t) { if (grp.shared().kernalContext().isStopping()) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java index 042e0eaa484cb..9ba9fcd688bc4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java @@ -58,6 +58,7 @@ import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.MOVING; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.OWNING; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.RENTING; +import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.SUPPLIER_PROCESS_MSG; /** * DHT cache preloader. @@ -387,11 +388,15 @@ private List remoteOwners(int p, AffinityTopologyVersion topVer) { if (!enterBusy()) return; + grp.shared().kernalContext().diagnostic().beginTrack(SUPPLIER_PROCESS_MSG); + try { supplier.handleDemandMessage(idx, id, d); } finally { leaveBusy(); + + grp.shared().kernalContext().diagnostic().endTrack(SUPPLIER_PROCESS_MSG); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStructure.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStructure.java index 35dd3c46ee431..ab9bf86913bdd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStructure.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStructure.java @@ -17,6 +17,7 @@ package org.apache.ignite.internal.processors.cache.persistence; +import java.util.Collection; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; import org.apache.ignite.IgniteCheckedException; @@ -307,6 +308,26 @@ protected final R write( return PageHandler.writePage(pageMem, grpId, pageId, this, h, init, wal, null, arg, intArg, lockFailed, statHolder); } + /** + * @param pageId Page ID. + * @param h Handler. + * @param init IO for new page initialization or {@code null} if it is an existing page. + * @param arg Argument. + * @param lockFailed Result in case of lock failure due to page recycling. + * @param statHolder Statistics holder to track IO operations. 
+ * @return Handler result. + * @throws IgniteCheckedException If failed. + */ + protected final R write( + long pageId, + PageHandler h, + PageIO init, + Collection arg, + R lockFailed, + IoStatisticsHolder statHolder) throws IgniteCheckedException { + return PageHandler.writePageBatch(pageMem, grpId, pageId, this, h, init, wal, null, arg, lockFailed, statHolder); + } + /** * @param pageId Page ID. * @param h Handler. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java index 26a535ca17a84..bbe631426986c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java @@ -17,6 +17,7 @@ package org.apache.ignite.internal.processors.cache.persistence; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -29,6 +30,7 @@ import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicBoolean; import javax.cache.processor.EntryProcessor; +import javax.naming.OperationNotSupportedException; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.failure.FailureContext; @@ -50,12 +52,14 @@ import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdatePartitionDataRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.PartitionDestroyRecord; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; +import org.apache.ignite.internal.processors.cache.BatchedCacheEntries; import org.apache.ignite.internal.processors.cache.CacheEntryPredicate; import org.apache.ignite.internal.processors.cache.CacheGroupContext; import org.apache.ignite.internal.processors.cache.CacheObject; import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.GridCacheEntryEx; import org.apache.ignite.internal.processors.cache.GridCacheMvccEntryInfo; +import org.apache.ignite.internal.processors.cache.GridCacheEntryInfo; import org.apache.ignite.internal.processors.cache.GridCacheTtlManager; import org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManagerImpl; import org.apache.ignite.internal.processors.cache.KeyCacheObject; @@ -83,6 +87,7 @@ import org.apache.ignite.internal.processors.cache.persistence.wal.FileWALPointer; import org.apache.ignite.internal.processors.cache.tree.CacheDataRowStore; import org.apache.ignite.internal.processors.cache.tree.CacheDataTree; +import org.apache.ignite.internal.processors.cache.tree.DataRow; import org.apache.ignite.internal.processors.cache.tree.PendingEntriesTree; import org.apache.ignite.internal.processors.cache.tree.PendingRow; import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccUpdateResult; @@ -97,6 +102,7 @@ import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiTuple; +import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.MOVING; @@ -1493,7 +1499,8 @@ private CacheDataStore init0(boolean checkExists) throws 
IgniteCheckedException null, ctx.wal(), reuseRoot.pageId().pageId(), - reuseRoot.isAllocated()) { + reuseRoot.isAllocated(), + ctx.kernalContext()) { /** {@inheritDoc} */ @Override protected long allocatePageNoReuse() throws IgniteCheckedException { assert grp.shared().database().checkpointLockIsHeldByThread(); @@ -1967,7 +1974,7 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { return delegate.mvccInitialValue(cctx, key, val, ver, expireTime, mvccVer, newMvccVer); } - + /** {@inheritDoc} */ @Override public boolean mvccApplyHistoryIfAbsent( GridCacheContext cctx, @@ -2107,6 +2114,16 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { delegate.invoke(cctx, key, c); } + /** {@inheritDoc} */ + @Override public void invokeAll(GridCacheContext cctx, Collection keys, OffheapInvokeAllClosure c) + throws IgniteCheckedException { + assert ctx.database().checkpointLockIsHeldByThread(); + + CacheDataStore delegate = init0(false); + + delegate.invokeAll(cctx, keys, c); + } + /** {@inheritDoc} */ @Override public void remove(GridCacheContext cctx, KeyCacheObject key, int partId) throws IgniteCheckedException { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java index 7fc70d0b8923d..b4a6a4a219f94 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java @@ -248,13 +248,14 @@ protected void initPageMemoryDataStructures(DataStorageConfiguration dbCfg) thro boolean persistenceEnabled = memPlcCfg.isPersistenceEnabled(); CacheFreeListImpl freeList = new CacheFreeListImpl(0, - cctx.igniteInstanceName(), + memPlc.config().getName(), memMetrics, memPlc, null, persistenceEnabled ? cctx.wal() : null, 0L, - true); + true, + cctx.kernalContext()); freeListMap.put(memPlcCfg.getName(), freeList); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java index 91fd2070cc048..3e43fba9c620f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java @@ -17,6 +17,7 @@ package org.apache.ignite.internal.processors.cache.persistence; +import java.util.Collection; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.processors.cache.CacheGroupContext; @@ -27,6 +28,10 @@ import org.apache.ignite.internal.processors.query.GridQueryRowCacheCleaner; import org.apache.ignite.internal.stat.IoStatisticsHolder; +//import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.PRELOAD_OFFHEAP_INVOKE_INSERT_FREELIST; + +//import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.PRELOAD_FREELIST_REMOVE; + /** * Data store for H2 rows. 
*/ @@ -80,11 +85,12 @@ public void removeRow(long link, IoStatisticsHolder statHolder) throws IgniteChe freeList.removeDataRowByLink(link, statHolder); else { ctx.database().checkpointReadLock(); - +// ctx.kernalContext().diagnostic().beginTrack(PRELOAD_FREELIST_REMOVE); try { freeList.removeDataRowByLink(link, statHolder); } finally { +// ctx.kernalContext().diagnostic().endTrack(PRELOAD_FREELIST_REMOVE); ctx.database().checkpointReadUnlock(); } } @@ -111,6 +117,25 @@ public void addRow(CacheDataRow row, IoStatisticsHolder statHolder) throws Ignit } } + /** + * @param rows Rows. + * @throws IgniteCheckedException If failed. + */ + public void addRows(Collection rows, IoStatisticsHolder statHolder) throws IgniteCheckedException { + if (!persistenceEnabled) + freeList.insertDataRows(rows, statHolder); + else { + ctx.database().checkpointReadLock(); + + try { + freeList.insertDataRows(rows, statHolder); + } + finally { + ctx.database().checkpointReadUnlock(); + } + } + } + /** * @param link Row link. * @param row New row data. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java index 60aefb927ce6f..254c38abbaf04 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java @@ -17,9 +17,17 @@ package org.apache.ignite.internal.processors.cache.persistence.freelist; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import java.util.concurrent.atomic.AtomicReferenceArray; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.IgniteKernal; import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.PageUtils; @@ -30,6 +38,7 @@ import org.apache.ignite.internal.pagemem.wal.record.delta.DataPageUpdateRecord; import org.apache.ignite.internal.processors.cache.persistence.DataRegion; import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; +import org.apache.ignite.internal.processors.cache.persistence.IndexStorageImpl; import org.apache.ignite.internal.processors.cache.persistence.Storable; import org.apache.ignite.internal.processors.cache.persistence.evict.PageEvictionTracker; import org.apache.ignite.internal.processors.cache.persistence.tree.io.AbstractDataPageIO; @@ -42,8 +51,16 @@ import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler; import org.apache.ignite.internal.stat.IoStatisticsHolder; import org.apache.ignite.internal.stat.IoStatisticsHolderNoOp; +import org.apache.ignite.internal.util.lang.GridTuple3; +import org.apache.ignite.internal.util.typedef.T2; +import org.apache.ignite.internal.util.typedef.T3; import org.apache.ignite.internal.util.typedef.internal.U; +//import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.DEMANDER_PROCESS_MSG_BATCH_ALLOC_PAGE; +//import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.DEMANDER_PROCESS_MSG_BATCH_BIN_INSERT; +//import static 
org.apache.ignite.internal.processors.diag.DiagnosticTopics.DEMANDER_PROCESS_MSG_BATCH_BIN_PACK; +//import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.DEMANDER_PROCESS_MSG_BATCH_BIN_SEARCH; + /** */ public abstract class AbstractFreeList extends PagesList implements FreeList, ReuseList { @@ -86,6 +103,9 @@ public abstract class AbstractFreeList extends PagesList imp /** */ private final PageEvictionTracker evictionTracker; + /** */ + private final GridKernalContext ctx; + /** * */ @@ -133,12 +153,15 @@ private final class UpdateRowHandler extends PageHandler { /** */ private final PageHandler writeRow = new WriteRowHandler(); + /** */ + private final PageHandler writeRows = new WriteRowHandlerBatch(); + /** * */ - private final class WriteRowHandler extends PageHandler { - @Override public Integer run( - int cacheId, + private class WriteRowHandler extends PageHandler { + /** {@inheritDoc} */ + @Override public Integer run(int cacheId, long pageId, long page, long pageAddr, @@ -146,6 +169,33 @@ private final class WriteRowHandler extends PageHandler { Boolean walPlc, T row, int written, + IoStatisticsHolder statHolder + ) throws IgniteCheckedException { + written = run0(pageId, page, pageAddr, iox, row, written, statHolder); + + putPage((AbstractDataPageIO)iox, pageId, page, pageAddr, statHolder); + + return written; + } + + /** + * @param pageId Page ID. + * @param page Page absolute pointer. + * @param pageAddr Page address. + * @param iox IO. + * @param row Data row. + * @param written Count of bytes written. + * @param statHolder Statistics holder to track IO operations. + * @return Result. + * @throws IgniteCheckedException If failed. + */ + protected Integer run0( + long pageId, + long page, + long pageAddr, + PageIO iox, + T row, + int written, IoStatisticsHolder statHolder) throws IgniteCheckedException { AbstractDataPageIO io = (AbstractDataPageIO)iox; @@ -156,18 +206,11 @@ private final class WriteRowHandler extends PageHandler { assert oldFreeSpace > 0 : oldFreeSpace; // If the full row does not fit into this page write only a fragment. +// System.out.println(">xxx> free=" + oldFreeSpace + ", rowSize=" + rowSize + " hash=" + row.hashCode()); + written = (written == 0 && oldFreeSpace >= rowSize) ? addRow(pageId, page, pageAddr, io, row, rowSize) : addRowFragment(pageId, page, pageAddr, io, row, written, rowSize); - // Reread free space after update. - int newFreeSpace = io.getFreeSpace(pageAddr); - - if (newFreeSpace > MIN_PAGE_FREE_SPACE) { - int bucket = bucket(newFreeSpace, false); - - put(null, pageId, page, pageAddr, bucket, statHolder); - } - if (written == rowSize) evictionTracker.touchPage(pageId); @@ -185,7 +228,7 @@ private final class WriteRowHandler extends PageHandler { * @return Written size which is always equal to row size here. * @throws IgniteCheckedException If failed. */ - private int addRow( + protected int addRow( long pageId, long page, long pageAddr, @@ -225,7 +268,7 @@ private int addRow( * @return Updated written size. * @throws IgniteCheckedException If failed. */ - private int addRowFragment( + protected int addRowFragment( long pageId, long page, long pageAddr, @@ -254,6 +297,83 @@ private int addRowFragment( return written + payloadSize; } + + /** + * Put page to freelist if needed. + * + * @param iox IO. + * @param pageId Page ID. + * @param page Paege pointer. + * @param pageAddr Page address. + * @param statHolder Statistics holder to track IO operations. 
+ */ + protected void putPage( + AbstractDataPageIO iox, + long pageId, + long page, + long pageAddr, + IoStatisticsHolder statHolder + ) throws IgniteCheckedException { + // Reread free space after update. + int newFreeSpace = ((AbstractDataPageIO)iox).getFreeSpace(pageAddr); + + if (newFreeSpace > MIN_PAGE_FREE_SPACE) { + int bucket = bucket(newFreeSpace, false); + + put(null, pageId, page, pageAddr, bucket, statHolder); + } + } + } + + /** + * + */ + private class WriteRowHandlerBatch extends WriteRowHandler { + /** {@inheritDoc} */ + @Override public Integer runBatch( + int cacheId, + long pageId, + long page, + long pageAddr, + PageIO io, + Boolean walPlc, + Collection args, + IoStatisticsHolder statHolder + ) throws IgniteCheckedException { + int maxPayloadSize = pageSize() - AbstractDataPageIO.MIN_DATA_PAGE_OVERHEAD; + + AbstractDataPageIO iox = (AbstractDataPageIO)io; + + // todo !! DO NOT FORGET WAL DELTA !! + if (iox.getFreeSpace(pageAddr) == maxPayloadSize) { + // todo save links for WAL + + iox.addRows(pageMem, pageId, pageAddr, args, pageSize()); + + // todo update wal + } + else { + for (T row : args) { + assert iox.getFreeSpace(pageAddr) > 0 : iox.getFreeSpace(pageAddr); + + int size = row.size(); + + int written = size > maxPayloadSize ? + addRowFragment(pageId, page, pageAddr, iox, row, size - (size % maxPayloadSize), size) : + addRow(pageId, page, pageAddr, iox, row, size); + + assert written == size : "The object is not fully written into page: " + + "pageId=" + pageId + ", written=" + written + ", size=" + row.size(); + + evictionTracker.touchPage(pageId); + } + } + + // return page to freelist if needed + putPage((AbstractDataPageIO)io, pageId, page, pageAddr, statHolder); + + return COMPLETE; + } } /** */ @@ -345,7 +465,8 @@ public AbstractFreeList( ReuseList reuseList, IgniteWriteAheadLogManager wal, long metaPageId, - boolean initNew) throws IgniteCheckedException { + boolean initNew, + GridKernalContext ctx) throws IgniteCheckedException { super(cacheId, name, memPlc.pageMemory(), BUCKETS, wal, metaPageId); rmvRow = new RemoveRowHandler(cacheId == 0); @@ -374,6 +495,8 @@ public AbstractFreeList( this.memMetrics = memMetrics; init(metaPageId, initNew); + + this.ctx = ctx; } /** @@ -435,7 +558,14 @@ public long freeSpace() { log.info("FreeList [name=" + name + ", buckets=" + BUCKETS + ", dataPages=" + dataPages + - ", reusePages=" + bucketsSize[REUSE_BUCKET].longValue() + "]"); + ", reusePages=" + bucketsSize[REUSE_BUCKET].longValue() + "" + + ", bucket[0]=" + bucketsSize[0] + + ", bucket[1]=" + bucketsSize[1] + + ", bucket[2]=" + bucketsSize[2] + + ", bucket[3]=" + bucketsSize[3] + + ", bucket[4]=" + bucketsSize[4] + + ", bucket[5]=" + bucketsSize[5] + + "]"); } } @@ -509,6 +639,260 @@ else if (PageIdUtils.tag(pageId) != PageIdAllocator.FLAG_DATA) while (written != COMPLETE); } + /** {@inheritDoc} */ + @Override public void insertDataRows(Collection rows, IoStatisticsHolder statHolder) throws IgniteCheckedException { + // 1. split into 3 bags + // A. Large objects. + // B1. Tails of large objects + // B2. small objects + + // Max bytes per data page. 
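+ // (That is, the page payload capacity: page size minus MIN_DATA_PAGE_OVERHEAD.)
+ // Rows of at least this size are written below as full-page fragments through the regular
+ // single-row handler; only the last fragment (the tail, row.size() % maxPayloadSize) is small
+ // enough to be bin-packed onto shared pages together with the rows that fit a page outright.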
+ int maxPayloadSize = pageSize() - AbstractDataPageIO.MIN_DATA_PAGE_OVERHEAD; + + int maxRowsPerPage = IndexStorageImpl.MAX_IDX_NAME_LEN; + + // Data rows <-> count of pages needed + List largeRows = new ArrayList<>(16); + + // other objects + List regularRows = new ArrayList<>(16); + + for (T dataRow : rows) { + if (dataRow.size() < maxPayloadSize) + regularRows.add(dataRow); + else { + largeRows.add(dataRow); + + int tailSize = dataRow.size() % maxPayloadSize; + + if (tailSize > 0) + regularRows.add(dataRow); + } + } + + // Writing large objects. + for (T row : largeRows) { + int rowSize = row.size(); + + int written = 0; + + do { + if (written != 0) + memMetrics.incrementLargeEntriesPages(); + + int remaining = rowSize - written; + + long pageId; + + if (remaining >= MIN_SIZE_FOR_DATA_PAGE) + pageId = takeEmptyPage(REUSE_BUCKET, ioVersions(), statHolder); + else + break; + + AbstractDataPageIO initIo = null; + + if (pageId == 0L) { + pageId = allocateDataPage(row.partition()); + + initIo = ioVersions().latest(); + } + else if (PageIdUtils.tag(pageId) != PageIdAllocator.FLAG_DATA) + pageId = initReusedPage(pageId, row.partition(), statHolder); + else + pageId = PageIdUtils.changePartitionId(pageId, (row.partition())); + + written = write(pageId, writeRow, initIo, row, written, FAIL_I, statHolder); + + assert written != FAIL_I; // We can't fail here. + } + while (written != COMPLETE); + } + + List dataRows = new ArrayList<>(maxRowsPerPage); + + int remainPageSpace = 0; + + long pageId = 0; + + AbstractDataPageIO initIo = null; + + for (int i = 0; i < regularRows.size(); i++) { + T row = regularRows.get(i); + + boolean tail = i == (regularRows.size() - 1); + + boolean fragment = row.size() > maxPayloadSize; + + int payloadSize = fragment ? (row.size() % maxPayloadSize) + 12 : row.size() + 4; + + // There is no space left on this page. + if (((remainPageSpace - payloadSize) < 0 || dataRows.size() == maxRowsPerPage) && pageId != 0) { + int written = write(pageId, writeRows, initIo, dataRows, FAIL_I, statHolder); + + assert written == COMPLETE : written; + + initIo = null; + remainPageSpace = 0; + pageId = 0; + dataRows.clear(); + } + + dataRows.add(row); + + if (pageId == 0) { + int minBucket = bucket(payloadSize, false) + 1; + + if (payloadSize != MIN_SIZE_FOR_DATA_PAGE) { + for (int b = REUSE_BUCKET - 1; b >= minBucket; b--) { + pageId = takeEmptyPage(b, ioVersions(), statHolder); + + if (pageId != 0L) { + remainPageSpace = (b << shift) + 4; // todo explain "+4"? + + break; + } + } + } + + if (pageId == 0) + pageId = takeEmptyPage(REUSE_BUCKET, ioVersions(), statHolder); + + if (pageId == 0) { + pageId = allocateDataPage(row.partition()); + + initIo = ioVersions().latest(); + } + else if (PageIdUtils.tag(pageId) != PageIdAllocator.FLAG_DATA) + pageId = initReusedPage(pageId, row.partition(), statHolder); + else + pageId = PageIdUtils.changePartitionId(pageId, row.partition()); + + if (remainPageSpace == 0) + remainPageSpace = maxPayloadSize; + } + + remainPageSpace -= payloadSize; + + if (tail) { + int written; + + if (dataRows.size() == 1) { + written = fragment ? 
row.size() - (rows.size() % maxPayloadSize) : 0; + + written = write(pageId, writeRows, initIo, row, written, FAIL_I, statHolder); + } else + written = write(pageId, writeRows, initIo, dataRows, FAIL_I, statHolder); + + assert written == COMPLETE : written; + } + + } + +// for (T2, Integer> bin : bins) { +// long pageId = 0; +// +// int remaining = bin.get2(); +// +//// ctx.diagnostic().beginTrack(DEMANDER_PROCESS_MSG_BATCH_BIN_SEARCH); +// +// int buck = bucket(remaining, false) + 1; +// +// for (int b = remaining < MIN_SIZE_FOR_DATA_PAGE ? buck : REUSE_BUCKET; b < BUCKETS; b++) { +// pageId = takeEmptyPage(b, ioVersions(), statHolder); +// +// if (pageId != 0L) +// break; +// } +// +//// ctx.diagnostic().endTrack(DEMANDER_PROCESS_MSG_BATCH_BIN_SEARCH); +// +//// ctx.diagnostic().beginTrack(DEMANDER_PROCESS_MSG_BATCH_ALLOC_PAGE); +// +// T row = bin.get1().get(0); +// +// AbstractDataPageIO initIo = null; +// +// if (pageId == 0) { +// pageId = allocateDataPage(row.partition()); +// +// initIo = ioVersions().latest(); +// } +// else if (PageIdUtils.tag(pageId) != PageIdAllocator.FLAG_DATA) +// pageId = initReusedPage(pageId, row.partition(), statHolder); +// else +// pageId = PageIdUtils.changePartitionId(pageId, row.partition()); +// +//// ctx.diagnostic().endTrack(DEMANDER_PROCESS_MSG_BATCH_ALLOC_PAGE); +//// +//// ctx.diagnostic().beginTrack(DEMANDER_PROCESS_MSG_BATCH_BIN_INSERT); +// +// int written = write(pageId, writeRows, initIo, bin.get1(), FAIL_I, statHolder); +// +//// ctx.diagnostic().endTrack(DEMANDER_PROCESS_MSG_BATCH_BIN_INSERT); +// +// assert written == COMPLETE : written; +// } + } + + // todo move out + // todo experiment with "bestfit" approach + private List, Integer>> binPack(List> rows, int cap) { + // Initialize result (Count of bins) + int cnt = 0; + + // Result. + List, Integer>> bins = new ArrayList<>(rows.size()); + + // Create an array to store remaining space in bins + // there can be at most n bins + int[] remains = new int[rows.size()]; + + // Place items one by one + for (int i = (rows.size() - 1); i >= 0; i--) { + // Find the first bin that can accommodate weight[i] + int j; + + T3 t3 = rows.get(i); + + int size = t3.get1() + (t3.get3() ? 12 : 4); // + inner pointer + pageId (for head of large row) + + for (j = 0; j < cnt; j++) { + if (remains[j] >= size) { + remains[j] -= size; + + T row = rows.get(i).get2(); + + bins.get(j).get1().add(row); + bins.get(j).set2(bins.get(j).get2() + size); + +// binMap.put(row, j); + + break; + } + } + + // If no bin could accommodate sizes[i]. + if (j == cnt) { + remains[cnt] = cap - size; + + // todo remove magic number + List list = new ArrayList<>(16); + + bins.add(new T2<>(list, size)); + + T row = rows.get(i).get2(); + + list.add(row); + +// binMap.put(row, j); + + cnt++; + } + } + + return bins; + } + /** * @param reusedPageId Reused page id. * @param partId Partition id. @@ -579,7 +963,7 @@ private long initReusedPage(long reusedPageId, int partId, long nextLink = write(pageId, rmvRow, bag, itemId, FAIL_L, statHolder); - assert nextLink != FAIL_L; // Can't fail here. + assert nextLink != FAIL_L : pageId; // Can't fail here. 
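The binPack(..) helper above is not yet called from the active insertDataRows(..) path (its call sites remain commented out); it is a plain first-fit heuristic: each row goes into the first bin that still has enough remaining capacity, and a new bin is opened only when none fits. A self-contained sketch of the same heuristic over plain sizes, with illustrative names that are not part of the patch:

import java.util.ArrayList;
import java.util.List;

class FirstFitSketch {
    /** E.g. firstFit(new int[] {60, 50, 40, 30}, 100) yields two bins: [60, 40] and [50, 30]. */
    static List<List<Integer>> firstFit(int[] sizes, int cap) {
        List<List<Integer>> bins = new ArrayList<>();
        List<Integer> remains = new ArrayList<>();

        for (int size : sizes) {
            int bin = -1;

            // Find the first bin that can accommodate this size.
            for (int j = 0; j < bins.size(); j++) {
                if (remains.get(j) >= size) {
                    bin = j;

                    break;
                }
            }

            // No existing bin fits: open a new one.
            if (bin == -1) {
                bins.add(new ArrayList<>());
                remains.add(cap);

                bin = bins.size() - 1;
            }

            bins.get(bin).add(size);
            remains.set(bin, remains.get(bin) - size);
        }

        return bins;
    }
}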
while (nextLink != 0L) { memMetrics.decrementLargeEntriesPages(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeListImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeListImpl.java index 625c0b15d9d56..beab554d978dc 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeListImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeListImpl.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.processors.cache.persistence.freelist; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; @@ -46,8 +47,8 @@ public class CacheFreeListImpl extends AbstractFreeList { */ public CacheFreeListImpl(int cacheId, String name, DataRegionMetricsImpl regionMetrics, DataRegion dataRegion, ReuseList reuseList, - IgniteWriteAheadLogManager wal, long metaPageId, boolean initNew) throws IgniteCheckedException { - super(cacheId, name, regionMetrics, dataRegion, reuseList, wal, metaPageId, initNew); + IgniteWriteAheadLogManager wal, long metaPageId, boolean initNew, GridKernalContext ctx) throws IgniteCheckedException { + super(cacheId, name, regionMetrics, dataRegion, reuseList, wal, metaPageId, initNew, ctx); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/FreeList.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/FreeList.java index e28d421bdf063..f49addab848aa 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/FreeList.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/FreeList.java @@ -17,11 +17,15 @@ package org.apache.ignite.internal.processors.cache.persistence.freelist; +import java.util.Collection; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.internal.processors.cache.persistence.Storable; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler; +import org.apache.ignite.internal.processors.cache.tree.DataRow; import org.apache.ignite.internal.stat.IoStatisticsHolder; +import org.apache.ignite.lang.IgniteClosure; +import org.apache.ignite.lang.IgniteInClosure; /** */ @@ -32,6 +36,12 @@ public interface FreeList { */ public void insertDataRow(T row, IoStatisticsHolder statHolder) throws IgniteCheckedException; + /** + * @param rows Rows. + * @throws IgniteCheckedException If failed. + */ + public void insertDataRows(Collection rows, IoStatisticsHolder statHolder) throws IgniteCheckedException; + /** * @param link Row link. * @param row New row data. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java index 05efb405c2b38..53d805282c4c8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java @@ -666,7 +666,7 @@ public class FreeListImpl extends AbstractFreeList { FreeListImpl(int cacheId, String name, DataRegionMetricsImpl regionMetrics, DataRegion dataRegion, ReuseList reuseList, IgniteWriteAheadLogManager wal, long metaPageId, boolean initNew) throws IgniteCheckedException { - super(cacheId, name, regionMetrics, dataRegion, reuseList, wal, metaPageId, initNew); + super(cacheId, name, regionMetrics, dataRegion, reuseList, wal, metaPageId, initNew, cctx.kernalContext()); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java index 54d9816bcb7e2..01508b407e762 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java @@ -19,11 +19,14 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.Comparator; +import java.util.Iterator; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import javax.naming.OperationNotSupportedException; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteSystemProperties; @@ -46,6 +49,8 @@ import org.apache.ignite.internal.pagemem.wal.record.delta.RemoveRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.ReplaceRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.SplitExistingPageRecord; +import org.apache.ignite.internal.processors.cache.KeyCacheObject; +import org.apache.ignite.internal.processors.cache.persistence.CacheSearchRow; import org.apache.ignite.internal.processors.cache.persistence.DataStructure; import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusInnerIO; @@ -58,6 +63,7 @@ import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandlerWrapper; +import org.apache.ignite.internal.processors.cache.tree.DataRow; import org.apache.ignite.internal.processors.failure.FailureProcessor; import org.apache.ignite.internal.stat.IoStatisticsHolder; import org.apache.ignite.internal.stat.IoStatisticsHolderNoOp; @@ -72,6 +78,7 @@ import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteInClosure; import org.jetbrains.annotations.Nullable; +import sun.reflect.generics.reflectiveObjects.NotImplementedException; import static org.apache.ignite.IgniteSystemProperties.IGNITE_BPLUS_TREE_LOCK_RETRIES; import static 
org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree.Bool.DONE; @@ -1822,6 +1829,10 @@ public final boolean removex(L row) throws IgniteCheckedException { } } + @Override public void invokeAll(List rows, Object z, InvokeAllClosure c) throws IgniteCheckedException { + throw new UnsupportedOperationException(); + } + /** * @param x Invoke operation. * @param pageId Page ID. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/AbstractDataPageIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/AbstractDataPageIO.java index 78752bbfefc84..4b2d4030fe581 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/AbstractDataPageIO.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/AbstractDataPageIO.java @@ -31,6 +31,7 @@ import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler; import org.apache.ignite.internal.util.GridStringBuilder; import org.apache.ignite.internal.util.typedef.internal.SB; +import org.apache.ignite.internal.util.typedef.internal.U; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.internal.util.GridUnsafe.bufferAddress; @@ -810,7 +811,7 @@ public void addRow( final int rowSize, final int pageSize ) throws IgniteCheckedException { - assert rowSize <= getFreeSpace(pageAddr) : "can't call addRow if not enough space for the whole row"; + assert rowSize <= getFreeSpace(pageAddr) : "can't call addRow if not enough space for the whole row (free=" + getFreeSpace(pageAddr) + ", required=" + rowSize + ")"; int fullEntrySize = getPageEntrySize(rowSize, SHOW_PAYLOAD_LEN | SHOW_ITEM); @@ -977,6 +978,75 @@ public void addRowFragment( addRowFragment(null, pageId, pageAddr, 0, 0, lastLink, null, payload, pageSize); } + /** + * @param pageMem Page memory. + * @param pageId Page ID to use to construct a link. + * @param pageAddr Page address. + * @param rows Data rows. + * @param pageSize Page size. + * @throws IgniteCheckedException If failed. + */ + public void addRows( + final PageMemory pageMem, + final long pageId, + final long pageAddr, + final Collection rows, + final int pageSize + ) throws IgniteCheckedException { + // todo code duplication (3 times!) + int maxPayloadSIze = pageSize - MIN_DATA_PAGE_OVERHEAD; + int dataOff = pageSize; + int cnt = 0; + int written = 0; + + for (T row : rows) { + boolean fragment = row.size() > maxPayloadSIze; + + int payloadSize = row.size() % maxPayloadSIze; + + assert payloadSize <= getFreeSpace(pageAddr) : "can't call addRow if not enough space for the whole row"; + + int sizeSetup = fragment ? SHOW_PAYLOAD_LEN | SHOW_LINK | SHOW_ITEM : SHOW_PAYLOAD_LEN | SHOW_ITEM; + + int fullEntrySize = getPageEntrySize(payloadSize, sizeSetup); + + written += fullEntrySize; + + dataOff -= (fullEntrySize - ITEM_SIZE); + + if (fragment) { + ByteBuffer buf = pageMem.pageBuffer(pageAddr); + + buf.position(dataOff); + + buf.putShort((short)(payloadSize | FRAGMENTED_FLAG)); + buf.putLong(row.link()); + + // todo is it 0? 
+ writeFragmentData(row, buf, 0, payloadSize); + } + else + writeRowData(pageAddr, dataOff, payloadSize, row, true); + + setItem(pageAddr, cnt, directItemFromOffset(dataOff)); + + assert checkIndex(cnt) : cnt; + assert getIndirectCount(pageAddr) <= getDirectCount(pageAddr); + + setLinkByPageId(row, pageId, cnt); + + ++cnt; + } + + setDirectCount(pageAddr, cnt); + + setFirstEntryOffset(pageAddr, dataOff, pageSize); + + // Update free space. If number of indirect items changed, then we were able to reuse an item slot. + // + (getIndirectCount(pageAddr) != indirectCnt ? ITEM_SIZE : 0) + setRealFreeSpace(pageAddr, getRealFreeSpace(pageAddr) - written, pageSize); + } + /** * Adds maximum possible fragment of the given row to this data page and sets respective link to the row. * @@ -1112,6 +1182,7 @@ private int insertItem(long pageAddr, int dataOff, int directCnt, int indirectCn setItem(pageAddr, directCnt, directItemFromOffset(dataOff)); setDirectCount(pageAddr, directCnt + 1); + assert getDirectCount(pageAddr) == directCnt + 1; return directCnt; // Previous directCnt will be our itemId. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/PageHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/PageHandler.java index 5ab1bf38dbc18..72302bf36ecad 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/PageHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/PageHandler.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.processors.cache.persistence.tree.util; import java.nio.ByteBuffer; +import java.util.Collection; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.PageSupport; @@ -70,6 +71,32 @@ public abstract R run( ) throws IgniteCheckedException; + /** + * @param cacheId Cache ID. + * @param pageId Page ID. + * @param page Page absolute pointer. + * @param pageAddr Page address. + * @param io IO. + * @param walPlc Full page WAL record policy. + * @param args Arguments. + * @param statHolder Statistics holder to track IO operations. + * @return Result. + * @throws IgniteCheckedException If failed. + */ + public R runBatch( + int cacheId, + long pageId, + long page, + long pageAddr, + PageIO io, + Boolean walPlc, + Collection args, + IoStatisticsHolder statHolder + ) throws IgniteCheckedException { + // todo + throw new UnsupportedOperationException(); + } + /** * @param cacheId Cache ID. * @param pageId Page ID. @@ -308,6 +335,74 @@ public static R writePage( } } + /** + * @param pageMem Page memory. + * @param grpId Group ID. + * @param pageId Page ID. + * @param lsnr Lock listener. + * @param h Handler. + * @param init IO for new page initialization or {@code null} if it is an existing page. + * @param wal Write ahead log. + * @param walPlc Full page WAL record policy. + * @param args Argument. + * @param lockFailed Result in case of lock failure due to page recycling. + * @param statHolder Statistics holder to track IO operations. + * @return Handler result. + * @throws IgniteCheckedException If failed. 
+ */ + public static R writePageBatch( + PageMemory pageMem, + int grpId, + final long pageId, + PageLockListener lsnr, + PageHandler h, + PageIO init, + IgniteWriteAheadLogManager wal, + Boolean walPlc, + Collection args, + R lockFailed, + IoStatisticsHolder statHolder + ) throws IgniteCheckedException { + boolean releaseAfterWrite = true; + + long page = pageMem.acquirePage(grpId, pageId, statHolder); + + try { + long pageAddr = writeLock(pageMem, grpId, pageId, page, lsnr, false); + + if (pageAddr == 0L) + return lockFailed; + + boolean ok = false; + + try { + if (init != null) { + // It is a new page and we have to initialize it. + doInitPage(pageMem, grpId, pageId, page, pageAddr, init, wal); + walPlc = FALSE; + } + else + init = PageIO.getPageIO(pageAddr); + + R res = h.runBatch(grpId, pageId, page, pageAddr, init, walPlc, args, statHolder); + + ok = true; + + return res; + } + finally { + assert PageIO.getCrc(pageAddr) == 0; //TODO GG-11480 + + if (releaseAfterWrite = h.releaseAfterWrite(grpId, pageId, page, pageAddr, null, 0)) + writeUnlock(pageMem, grpId, pageId, page, pageAddr, lsnr, walPlc, ok); + } + } + finally { + if (releaseAfterWrite) + pageMem.releasePage(grpId, pageId, page); + } + } + /** * @param pageMem Page memory. * @param grpId Group ID. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java index 0396c3e43a76a..23ccf2aceb92a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java @@ -61,6 +61,7 @@ import org.apache.ignite.internal.processors.continuous.GridContinuousProcessor; import org.apache.ignite.internal.processors.datastreamer.DataStreamProcessor; import org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor; +import org.apache.ignite.internal.processors.diag.DiagnosticProcessor; import org.apache.ignite.internal.processors.failure.FailureProcessor; import org.apache.ignite.internal.processors.hadoop.HadoopHelper; import org.apache.ignite.internal.processors.hadoop.HadoopProcessorAdapter; @@ -507,6 +508,11 @@ protected IgniteConfiguration prepareIgniteConfiguration() { return null; } + /** {@inheritDoc} */ + @Override public DiagnosticProcessor diagnostic() { + return null; + } + /** {@inheritDoc} */ @Override public void printMemoryStats() { } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java index ae5f7dfb986f4..563aa6d8febd6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java @@ -137,6 +137,8 @@ import static org.apache.ignite.internal.processors.cache.query.GridCacheQueryType.SQL; import static org.apache.ignite.internal.processors.cache.query.GridCacheQueryType.SQL_FIELDS; import static org.apache.ignite.internal.processors.cache.query.GridCacheQueryType.TEXT; +//import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.PRELOAD_INDEXING_REMOVE; 
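Note that the base PageHandler.runBatch(..) added earlier in this patch simply throws UnsupportedOperationException, so writePageBatch(..) above is only usable with handlers that override it (WriteRowHandlerBatch here). A rough sketch of such a batch-aware handler, assuming the signatures introduced by this patch; the class name and its trivial behaviour are illustrative only, not part of the change:

import java.util.Collection;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.processors.cache.persistence.Storable;
import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO;
import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler;
import org.apache.ignite.internal.stat.IoStatisticsHolder;

class CountingBatchHandler<T extends Storable> extends PageHandler<T, Integer> {
    /** Single-row path, not used by writePageBatch(..). */
    @Override public Integer run(int cacheId, long pageId, long page, long pageAddr, PageIO io,
        Boolean walPlc, T row, int intArg, IoStatisticsHolder statHolder) throws IgniteCheckedException {
        return 0;
    }

    /** Batch path: runs once for the whole collection while the page write lock is held. */
    @Override public Integer runBatch(int cacheId, long pageId, long page, long pageAddr, PageIO io,
        Boolean walPlc, Collection<T> args, IoStatisticsHolder statHolder) throws IgniteCheckedException {
        return args.size();
    }
}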
+//import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.PRELOAD_INDEXING_STORE; /** * Query and index manager. @@ -388,6 +390,8 @@ public void store(CacheDataRow newRow, @Nullable CacheDataRow prevRow, if (!enterBusy()) throw new NodeStoppingException("Operation has been cancelled (node is stopping)."); +// cctx.kernalContext().diagnostic().beginTrack(PRELOAD_INDEXING_STORE); + try { if (isIndexingSpiEnabled()) { CacheObjectContext coctx = cctx.cacheObjectContext(); @@ -403,6 +407,8 @@ public void store(CacheDataRow newRow, @Nullable CacheDataRow prevRow, qryProc.store(cctx, newRow, prevRow, prevRowAvailable); } finally { +// cctx.kernalContext().diagnostic().endTrack(PRELOAD_INDEXING_STORE); + invalidateResultCache(); leaveBusy(); @@ -422,6 +428,8 @@ public void remove(KeyCacheObject key, @Nullable CacheDataRow prevRow) if (!enterBusy()) return; // Ignore index update when node is stopping. +// cctx.kernalContext().diagnostic().beginTrack(PRELOAD_INDEXING_REMOVE); + try { if (isIndexingSpiEnabled()) { Object key0 = unwrapIfNeeded(key, cctx.cacheObjectContext()); @@ -434,6 +442,8 @@ public void remove(KeyCacheObject key, @Nullable CacheDataRow prevRow) qryProc.remove(cctx, prevRow); } finally { +// cctx.kernalContext().diagnostic().endTrack(PRELOAD_INDEXING_REMOVE); + invalidateResultCache(); leaveBusy(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java index b3c1c69e66319..05043d66ca935 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java @@ -17,6 +17,9 @@ package org.apache.ignite.internal.processors.cache.tree; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.pagemem.PageUtils; import org.apache.ignite.internal.pagemem.store.PageStore; @@ -45,6 +48,8 @@ import org.apache.ignite.internal.stat.IoStatisticsHolder; import org.apache.ignite.internal.util.GridUnsafe; import org.apache.ignite.internal.util.lang.GridCursor; +import org.apache.ignite.internal.util.typedef.T2; +import org.apache.ignite.internal.util.typedef.T3; import org.apache.ignite.internal.util.typedef.internal.CU; import static java.lang.Boolean.FALSE; @@ -327,6 +332,134 @@ public CacheDataRowStore rowStore() { return rowStore; } + /** {@inheritDoc} */ + @Override public void invokeAll(List rows, Object z1, InvokeAllClosure c) throws IgniteCheckedException { + checkDestroyed(); + + int cnt = rows.size(); + + assert cnt > 0 : cnt; + + // todo No algorithm this is draft implementation only for check that closure is working properly + CacheSearchRow lower = rows.get(0); + CacheSearchRow upper = rows.get(cnt - 1); + + List> batch = new ArrayList<>(cnt); + + Iterator rowItr = rows.iterator(); + + assert lower.key().hashCode() <= upper.key().hashCode() : "lower=" + lower.key().hashCode() + ", upper=" + upper.key().hashCode(); + + GridCursor cur = find(lower, upper, CacheDataRowAdapter.RowData.FULL); + + CacheSearchRow lastSearchRow = null; + KeyCacheObject newKey = null; + + while (rowItr.hasNext() && cur.next()) { + CacheDataRow oldRow = cur.get(); + KeyCacheObject oldKey = oldRow.key(); + + while (rowItr.hasNext() && (newKey == null || newKey.hashCode() <= oldKey.hashCode())) { + if 
(newKey != null && newKey.hashCode() == oldKey.hashCode()) { + while (newKey.hashCode() == oldKey.hashCode()) { + + if (newKey.equals(oldKey)) + batch.add(new T2<>(oldRow, lastSearchRow)); + else + batch.add(new T2<>(null, lastSearchRow)); + + if (!rowItr.hasNext()) + break; + + lastSearchRow = rowItr.next(); + newKey = lastSearchRow.key(); + } + } + else { + if (lastSearchRow != null) + batch.add(new T2<>(null, lastSearchRow)); + + if (!rowItr.hasNext()) + break; + + lastSearchRow = rowItr.next(); + newKey = lastSearchRow.key(); + } + } + } + + while (rowItr.hasNext()) + batch.add(new T2<>(null, rowItr.next())); + + // todo call on insertion point + c.call(batch); + + // todo + for (T3 t3 : c.result()) { + OperationType oper = t3.get1(); + CacheDataRow oldRow = t3.get2(); + CacheDataRow newRow = t3.get3(); + + if (oper == OperationType.PUT) + put(newRow); + else + if (oper == OperationType.REMOVE) + remove(oldRow); + } + +// while (cur.next()) { +// T t = cur.get(); +// +// +// } + +// InvokeAll x = new InvokeAll(row, z, c); + +// try { +// for (;;) { +// x.init(); +// +// Result res = invokeDown(x, x.rootId, 0L, 0L, x.rootLvl); +// +// switch (res) { +// case RETRY: +// case RETRY_ROOT: +// checkInterrupted(); +// +// continue; +// +// default: +// if (!x.isFinished()) { +// res = x.tryFinish(); +// +// if (res == RETRY || res == RETRY_ROOT) { +// checkInterrupted(); +// +// continue; +// } +// +// assert x.isFinished(): res; +// } +// +// return; +// } +// } +// } +// catch (UnregisteredClassException | UnregisteredBinaryTypeException e) { +// throw e; +// } +// catch (IgniteCheckedException e) { +// throw new IgniteCheckedException("Runtime failure on search row: " + row, e); +// } +// catch (RuntimeException | AssertionError e) { +// throw new CorruptedTreeException("Runtime failure on search row: " + row, e); +// } +// finally { +// x.releaseAll(); +// checkDestroyed(); +// } + } + /** {@inheritDoc} */ @Override protected int compare(BPlusIO iox, long pageAddr, int idx, CacheSearchRow row) throws IgniteCheckedException { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImpl.java index 5e3a0c825ffc3..8d7f7db27e4a8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImpl.java @@ -53,6 +53,7 @@ import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteInterruptedException; import org.apache.ignite.IgniteLogger; +import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.cache.CacheMode; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.cluster.ClusterTopologyException; @@ -73,6 +74,7 @@ import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.affinity.GridAffinityProcessor; +import org.apache.ignite.internal.processors.cache.BatchedCacheEntries; import org.apache.ignite.internal.processors.cache.CacheObject; import org.apache.ignite.internal.processors.cache.CacheObjectContext; import org.apache.ignite.internal.processors.cache.DynamicCacheDescriptor; @@ -129,6 +131,10 @@ public class DataStreamerImpl implements IgniteDataStreamer, Delayed /** Per thread buffer size. 
*/ private int bufLdrSzPerThread = DFLT_PER_THREAD_BUFFER_SIZE; + /** */ + private static final boolean batchPageWriteEnabled = + IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_DATA_STORAGE_BATCH_PAGE_WRITE, false); + /** * Thread buffer map: on each thread there are future and list of entries which will be streamed after filling * thread batch. @@ -136,7 +142,7 @@ public class DataStreamerImpl implements IgniteDataStreamer, Delayed private final Map threadBufMap = new ConcurrentHashMap<>(); /** Isolated receiver. */ - private static final StreamReceiver ISOLATED_UPDATER = new IsolatedUpdater(); + private static final StreamReceiver ISOLATED_UPDATER = new IsolatedUpdater();//batchPageWriteEnabled ? new OptimizedIsolatedUpdater() : new IsolatedUpdater(); /** Amount of permissions should be available to continue new data processing. */ private static final int REMAP_SEMAPHORE_PERMISSIONS_COUNT = Integer.MAX_VALUE; @@ -2332,6 +2338,205 @@ else if (ttl == CU.TTL_NOT_CHANGED) } } + /** + * Isolated batch receiver which only loads entry initial value. + * + * todo + */ + protected static class OptimizedIsolatedUpdater extends IsolatedUpdater { + /** */ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override public void receive( + IgniteCache cache, + Collection> entries + ) { + IgniteCacheProxy proxy = (IgniteCacheProxy)cache; + + GridCacheAdapter internalCache = proxy.context().cache(); + + if (internalCache.context().mvccEnabled() || internalCache.isNear() || internalCache.context().isLocal() || entries.size() < 10) { // todo threshold + super.receive(cache, entries); + + return; + } + +// if (internalCache.isNear()) +// internalCache = internalCache.context().near().dht(); + + GridCacheContext cctx = internalCache.context(); + + GridDhtTopologyFuture topFut = cctx.shared().exchange().lastFinishedFuture(); + + AffinityTopologyVersion topVer = topFut.topologyVersion(); + + GridCacheVersion ver = cctx.versions().isolatedStreamerVersion(); + + long ttl = CU.TTL_ETERNAL; + long expiryTime = CU.EXPIRE_TIME_ETERNAL; + + ExpiryPolicy plc = cctx.expiry(); + + Collection reservedParts = new HashSet<>(); + Collection ignoredParts = new HashSet<>(); + + Map batchMap = new HashMap<>(); + + try { +// log.info("Received " + entries.size()); + + for (Entry e : entries) { +// cctx.shared().database().checkpointReadLock(); + + try { + e.getKey().finishUnmarshal(cctx.cacheObjectContext(), cctx.deploy().globalLoader()); + + BatchedCacheEntries batch = null; + + if (plc != null) { + ttl = CU.toTtl(plc.getExpiryForCreation()); + + if (ttl == CU.TTL_ZERO) + continue; + else if (ttl == CU.TTL_NOT_CHANGED) + ttl = 0; + + expiryTime = CU.toExpireTime(ttl); + } + + // todo kill duplication + int p = cctx.affinity().partition(e.getKey()); + + if (ignoredParts.contains(p)) + continue; + + if (!reservedParts.contains(p)) { + GridDhtLocalPartition part = cctx.topology().localPartition(p, topVer, true); + + if (!part.reserve()) { + ignoredParts.add(p); + + continue; + } + else { + // We must not allow to read from RENTING partitions. + if (part.state() == GridDhtPartitionState.RENTING) { + part.release(); + + ignoredParts.add(p); + + continue; + } + + reservedParts.add(p); + } + } + + /// + batch = batchMap.computeIfAbsent(p, v -> new BatchedCacheEntries(topVer, p, cctx, false)); + + boolean primary = cctx.affinity().primaryByKey(cctx.localNode(), e.getKey(), topVer); + + batch.addEntry(e.getKey(), e.getValue(), expiryTime, ttl, ver, primary ? 
GridDrType.DR_LOAD : GridDrType.DR_PRELOAD); + + +// if (topFut != null) { +// Throwable err = topFut.validateCache(cctx, false, false, entry.key(), null); +// +// if (err != null) +// throw new IgniteCheckedException(err); +// } + +// boolean primary = cctx.affinity().primaryByKey(cctx.localNode(), entry.key(), topVer); +// +// entry.initialValue(e.getValue(), +// ver, +// ttl, +// expiryTime, +// false, +// topVer, +// primary ? GridDrType.DR_LOAD : GridDrType.DR_PRELOAD, +// false); +// +// entry.touch(topVer); +// +// CU.unwindEvicts(cctx); +// +// entry.onUnlock(); +// } + } + catch (GridDhtInvalidPartitionException ignored) { + ignoredParts.add(cctx.affinity().partition(e.getKey())); + } +// catch (GridCacheEntryRemovedException ignored) { +// // No-op. +// } + catch (IgniteCheckedException ex) { + IgniteLogger log = cache.unwrap(Ignite.class).log(); + + U.error(log, "Failed to set initial value for cache entry: " + e, ex); + + throw new IgniteException("Failed to set initial value for cache entry.", ex); + } +// finally { +//// cctx.shared().database().checkpointReadUnlock(); +// } + } + + cctx.shared().database().checkpointReadLock(); + + try { + for (BatchedCacheEntries b : batchMap.values()) { + b.lock(); + try { + // todo topFut.validateCache + + cctx.offheap().invokeAll(b.context(), b.keys(), b.part(), b.new UpdateClosure()); + //cctx.offheap().updateBatch(batch); + + + } finally { + b.unlock(); + } + } + } + catch (IgniteCheckedException e) { + // todo handle exceptions properly + IgniteLogger log = cache.unwrap(Ignite.class).log(); + + U.error(log, "Failed to set initial value for cache entry.", e); + + throw new IgniteException("Failed to set initial value for cache entry.", e); + } + finally { + cctx.shared().database().checkpointReadUnlock(); + } + + } + finally { + for (Integer part : reservedParts) { + GridDhtLocalPartition locPart = cctx.topology().localPartition(part, topVer, false); + + assert locPart != null : "Evicted reserved partition: " + locPart; + + locPart.release(); + } + + try { + if (!cctx.isNear() && cctx.shared().wal() != null) + cctx.shared().wal().flush(null, false); + } + catch (IgniteCheckedException e) { + U.error(log, "Failed to write preloaded entries into write-ahead log.", e); + + throw new IgniteException("Failed to write preloaded entries into write-ahead log.", e); + } + } + } + } + + /** * Key object wrapper. Using identity equals prevents slow down in case of hash code collision. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/diag/DiagnosticProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/diag/DiagnosticProcessor.java new file mode 100644 index 0000000000000..c3f9ce5c7e1aa --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/diag/DiagnosticProcessor.java @@ -0,0 +1,166 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.diag; + +import java.util.Comparator; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.LongAdder; +import java.util.stream.Collectors; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.processors.GridProcessorAdapter; +import org.apache.ignite.internal.util.typedef.internal.U; + +import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.TOTAL; + +/** + * General rebalance diagnostic processing API + */ +public class DiagnosticProcessor extends GridProcessorAdapter { + /** */ + private final ConcurrentMap timings = new ConcurrentHashMap<>(); + + /** */ + private final ConcurrentMap counts = new ConcurrentHashMap<>(); + + /** */ + private final ConcurrentMap tracks = new ConcurrentHashMap<>(); + + /** */ + private volatile boolean enabled; + + /** + * @param ctx Context. + */ + public DiagnosticProcessor(GridKernalContext ctx) { + super(ctx); + } + + /** {@inheritDoc} */ + @Override public void start() throws IgniteCheckedException { + for (DiagnosticTopics topics : DiagnosticTopics.values()) { + timings.put(topics.name(), new LongAdder()); + + counts.put(topics.name(), new LongAdder()); + } + + U.quietAndInfo(log, "DiagnosticProcessor started"); + } + + /** {@inheritDoc} */ + @Override public void stop(boolean cancel) throws IgniteCheckedException { + super.stop(cancel); + + resetCounts(); + } + + /** */ + public void beginTrack(DiagnosticTopics topic) { + if (TOTAL == topic) + enabled = true; + + if (!enabled) + return; + + beginTrack(topic.name()); + } + + /** */ + private void beginTrack(String topic) { + tracks.putIfAbsent(topic, U.currentTimeMillis()); + } + + /** */ + public void endTrack(DiagnosticTopics topic) { + if (!enabled) + return; + + if (TOTAL == topic) + enabled = false; + + endTrack(topic.name()); + } + + /** */ + public void timeTrack(DiagnosticTopics topic, long time) { + if (!enabled) + return; + + if (TOTAL == topic) + enabled = false; + + timings.get(topic.name()).add(time); + } + + /** */ + private void endTrack(String topic) { + Long value = tracks.remove(topic); + + if (value == null) + return; + + timings.get(topic).add(U.currentTimeMillis() - value); + counts.get(topic).increment(); + } + + /** */ + public synchronized void printStats() { + long total = timings.get(TOTAL.name()).longValue(); + + StringBuilder buf = new StringBuilder(); + + String out = timings.entrySet() + .stream() + .filter(e -> e.getValue().longValue() != 0) + .sorted(Comparator.comparingInt(o -> DiagnosticTopics.valueOf(o.getKey()).ordinal())) + .map(e -> String.format("# %s : %s ms : %.2f : %s", + DiagnosticTopics.valueOf(e.getKey()).desc(), + e.getValue().longValue(), + ( ((double)e.getValue().longValue()) / total * 100), + counts.get(e.getKey()).longValue())) + .collect(Collectors.joining("\n")); + + buf.append("\n# Diagnostic processor info: \n" + out); + + resetCounts(); + + if (!tracks.isEmpty()) { + String str = 
tracks.entrySet() + .stream() + .map(e -> "# " + DiagnosticTopics.valueOf(e.getKey()).desc() + " : " + (e.getValue() - U.currentTimeMillis())) + .collect(Collectors.joining("\n")); + + buf.append("\n# Unfinished tracks: \n" + str); + } + + log.info(buf.toString()); + + tracks.clear(); + } + + /** */ + public synchronized void resetCounts() { + for (Map.Entry e : timings.entrySet()) + e.getValue().reset(); + + for (Map.Entry c : counts.entrySet()) + c.getValue().reset(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/diag/DiagnosticTopics.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/diag/DiagnosticTopics.java new file mode 100644 index 0000000000000..5588ee707541d --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/diag/DiagnosticTopics.java @@ -0,0 +1,77 @@ +package org.apache.ignite.internal.processors.diag; + +import java.util.HashMap; +import java.util.Map; + +/** + * + */ +public enum DiagnosticTopics { + /** Root. */ + +// /** GridDhtPartitionDemander#preloadEntry(..) */ +// PRELOAD_ENTRY("# # preload on demander"), +// /** GridCacheMapEntry#storeValue(..) */ +// PRELOAD_OFFHEAP_INVOKE("# # # offheap().invoke(..)"), +// +// PRELOAD_OFFHEAP_INVOKE_INSERT_FREELIST("# # # freeList.insertDataRow"), + + +// /** CacheDataStoreImpl#invoke0(..) */ +// PRELOAD_TREE_INVOKE("# # # # dataTree.invoke(..)"), +// /** rowStore.addRow(..) */ +// PRELOAD_TREE_ADD_ROW("# # # # # FreeList.insertDataRow(..)"), +// /** */ +// PRELOAD_TREE_FINISH_UPDATE("# # # # CacheDataStoreImpl.finishUpdate(..)"), +// /** CacheDataStoreImpl.finishUpdate(..) */ +// PRELOAD_INDEXING_STORE("# # # # # indexing().store(..)"), +// /** CacheDataStoreImpl.finishUpdate(..) */ +// PRELOAD_PENDING_TREE_REMOVE("# # # # # pendingTree().removex(..)"), +// /** CacheDataStoreImpl.finishUpdate(..) */ +// PRELOAD_PENDING_TREE_PUT("# # # # # pendingTree().putx(..)"), +// /** CacheDataStoreImpl#finishRemove(..) */ +// PRELOAD_INDEXING_REMOVE("# # # # finishRemove -> indexing().remove(..)"), +// /** CacheDataStoreImpl#finishRemove(..) 
*/ +// PRELOAD_FREELIST_REMOVE("# # # # finishRemove -> freeList.removeDataRowByLink(..)"), +// /** */ +// PRELOAD_UPDATED("# # # ttl().addTrackedEntry(..)"), +// /** */ +// PRELOAD_ON_WAL_LOG("# # # wal.log(..)"), +// /** */ +// PRELOAD_ON_ENTRY_UPDATED("# # # continuousQueries().onEntryUpdated(..)"), +// +// SEND_DEMAND("# message serialization"), +// SEND_RECEIVE("# network delay between nodes"), +// DEMAND_MSG_SEND("# # demand message send"), +// SUPPLY_MSG_SEND("# # supply message send"), + SUPPLIER_PROCESS_MSG("# make batch on supplier handleDemandMessage(..)"), + + DEMANDER_PROCESS_MSG_SINGLE("# # demander process single"), +// DEMANDER_PROCESS_MSG_BATCH_BIN_SEARCH("# # # # # demander search freelist"), +// DEMANDER_PROCESS_MSG_BATCH_BIN_PACK("# # # # # demander process binPack"), +// DEMANDER_PROCESS_MSG_BATCH_BIN_INSERT("# # # # # demander process insert"), +// DEMANDER_PROCESS_MSG_BATCH_ALLOC_PAGE("# # # # # demander alloc page"), +// PRELOAD_OFFHEAP_BATCH_FIND("# # # # # demander find"), +// PRELOAD_OFFHEAP_BATCH_INSERT("# # # # demander rowStore.freeList().insertBatch"), +// PRELOAD_OFFHEAP_BATCH_TREE_INSERT("# # # # demander dataTree.putx"), +// DEMANDER_PROCESS_MSG_BATCH_LOCK("# # # batch lock"), +// DEMANDER_PROCESS_MSG_BATCH_UNLOCK("# # # batch unlock"), +// DEMANDER_PROCESS_MSG_BATCH_UPDATE("# # # demander batch update"), + DEMANDER_PROCESS_MSG_BATCH("# # demander process batch"), + + DEMANDER_PROCESS_MSG("# demander handleSupplyMessage(..)"), + TOTAL("# cache rebalance total"); + + /** */ + private String desc; + + /** */ + DiagnosticTopics(String desc) { + this.desc = desc; + } + + /** */ + public String desc() { + return desc; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteTree.java b/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteTree.java index 9e854d28f6cb0..12d1a6d3918dc 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteTree.java @@ -17,8 +17,12 @@ package org.apache.ignite.internal.util; +import java.util.Collection; +import java.util.List; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.util.lang.GridCursor; +import org.apache.ignite.internal.util.typedef.T2; +import org.apache.ignite.internal.util.typedef.T3; import org.jetbrains.annotations.Nullable; /** @@ -42,6 +46,14 @@ public interface IgniteTree { */ public void invoke(L key, Object x, InvokeClosure c) throws IgniteCheckedException; + /** + * @param keys Keys. + * @param x Implementation specific argument, {@code null} always means that we need a full detached data row. + * @param c Closure. + * @throws IgniteCheckedException If failed. + */ + public void invokeAll(List keys, Object x, InvokeAllClosure c) throws IgniteCheckedException; + /** * Returns the value to which the specified key is mapped, or {@code null} if this tree contains no mapping for the * key. @@ -130,6 +142,25 @@ interface InvokeClosure { OperationType operationType(); } + /** + * T found row + * L search row + */ + interface InvokeAllClosure { + /** + * + * @param rows Old row -> new row + * @throws IgniteCheckedException If failed. 
+ */ + void call(@Nullable Collection> rows) throws IgniteCheckedException; + + /** + * + * @return operation, old row, new row + */ + Collection> result(); + } + /** * */ diff --git a/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java b/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java index 571f0fd234508..7038227add8d4 100755 --- a/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java @@ -70,9 +70,11 @@ import org.apache.ignite.internal.IgniteInterruptedCheckedException; import org.apache.ignite.internal.IgniteKernal; import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException; +import org.apache.ignite.internal.managers.communication.GridIoMessage; import org.apache.ignite.internal.managers.discovery.IgniteDiscoverySpi; import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener; import org.apache.ignite.internal.managers.eventstorage.HighPriorityListener; +import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionSupplyMessage; import org.apache.ignite.internal.util.GridConcurrentFactory; import org.apache.ignite.internal.util.GridSpinReadWriteLock; import org.apache.ignite.internal.util.future.GridFinishedFuture; @@ -160,6 +162,7 @@ import static org.apache.ignite.events.EventType.EVT_NODE_LEFT; import static org.apache.ignite.failure.FailureType.CRITICAL_ERROR; import static org.apache.ignite.failure.FailureType.SYSTEM_WORKER_TERMINATION; +//import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.SUPPLY_MSG_SEND; import static org.apache.ignite.internal.util.nio.GridNioSessionMetaKey.SSL_META; import static org.apache.ignite.plugin.extensions.communication.Message.DIRECT_TYPE_SIZE; import static org.apache.ignite.spi.communication.tcp.internal.TcpCommunicationConnectionCheckFuture.SES_FUT_META; @@ -838,6 +841,18 @@ else if (connKey.dummy()) { else c = NOOP; +// if (msg instanceof GridIoMessage) { +// GridIoMessage msg0 = (GridIoMessage)msg; +// +// Message msg1 = msg0.message(); +// +// if (msg1 instanceof GridDhtPartitionSupplyMessage) { +// +//// ((IgniteEx)ignite).context().diagnostic().timeTrack(SUPPLY_MSG_SEND, (U.currentTimeMillis() - ((GridDhtPartitionSupplyMessage)msg1).timestamp())); +// +// } +// } + notifyListener(connKey.nodeId(), msg, c); } } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheTestEntryEx.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheTestEntryEx.java index d6e59af335283..6512649565235 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheTestEntryEx.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheTestEntryEx.java @@ -718,6 +718,12 @@ void recheckLock() { return false; } + /** @inheritDoc */ + @Override public void finishPreload(@Nullable CacheObject val, long expTime, long ttl, GridCacheVersion ver, + AffinityTopologyVersion topVer, GridDrType drType, MvccVersion mvccVer, boolean preload) { + assert false; + } + /** @inheritDoc */ @Override public GridCacheVersionedEntryEx versionedEntry(final boolean keepBinary) throws IgniteCheckedException { return null; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListImplSelfTest.java 
b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListImplSelfTest.java index ef74bcb9494e5..8b35374fbe865 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListImplSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListImplSelfTest.java @@ -360,7 +360,7 @@ protected FreeList createFreeList(int pageSize) throws Exception { DataRegion dataRegion = new DataRegion(pageMem, plcCfg, regionMetrics, new NoOpPageEvictionTracker()); - return new CacheFreeListImpl(1, "freelist", regionMetrics, dataRegion, null, null, metaPageId, true); + return new CacheFreeListImpl(1, "freelist", regionMetrics, dataRegion, null, null, metaPageId, true, null); } /** diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/FreeListPreloadWithBatchUpdatesTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/FreeListPreloadWithBatchUpdatesTest.java new file mode 100644 index 0000000000000..1d7eb57d0aa66 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/FreeListPreloadWithBatchUpdatesTest.java @@ -0,0 +1,389 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.ignite.internal.processors.database; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ThreadLocalRandom; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteDataStreamer; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.cluster.BaselineNode; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.configuration.WALMode; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.IgniteInterruptedCheckedException; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState; +import org.apache.ignite.internal.util.typedef.PA; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.After; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import static org.apache.ignite.IgniteSystemProperties.IGNITE_DATA_STORAGE_BATCH_PAGE_WRITE; +import static org.apache.ignite.IgniteSystemProperties.IGNITE_PDS_WAL_REBALANCE_THRESHOLD; +import static org.junit.Assert.assertArrayEquals; + +/** + * + */ +@RunWith(Parameterized.class) +public class FreeListPreloadWithBatchUpdatesTest extends GridCommonAbstractTest { + /** */ + private static final int HDR_SIZE = 8 + 32; + + /** */ + private static final long DEF_REG_SIZE = 6 * 1024 * 1024 * 1024L; + + /** */ + private static final String DEF_CACHE_NAME = "some-cache"; + + /** */ + @Parameterized.Parameters(name = "with atomicity={0} and persistence={1}") + public static Iterable setup() { + return Arrays.asList(new Object[][]{ + {CacheAtomicityMode.ATOMIC, false}, + {CacheAtomicityMode.ATOMIC, true}, +// {CacheAtomicityMode.TRANSACTIONAL, false}, +// {CacheAtomicityMode.TRANSACTIONAL, true}, +// {CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT, false}, +// {CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT, true} + }); + } + + /** */ + @Parameterized.Parameter(0) + public CacheAtomicityMode cacheAtomicityMode; + + /** */ + @Parameterized.Parameter(1) + public boolean persistence; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + DataRegionConfiguration def = new DataRegionConfiguration(); + def.setInitialSize(3400 * 1024 * 1024L); + def.setMaxSize(DEF_REG_SIZE); + def.setPersistenceEnabled(persistence); + + DataStorageConfiguration storeCfg = new DataStorageConfiguration(); + + storeCfg.setDefaultDataRegionConfiguration(def); + + if (persistence) { + storeCfg.setWalMode(WALMode.LOG_ONLY); + storeCfg.setMaxWalArchiveSize(Integer.MAX_VALUE); + } + + cfg.setDataStorageConfiguration(storeCfg); + + return cfg; + } + + /** + * + */ + @Before + public void before() 
throws Exception { + cleanPersistenceDir(); + + System.setProperty(IGNITE_DATA_STORAGE_BATCH_PAGE_WRITE, "true"); + } + + /** + * + */ + @After + public void after() throws Exception { + stopAllGrids(); + + cleanPersistenceDir(); + + System.clearProperty(IGNITE_PDS_WAL_REBALANCE_THRESHOLD); + System.clearProperty(IGNITE_DATA_STORAGE_BATCH_PAGE_WRITE); + } + + /** + * + */ + @Test + public void testBatchRebalance() throws Exception { + Ignite node = startGrid(0); + + node.cluster().active(true); + + node.cluster().baselineAutoAdjustEnabled(false); + + node.createCache(ccfg()); + + int cnt = 100_000; + int minSize = 0; + int maxSize = 2048; + int start = 0; + + log.info("Loading " + cnt + " random entries per " + minSize + " - " + maxSize + " bytes."); + + Map srcMap = new HashMap<>(); + + for (int i = start; i < start + cnt; i++) { + int size = minSize + ThreadLocalRandom.current().nextInt(maxSize - minSize); + + byte[] obj = new byte[size]; + + srcMap.put(i, obj); + } + + try (IgniteDataStreamer streamer = node.dataStreamer(DEF_CACHE_NAME)) { + streamer.addData(srcMap); + } + + srcMap.put(1, new byte[65536]); + + node.cache(DEF_CACHE_NAME).put(1, new byte[65536]); + + log.info("Done"); + + IgniteCache cache = node.cache(DEF_CACHE_NAME); + + if (persistence) + node.cluster().active(false); + + final IgniteEx node2 = startGrid(1); + + if (persistence) { + List list = new ArrayList<>(node.cluster().currentBaselineTopology()); + + list.add(node2.localNode()); + + node.cluster().active(true); + + node.cluster().setBaselineTopology(list); + } + + log.info("await rebalance"); + + awaitRebalance(node2, DEF_CACHE_NAME); + + U.sleep(2_000); + + node.close(); + + log.info("Verification on node2"); + + validateCacheEntries(node2.cache(DEF_CACHE_NAME), srcMap); + + if (persistence) { + node2.close(); + + Ignite ignite = startGrid(1); + + ignite.cluster().active(true); + + log.info("Validate entries after restart"); + + validateCacheEntries(ignite.cache(DEF_CACHE_NAME), srcMap); + } + } + + /** + * + */ + @Test + public void testBatchHistoricalRebalance() throws Exception { + if (!persistence) + return; + + // TODO https://issues.apache.org/jira/browse/IGNITE-7384 + // http://apache-ignite-developers.2346864.n4.nabble.com/Historical-rebalance-td38380.html + if (cacheAtomicityMode == CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT) + return; + + System.setProperty(IGNITE_PDS_WAL_REBALANCE_THRESHOLD, "100"); + + Ignite node = startGrids(2); + + node.cluster().active(true); + + IgniteCache cache = node.createCache(ccfg()); + + int cnt = 10_000; + + log.info("Loading " + cnt + " random entries."); + + Map srcMap = new HashMap<>(); + + for (int i = 0; i < cnt; i++) { + byte[] obj = new byte[ThreadLocalRandom.current().nextInt(1024)]; + + srcMap.put(i, obj); + } + + try (IgniteDataStreamer streamer = node.dataStreamer(DEF_CACHE_NAME)) { + streamer.addData(srcMap); + } + + forceCheckpoint(); + + log.info("Stopping node #2."); + + grid(1).close(); + + log.info("Updating values on node #1."); + + for (int i = 100; i < 1000; i++) { + if (i % 33 == 0) { + cache.remove(i); + + srcMap.remove(i); + } + else { + byte[] bytes = new byte[512]; + + Arrays.fill(bytes, (byte)1); + + srcMap.put(i, bytes); + cache.put(i, bytes); + } + } + + forceCheckpoint(); + + log.info("Starting node #2."); + + IgniteEx node2 = startGrid(1); + + log.info("Await rebalance on node #2."); + + awaitRebalance(node2, DEF_CACHE_NAME); + + log.info("Stop node #1."); + + node.close(); + + validateCacheEntries(node2.cache(DEF_CACHE_NAME), srcMap); + } + 
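+    /**
+     * A minimal illustrative helper (hypothetical, for reference only): both rebalance tests above fill
+     * the cache through a data streamer with random-sized byte arrays before triggering rebalance, and
+     * that duplicated loading step could be factored out roughly as sketched below. The helper name,
+     * signature and size bounds are assumptions, not part of this patch.
+     */
+    private Map<Integer, byte[]> streamRandomEntries(Ignite node, int cnt, int maxSize) {
+        Map<Integer, byte[]> srcMap = new HashMap<>();
+
+        // Generate values of random size (maxSize must be positive), mirroring the loading loops above.
+        for (int i = 0; i < cnt; i++)
+            srcMap.put(i, new byte[ThreadLocalRandom.current().nextInt(maxSize)]);
+
+        // Stream the whole map into the cache under test and return it for later validation.
+        try (IgniteDataStreamer<Integer, byte[]> streamer = node.dataStreamer(DEF_CACHE_NAME)) {
+            streamer.addData(srcMap);
+        }
+
+        return srcMap;
+    }
+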
+ /** */ + @Test + @Ignore + public void checkStreamer() throws Exception { + Ignite node = startGrids(4); + + node.cluster().active(true); + + IgniteCache cache = node.createCache(ccfg(8, CacheMode.REPLICATED)); + + awaitPartitionMapExchange(); + + int cnt = 1024; + + try (IgniteDataStreamer streamer = node.dataStreamer(DEF_CACHE_NAME)) { + + for (int i = 0; i < cnt; i++) + streamer.addData(String.valueOf(i), new byte[128]); + } + + log.info("Sleep"); + + U.sleep(5_000); + + assert GridTestUtils.waitForCondition(() -> { + return cache.size() == cnt; + }, 10_000); + + for (int i = 0; i < cnt; i++) + assertTrue(cache.get(String.valueOf(i)).length == 128); + } + + /** + * @param node Ignite node. + * @param name Cache name. + */ + private void awaitRebalance(IgniteEx node, String name) throws IgniteInterruptedCheckedException { + boolean ok = GridTestUtils.waitForCondition(new PA() { + @Override public boolean apply() { + for ( GridDhtLocalPartition part : node.context().cache().cache(name).context().group().topology().localPartitions()) { + if (part.state() != GridDhtPartitionState.OWNING) + return false; + } + + return true; + } + }, 60_000); + + U.sleep(1000); + + assertTrue(ok); + } + + /** + * @param cache Cache. + * @param map Map. + */ + @SuppressWarnings("unchecked") + private void validateCacheEntries(IgniteCache cache, Map map) { + log.info("Cache validation: " + map.size()); + + assertEquals(map.size(), cache.size()); + + for (Map.Entry e : map.entrySet()) { + String idx = "idx=" + e.getKey(); + + byte[] bytes = (byte[])cache.get(e.getKey()); + + assertNotNull(idx, bytes); + + assertEquals(idx + ": length not equal", e.getValue().length, bytes.length); + + assertArrayEquals(idx, e.getValue(), bytes); + } + } + + /** + * @return Cache configuration. + */ + private CacheConfiguration ccfg() { + return ccfg(1, CacheMode.REPLICATED); + } + + /** + * @return Cache configuration. 
+ */ + private CacheConfiguration ccfg(int parts, CacheMode mode) { + return new CacheConfiguration(DEF_CACHE_NAME) + .setAffinity(new RendezvousAffinityFunction(false, parts)) + .setCacheMode(mode) + .setAtomicityMode(cacheAtomicityMode); + } +} From 89d6aac2bec19004fb3b9c972d22a3581775e915 Mon Sep 17 00:00:00 2001 From: pereslegin-pa Date: Sat, 16 Mar 2019 22:44:59 +0300 Subject: [PATCH 2/7] Removed Diagnostic processor --- .../ignite/internal/GridKernalContext.java | 6 +- .../internal/GridKernalContextImpl.java | 15 +- .../apache/ignite/internal/IgniteKernal.java | 3 - .../preloader/GridDhtPartitionDemander.java | 155 ++++++---------- .../preloader/GridDhtPartitionSupplier.java | 9 - .../dht/preloader/GridDhtPreloader.java | 7 +- .../reader/StandaloneGridKernalContext.java | 6 - .../processors/diag/DiagnosticProcessor.java | 166 ------------------ .../processors/diag/DiagnosticTopics.java | 77 -------- 9 files changed, 62 insertions(+), 382 deletions(-) delete mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/diag/DiagnosticProcessor.java delete mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/diag/DiagnosticTopics.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java index ead2402741614..b7b95c3d8b315 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java @@ -43,10 +43,10 @@ import org.apache.ignite.internal.processors.cluster.ClusterProcessor; import org.apache.ignite.internal.processors.cluster.GridClusterStateProcessor; import org.apache.ignite.internal.processors.compress.CompressionProcessor; +import org.apache.ignite.internal.processors.configuration.distributed.DistributedConfigurationProcessor; import org.apache.ignite.internal.processors.continuous.GridContinuousProcessor; import org.apache.ignite.internal.processors.datastreamer.DataStreamProcessor; import org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor; -import org.apache.ignite.internal.processors.diag.DiagnosticProcessor; import org.apache.ignite.internal.processors.failure.FailureProcessor; import org.apache.ignite.internal.processors.hadoop.HadoopHelper; import org.apache.ignite.internal.processors.hadoop.HadoopProcessorAdapter; @@ -55,7 +55,6 @@ import org.apache.ignite.internal.processors.job.GridJobProcessor; import org.apache.ignite.internal.processors.jobmetrics.GridJobMetricsProcessor; import org.apache.ignite.internal.processors.marshaller.GridMarshallerMappingProcessor; -import org.apache.ignite.internal.processors.configuration.distributed.DistributedConfigurationProcessor; import org.apache.ignite.internal.processors.metastorage.DistributedMetaStorage; import org.apache.ignite.internal.processors.odbc.ClientListenerProcessor; import org.apache.ignite.internal.processors.platform.PlatformProcessor; @@ -484,9 +483,6 @@ public interface GridKernalContext extends Iterable { */ public FailureProcessor failure(); - /** */ - public DiagnosticProcessor diagnostic(); - /** * Print grid kernal memory stats (sizes of internal structures, etc.). 
* diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java index 148b700a9227c..7e4bf5a098d61 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java @@ -48,8 +48,6 @@ import org.apache.ignite.internal.managers.failover.GridFailoverManager; import org.apache.ignite.internal.managers.indexing.GridIndexingManager; import org.apache.ignite.internal.managers.loadbalancer.GridLoadBalancerManager; -import org.apache.ignite.internal.processors.diag.DiagnosticProcessor; -import org.apache.ignite.internal.processors.service.ServiceProcessorAdapter; import org.apache.ignite.internal.processors.affinity.GridAffinityProcessor; import org.apache.ignite.internal.processors.authentication.IgniteAuthenticationProcessor; import org.apache.ignite.internal.processors.cache.CacheConflictResolutionManager; @@ -62,6 +60,7 @@ import org.apache.ignite.internal.processors.cluster.ClusterProcessor; import org.apache.ignite.internal.processors.cluster.GridClusterStateProcessor; import org.apache.ignite.internal.processors.compress.CompressionProcessor; +import org.apache.ignite.internal.processors.configuration.distributed.DistributedConfigurationProcessor; import org.apache.ignite.internal.processors.continuous.GridContinuousProcessor; import org.apache.ignite.internal.processors.datastreamer.DataStreamProcessor; import org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor; @@ -73,7 +72,6 @@ import org.apache.ignite.internal.processors.job.GridJobProcessor; import org.apache.ignite.internal.processors.jobmetrics.GridJobMetricsProcessor; import org.apache.ignite.internal.processors.marshaller.GridMarshallerMappingProcessor; -import org.apache.ignite.internal.processors.configuration.distributed.DistributedConfigurationProcessor; import org.apache.ignite.internal.processors.metastorage.DistributedMetaStorage; import org.apache.ignite.internal.processors.nodevalidation.DiscoveryNodeValidationProcessor; import org.apache.ignite.internal.processors.odbc.ClientListenerProcessor; @@ -88,6 +86,7 @@ import org.apache.ignite.internal.processors.schedule.IgniteScheduleProcessorAdapter; import org.apache.ignite.internal.processors.security.GridSecurityProcessor; import org.apache.ignite.internal.processors.segmentation.GridSegmentationProcessor; +import org.apache.ignite.internal.processors.service.ServiceProcessorAdapter; import org.apache.ignite.internal.processors.session.GridTaskSessionProcessor; import org.apache.ignite.internal.processors.subscription.GridInternalSubscriptionProcessor; import org.apache.ignite.internal.processors.task.GridTaskProcessor; @@ -434,9 +433,6 @@ public class GridKernalContextImpl implements GridKernalContext, Externalizable /** Failure processor. */ private FailureProcessor failureProc; - /** */ - private DiagnosticProcessor diagProc; - /** Recovery mode flag. Flag is set to {@code false} when discovery manager started. 
*/ private boolean recoveryMode = true; @@ -602,8 +598,6 @@ else if (comp instanceof GridEncryptionManager) else if (comp instanceof FailureProcessor) failureProc = (FailureProcessor)comp; - else if (comp instanceof DiagnosticProcessor) - diagProc = (DiagnosticProcessor)comp; else if (comp instanceof GridTaskProcessor) taskProc = (GridTaskProcessor)comp; else if (comp instanceof GridJobProcessor) @@ -1226,11 +1220,6 @@ void disconnected(boolean disconnected) { return failureProc; } - /** {@inheritDoc} */ - @Override public DiagnosticProcessor diagnostic() { - return diagProc; - } - /** {@inheritDoc} */ @Override public Thread.UncaughtExceptionHandler uncaughtExceptionHandler() { return hnd; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java index cb6ba9aa81e69..9462c50194bf7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java @@ -145,7 +145,6 @@ import org.apache.ignite.internal.processors.continuous.GridContinuousProcessor; import org.apache.ignite.internal.processors.datastreamer.DataStreamProcessor; import org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor; -import org.apache.ignite.internal.processors.diag.DiagnosticProcessor; import org.apache.ignite.internal.processors.failure.FailureProcessor; import org.apache.ignite.internal.processors.hadoop.Hadoop; import org.apache.ignite.internal.processors.hadoop.HadoopProcessorAdapter; @@ -977,8 +976,6 @@ public void start( startProcessor(new FailureProcessor(ctx)); - startProcessor(new DiagnosticProcessor(ctx)); - startProcessor(new PoolProcessor(ctx)); // Closure processor should be started before all others diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java index 3b6b24b6f1f97..1cbe345045b3f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java @@ -81,10 +81,6 @@ import static org.apache.ignite.events.EventType.EVT_CACHE_REBALANCE_STARTED; import static org.apache.ignite.events.EventType.EVT_CACHE_REBALANCE_STOPPED; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.MOVING; -import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.DEMANDER_PROCESS_MSG; -import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.DEMANDER_PROCESS_MSG_BATCH; -import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.DEMANDER_PROCESS_MSG_SINGLE; -import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.TOTAL; import static org.apache.ignite.internal.processors.dr.GridDrType.DR_NONE; import static org.apache.ignite.internal.processors.dr.GridDrType.DR_PRELOAD; @@ -488,8 +484,6 @@ private void requestPartitions(final RebalanceFuture fut, GridDhtPreloaderAssign for (int i = 0; i < stripes; i++) stripePartitions.add(new IgniteDhtDemandedPartitionsMap()); - ctx.kernalContext().diagnostic().beginTrack(TOTAL); - // Reserve one stripe for historical partitions. 
if (parts.hasHistorical()) { stripePartitions.set(stripes - 1, new IgniteDhtDemandedPartitionsMap(parts.historicalMap(), null)); @@ -673,8 +667,6 @@ public void handleSupplyMessage( final UUID nodeId, final GridDhtPartitionSupplyMessage supplyMsg ) { - ctx.kernalContext().diagnostic().beginTrack(DEMANDER_PROCESS_MSG); - AffinityTopologyVersion topVer = supplyMsg.topologyVersion(); final RebalanceFuture fut = rebalanceFut; @@ -868,8 +860,6 @@ public void handleSupplyMessage( catch (IgniteSpiException | IgniteCheckedException e) { LT.error(log, e, "Error during rebalancing [" + demandRoutineInfo(topicId, nodeId, supplyMsg) + ", err=" + e + ']'); - } finally { - ctx.kernalContext().diagnostic().endTrack(DEMANDER_PROCESS_MSG); } } @@ -883,35 +873,29 @@ public void preloadEntriesSingle(ClusterNode from, ) throws IgniteCheckedException { GridCacheContext cctx = null; - ctx.kernalContext().diagnostic().beginTrack(DEMANDER_PROCESS_MSG_SINGLE); - try { - // Loop through all received entries and try to preload them. - for (GridCacheEntryInfo entry : entries) { - - if (cctx == null || (grp.sharedGroup() && entry.cacheId() != cctx.cacheId())) { - cctx = grp.sharedGroup() ? grp.shared().cacheContext(entry.cacheId()) : grp.singleCacheContext(); - - if (cctx == null) - continue; - else if (cctx.isNear()) - cctx = cctx.dhtCache().context(); - } + // Loop through all received entries and try to preload them. + for (GridCacheEntryInfo entry : entries) { + if (cctx == null || (grp.sharedGroup() && entry.cacheId() != cctx.cacheId())) { + cctx = grp.sharedGroup() ? grp.shared().cacheContext(entry.cacheId()) : grp.singleCacheContext(); + + if (cctx == null) + continue; + else if (cctx.isNear()) + cctx = cctx.dhtCache().context(); + } - if (!preloadEntry(from, p, entry, topVer, cctx)) { - if (log.isTraceEnabled()) - log.trace("Got entries for invalid partition during " + - "preloading (will skip) [p=" + p + ", entry=" + entry + ']'); + if (!preloadEntry(from, p, entry, topVer, cctx)) { + if (log.isTraceEnabled()) + log.trace("Got entries for invalid partition during " + + "preloading (will skip) [p=" + p + ", entry=" + entry + ']'); - break; - } + break; + } - for (GridCacheContext cctx0 : grp.caches()) { - if (cctx0.statisticsEnabled()) - cctx0.cache().metrics0().onRebalanceKeyReceived(); - } + for (GridCacheContext cctx0 : grp.caches()) { + if (cctx0.statisticsEnabled()) + cctx0.cache().metrics0().onRebalanceKeyReceived(); } - } finally { - ctx.kernalContext().diagnostic().endTrack(DEMANDER_PROCESS_MSG_SINGLE); } } @@ -928,70 +912,64 @@ public void preloadEntriesBatch(ClusterNode from, Collection entries, AffinityTopologyVersion topVer ) throws IgniteCheckedException { - ctx.kernalContext().diagnostic().beginTrack(DEMANDER_PROCESS_MSG_BATCH); - - try { - if (entries.isEmpty()) - return; - - Map cctxMap = new HashMap<>(); + if (entries.isEmpty()) + return; - // Map by context. - for (GridCacheEntryInfo info : entries) { - try { - GridCacheContext cctx0 = grp.sharedGroup() ? ctx.cacheContext(info.cacheId()) : grp.singleCacheContext(); + Map cctxMap = new HashMap<>(); - if (cctx0 == null) - return; + // Map by context. + for (GridCacheEntryInfo info : entries) { + try { + GridCacheContext cctx0 = grp.sharedGroup() ? 
ctx.cacheContext(info.cacheId()) : grp.singleCacheContext(); - if (cctx0.isNear()) - cctx0 = cctx0.dhtCache().context(); + if (cctx0 == null) + return; - final GridCacheContext cctx = cctx0; + if (cctx0.isNear()) + cctx0 = cctx0.dhtCache().context(); - if (log.isTraceEnabled()) - log.trace("Rebalancing key [key=" + info.key() + ", part=" + p + ", node=" + from.id() + ']'); + final GridCacheContext cctx = cctx0; - BatchedCacheEntries batch = cctxMap.get(cctx.cacheId()); + if (log.isTraceEnabled()) + log.trace("Rebalancing key [key=" + info.key() + ", part=" + p + ", node=" + from.id() + ']'); - if (batch == null) { - // todo lock should be called for ALL group - cctx.group().listenerLock().readLock().lock(); + BatchedCacheEntries batch = cctxMap.get(cctx.cacheId()); - cctxMap.put(cctx.cacheId(), batch = new BatchedCacheEntries(topVer, p, cctx, true)); - } + if (batch == null) { + // todo lock should be called for ALL group + cctx.group().listenerLock().readLock().lock(); - batch.addEntry(info.key(), info.value(), info.expireTime(), info.ttl(), info.version(), DR_PRELOAD); - } - catch (GridDhtInvalidPartitionException ignored) { - if (log.isDebugEnabled()) - log.debug("Partition became invalid during rebalancing (will ignore): " + p); + cctxMap.put(cctx.cacheId(), batch = new BatchedCacheEntries(topVer, p, cctx, true)); } + + batch.addEntry(info.key(), info.value(), info.expireTime(), info.ttl(), info.version(), DR_PRELOAD); + } + catch (GridDhtInvalidPartitionException ignored) { + if (log.isDebugEnabled()) + log.debug("Partition became invalid during rebalancing (will ignore): " + p); } + } - for (BatchedCacheEntries batch : cctxMap.values()) { - assert batch.size() > BATCH_PRELOAD_THRESHOLD : batch.size(); + for (BatchedCacheEntries batch : cctxMap.values()) { + assert batch.size() > BATCH_PRELOAD_THRESHOLD : batch.size(); - GridCacheContext cctx = batch.context(); + GridCacheContext cctx = batch.context(); - batch.lock(); + batch.lock(); - try { - cctx.offheap().invokeAll(cctx, batch.keys(), batch.part(), batch.new UpdateClosure()); - } - finally { - batch.unlock(); + try { + cctx.offheap().invokeAll(cctx, batch.keys(), batch.part(), batch.new UpdateClosure()); + } + finally { + batch.unlock(); - cctx.group().listenerLock().readLock().unlock(); + cctx.group().listenerLock().readLock().unlock(); - for (GridCacheContext cctx0 : grp.caches()) { - if (cctx0.statisticsEnabled()) - cctx0.cache().metrics0().onRebalanceKeysReceived(batch.size()); - } + for (GridCacheContext cctx0 : grp.caches()) { + if (cctx0.statisticsEnabled()) + cctx0.cache().metrics0().onRebalanceKeysReceived(batch.size()); } } - } finally { - ctx.kernalContext().diagnostic().endTrack(DEMANDER_PROCESS_MSG_BATCH); } } @@ -1411,20 +1389,8 @@ public static class RebalanceFuture extends GridFutureAdapter { this.rebalanceId = rebalanceId; ctx = grp.shared(); - -// ctx.kernalContext().diagnostic().beginTrack(TOTAL); } -// @Override protected boolean onDone(@Nullable Boolean res, @Nullable Throwable err, boolean cancel) { -// if (ctx != null) { // can be dummy -// ctx.kernalContext().diagnostic().endTrack(TOTAL); -// -// ctx.kernalContext().diagnostic().printStats(); -// } -// -// return super.onDone(res, err, cancel); -// } - /** * Dummy future. Will be done by real one. 
*/ @@ -1590,11 +1556,6 @@ private void partitionDone(UUID nodeId, int p, boolean updateState) { ", batch=" + batchPageWriteEnabled + "]")); remaining.remove(nodeId); - - ctx.kernalContext().diagnostic().endTrack(DEMANDER_PROCESS_MSG); - - ctx.kernalContext().diagnostic().endTrack(TOTAL); - ctx.kernalContext().diagnostic().printStats(); } checkIsDone(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java index 18f55e7cb5091..f59ebfef93ee3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java @@ -55,8 +55,6 @@ import org.apache.ignite.spi.IgniteSpiException; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.OWNING; -import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.SUPPLIER_PROCESS_MSG; -import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.TOTAL; /** * Class for supplying partitions to demanding nodes. @@ -257,9 +255,6 @@ public void handleDemandMessage(int topicId, UUID nodeId, GridDhtPartitionDemand long maxBatchesCnt = grp.config().getRebalanceBatchesPrefetchCount(); if (sctx == null) { - grp.shared().kernalContext().diagnostic().beginTrack(TOTAL); - - grp.shared().kernalContext().diagnostic().beginTrack(SUPPLIER_PROCESS_MSG); if (log.isDebugEnabled()) log.debug("Starting supplying rebalancing [" + supplyRoutineInfo(topicId, nodeId, demandMsg) + @@ -435,10 +430,6 @@ else if (iter.isPartitionMissing(p)) { if (log.isInfoEnabled()) log.info("Finished supplying rebalancing [" + supplyRoutineInfo(topicId, nodeId, demandMsg) + "]"); - - grp.shared().kernalContext().diagnostic().endTrack(SUPPLIER_PROCESS_MSG); - grp.shared().kernalContext().diagnostic().endTrack(TOTAL); - grp.shared().kernalContext().diagnostic().printStats(); } catch (Throwable t) { if (grp.shared().kernalContext().isStopping()) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java index 9ba9fcd688bc4..64bcb56cf0eef 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java @@ -38,9 +38,9 @@ import org.apache.ignite.internal.processors.cache.GridCachePreloaderAdapter; import org.apache.ignite.internal.processors.cache.KeyCacheObject; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtFuture; -import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology; import org.apache.ignite.internal.processors.cache.distributed.dht.atomic.GridNearAtomicAbstractUpdateRequest; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology; import org.apache.ignite.internal.util.future.GridCompoundFuture; import 
org.apache.ignite.internal.util.future.GridFinishedFuture; import org.apache.ignite.internal.util.future.GridFutureAdapter; @@ -58,7 +58,6 @@ import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.MOVING; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.OWNING; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.RENTING; -import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.SUPPLIER_PROCESS_MSG; /** * DHT cache preloader. @@ -388,15 +387,11 @@ private List remoteOwners(int p, AffinityTopologyVersion topVer) { if (!enterBusy()) return; - grp.shared().kernalContext().diagnostic().beginTrack(SUPPLIER_PROCESS_MSG); - try { supplier.handleDemandMessage(idx, id, d); } finally { leaveBusy(); - - grp.shared().kernalContext().diagnostic().endTrack(SUPPLIER_PROCESS_MSG); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java index 23ccf2aceb92a..0396c3e43a76a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java @@ -61,7 +61,6 @@ import org.apache.ignite.internal.processors.continuous.GridContinuousProcessor; import org.apache.ignite.internal.processors.datastreamer.DataStreamProcessor; import org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor; -import org.apache.ignite.internal.processors.diag.DiagnosticProcessor; import org.apache.ignite.internal.processors.failure.FailureProcessor; import org.apache.ignite.internal.processors.hadoop.HadoopHelper; import org.apache.ignite.internal.processors.hadoop.HadoopProcessorAdapter; @@ -508,11 +507,6 @@ protected IgniteConfiguration prepareIgniteConfiguration() { return null; } - /** {@inheritDoc} */ - @Override public DiagnosticProcessor diagnostic() { - return null; - } - /** {@inheritDoc} */ @Override public void printMemoryStats() { } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/diag/DiagnosticProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/diag/DiagnosticProcessor.java deleted file mode 100644 index c3f9ce5c7e1aa..0000000000000 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/diag/DiagnosticProcessor.java +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.ignite.internal.processors.diag; - -import java.util.Comparator; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.atomic.LongAdder; -import java.util.stream.Collectors; -import org.apache.ignite.IgniteCheckedException; -import org.apache.ignite.internal.GridKernalContext; -import org.apache.ignite.internal.processors.GridProcessorAdapter; -import org.apache.ignite.internal.util.typedef.internal.U; - -import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.TOTAL; - -/** - * General rebalance diagnostic processing API - */ -public class DiagnosticProcessor extends GridProcessorAdapter { - /** */ - private final ConcurrentMap timings = new ConcurrentHashMap<>(); - - /** */ - private final ConcurrentMap counts = new ConcurrentHashMap<>(); - - /** */ - private final ConcurrentMap tracks = new ConcurrentHashMap<>(); - - /** */ - private volatile boolean enabled; - - /** - * @param ctx Context. - */ - public DiagnosticProcessor(GridKernalContext ctx) { - super(ctx); - } - - /** {@inheritDoc} */ - @Override public void start() throws IgniteCheckedException { - for (DiagnosticTopics topics : DiagnosticTopics.values()) { - timings.put(topics.name(), new LongAdder()); - - counts.put(topics.name(), new LongAdder()); - } - - U.quietAndInfo(log, "DiagnosticProcessor started"); - } - - /** {@inheritDoc} */ - @Override public void stop(boolean cancel) throws IgniteCheckedException { - super.stop(cancel); - - resetCounts(); - } - - /** */ - public void beginTrack(DiagnosticTopics topic) { - if (TOTAL == topic) - enabled = true; - - if (!enabled) - return; - - beginTrack(topic.name()); - } - - /** */ - private void beginTrack(String topic) { - tracks.putIfAbsent(topic, U.currentTimeMillis()); - } - - /** */ - public void endTrack(DiagnosticTopics topic) { - if (!enabled) - return; - - if (TOTAL == topic) - enabled = false; - - endTrack(topic.name()); - } - - /** */ - public void timeTrack(DiagnosticTopics topic, long time) { - if (!enabled) - return; - - if (TOTAL == topic) - enabled = false; - - timings.get(topic.name()).add(time); - } - - /** */ - private void endTrack(String topic) { - Long value = tracks.remove(topic); - - if (value == null) - return; - - timings.get(topic).add(U.currentTimeMillis() - value); - counts.get(topic).increment(); - } - - /** */ - public synchronized void printStats() { - long total = timings.get(TOTAL.name()).longValue(); - - StringBuilder buf = new StringBuilder(); - - String out = timings.entrySet() - .stream() - .filter(e -> e.getValue().longValue() != 0) - .sorted(Comparator.comparingInt(o -> DiagnosticTopics.valueOf(o.getKey()).ordinal())) - .map(e -> String.format("# %s : %s ms : %.2f : %s", - DiagnosticTopics.valueOf(e.getKey()).desc(), - e.getValue().longValue(), - ( ((double)e.getValue().longValue()) / total * 100), - counts.get(e.getKey()).longValue())) - .collect(Collectors.joining("\n")); - - buf.append("\n# Diagnostic processor info: \n" + out); - - resetCounts(); - - if (!tracks.isEmpty()) { - String str = tracks.entrySet() - .stream() - .map(e -> "# " + DiagnosticTopics.valueOf(e.getKey()).desc() + " : " + (e.getValue() - U.currentTimeMillis())) - .collect(Collectors.joining("\n")); - - buf.append("\n# Unfinished tracks: \n" + str); - } - - log.info(buf.toString()); - - tracks.clear(); - } - - /** */ - public synchronized void resetCounts() { - for (Map.Entry e : timings.entrySet()) - e.getValue().reset(); - - for 
(Map.Entry c : counts.entrySet()) - c.getValue().reset(); - } -} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/diag/DiagnosticTopics.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/diag/DiagnosticTopics.java deleted file mode 100644 index 5588ee707541d..0000000000000 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/diag/DiagnosticTopics.java +++ /dev/null @@ -1,77 +0,0 @@ -package org.apache.ignite.internal.processors.diag; - -import java.util.HashMap; -import java.util.Map; - -/** - * - */ -public enum DiagnosticTopics { - /** Root. */ - -// /** GridDhtPartitionDemander#preloadEntry(..) */ -// PRELOAD_ENTRY("# # preload on demander"), -// /** GridCacheMapEntry#storeValue(..) */ -// PRELOAD_OFFHEAP_INVOKE("# # # offheap().invoke(..)"), -// -// PRELOAD_OFFHEAP_INVOKE_INSERT_FREELIST("# # # freeList.insertDataRow"), - - -// /** CacheDataStoreImpl#invoke0(..) */ -// PRELOAD_TREE_INVOKE("# # # # dataTree.invoke(..)"), -// /** rowStore.addRow(..) */ -// PRELOAD_TREE_ADD_ROW("# # # # # FreeList.insertDataRow(..)"), -// /** */ -// PRELOAD_TREE_FINISH_UPDATE("# # # # CacheDataStoreImpl.finishUpdate(..)"), -// /** CacheDataStoreImpl.finishUpdate(..) */ -// PRELOAD_INDEXING_STORE("# # # # # indexing().store(..)"), -// /** CacheDataStoreImpl.finishUpdate(..) */ -// PRELOAD_PENDING_TREE_REMOVE("# # # # # pendingTree().removex(..)"), -// /** CacheDataStoreImpl.finishUpdate(..) */ -// PRELOAD_PENDING_TREE_PUT("# # # # # pendingTree().putx(..)"), -// /** CacheDataStoreImpl#finishRemove(..) */ -// PRELOAD_INDEXING_REMOVE("# # # # finishRemove -> indexing().remove(..)"), -// /** CacheDataStoreImpl#finishRemove(..) */ -// PRELOAD_FREELIST_REMOVE("# # # # finishRemove -> freeList.removeDataRowByLink(..)"), -// /** */ -// PRELOAD_UPDATED("# # # ttl().addTrackedEntry(..)"), -// /** */ -// PRELOAD_ON_WAL_LOG("# # # wal.log(..)"), -// /** */ -// PRELOAD_ON_ENTRY_UPDATED("# # # continuousQueries().onEntryUpdated(..)"), -// -// SEND_DEMAND("# message serialization"), -// SEND_RECEIVE("# network delay between nodes"), -// DEMAND_MSG_SEND("# # demand message send"), -// SUPPLY_MSG_SEND("# # supply message send"), - SUPPLIER_PROCESS_MSG("# make batch on supplier handleDemandMessage(..)"), - - DEMANDER_PROCESS_MSG_SINGLE("# # demander process single"), -// DEMANDER_PROCESS_MSG_BATCH_BIN_SEARCH("# # # # # demander search freelist"), -// DEMANDER_PROCESS_MSG_BATCH_BIN_PACK("# # # # # demander process binPack"), -// DEMANDER_PROCESS_MSG_BATCH_BIN_INSERT("# # # # # demander process insert"), -// DEMANDER_PROCESS_MSG_BATCH_ALLOC_PAGE("# # # # # demander alloc page"), -// PRELOAD_OFFHEAP_BATCH_FIND("# # # # # demander find"), -// PRELOAD_OFFHEAP_BATCH_INSERT("# # # # demander rowStore.freeList().insertBatch"), -// PRELOAD_OFFHEAP_BATCH_TREE_INSERT("# # # # demander dataTree.putx"), -// DEMANDER_PROCESS_MSG_BATCH_LOCK("# # # batch lock"), -// DEMANDER_PROCESS_MSG_BATCH_UNLOCK("# # # batch unlock"), -// DEMANDER_PROCESS_MSG_BATCH_UPDATE("# # # demander batch update"), - DEMANDER_PROCESS_MSG_BATCH("# # demander process batch"), - - DEMANDER_PROCESS_MSG("# demander handleSupplyMessage(..)"), - TOTAL("# cache rebalance total"); - - /** */ - private String desc; - - /** */ - DiagnosticTopics(String desc) { - this.desc = desc; - } - - /** */ - public String desc() { - return desc; - } -} From 13431b8ab153828c92873c1d640c40b4fa5f771c Mon Sep 17 00:00:00 2001 From: pereslegin-pa Date: Sat, 16 Mar 2019 22:59:00 +0300 Subject: [PATCH 3/7] cleanup --- 
.../jmh/pagemem/JmhBatchUpdatesBenchmark.java | 1 - .../ignite/internal/GridKernalContext.java | 2 +- .../internal/GridKernalContextImpl.java | 4 +- .../cache/IgniteCacheOffheapManagerImpl.java | 18 +-- .../preloader/GridDhtPartitionDemander.java | 13 +- .../preloader/GridDhtPartitionSupplier.java | 1 - .../dht/preloader/GridDhtPreloader.java | 2 +- .../persistence/GridCacheOffheapManager.java | 8 +- .../IgniteCacheDatabaseSharedManager.java | 3 +- .../cache/persistence/RowStore.java | 7 +- .../freelist/AbstractFreeList.java | 130 +----------------- .../freelist/CacheFreeListImpl.java | 5 +- .../persistence/metastorage/MetaStorage.java | 2 +- .../cache/persistence/tree/BPlusTree.java | 10 +- .../cache/query/GridCacheQueryManager.java | 10 -- .../tcp/TcpCommunicationSpi.java | 15 -- .../database/CacheFreeListImplSelfTest.java | 2 +- 17 files changed, 25 insertions(+), 208 deletions(-) diff --git a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/pagemem/JmhBatchUpdatesBenchmark.java b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/pagemem/JmhBatchUpdatesBenchmark.java index 522cb1c9ac6b4..a6e565f337132 100644 --- a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/pagemem/JmhBatchUpdatesBenchmark.java +++ b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/pagemem/JmhBatchUpdatesBenchmark.java @@ -191,7 +191,6 @@ private CacheConfiguration ccfg(boolean batch) { .setDataRegionName(batch ? REG_BATCH : REG_SINGLE); } - /** * Test single updates. * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java index b7b95c3d8b315..744f85857a203 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java @@ -43,7 +43,6 @@ import org.apache.ignite.internal.processors.cluster.ClusterProcessor; import org.apache.ignite.internal.processors.cluster.GridClusterStateProcessor; import org.apache.ignite.internal.processors.compress.CompressionProcessor; -import org.apache.ignite.internal.processors.configuration.distributed.DistributedConfigurationProcessor; import org.apache.ignite.internal.processors.continuous.GridContinuousProcessor; import org.apache.ignite.internal.processors.datastreamer.DataStreamProcessor; import org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor; @@ -55,6 +54,7 @@ import org.apache.ignite.internal.processors.job.GridJobProcessor; import org.apache.ignite.internal.processors.jobmetrics.GridJobMetricsProcessor; import org.apache.ignite.internal.processors.marshaller.GridMarshallerMappingProcessor; +import org.apache.ignite.internal.processors.configuration.distributed.DistributedConfigurationProcessor; import org.apache.ignite.internal.processors.metastorage.DistributedMetaStorage; import org.apache.ignite.internal.processors.odbc.ClientListenerProcessor; import org.apache.ignite.internal.processors.platform.PlatformProcessor; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java index 7e4bf5a098d61..85e02f93ce057 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java @@ -48,6 +48,7 @@ import 
org.apache.ignite.internal.managers.failover.GridFailoverManager; import org.apache.ignite.internal.managers.indexing.GridIndexingManager; import org.apache.ignite.internal.managers.loadbalancer.GridLoadBalancerManager; +import org.apache.ignite.internal.processors.service.ServiceProcessorAdapter; import org.apache.ignite.internal.processors.affinity.GridAffinityProcessor; import org.apache.ignite.internal.processors.authentication.IgniteAuthenticationProcessor; import org.apache.ignite.internal.processors.cache.CacheConflictResolutionManager; @@ -60,7 +61,6 @@ import org.apache.ignite.internal.processors.cluster.ClusterProcessor; import org.apache.ignite.internal.processors.cluster.GridClusterStateProcessor; import org.apache.ignite.internal.processors.compress.CompressionProcessor; -import org.apache.ignite.internal.processors.configuration.distributed.DistributedConfigurationProcessor; import org.apache.ignite.internal.processors.continuous.GridContinuousProcessor; import org.apache.ignite.internal.processors.datastreamer.DataStreamProcessor; import org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor; @@ -72,6 +72,7 @@ import org.apache.ignite.internal.processors.job.GridJobProcessor; import org.apache.ignite.internal.processors.jobmetrics.GridJobMetricsProcessor; import org.apache.ignite.internal.processors.marshaller.GridMarshallerMappingProcessor; +import org.apache.ignite.internal.processors.configuration.distributed.DistributedConfigurationProcessor; import org.apache.ignite.internal.processors.metastorage.DistributedMetaStorage; import org.apache.ignite.internal.processors.nodevalidation.DiscoveryNodeValidationProcessor; import org.apache.ignite.internal.processors.odbc.ClientListenerProcessor; @@ -86,7 +87,6 @@ import org.apache.ignite.internal.processors.schedule.IgniteScheduleProcessorAdapter; import org.apache.ignite.internal.processors.security.GridSecurityProcessor; import org.apache.ignite.internal.processors.segmentation.GridSegmentationProcessor; -import org.apache.ignite.internal.processors.service.ServiceProcessorAdapter; import org.apache.ignite.internal.processors.session.GridTaskSessionProcessor; import org.apache.ignite.internal.processors.subscription.GridInternalSubscriptionProcessor; import org.apache.ignite.internal.processors.task.GridTaskProcessor; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java index 59708b9b53cef..a9bad9c74470b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java @@ -1614,7 +1614,7 @@ private boolean canUpdateOldRow(GridCacheContext cctx, @Nullable CacheDataRow ol return false; // Use grp.sharedGroup() flag since it is possible cacheId is not yet set here. 
-// boolean sizeWithCacheId = grp.sharedGroup(); + boolean sizeWithCacheId = grp.sharedGroup(); int oldLen = oldRow.size(); @@ -1709,12 +1709,8 @@ private void invokeAll0(GridCacheContext cctx, List rows, Offhea try { assert cctx.shared().database().checkpointLockIsHeldByThread(); -// ctx.kernalContext().diagnostic().beginTrack(PRELOAD_TREE_INVOKE); - dataTree.invokeAll(rows, CacheDataRowAdapter.RowData.NO_KEY, c); -// ctx.kernalContext().diagnostic().endTrack(PRELOAD_TREE_INVOKE); - for (T3 tuple : c.result()) { IgniteTree.OperationType opType = tuple.get1(); @@ -1727,6 +1723,7 @@ private void invokeAll0(GridCacheContext cctx, List rows, Offhea assert newRow != null : tuple; finishUpdate(cctx, newRow, oldRow); + break; } @@ -1790,7 +1787,7 @@ private void invokeAll0(GridCacheContext cctx, List rows, Offhea * @param cacheId Cache id. * @return Made data row. */ - @NotNull public DataRow makeDataRow(KeyCacheObject key, CacheObject val, GridCacheVersion ver, long expireTime, + @NotNull private DataRow makeDataRow(KeyCacheObject key, CacheObject val, GridCacheVersion ver, long expireTime, int cacheId) { if (key.partition() == -1) key.partition(partId); @@ -2618,17 +2615,13 @@ private void updatePendingEntries(GridCacheContext cctx, CacheDataRow newRow, @N if (oldRow != null) { assert oldRow.link() != 0 : oldRow; - if (pendingTree() != null && oldRow.expireTime() != 0) { -// cctx.kernalContext().diagnostic().beginTrack(PRELOAD_PENDING_TREE_REMOVE); + if (pendingTree() != null && oldRow.expireTime() != 0) pendingTree().removex(new PendingRow(cacheId, oldRow.expireTime(), oldRow.link())); -// cctx.kernalContext().diagnostic().endTrack(PRELOAD_PENDING_TREE_REMOVE); - } } if (pendingTree() != null && expireTime != 0) { -// cctx.kernalContext().diagnostic().beginTrack(PRELOAD_PENDING_TREE_PUT); pendingTree().putx(new PendingRow(cacheId, expireTime, newRow.link())); -// cctx.kernalContext().diagnostic().endTrack(PRELOAD_PENDING_TREE_PUT); + hasPendingEntries = true; } } @@ -2899,7 +2892,6 @@ private void afterRowFound(@Nullable CacheDataRow row, KeyCacheObject key) throw dataTree.destroy(new IgniteInClosure() { @Override public void apply(CacheSearchRow row) { try { -// log.info("Remove row: " + row.key().hashCode() + " link " + row.link()); rowStore.removeRow(row.link(), grp.statisticsHolderData()); } catch (IgniteCheckedException e) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java index 1cbe345045b3f..bb1371fe932a8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java @@ -1171,8 +1171,6 @@ private boolean preloadEntry( ) throws IgniteCheckedException { assert ctx.database().checkpointLockIsHeldByThread(); -// ctx.kernalContext().diagnostic().beginTrack(PRELOAD_ENTRY); - try { GridCacheEntryEx cached = null; @@ -1549,11 +1547,11 @@ private void partitionDone(UUID nodeId, int p, boolean updateState) { int remainingRoutines = remaining.size() - 1; U.log(log, "Completed " + ((remainingRoutines == 0 ? 
"(final) " : "") + - "rebalancing [grp=" + grp.cacheOrGroupName() + - ", supplier=" + nodeId + - ", topVer=" + topologyVersion() + - ", progress=" + (routines - remainingRoutines) + "/" + routines + "," + - ", batch=" + batchPageWriteEnabled + "]")); + "rebalancing [grp=" + grp.cacheOrGroupName() + + ", supplier=" + nodeId + + ", topVer=" + topologyVersion() + + ", progress=" + (routines - remainingRoutines) + "/" + routines + "," + + ", batch=" + batchPageWriteEnabled + "]")); remaining.remove(nodeId); } @@ -1657,4 +1655,3 @@ private void sendRebalanceFinishedEvent() { } } } - diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java index f59ebfef93ee3..514f8fd6f90e3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java @@ -255,7 +255,6 @@ public void handleDemandMessage(int topicId, UUID nodeId, GridDhtPartitionDemand long maxBatchesCnt = grp.config().getRebalanceBatchesPrefetchCount(); if (sctx == null) { - if (log.isDebugEnabled()) log.debug("Starting supplying rebalancing [" + supplyRoutineInfo(topicId, nodeId, demandMsg) + ", fullPartitions=" + S.compact(demandMsg.partitions().fullSet()) + diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java index 64bcb56cf0eef..042e0eaa484cb 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java @@ -38,9 +38,9 @@ import org.apache.ignite.internal.processors.cache.GridCachePreloaderAdapter; import org.apache.ignite.internal.processors.cache.KeyCacheObject; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtFuture; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology; import org.apache.ignite.internal.processors.cache.distributed.dht.atomic.GridNearAtomicAbstractUpdateRequest; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition; -import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology; import org.apache.ignite.internal.util.future.GridCompoundFuture; import org.apache.ignite.internal.util.future.GridFinishedFuture; import org.apache.ignite.internal.util.future.GridFutureAdapter; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java index bbe631426986c..41ece06324654 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java @@ -30,7 +30,6 @@ import java.util.concurrent.Executor; import 
java.util.concurrent.atomic.AtomicBoolean; import javax.cache.processor.EntryProcessor; -import javax.naming.OperationNotSupportedException; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.failure.FailureContext; @@ -52,14 +51,12 @@ import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdatePartitionDataRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.PartitionDestroyRecord; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; -import org.apache.ignite.internal.processors.cache.BatchedCacheEntries; import org.apache.ignite.internal.processors.cache.CacheEntryPredicate; import org.apache.ignite.internal.processors.cache.CacheGroupContext; import org.apache.ignite.internal.processors.cache.CacheObject; import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.GridCacheEntryEx; import org.apache.ignite.internal.processors.cache.GridCacheMvccEntryInfo; -import org.apache.ignite.internal.processors.cache.GridCacheEntryInfo; import org.apache.ignite.internal.processors.cache.GridCacheTtlManager; import org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManagerImpl; import org.apache.ignite.internal.processors.cache.KeyCacheObject; @@ -87,7 +84,6 @@ import org.apache.ignite.internal.processors.cache.persistence.wal.FileWALPointer; import org.apache.ignite.internal.processors.cache.tree.CacheDataRowStore; import org.apache.ignite.internal.processors.cache.tree.CacheDataTree; -import org.apache.ignite.internal.processors.cache.tree.DataRow; import org.apache.ignite.internal.processors.cache.tree.PendingEntriesTree; import org.apache.ignite.internal.processors.cache.tree.PendingRow; import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccUpdateResult; @@ -102,7 +98,6 @@ import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiTuple; -import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.MOVING; @@ -1499,8 +1494,7 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException null, ctx.wal(), reuseRoot.pageId().pageId(), - reuseRoot.isAllocated(), - ctx.kernalContext()) { + reuseRoot.isAllocated()) { /** {@inheritDoc} */ @Override protected long allocatePageNoReuse() throws IgniteCheckedException { assert grp.shared().database().checkpointLockIsHeldByThread(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java index b4a6a4a219f94..d4db27c74bed4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java @@ -254,8 +254,7 @@ protected void initPageMemoryDataStructures(DataStorageConfiguration dbCfg) thro null, persistenceEnabled ? 
cctx.wal() : null, 0L, - true, - cctx.kernalContext()); + true); freeListMap.put(memPlcCfg.getName(), freeList); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java index 3e43fba9c620f..2f2942d51ce1d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java @@ -28,10 +28,6 @@ import org.apache.ignite.internal.processors.query.GridQueryRowCacheCleaner; import org.apache.ignite.internal.stat.IoStatisticsHolder; -//import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.PRELOAD_OFFHEAP_INVOKE_INSERT_FREELIST; - -//import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.PRELOAD_FREELIST_REMOVE; - /** * Data store for H2 rows. */ @@ -85,12 +81,11 @@ public void removeRow(long link, IoStatisticsHolder statHolder) throws IgniteChe freeList.removeDataRowByLink(link, statHolder); else { ctx.database().checkpointReadLock(); -// ctx.kernalContext().diagnostic().beginTrack(PRELOAD_FREELIST_REMOVE); + try { freeList.removeDataRowByLink(link, statHolder); } finally { -// ctx.kernalContext().diagnostic().endTrack(PRELOAD_FREELIST_REMOVE); ctx.database().checkpointReadUnlock(); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java index 254c38abbaf04..41d81ddcf20bd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java @@ -19,15 +19,10 @@ import java.util.ArrayList; import java.util.Collection; -import java.util.Comparator; -import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.concurrent.atomic.AtomicReferenceArray; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; -import org.apache.ignite.internal.GridKernalContext; -import org.apache.ignite.internal.IgniteKernal; import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.PageUtils; @@ -51,16 +46,8 @@ import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler; import org.apache.ignite.internal.stat.IoStatisticsHolder; import org.apache.ignite.internal.stat.IoStatisticsHolderNoOp; -import org.apache.ignite.internal.util.lang.GridTuple3; -import org.apache.ignite.internal.util.typedef.T2; -import org.apache.ignite.internal.util.typedef.T3; import org.apache.ignite.internal.util.typedef.internal.U; -//import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.DEMANDER_PROCESS_MSG_BATCH_ALLOC_PAGE; -//import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.DEMANDER_PROCESS_MSG_BATCH_BIN_INSERT; -//import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.DEMANDER_PROCESS_MSG_BATCH_BIN_PACK; -//import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.DEMANDER_PROCESS_MSG_BATCH_BIN_SEARCH; - /** */ public abstract class AbstractFreeList extends PagesList implements FreeList, 
ReuseList { @@ -103,9 +90,6 @@ public abstract class AbstractFreeList extends PagesList imp /** */ private final PageEvictionTracker evictionTracker; - /** */ - private final GridKernalContext ctx; - /** * */ @@ -206,8 +190,6 @@ protected Integer run0( assert oldFreeSpace > 0 : oldFreeSpace; // If the full row does not fit into this page write only a fragment. -// System.out.println(">xxx> free=" + oldFreeSpace + ", rowSize=" + rowSize + " hash=" + row.hashCode()); - written = (written == 0 && oldFreeSpace >= rowSize) ? addRow(pageId, page, pageAddr, io, row, rowSize) : addRowFragment(pageId, page, pageAddr, io, row, written, rowSize); @@ -465,8 +447,7 @@ public AbstractFreeList( ReuseList reuseList, IgniteWriteAheadLogManager wal, long metaPageId, - boolean initNew, - GridKernalContext ctx) throws IgniteCheckedException { + boolean initNew) throws IgniteCheckedException { super(cacheId, name, memPlc.pageMemory(), BUCKETS, wal, metaPageId); rmvRow = new RemoveRowHandler(cacheId == 0); @@ -495,8 +476,6 @@ public AbstractFreeList( this.memMetrics = memMetrics; init(metaPageId, initNew); - - this.ctx = ctx; } /** @@ -747,7 +726,7 @@ else if (PageIdUtils.tag(pageId) != PageIdAllocator.FLAG_DATA) pageId = takeEmptyPage(b, ioVersions(), statHolder); if (pageId != 0L) { - remainPageSpace = (b << shift) + 4; // todo explain "+4"? + remainPageSpace = (b << shift); //todo + 4, wtf "+4"? break; } @@ -785,112 +764,7 @@ else if (PageIdUtils.tag(pageId) != PageIdAllocator.FLAG_DATA) assert written == COMPLETE : written; } - - } - -// for (T2, Integer> bin : bins) { -// long pageId = 0; -// -// int remaining = bin.get2(); -// -//// ctx.diagnostic().beginTrack(DEMANDER_PROCESS_MSG_BATCH_BIN_SEARCH); -// -// int buck = bucket(remaining, false) + 1; -// -// for (int b = remaining < MIN_SIZE_FOR_DATA_PAGE ? buck : REUSE_BUCKET; b < BUCKETS; b++) { -// pageId = takeEmptyPage(b, ioVersions(), statHolder); -// -// if (pageId != 0L) -// break; -// } -// -//// ctx.diagnostic().endTrack(DEMANDER_PROCESS_MSG_BATCH_BIN_SEARCH); -// -//// ctx.diagnostic().beginTrack(DEMANDER_PROCESS_MSG_BATCH_ALLOC_PAGE); -// -// T row = bin.get1().get(0); -// -// AbstractDataPageIO initIo = null; -// -// if (pageId == 0) { -// pageId = allocateDataPage(row.partition()); -// -// initIo = ioVersions().latest(); -// } -// else if (PageIdUtils.tag(pageId) != PageIdAllocator.FLAG_DATA) -// pageId = initReusedPage(pageId, row.partition(), statHolder); -// else -// pageId = PageIdUtils.changePartitionId(pageId, row.partition()); -// -//// ctx.diagnostic().endTrack(DEMANDER_PROCESS_MSG_BATCH_ALLOC_PAGE); -//// -//// ctx.diagnostic().beginTrack(DEMANDER_PROCESS_MSG_BATCH_BIN_INSERT); -// -// int written = write(pageId, writeRows, initIo, bin.get1(), FAIL_I, statHolder); -// -//// ctx.diagnostic().endTrack(DEMANDER_PROCESS_MSG_BATCH_BIN_INSERT); -// -// assert written == COMPLETE : written; -// } - } - - // todo move out - // todo experiment with "bestfit" approach - private List, Integer>> binPack(List> rows, int cap) { - // Initialize result (Count of bins) - int cnt = 0; - - // Result. - List, Integer>> bins = new ArrayList<>(rows.size()); - - // Create an array to store remaining space in bins - // there can be at most n bins - int[] remains = new int[rows.size()]; - - // Place items one by one - for (int i = (rows.size() - 1); i >= 0; i--) { - // Find the first bin that can accommodate weight[i] - int j; - - T3 t3 = rows.get(i); - - int size = t3.get1() + (t3.get3() ? 
12 : 4); // + inner pointer + pageId (for head of large row) - - for (j = 0; j < cnt; j++) { - if (remains[j] >= size) { - remains[j] -= size; - - T row = rows.get(i).get2(); - - bins.get(j).get1().add(row); - bins.get(j).set2(bins.get(j).get2() + size); - -// binMap.put(row, j); - - break; - } - } - - // If no bin could accommodate sizes[i]. - if (j == cnt) { - remains[cnt] = cap - size; - - // todo remove magic number - List list = new ArrayList<>(16); - - bins.add(new T2<>(list, size)); - - T row = rows.get(i).get2(); - - list.add(row); - -// binMap.put(row, j); - - cnt++; - } } - - return bins; } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeListImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeListImpl.java index beab554d978dc..625c0b15d9d56 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeListImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeListImpl.java @@ -18,7 +18,6 @@ package org.apache.ignite.internal.processors.cache.persistence.freelist; import org.apache.ignite.IgniteCheckedException; -import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; @@ -47,8 +46,8 @@ public class CacheFreeListImpl extends AbstractFreeList { */ public CacheFreeListImpl(int cacheId, String name, DataRegionMetricsImpl regionMetrics, DataRegion dataRegion, ReuseList reuseList, - IgniteWriteAheadLogManager wal, long metaPageId, boolean initNew, GridKernalContext ctx) throws IgniteCheckedException { - super(cacheId, name, regionMetrics, dataRegion, reuseList, wal, metaPageId, initNew, ctx); + IgniteWriteAheadLogManager wal, long metaPageId, boolean initNew) throws IgniteCheckedException { + super(cacheId, name, regionMetrics, dataRegion, reuseList, wal, metaPageId, initNew); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java index 53d805282c4c8..05efb405c2b38 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java @@ -666,7 +666,7 @@ public class FreeListImpl extends AbstractFreeList { FreeListImpl(int cacheId, String name, DataRegionMetricsImpl regionMetrics, DataRegion dataRegion, ReuseList reuseList, IgniteWriteAheadLogManager wal, long metaPageId, boolean initNew) throws IgniteCheckedException { - super(cacheId, name, regionMetrics, dataRegion, reuseList, wal, metaPageId, initNew, cctx.kernalContext()); + super(cacheId, name, regionMetrics, dataRegion, reuseList, wal, metaPageId, initNew); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java index 01508b407e762..e5bebf3feee80 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java @@ -19,14 +19,11 @@ import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.Comparator; -import java.util.Iterator; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -import javax.naming.OperationNotSupportedException; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteSystemProperties; @@ -49,8 +46,6 @@ import org.apache.ignite.internal.pagemem.wal.record.delta.RemoveRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.ReplaceRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.SplitExistingPageRecord; -import org.apache.ignite.internal.processors.cache.KeyCacheObject; -import org.apache.ignite.internal.processors.cache.persistence.CacheSearchRow; import org.apache.ignite.internal.processors.cache.persistence.DataStructure; import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusInnerIO; @@ -63,7 +58,6 @@ import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandlerWrapper; -import org.apache.ignite.internal.processors.cache.tree.DataRow; import org.apache.ignite.internal.processors.failure.FailureProcessor; import org.apache.ignite.internal.stat.IoStatisticsHolder; import org.apache.ignite.internal.stat.IoStatisticsHolderNoOp; @@ -78,7 +72,6 @@ import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteInClosure; import org.jetbrains.annotations.Nullable; -import sun.reflect.generics.reflectiveObjects.NotImplementedException; import static org.apache.ignite.IgniteSystemProperties.IGNITE_BPLUS_TREE_LOCK_RETRIES; import static org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree.Bool.DONE; @@ -1830,7 +1823,8 @@ public final boolean removex(L row) throws IgniteCheckedException { } @Override public void invokeAll(List rows, Object z, InvokeAllClosure c) throws IgniteCheckedException { - throw new UnsupportedOperationException(); + // todo + throw new UnsupportedOperationException("Not implemented yet"); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java index 563aa6d8febd6..ae5f7dfb986f4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java @@ -137,8 +137,6 @@ import static org.apache.ignite.internal.processors.cache.query.GridCacheQueryType.SQL; import static org.apache.ignite.internal.processors.cache.query.GridCacheQueryType.SQL_FIELDS; import static org.apache.ignite.internal.processors.cache.query.GridCacheQueryType.TEXT; -//import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.PRELOAD_INDEXING_REMOVE; -//import static 
org.apache.ignite.internal.processors.diag.DiagnosticTopics.PRELOAD_INDEXING_STORE; /** * Query and index manager. @@ -390,8 +388,6 @@ public void store(CacheDataRow newRow, @Nullable CacheDataRow prevRow, if (!enterBusy()) throw new NodeStoppingException("Operation has been cancelled (node is stopping)."); -// cctx.kernalContext().diagnostic().beginTrack(PRELOAD_INDEXING_STORE); - try { if (isIndexingSpiEnabled()) { CacheObjectContext coctx = cctx.cacheObjectContext(); @@ -407,8 +403,6 @@ public void store(CacheDataRow newRow, @Nullable CacheDataRow prevRow, qryProc.store(cctx, newRow, prevRow, prevRowAvailable); } finally { -// cctx.kernalContext().diagnostic().endTrack(PRELOAD_INDEXING_STORE); - invalidateResultCache(); leaveBusy(); @@ -428,8 +422,6 @@ public void remove(KeyCacheObject key, @Nullable CacheDataRow prevRow) if (!enterBusy()) return; // Ignore index update when node is stopping. -// cctx.kernalContext().diagnostic().beginTrack(PRELOAD_INDEXING_REMOVE); - try { if (isIndexingSpiEnabled()) { Object key0 = unwrapIfNeeded(key, cctx.cacheObjectContext()); @@ -442,8 +434,6 @@ public void remove(KeyCacheObject key, @Nullable CacheDataRow prevRow) qryProc.remove(cctx, prevRow); } finally { -// cctx.kernalContext().diagnostic().endTrack(PRELOAD_INDEXING_REMOVE); - invalidateResultCache(); leaveBusy(); diff --git a/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java b/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java index 7038227add8d4..571f0fd234508 100755 --- a/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java @@ -70,11 +70,9 @@ import org.apache.ignite.internal.IgniteInterruptedCheckedException; import org.apache.ignite.internal.IgniteKernal; import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException; -import org.apache.ignite.internal.managers.communication.GridIoMessage; import org.apache.ignite.internal.managers.discovery.IgniteDiscoverySpi; import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener; import org.apache.ignite.internal.managers.eventstorage.HighPriorityListener; -import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionSupplyMessage; import org.apache.ignite.internal.util.GridConcurrentFactory; import org.apache.ignite.internal.util.GridSpinReadWriteLock; import org.apache.ignite.internal.util.future.GridFinishedFuture; @@ -162,7 +160,6 @@ import static org.apache.ignite.events.EventType.EVT_NODE_LEFT; import static org.apache.ignite.failure.FailureType.CRITICAL_ERROR; import static org.apache.ignite.failure.FailureType.SYSTEM_WORKER_TERMINATION; -//import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.SUPPLY_MSG_SEND; import static org.apache.ignite.internal.util.nio.GridNioSessionMetaKey.SSL_META; import static org.apache.ignite.plugin.extensions.communication.Message.DIRECT_TYPE_SIZE; import static org.apache.ignite.spi.communication.tcp.internal.TcpCommunicationConnectionCheckFuture.SES_FUT_META; @@ -841,18 +838,6 @@ else if (connKey.dummy()) { else c = NOOP; -// if (msg instanceof GridIoMessage) { -// GridIoMessage msg0 = (GridIoMessage)msg; -// -// Message msg1 = msg0.message(); -// -// if (msg1 instanceof GridDhtPartitionSupplyMessage) { -// -//// ((IgniteEx)ignite).context().diagnostic().timeTrack(SUPPLY_MSG_SEND, (U.currentTimeMillis() - 
((GridDhtPartitionSupplyMessage)msg1).timestamp())); -// -// } -// } - notifyListener(connKey.nodeId(), msg, c); } } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListImplSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListImplSelfTest.java index 8b35374fbe865..ef74bcb9494e5 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListImplSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListImplSelfTest.java @@ -360,7 +360,7 @@ protected FreeList createFreeList(int pageSize) throws Exception { DataRegion dataRegion = new DataRegion(pageMem, plcCfg, regionMetrics, new NoOpPageEvictionTracker()); - return new CacheFreeListImpl(1, "freelist", regionMetrics, dataRegion, null, null, metaPageId, true, null); + return new CacheFreeListImpl(1, "freelist", regionMetrics, dataRegion, null, null, metaPageId, true); } /** From 059dff6d23a43d31aaee2e0934f29441ffbf037b Mon Sep 17 00:00:00 2001 From: pereslegin-pa Date: Mon, 18 Mar 2019 11:45:32 +0300 Subject: [PATCH 4/7] Big objects bug fix. --- .../cache/IgniteCacheOffheapManagerImpl.java | 25 +++--- .../freelist/AbstractFreeList.java | 2 +- .../processors/cache/tree/CacheDataTree.java | 8 +- .../FreeListPreloadWithBatchUpdatesTest.java | 77 ++++++++----------- 4 files changed, 48 insertions(+), 64 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java index a9bad9c74470b..31d5782c5e27d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java @@ -23,7 +23,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; -import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.NoSuchElementException; @@ -99,7 +98,7 @@ import org.apache.ignite.internal.util.GridLongList; import org.apache.ignite.internal.util.GridSpinBusyLock; import org.apache.ignite.internal.util.GridStripedLock; -import org.apache.ignite.internal.util.IgniteTree; +import org.apache.ignite.internal.util.IgniteTree.OperationType; import org.apache.ignite.internal.util.lang.GridCloseableIterator; import org.apache.ignite.internal.util.lang.GridCursor; import org.apache.ignite.internal.util.lang.GridIterator; @@ -135,8 +134,6 @@ import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.unexpectedStateException; import static org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager.EMPTY_CURSOR; import static org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO.MVCC_INFO_SIZE; -import static org.apache.ignite.internal.util.IgniteTree.OperationType.NOOP; -import static org.apache.ignite.internal.util.IgniteTree.OperationType.PUT; /** * @@ -1711,11 +1708,9 @@ private void invokeAll0(GridCacheContext cctx, List rows, Offhea dataTree.invokeAll(rows, CacheDataRowAdapter.RowData.NO_KEY, c); - for (T3 tuple : c.result()) { - IgniteTree.OperationType opType = tuple.get1(); - + for (T3 tuple : c.result()) { + OperationType opType = tuple.get1(); CacheDataRow oldRow = tuple.get2(); - CacheDataRow newRow = tuple.get3(); switch (opType) { @@ 
-1976,7 +1971,7 @@ private void invokeAll0(GridCacheContext cctx, List rows, Offhea invoke0(cctx, clo, clo); - return clo.operationType() == PUT; + return clo.operationType() == OperationType.PUT; } finally { busyLock.leaveBusy(); @@ -3092,7 +3087,7 @@ private class MvccUpdateRowWithPreloadInfoClosure extends MvccDataRow implements /** */ private CacheDataRow oldRow; /** */ - private IgniteTree.OperationType op; + private OperationType op; /** * @param cctx Cache context. @@ -3137,7 +3132,7 @@ private class MvccUpdateRowWithPreloadInfoClosure extends MvccDataRow implements this.oldRow = oldRow; if (oldRow == null) { - op = PUT; + op = OperationType.PUT; int cacheId = cacheId(); @@ -3149,7 +3144,7 @@ private class MvccUpdateRowWithPreloadInfoClosure extends MvccDataRow implements cacheId(cacheId); } else { - op = NOOP; + op = OperationType.NOOP; if (oldRow.mvccTxState() != mvccTxState() || oldRow.newMvccCoordinatorVersion() != newMvccCoordinatorVersion() || @@ -3164,12 +3159,12 @@ private class MvccUpdateRowWithPreloadInfoClosure extends MvccDataRow implements /** {@inheritDoc} */ @Override public CacheDataRow newRow() { - return op == PUT ? this : null; + return op == OperationType.PUT ? this : null; } /** {@inheritDoc} */ - @Override public IgniteTree.OperationType operationType() { - return op == null ? NOOP : op; + @Override public OperationType operationType() { + return op == null ? OperationType.NOOP : op; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java index 41d81ddcf20bd..0828d028207e2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java @@ -756,7 +756,7 @@ else if (PageIdUtils.tag(pageId) != PageIdAllocator.FLAG_DATA) int written; if (dataRows.size() == 1) { - written = fragment ? row.size() - (rows.size() % maxPayloadSize) : 0; + written = fragment ? 
row.size() - (row.size() % maxPayloadSize) : 0; written = write(pageId, writeRows, initIo, row, written, FAIL_I, statHolder); } else diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java index 05043d66ca935..29d7e74dbc63e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java @@ -355,14 +355,13 @@ public CacheDataRowStore rowStore() { CacheSearchRow lastSearchRow = null; KeyCacheObject newKey = null; - while (rowItr.hasNext() && cur.next()) { + while (cur.next()) { CacheDataRow oldRow = cur.get(); KeyCacheObject oldKey = oldRow.key(); - while (rowItr.hasNext() && (newKey == null || newKey.hashCode() <= oldKey.hashCode())) { + while (newKey == null || newKey.hashCode() <= oldKey.hashCode()) { if (newKey != null && newKey.hashCode() == oldKey.hashCode()) { while (newKey.hashCode() == oldKey.hashCode()) { - if (newKey.equals(oldKey)) batch.add(new T2<>(oldRow, lastSearchRow)); else @@ -385,6 +384,9 @@ public CacheDataRowStore rowStore() { lastSearchRow = rowItr.next(); newKey = lastSearchRow.key(); } + + if (!rowItr.hasNext()) + break; } } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/FreeListPreloadWithBatchUpdatesTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/FreeListPreloadWithBatchUpdatesTest.java index 1d7eb57d0aa66..794fdbe079ed4 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/FreeListPreloadWithBatchUpdatesTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/FreeListPreloadWithBatchUpdatesTest.java @@ -41,6 +41,7 @@ import org.apache.ignite.internal.util.typedef.PA; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.WithSystemProperty; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.junit.After; import org.junit.Before; @@ -62,7 +63,10 @@ public class FreeListPreloadWithBatchUpdatesTest extends GridCommonAbstractTest private static final int HDR_SIZE = 8 + 32; /** */ - private static final long DEF_REG_SIZE = 6 * 1024 * 1024 * 1024L; + private static final long DEF_REG_SIZE_INIT = 3400 * 1024 * 1024L; + + /** */ + private static final long DEF_REG_SIZE = 6144 * 1024 * 1024L; /** */ private static final String DEF_CACHE_NAME = "some-cache"; @@ -73,15 +77,15 @@ public static Iterable setup() { return Arrays.asList(new Object[][]{ {CacheAtomicityMode.ATOMIC, false}, {CacheAtomicityMode.ATOMIC, true}, -// {CacheAtomicityMode.TRANSACTIONAL, false}, -// {CacheAtomicityMode.TRANSACTIONAL, true}, -// {CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT, false}, -// {CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT, true} + {CacheAtomicityMode.TRANSACTIONAL, false}, + {CacheAtomicityMode.TRANSACTIONAL, true}, + {CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT, false}, + {CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT, true} }); } /** */ - @Parameterized.Parameter(0) + @Parameterized.Parameter() public CacheAtomicityMode cacheAtomicityMode; /** */ @@ -93,7 +97,7 @@ public static Iterable setup() { IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); DataRegionConfiguration def = new 
DataRegionConfiguration(); - def.setInitialSize(3400 * 1024 * 1024L); + def.setInitialSize(DEF_REG_SIZE_INIT); def.setMaxSize(DEF_REG_SIZE); def.setPersistenceEnabled(persistence); @@ -117,8 +121,6 @@ public static Iterable setup() { @Before public void before() throws Exception { cleanPersistenceDir(); - - System.setProperty(IGNITE_DATA_STORAGE_BATCH_PAGE_WRITE, "true"); } /** @@ -129,15 +131,13 @@ public void after() throws Exception { stopAllGrids(); cleanPersistenceDir(); - - System.clearProperty(IGNITE_PDS_WAL_REBALANCE_THRESHOLD); - System.clearProperty(IGNITE_DATA_STORAGE_BATCH_PAGE_WRITE); } /** * */ @Test + @WithSystemProperty(key = IGNITE_DATA_STORAGE_BATCH_PAGE_WRITE, value = "true") public void testBatchRebalance() throws Exception { Ignite node = startGrid(0); @@ -148,16 +148,14 @@ public void testBatchRebalance() throws Exception { node.createCache(ccfg()); int cnt = 100_000; - int minSize = 0; - int maxSize = 2048; - int start = 0; - log.info("Loading " + cnt + " random entries per " + minSize + " - " + maxSize + " bytes."); + int minSize = 0; + int maxSize = 16384; Map srcMap = new HashMap<>(); - for (int i = start; i < start + cnt; i++) { - int size = minSize + ThreadLocalRandom.current().nextInt(maxSize - minSize); + for (int i = 0; i < cnt; i++) { + int size = maxSize == minSize ? maxSize : minSize + ThreadLocalRandom.current().nextInt(maxSize - minSize); byte[] obj = new byte[size]; @@ -168,13 +166,7 @@ public void testBatchRebalance() throws Exception { streamer.addData(srcMap); } - srcMap.put(1, new byte[65536]); - - node.cache(DEF_CACHE_NAME).put(1, new byte[65536]); - - log.info("Done"); - - IgniteCache cache = node.cache(DEF_CACHE_NAME); + log.info("Data loaded."); if (persistence) node.cluster().active(false); @@ -191,16 +183,12 @@ public void testBatchRebalance() throws Exception { node.cluster().setBaselineTopology(list); } - log.info("await rebalance"); + log.info("Await rebalance."); awaitRebalance(node2, DEF_CACHE_NAME); - U.sleep(2_000); - node.close(); - log.info("Verification on node2"); - validateCacheEntries(node2.cache(DEF_CACHE_NAME), srcMap); if (persistence) { @@ -220,17 +208,17 @@ public void testBatchRebalance() throws Exception { * */ @Test + @WithSystemProperty(key = IGNITE_PDS_WAL_REBALANCE_THRESHOLD, value = "100") + @WithSystemProperty(key = IGNITE_DATA_STORAGE_BATCH_PAGE_WRITE, value = "true") public void testBatchHistoricalRebalance() throws Exception { if (!persistence) return; // TODO https://issues.apache.org/jira/browse/IGNITE-7384 - // http://apache-ignite-developers.2346864.n4.nabble.com/Historical-rebalance-td38380.html + // TODO http://apache-ignite-developers.2346864.n4.nabble.com/Historical-rebalance-td38380.html if (cacheAtomicityMode == CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT) return; - System.setProperty(IGNITE_PDS_WAL_REBALANCE_THRESHOLD, "100"); - Ignite node = startGrids(2); node.cluster().active(true); @@ -244,7 +232,7 @@ public void testBatchHistoricalRebalance() throws Exception { Map srcMap = new HashMap<>(); for (int i = 0; i < cnt; i++) { - byte[] obj = new byte[ThreadLocalRandom.current().nextInt(1024)]; + byte[] obj = new byte[ThreadLocalRandom.current().nextInt(16384)]; srcMap.put(i, obj); } @@ -297,6 +285,7 @@ public void testBatchHistoricalRebalance() throws Exception { /** */ @Test @Ignore + @WithSystemProperty(key = IGNITE_DATA_STORAGE_BATCH_PAGE_WRITE, value = "true") public void checkStreamer() throws Exception { Ignite node = startGrids(4); @@ -342,7 +331,7 @@ private void awaitRebalance(IgniteEx node, 
String name) throws IgniteInterrupted } }, 60_000); - U.sleep(1000); + U.sleep(3000); assertTrue(ok); } @@ -353,20 +342,16 @@ private void awaitRebalance(IgniteEx node, String name) throws IgniteInterrupted */ @SuppressWarnings("unchecked") private void validateCacheEntries(IgniteCache cache, Map map) { - log.info("Cache validation: " + map.size()); + int size = cache.size(); - assertEquals(map.size(), cache.size()); - - for (Map.Entry e : map.entrySet()) { - String idx = "idx=" + e.getKey(); + assertEquals("Cache size mismatch.", map.size(), size); - byte[] bytes = (byte[])cache.get(e.getKey()); + log.info("Validation " + cache.getName() + ", size=" + size); - assertNotNull(idx, bytes); - - assertEquals(idx + ": length not equal", e.getValue().length, bytes.length); + for (Map.Entry e : map.entrySet()) { + String idx = "key=" + e.getKey(); - assertArrayEquals(idx, e.getValue(), bytes); + assertArrayEquals(idx, e.getValue(), (byte[])cache.get(e.getKey())); } } @@ -387,3 +372,5 @@ private CacheConfiguration ccfg(int parts, CacheMode mode) { .setAtomicityMode(cacheAtomicityMode); } } + + From 7f1f3d227e2a771380e2100f131542d097133a4b Mon Sep 17 00:00:00 2001 From: pereslegin-pa Date: Mon, 18 Mar 2019 16:22:12 +0300 Subject: [PATCH 5/7] code cleanup --- .../processors/cache/tree/CacheDataTree.java | 52 ------------------- 1 file changed, 52 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java index 29d7e74dbc63e..9858f433c8437 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java @@ -408,58 +408,6 @@ public CacheDataRowStore rowStore() { if (oper == OperationType.REMOVE) remove(oldRow); } - -// while (cur.next()) { -// T t = cur.get(); -// -// -// } - -// InvokeAll x = new InvokeAll(row, z, c); - -// try { -// for (;;) { -// x.init(); -// -// Result res = invokeDown(x, x.rootId, 0L, 0L, x.rootLvl); -// -// switch (res) { -// case RETRY: -// case RETRY_ROOT: -// checkInterrupted(); -// -// continue; -// -// default: -// if (!x.isFinished()) { -// res = x.tryFinish(); -// -// if (res == RETRY || res == RETRY_ROOT) { -// checkInterrupted(); -// -// continue; -// } -// -// assert x.isFinished(): res; -// } -// -// return; -// } -// } -// } -// catch (UnregisteredClassException | UnregisteredBinaryTypeException e) { -// throw e; -// } -// catch (IgniteCheckedException e) { -// throw new IgniteCheckedException("Runtime failure on search row: " + row, e); -// } -// catch (RuntimeException | AssertionError e) { -// throw new CorruptedTreeException("Runtime failure on search row: " + row, e); -// } -// finally { -// x.releaseAll(); -// checkDestroyed(); -// } } /** {@inheritDoc} */ From b85eeeb68c1c917d99b425bc22db13fcaaf02f19 Mon Sep 17 00:00:00 2001 From: pereslegin-pa Date: Mon, 18 Mar 2019 16:53:16 +0300 Subject: [PATCH 6/7] cleanup - removed attempt for data streamer support. 
--- .../processors/cache/BatchedCacheEntries.java | 131 +++++------ .../cache/IgniteCacheOffheapManager.java | 2 - .../cache/IgniteCacheOffheapManagerImpl.java | 22 +- .../preloader/GridDhtPartitionDemander.java | 2 +- .../datastreamer/DataStreamerImpl.java | 207 +----------------- .../FreeListPreloadWithBatchUpdatesTest.java | 36 +-- 6 files changed, 84 insertions(+), 316 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/BatchedCacheEntries.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/BatchedCacheEntries.java index e0e5aad22ef6e..cb9b399f85f54 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/BatchedCacheEntries.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/BatchedCacheEntries.java @@ -43,23 +43,19 @@ import static org.apache.ignite.internal.util.IgniteTree.OperationType.NOOP; import static org.apache.ignite.internal.util.IgniteTree.OperationType.PUT; import static org.apache.ignite.internal.util.IgniteTree.OperationType.REMOVE; -//import static org.apache.ignite.internal.processors.diag.DiagnosticTopics.PRELOAD_TREE_FINISH_UPDATE1; /** * Batch of cache entries to optimize page memory processing. */ public class BatchedCacheEntries { /** */ -// private final int partId; - - /** */ private final GridDhtLocalPartition part; /** */ private final GridCacheContext cctx; /** */ - private final LinkedHashMap infos = new LinkedHashMap<>(); + private final LinkedHashMap infos = new LinkedHashMap<>(); /** */ private final AffinityTopologyVersion topVer; @@ -84,7 +80,7 @@ public BatchedCacheEntries(AffinityTopologyVersion topVer, int partId, GridCache /** */ public void addEntry(KeyCacheObject key, CacheObject val, long expTime, long ttl, GridCacheVersion ver, GridDrType drType) { // todo remove `key` duplication (Map keys() { } /** */ - public Collection values() { + public Collection values() { return infos.values(); } - /** */ -// public int part() { -// return partId; -// } - /** */ public GridDhtLocalPartition part() { return part; @@ -113,7 +104,7 @@ public GridCacheContext context() { } /** */ - public BatchedCacheMapEntryInfo get(KeyCacheObject key) { + public CacheMapEntryInfo get(KeyCacheObject key) { return infos.get(key); } @@ -123,62 +114,45 @@ public boolean preload() { } /** */ - public boolean needUpdate(KeyCacheObject key, CacheDataRow row) throws GridCacheEntryRemovedException { - BatchedCacheMapEntryInfo info = infos.get(key); - - return info.needUpdate(row); - } - public void onRemove(KeyCacheObject key) { // todo - remove from original collection ++skipped; } + /** */ public void onError(KeyCacheObject key, IgniteCheckedException e) { // todo - remove from original collection ++skipped; } + /** */ public boolean skip(KeyCacheObject key) { // todo return false; } + /** */ public List lock() { - entries = lockEntries(infos.values(), topVer); - - return entries; + return entries = lockEntries(infos.values(), topVer); } + /** */ public void unlock() { unlockEntries(infos.values(), topVer); } + /** */ public int size() { return infos.size() - skipped; } - private List lockEntries(Collection list, AffinityTopologyVersion topVer) + /** */ + private List lockEntries(Collection list, AffinityTopologyVersion topVer) throws GridDhtInvalidPartitionException { -// if (req.size() == 1) { -// KeyCacheObject key = req.key(0); -// -// while (true) { -// GridDhtCacheEntry entry = entryExx(key, topVer); -// -// entry.lockEntry(); -// -// if 
(entry.obsolete()) -// entry.unlockEntry(); -// else -// return Collections.singletonList(entry); -// } -// } -// else { List locked = new ArrayList<>(list.size()); while (true) { - for (BatchedCacheMapEntryInfo info : list) { + for (CacheMapEntryInfo info : list) { GridDhtCacheEntry entry = (GridDhtCacheEntry)cctx.cache().entryEx(info.key(), topVer); locked.add(entry); @@ -219,7 +193,6 @@ private List lockEntries(Collection if (!retry) return locked; } -// } } /** @@ -229,7 +202,7 @@ private List lockEntries(Collection * @param locked Locked entries. * @param topVer Topology version. */ - private void unlockEntries(Collection locked, AffinityTopologyVersion topVer) { + private void unlockEntries(Collection locked, AffinityTopologyVersion topVer) { // Process deleted entries before locks release. assert cctx.deferredDelete() : this; @@ -241,7 +214,7 @@ private void unlockEntries(Collection locked, Affinity int size = locked.size(); try { - for (BatchedCacheMapEntryInfo info : locked) { + for (CacheMapEntryInfo info : locked) { GridCacheMapEntry entry = info.cacheEntry(); if (entry != null && entry.deleted()) { @@ -262,7 +235,7 @@ private void unlockEntries(Collection locked, Affinity // At least RuntimeException can be thrown by the code above when GridCacheContext is cleaned and there is // an attempt to use cleaned resources. // That's why releasing locks in the finally block.. - for (BatchedCacheMapEntryInfo info : locked) { + for (CacheMapEntryInfo info : locked) { GridCacheMapEntry entry = info.cacheEntry(); if (entry != null) entry.unlockEntry(); @@ -270,7 +243,7 @@ private void unlockEntries(Collection locked, Affinity } // Try evict partitions. - for (BatchedCacheMapEntryInfo info : locked) { + for (CacheMapEntryInfo info : locked) { GridDhtCacheEntry entry = info.cacheEntry(); if (entry != null) entry.onUnlock(); @@ -282,7 +255,7 @@ private void unlockEntries(Collection locked, Affinity // Must touch all entries since update may have deleted entries. // Eviction manager will remove empty entries. - for (BatchedCacheMapEntryInfo info : locked) { + for (CacheMapEntryInfo info : locked) { GridCacheMapEntry entry = info.cacheEntry(); if (entry != null && (skip == null || !skip.contains(entry.key()))) entry.touch(); @@ -290,12 +263,12 @@ private void unlockEntries(Collection locked, Affinity } /** */ - public class UpdateClosure implements IgniteCacheOffheapManager.OffheapInvokeAllClosure { + public class BatchUpdateClosure implements IgniteCacheOffheapManager.OffheapInvokeAllClosure { /** */ private final List> resBatch = new ArrayList<>(entries.size()); /** */ - private final int cacheId = context().group().storeCacheIdInDataPage() ? cctx.cacheId() : CU.UNDEFINED_CACHE_ID; + private final int cacheId = context().group().storeCacheIdInDataPage() ? 
context().cacheId() : CU.UNDEFINED_CACHE_ID; /** */ private final int partId = part().id(); @@ -309,7 +282,7 @@ public class UpdateClosure implements IgniteCacheOffheapManager.OffheapInvokeAll KeyCacheObject key = t2.get2().key(); - BatchedCacheMapEntryInfo newRowInfo = get(key); + CacheMapEntryInfo newRowInfo = get(key); try { if (newRowInfo.needUpdate(oldRow)) { @@ -321,7 +294,7 @@ public class UpdateClosure implements IgniteCacheOffheapManager.OffheapInvokeAll if (oldRow != null) { // todo think about batch updates newRow = context().offheap().dataStore(part()).createRow( - cctx, + context(), key, newRowInfo.value(), newRowInfo.version(), @@ -329,7 +302,7 @@ public class UpdateClosure implements IgniteCacheOffheapManager.OffheapInvokeAll oldRow); } else { - CacheObjectContext coCtx = cctx.cacheObjectContext(); + CacheObjectContext coCtx = context().cacheObjectContext(); // todo why we need this val.valueBytes(coCtx); key.valueBytes(coCtx); @@ -364,30 +337,48 @@ public class UpdateClosure implements IgniteCacheOffheapManager.OffheapInvokeAll context().offheap().dataStore(part()).rowStore().addRows(newRows, cctx.group().statisticsHolderData()); } + /** {@inheritDoc} */ @Override public Collection> result() { return resBatch; } + /** {@inheritDoc} */ @Override public boolean apply(CacheDataRow row) { return false; } } - public static class BatchedCacheMapEntryInfo { - // todo think about remove + /** */ + public static class CacheMapEntryInfo { + /** todo think about remove */ private final BatchedCacheEntries batch; + + /** */ private final KeyCacheObject key; + + /** */ private final CacheObject val; + + /** */ private final long expTime; + + /** */ private final long ttl; + + /** */ private final GridCacheVersion ver; + + /** */ private final GridDrType drType; + /** */ private GridDhtCacheEntry entry; + /** */ private boolean update; - public BatchedCacheMapEntryInfo( + /** */ + public CacheMapEntryInfo( BatchedCacheEntries batch, KeyCacheObject key, CacheObject val, @@ -405,30 +396,49 @@ public BatchedCacheMapEntryInfo( this.ttl = ttl; } + /** + * @return Key. + */ public KeyCacheObject key() { return key; } + /** + * @return Version. + */ public GridCacheVersion version() { return ver; } + /** + * @return Value. + */ public CacheObject value() { return val; } + /** + * @return Expire time. + */ public long expireTime() { return expTime; } - public GridDhtCacheEntry cacheEntry() { - return entry; - } - + /** + * @param entry Cache entry. + */ public void cacheEntry(GridDhtCacheEntry entry) { this.entry = entry; } + /** + * @return Cache entry. + */ + public GridDhtCacheEntry cacheEntry() { + return entry; + } + + /** */ public void updateCacheEntry() throws IgniteCheckedException { if (!update) return; @@ -436,10 +446,7 @@ public void updateCacheEntry() throws IgniteCheckedException { entry.finishPreload(val, expTime, ttl, ver, batch.topVer, drType, null, batch.preload); } -// public void update(boolean update) { -// this.update = update; -// } - + /** */ public boolean needUpdate(CacheDataRow row) throws GridCacheEntryRemovedException { GridCacheVersion currVer = row != null ? 
row.version() : entry.version(); @@ -462,7 +469,7 @@ public boolean needUpdate(CacheDataRow row) throws GridCacheEntryRemovedExceptio else update0 = (isStartVer && row == null); - // todo update0 |= (!preload && deletedUnlocked()); + update0 |= (!batch.preload() && entry.deletedUnlocked()); update = update0; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManager.java index ac529633edb1d..c9dace3ae682d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManager.java @@ -35,7 +35,6 @@ import org.apache.ignite.internal.processors.cache.persistence.partstate.GroupPartitionId; import org.apache.ignite.internal.processors.cache.persistence.partstate.PartitionRecoverState; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; -import org.apache.ignite.internal.processors.cache.tree.DataRow; import org.apache.ignite.internal.processors.cache.tree.PendingEntriesTree; import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccUpdateResult; import org.apache.ignite.internal.processors.cache.tree.mvcc.search.MvccLinkAwareSearchRow; @@ -50,7 +49,6 @@ import org.apache.ignite.internal.util.lang.IgniteInClosure2X; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.lang.IgnitePredicate; -import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java index 31d5782c5e27d..94ff0ec4186a4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java @@ -98,7 +98,7 @@ import org.apache.ignite.internal.util.GridLongList; import org.apache.ignite.internal.util.GridSpinBusyLock; import org.apache.ignite.internal.util.GridStripedLock; -import org.apache.ignite.internal.util.IgniteTree.OperationType; +import org.apache.ignite.internal.util.IgniteTree; import org.apache.ignite.internal.util.lang.GridCloseableIterator; import org.apache.ignite.internal.util.lang.GridCursor; import org.apache.ignite.internal.util.lang.GridIterator; @@ -134,6 +134,8 @@ import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.unexpectedStateException; import static org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager.EMPTY_CURSOR; import static org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO.MVCC_INFO_SIZE; +import static org.apache.ignite.internal.util.IgniteTree.OperationType.NOOP; +import static org.apache.ignite.internal.util.IgniteTree.OperationType.PUT; /** * @@ -1708,8 +1710,8 @@ private void invokeAll0(GridCacheContext cctx, List rows, Offhea dataTree.invokeAll(rows, CacheDataRowAdapter.RowData.NO_KEY, c); - for (T3 tuple : c.result()) { - OperationType opType = tuple.get1(); + for (T3 tuple : c.result()) { + IgniteTree.OperationType opType = tuple.get1(); CacheDataRow oldRow = tuple.get2(); CacheDataRow newRow = tuple.get3(); @@ -1971,7 +1973,7 @@ private void 
invokeAll0(GridCacheContext cctx, List rows, Offhea invoke0(cctx, clo, clo); - return clo.operationType() == OperationType.PUT; + return clo.operationType() == PUT; } finally { busyLock.leaveBusy(); @@ -3087,7 +3089,7 @@ private class MvccUpdateRowWithPreloadInfoClosure extends MvccDataRow implements /** */ private CacheDataRow oldRow; /** */ - private OperationType op; + private IgniteTree.OperationType op; /** * @param cctx Cache context. @@ -3132,7 +3134,7 @@ private class MvccUpdateRowWithPreloadInfoClosure extends MvccDataRow implements this.oldRow = oldRow; if (oldRow == null) { - op = OperationType.PUT; + op = PUT; int cacheId = cacheId(); @@ -3144,7 +3146,7 @@ private class MvccUpdateRowWithPreloadInfoClosure extends MvccDataRow implements cacheId(cacheId); } else { - op = OperationType.NOOP; + op = NOOP; if (oldRow.mvccTxState() != mvccTxState() || oldRow.newMvccCoordinatorVersion() != newMvccCoordinatorVersion() || @@ -3159,12 +3161,12 @@ private class MvccUpdateRowWithPreloadInfoClosure extends MvccDataRow implements /** {@inheritDoc} */ @Override public CacheDataRow newRow() { - return op == OperationType.PUT ? this : null; + return op == PUT ? this : null; } /** {@inheritDoc} */ - @Override public OperationType operationType() { - return op == null ? OperationType.NOOP : op; + @Override public IgniteTree.OperationType operationType() { + return op == null ? NOOP : op; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java index bb1371fe932a8..7800276738676 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java @@ -958,7 +958,7 @@ public void preloadEntriesBatch(ClusterNode from, batch.lock(); try { - cctx.offheap().invokeAll(cctx, batch.keys(), batch.part(), batch.new UpdateClosure()); + cctx.offheap().invokeAll(cctx, batch.keys(), batch.part(), batch.new BatchUpdateClosure()); } finally { batch.unlock(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImpl.java index 8d7f7db27e4a8..5e3a0c825ffc3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImpl.java @@ -53,7 +53,6 @@ import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteInterruptedException; import org.apache.ignite.IgniteLogger; -import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.cache.CacheMode; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.cluster.ClusterTopologyException; @@ -74,7 +73,6 @@ import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.affinity.GridAffinityProcessor; -import org.apache.ignite.internal.processors.cache.BatchedCacheEntries; import org.apache.ignite.internal.processors.cache.CacheObject; import 
org.apache.ignite.internal.processors.cache.CacheObjectContext; import org.apache.ignite.internal.processors.cache.DynamicCacheDescriptor; @@ -131,10 +129,6 @@ public class DataStreamerImpl implements IgniteDataStreamer, Delayed /** Per thread buffer size. */ private int bufLdrSzPerThread = DFLT_PER_THREAD_BUFFER_SIZE; - /** */ - private static final boolean batchPageWriteEnabled = - IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_DATA_STORAGE_BATCH_PAGE_WRITE, false); - /** * Thread buffer map: on each thread there are future and list of entries which will be streamed after filling * thread batch. @@ -142,7 +136,7 @@ public class DataStreamerImpl implements IgniteDataStreamer, Delayed private final Map threadBufMap = new ConcurrentHashMap<>(); /** Isolated receiver. */ - private static final StreamReceiver ISOLATED_UPDATER = new IsolatedUpdater();//batchPageWriteEnabled ? new OptimizedIsolatedUpdater() : new IsolatedUpdater(); + private static final StreamReceiver ISOLATED_UPDATER = new IsolatedUpdater(); /** Amount of permissions should be available to continue new data processing. */ private static final int REMAP_SEMAPHORE_PERMISSIONS_COUNT = Integer.MAX_VALUE; @@ -2338,205 +2332,6 @@ else if (ttl == CU.TTL_NOT_CHANGED) } } - /** - * Isolated batch receiver which only loads entry initial value. - * - * todo - */ - protected static class OptimizedIsolatedUpdater extends IsolatedUpdater { - /** */ - private static final long serialVersionUID = 0L; - - /** {@inheritDoc} */ - @Override public void receive( - IgniteCache cache, - Collection> entries - ) { - IgniteCacheProxy proxy = (IgniteCacheProxy)cache; - - GridCacheAdapter internalCache = proxy.context().cache(); - - if (internalCache.context().mvccEnabled() || internalCache.isNear() || internalCache.context().isLocal() || entries.size() < 10) { // todo threshold - super.receive(cache, entries); - - return; - } - -// if (internalCache.isNear()) -// internalCache = internalCache.context().near().dht(); - - GridCacheContext cctx = internalCache.context(); - - GridDhtTopologyFuture topFut = cctx.shared().exchange().lastFinishedFuture(); - - AffinityTopologyVersion topVer = topFut.topologyVersion(); - - GridCacheVersion ver = cctx.versions().isolatedStreamerVersion(); - - long ttl = CU.TTL_ETERNAL; - long expiryTime = CU.EXPIRE_TIME_ETERNAL; - - ExpiryPolicy plc = cctx.expiry(); - - Collection reservedParts = new HashSet<>(); - Collection ignoredParts = new HashSet<>(); - - Map batchMap = new HashMap<>(); - - try { -// log.info("Received " + entries.size()); - - for (Entry e : entries) { -// cctx.shared().database().checkpointReadLock(); - - try { - e.getKey().finishUnmarshal(cctx.cacheObjectContext(), cctx.deploy().globalLoader()); - - BatchedCacheEntries batch = null; - - if (plc != null) { - ttl = CU.toTtl(plc.getExpiryForCreation()); - - if (ttl == CU.TTL_ZERO) - continue; - else if (ttl == CU.TTL_NOT_CHANGED) - ttl = 0; - - expiryTime = CU.toExpireTime(ttl); - } - - // todo kill duplication - int p = cctx.affinity().partition(e.getKey()); - - if (ignoredParts.contains(p)) - continue; - - if (!reservedParts.contains(p)) { - GridDhtLocalPartition part = cctx.topology().localPartition(p, topVer, true); - - if (!part.reserve()) { - ignoredParts.add(p); - - continue; - } - else { - // We must not allow to read from RENTING partitions. 
- if (part.state() == GridDhtPartitionState.RENTING) { - part.release(); - - ignoredParts.add(p); - - continue; - } - - reservedParts.add(p); - } - } - - /// - batch = batchMap.computeIfAbsent(p, v -> new BatchedCacheEntries(topVer, p, cctx, false)); - - boolean primary = cctx.affinity().primaryByKey(cctx.localNode(), e.getKey(), topVer); - - batch.addEntry(e.getKey(), e.getValue(), expiryTime, ttl, ver, primary ? GridDrType.DR_LOAD : GridDrType.DR_PRELOAD); - - -// if (topFut != null) { -// Throwable err = topFut.validateCache(cctx, false, false, entry.key(), null); -// -// if (err != null) -// throw new IgniteCheckedException(err); -// } - -// boolean primary = cctx.affinity().primaryByKey(cctx.localNode(), entry.key(), topVer); -// -// entry.initialValue(e.getValue(), -// ver, -// ttl, -// expiryTime, -// false, -// topVer, -// primary ? GridDrType.DR_LOAD : GridDrType.DR_PRELOAD, -// false); -// -// entry.touch(topVer); -// -// CU.unwindEvicts(cctx); -// -// entry.onUnlock(); -// } - } - catch (GridDhtInvalidPartitionException ignored) { - ignoredParts.add(cctx.affinity().partition(e.getKey())); - } -// catch (GridCacheEntryRemovedException ignored) { -// // No-op. -// } - catch (IgniteCheckedException ex) { - IgniteLogger log = cache.unwrap(Ignite.class).log(); - - U.error(log, "Failed to set initial value for cache entry: " + e, ex); - - throw new IgniteException("Failed to set initial value for cache entry.", ex); - } -// finally { -//// cctx.shared().database().checkpointReadUnlock(); -// } - } - - cctx.shared().database().checkpointReadLock(); - - try { - for (BatchedCacheEntries b : batchMap.values()) { - b.lock(); - try { - // todo topFut.validateCache - - cctx.offheap().invokeAll(b.context(), b.keys(), b.part(), b.new UpdateClosure()); - //cctx.offheap().updateBatch(batch); - - - } finally { - b.unlock(); - } - } - } - catch (IgniteCheckedException e) { - // todo handle exceptions properly - IgniteLogger log = cache.unwrap(Ignite.class).log(); - - U.error(log, "Failed to set initial value for cache entry.", e); - - throw new IgniteException("Failed to set initial value for cache entry.", e); - } - finally { - cctx.shared().database().checkpointReadUnlock(); - } - - } - finally { - for (Integer part : reservedParts) { - GridDhtLocalPartition locPart = cctx.topology().localPartition(part, topVer, false); - - assert locPart != null : "Evicted reserved partition: " + locPart; - - locPart.release(); - } - - try { - if (!cctx.isNear() && cctx.shared().wal() != null) - cctx.shared().wal().flush(null, false); - } - catch (IgniteCheckedException e) { - U.error(log, "Failed to write preloaded entries into write-ahead log.", e); - - throw new IgniteException("Failed to write preloaded entries into write-ahead log.", e); - } - } - } - } - - /** * Key object wrapper. Using identity equals prevents slow down in case of hash code collision. 
*/ diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/FreeListPreloadWithBatchUpdatesTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/FreeListPreloadWithBatchUpdatesTest.java index 794fdbe079ed4..2458cfd375d6e 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/FreeListPreloadWithBatchUpdatesTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/FreeListPreloadWithBatchUpdatesTest.java @@ -45,7 +45,6 @@ import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.junit.After; import org.junit.Before; -import org.junit.Ignore; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @@ -282,39 +281,6 @@ public void testBatchHistoricalRebalance() throws Exception { validateCacheEntries(node2.cache(DEF_CACHE_NAME), srcMap); } - /** */ - @Test - @Ignore - @WithSystemProperty(key = IGNITE_DATA_STORAGE_BATCH_PAGE_WRITE, value = "true") - public void checkStreamer() throws Exception { - Ignite node = startGrids(4); - - node.cluster().active(true); - - IgniteCache cache = node.createCache(ccfg(8, CacheMode.REPLICATED)); - - awaitPartitionMapExchange(); - - int cnt = 1024; - - try (IgniteDataStreamer streamer = node.dataStreamer(DEF_CACHE_NAME)) { - - for (int i = 0; i < cnt; i++) - streamer.addData(String.valueOf(i), new byte[128]); - } - - log.info("Sleep"); - - U.sleep(5_000); - - assert GridTestUtils.waitForCondition(() -> { - return cache.size() == cnt; - }, 10_000); - - for (int i = 0; i < cnt; i++) - assertTrue(cache.get(String.valueOf(i)).length == 128); - } - /** * @param node Ignite node. * @param name Cache name. @@ -351,7 +317,7 @@ private void validateCacheEntries(IgniteCache cache, Map map) { for (Map.Entry e : map.entrySet()) { String idx = "key=" + e.getKey(); - assertArrayEquals(idx, e.getValue(), (byte[])cache.get(e.getKey())); + assertEquals(idx, e.getValue().length, ((byte[])cache.get(e.getKey())).length); } } From bd8225df47839b541e56f35303f7afaae6e64586 Mon Sep 17 00:00:00 2001 From: pereslegin-pa Date: Mon, 18 Mar 2019 19:43:27 +0300 Subject: [PATCH 7/7] cleanup --- .../processors/cache/BatchedCacheEntries.java | 2 +- .../processors/cache/GridCacheEntryEx.java | 12 ---------- .../processors/cache/GridCacheMapEntry.java | 23 ++++++++++++++----- .../cache/IgniteCacheOffheapManagerImpl.java | 2 -- .../preloader/GridDhtPartitionDemander.java | 4 ++-- .../freelist/AbstractFreeList.java | 11 ++------- .../cache/persistence/freelist/FreeList.java | 3 --- .../cache/persistence/tree/BPlusTree.java | 4 ++-- .../processors/cache/tree/CacheDataTree.java | 23 +++++++++++-------- .../cache/GridCacheTestEntryEx.java | 6 ----- 10 files changed, 37 insertions(+), 53 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/BatchedCacheEntries.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/BatchedCacheEntries.java index cb9b399f85f54..b8b2e3590922f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/BatchedCacheEntries.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/BatchedCacheEntries.java @@ -443,7 +443,7 @@ public void updateCacheEntry() throws IgniteCheckedException { if (!update) return; - entry.finishPreload(val, expTime, ttl, ver, batch.topVer, drType, null, batch.preload); + entry.finishInitialUpdate(val, expTime, ttl, ver, batch.topVer, drType, 
null, batch.preload); } /** */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryEx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryEx.java index cf8eef32c3f5a..9aec3996c3204 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryEx.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryEx.java @@ -805,18 +805,6 @@ public boolean initialValue(CacheObject val, GridDrType drType, boolean fromStore) throws IgniteCheckedException, GridCacheEntryRemovedException; - - public void finishPreload( - @Nullable CacheObject val, - long expTime, - long ttl, - GridCacheVersion ver, - AffinityTopologyVersion topVer, - GridDrType drType, - MvccVersion mvccVer, - boolean preload - ) throws IgniteCheckedException; - /** * Create versioned entry for this cache entry. * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java index 87d31502a188a..b15da6b8db03f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java @@ -3504,10 +3504,21 @@ else if (deletedUnlocked()) } } - /** {@inheritDoc} */ - @Override public void finishPreload( + /** + * todo explain this and remove code duplication + * @param val New value. + * @param expireTime Expiration time. + * @param ttl Time to live. + * @param ver Version to use. + * @param topVer Topology version. + * @param drType DR type. + * @param mvccVer Mvcc version. + * @param preload Flag indicating whether entry is being preloaded. + * @throws IgniteCheckedException In case of error. + */ + protected void finishInitialUpdate( @Nullable CacheObject val, - long expTime, + long expireTime, long ttl, GridCacheVersion ver, AffinityTopologyVersion topVer, @@ -3518,7 +3529,7 @@ else if (deletedUnlocked()) boolean fromStore = false; boolean walEnabled = !cctx.isNear() && cctx.group().persistenceEnabled() && cctx.group().walEnabled(); - update(val, expTime, ttl, ver, true); + update(val, expireTime, ttl, ver, true); boolean skipQryNtf = false; @@ -3545,7 +3556,7 @@ else if (deletedUnlocked()) val == null ? DELETE : GridCacheOperation.CREATE, null, ver, - expTime, + expireTime, partition(), updateCntr, mvccVer == null ? MvccUtils.INITIAL_VERSION : mvccVer @@ -3558,7 +3569,7 @@ else if (deletedUnlocked()) val == null ? DELETE : GridCacheOperation.CREATE, null, ver, - expTime, + expireTime, partition(), updateCntr ))); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java index 94ff0ec4186a4..4bd50750c722f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java @@ -1601,8 +1601,6 @@ void decrementSize(int cacheId) { * @param dataRow New row. * @return {@code True} if it is possible to update old row data. * @throws IgniteCheckedException If failed. 
- * - * todo think about this meth */ private boolean canUpdateOldRow(GridCacheContext cctx, @Nullable CacheDataRow oldRow, DataRow dataRow) throws IgniteCheckedException { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java index 7800276738676..95aae574a06be 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java @@ -474,8 +474,8 @@ private void requestPartitions(final RebalanceFuture fut, GridDhtPreloaderAssign parts = fut.remaining.get(node.id()); U.log(log, "Prepared rebalancing [grp=" + grp.cacheOrGroupName() - + ", mode=" + cfg.getRebalanceMode() + ", supplier=" + node.id() + ", partitionsCount=" + parts.size() - + ", topVer=" + fut.topologyVersion() + ", parallelism=" + totalStripes + "]"); + + ", mode=" + cfg.getRebalanceMode() + ", supplier=" + node.id() + ", partitionsCount=" + parts.size() + + ", topVer=" + fut.topologyVersion() + ", parallelism=" + totalStripes + "]"); } int stripes = totalStripes; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java index 0828d028207e2..7a3fefc6010f0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java @@ -537,14 +537,7 @@ public long freeSpace() { log.info("FreeList [name=" + name + ", buckets=" + BUCKETS + ", dataPages=" + dataPages + - ", reusePages=" + bucketsSize[REUSE_BUCKET].longValue() + "" + - ", bucket[0]=" + bucketsSize[0] + - ", bucket[1]=" + bucketsSize[1] + - ", bucket[2]=" + bucketsSize[2] + - ", bucket[3]=" + bucketsSize[3] + - ", bucket[4]=" + bucketsSize[4] + - ", bucket[5]=" + bucketsSize[5] + - "]"); + ", reusePages=" + bucketsSize[REUSE_BUCKET].longValue() + "]"); } } @@ -837,7 +830,7 @@ private long initReusedPage(long reusedPageId, int partId, long nextLink = write(pageId, rmvRow, bag, itemId, FAIL_L, statHolder); - assert nextLink != FAIL_L : pageId; // Can't fail here. + assert nextLink != FAIL_L; // Can't fail here. 
while (nextLink != 0L) { memMetrics.decrementLargeEntriesPages(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/FreeList.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/FreeList.java index f49addab848aa..894c1aa64faca 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/FreeList.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/FreeList.java @@ -22,10 +22,7 @@ import org.apache.ignite.IgniteLogger; import org.apache.ignite.internal.processors.cache.persistence.Storable; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler; -import org.apache.ignite.internal.processors.cache.tree.DataRow; import org.apache.ignite.internal.stat.IoStatisticsHolder; -import org.apache.ignite.lang.IgniteClosure; -import org.apache.ignite.lang.IgniteInClosure; /** */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java index e5bebf3feee80..7ab4dbe5345aa 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java @@ -1822,8 +1822,8 @@ public final boolean removex(L row) throws IgniteCheckedException { } } - @Override public void invokeAll(List rows, Object z, InvokeAllClosure c) throws IgniteCheckedException { - // todo + /** {@inheritDoc} */ + @Override public void invokeAll(List keys, Object z, InvokeAllClosure c) throws IgniteCheckedException { throw new UnsupportedOperationException("Not implemented yet"); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java index 9858f433c8437..7546f0569c3dc 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java @@ -332,23 +332,28 @@ public CacheDataRowStore rowStore() { return rowStore; } - /** {@inheritDoc} */ - @Override public void invokeAll(List rows, Object z1, InvokeAllClosure c) throws IgniteCheckedException { + /** + * todo fake implementation only for checking that closure is working properly with preloader. + * @param keys Keys. + * @param x Implementation specific argument, {@code null} always means that we need a full detached data row. + * @param c Closure. + * @throws IgniteCheckedException If failed. 
+ */ + @Override public void invokeAll(List keys, Object x, InvokeAllClosure c) throws IgniteCheckedException { checkDestroyed(); - int cnt = rows.size(); + int cnt = keys.size(); assert cnt > 0 : cnt; - // todo No algorithm this is draft implementation only for check that closure is working properly - CacheSearchRow lower = rows.get(0); - CacheSearchRow upper = rows.get(cnt - 1); + CacheSearchRow lower = keys.get(0); + CacheSearchRow upper = keys.get(cnt - 1); List> batch = new ArrayList<>(cnt); - Iterator rowItr = rows.iterator(); + Iterator rowItr = keys.iterator(); - assert lower.key().hashCode() <= upper.key().hashCode() : "lower=" + lower.key().hashCode() + ", upper=" + upper.key().hashCode(); + assert lower.key().hashCode() <= upper.key().hashCode() : "Keys must be lower=" + lower.key().hashCode() + ", upper=" + upper.key().hashCode(); GridCursor cur = find(lower, upper, CacheDataRowAdapter.RowData.FULL); @@ -393,10 +398,8 @@ public CacheDataRowStore rowStore() { while (rowItr.hasNext()) batch.add(new T2<>(null, rowItr.next())); - // todo call on insertion point c.call(batch); - // todo for (T3 t3 : c.result()) { OperationType oper = t3.get1(); CacheDataRow oldRow = t3.get2(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheTestEntryEx.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheTestEntryEx.java index 6512649565235..d6e59af335283 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheTestEntryEx.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheTestEntryEx.java @@ -718,12 +718,6 @@ void recheckLock() { return false; } - /** @inheritDoc */ - @Override public void finishPreload(@Nullable CacheObject val, long expTime, long ttl, GridCacheVersion ver, - AffinityTopologyVersion topVer, GridDrType drType, MvccVersion mvccVer, boolean preload) { - assert false; - } - /** @inheritDoc */ @Override public GridCacheVersionedEntryEx versionedEntry(final boolean keepBinary) throws IgniteCheckedException { return null;
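
Editor's note: the hunks above wire up a batched write path — the demander (and, before the cleanup commit, the streamer) groups keys per partition, takes the batch lock, and hands the whole group to offheap().invokeAll(...), whose admittedly fake CacheDataTree implementation pairs each key with the row already stored and lets the closure decide, entry by entry, whether to write. The standalone sketch below models only that contract (pair old row with candidate row, run one closure over the batch, apply PUT results) with a TreeMap standing in for the B+ tree. Every name in it — BatchInvokeSketch, BatchClosure, Row, Op — is made up for illustration and is not Ignite's API, and the version check in the second call only approximates what needUpdate() does in the patch.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

/**
 * Standalone model of the batch "invokeAll" pattern: pair each incoming key with the
 * row already stored, let a closure decide PUT or NOOP per pair, then apply the results.
 * All names are hypothetical; this is not Ignite's API.
 */
public class BatchInvokeSketch {
    /** Per-entry decision made by the closure. */
    enum Op { PUT, NOOP }

    /** Versioned value standing in for a cache data row. */
    static final class Row {
        final byte[] val;
        final long ver;

        Row(byte[] val, long ver) {
            this.val = val;
            this.ver = ver;
        }
    }

    /** One element of the batch handed to the closure: old row (or null) plus the candidate row. */
    static final class BatchEntry {
        final String key;
        final Row oldRow;   // Row currently in the store, null if the key is absent.
        final Row newRow;   // Candidate row from the preloader/streamer.
        Op op = Op.NOOP;

        BatchEntry(String key, Row oldRow, Row newRow) {
            this.key = key;
            this.oldRow = oldRow;
            this.newRow = newRow;
        }
    }

    /** Closure invoked once for the whole batch. */
    interface BatchClosure {
        void call(List<BatchEntry> batch);
    }

    /** Sorted store standing in for the B+ tree / data pages. */
    private final TreeMap<String, Row> store = new TreeMap<>();

    /** Pairs incoming rows with existing ones, runs the closure, then applies PUT decisions. */
    void invokeAll(TreeMap<String, Row> incoming, BatchClosure c) {
        if (incoming.isEmpty())
            return;

        List<BatchEntry> batch = new ArrayList<>(incoming.size());

        for (Map.Entry<String, Row> e : incoming.entrySet())
            batch.add(new BatchEntry(e.getKey(), store.get(e.getKey()), e.getValue()));

        c.call(batch);

        for (BatchEntry e : batch) {
            if (e.op == Op.PUT)
                store.put(e.key, e.newRow);
        }
    }

    public static void main(String[] args) {
        BatchInvokeSketch tree = new BatchInvokeSketch();

        TreeMap<String, Row> first = new TreeMap<>();
        first.put("k1", new Row(new byte[] {1}, 5));

        tree.invokeAll(first, b -> {
            for (BatchEntry e : b)
                e.op = Op.PUT;   // Empty store: everything is a fresh insert.
        });

        TreeMap<String, Row> second = new TreeMap<>();
        second.put("k1", new Row(new byte[] {9}, 3));   // Older version, should be skipped.
        second.put("k2", new Row(new byte[] {2}, 1));   // New key, should be written.

        // Roughly mirrors needUpdate(): write when the key is absent or the incoming version is newer.
        tree.invokeAll(second, b -> {
            for (BatchEntry e : b)
                e.op = (e.oldRow == null || e.newRow.ver > e.oldRow.ver) ? Op.PUT : Op.NOOP;
        });

        System.out.println("k1 version = " + tree.store.get("k1").ver);   // 5 (old row kept)
        System.out.println("entries    = " + tree.store.size());          // 2
    }
}

The value of the contract, which the patch only stubs out so far, is that the existing rows for a whole batch can be located in one tree pass and the resulting page writes grouped afterwards, instead of performing one tree lookup and one data page write per preloaded entry.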