diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
index fdd0faefa71c..b0f2e9c83df4 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
@@ -29,6 +29,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
+
 /**
  * Class for determining the "size" of a class, an attempt to calculate the
  * actual bytes that an object of this class will occupy in memory
@@ -83,6 +84,18 @@ public class ClassSize {
   /** Overhead for ConcurrentSkipListMap Entry */
   public static final int CONCURRENT_SKIPLISTMAP_ENTRY;
 
+  /** Overhead for CellArrayMap */
+  public static final int CELL_ARRAY_MAP;
+
+  /** Overhead for CellChunkMap */
+  public static final int CELL_CHUNK_MAP;
+
+  /** Overhead for CellArrayMap Entry */
+  public static final int CELL_ARRAY_MAP_ENTRY;
+
+  /** Overhead for CellChunkMap Entry */
+  public static final int CELL_CHUNK_MAP_ENTRY;
+
   /** Overhead for ReentrantReadWriteLock */
   public static final int REENTRANT_LOCK;
 
@@ -108,7 +121,7 @@ public class ClassSize {
   public static final int TIMERANGE_TRACKER;
 
   /** Overhead for CellSkipListSet */
-  public static final int CELL_SKIPLIST_SET;
+  public static final int CELL_SET;
 
   public static final int STORE_SERVICES;
 
@@ -174,10 +187,20 @@ public class ClassSize {
     // The size changes from jdk7 to jdk8, estimate the size rather than use a conditional
     CONCURRENT_SKIPLISTMAP = (int) estimateBase(ConcurrentSkipListMap.class, false);
 
+    CELL_ARRAY_MAP = align(2*OBJECT + Bytes.SIZEOF_LONG + Bytes.SIZEOF_BOOLEAN
+        + 2*Bytes.SIZEOF_INT + REFERENCE);
+
+    CELL_CHUNK_MAP = align(2*OBJECT + Bytes.SIZEOF_LONG + Bytes.SIZEOF_BOOLEAN
+        + 4*Bytes.SIZEOF_INT + 2*REFERENCE);
+
     CONCURRENT_SKIPLISTMAP_ENTRY = align(
         align(OBJECT + (3 * REFERENCE)) + /* one node per entry */
         align((OBJECT + (3 * REFERENCE))/2)); /* one index per two entries */
 
+    CELL_ARRAY_MAP_ENTRY = align(OBJECT + 2*REFERENCE + 2*Bytes.SIZEOF_INT);
+
+    CELL_CHUNK_MAP_ENTRY = align(3*Bytes.SIZEOF_INT);
+
     REENTRANT_LOCK = align(OBJECT + (3 * REFERENCE));
 
     ATOMIC_LONG = align(OBJECT + Bytes.SIZEOF_LONG);
@@ -194,7 +217,7 @@ public class ClassSize {
 
     TIMERANGE_TRACKER = align(ClassSize.OBJECT + Bytes.SIZEOF_LONG * 2);
 
-    CELL_SKIPLIST_SET = align(OBJECT + REFERENCE);
+    CELL_SET = align(OBJECT + REFERENCE);
 
     STORE_SERVICES = align(OBJECT + REFERENCE + ATOMIC_LONG);
   }
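For intuition, the arithmetic behind the two new per-entry constants can be reproduced in isolation. The following stand-alone sketch is illustrative only (not part of the patch): it hard-codes OBJECT=16 and REFERENCE=8, values that ClassSize only estimates at runtime on a typical 64-bit JVM, and mirrors the 8-byte alignment that ClassSize.align() performs.

// Illustrative sketch: per-entry overhead arithmetic of the new constants,
// assuming OBJECT=16 and REFERENCE=8 (typical 64-bit JVM estimates).
public class OverheadSketch {
  // 8-byte alignment, mirroring what ClassSize.align() does
  static int align(int num) {
    return (num + 7) & ~7;
  }

  public static void main(String[] args) {
    final int OBJECT = 16, REFERENCE = 8, SIZEOF_INT = 4;
    // one CellArrayMap entry: Cell object header + 2 references + 2 ints
    int cellArrayMapEntry = align(OBJECT + 2 * REFERENCE + 2 * SIZEOF_INT);
    // one CellChunkMap entry: just 3 ints (chunk id, offset, length)
    int cellChunkMapEntry = align(3 * SIZEOF_INT);
    System.out.println("CELL_ARRAY_MAP_ENTRY ~ " + cellArrayMapEntry); // 40
    System.out.println("CELL_CHUNK_MAP_ENTRY ~ " + cellChunkMapEntry); // 16
  }
}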
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
index 9f94ec0594b6..70b07fa214f0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
@@ -65,14 +65,14 @@ public abstract class AbstractMemStore implements MemStore {
 
   public final static long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD +
       2 * (ClassSize.ATOMIC_LONG + ClassSize.TIMERANGE_TRACKER +
-      ClassSize.CELL_SKIPLIST_SET + ClassSize.CONCURRENT_SKIPLISTMAP));
+      ClassSize.CELL_SET + ClassSize.CONCURRENT_SKIPLISTMAP));
 
   protected AbstractMemStore(final Configuration conf, final CellComparator c) {
     this.conf = conf;
     this.comparator = c;
     resetCellSet();
-    this.snapshot = SegmentFactory.instance().createImmutableSegment(conf, c, 0);
+    this.snapshot = SegmentFactory.instance().createImmutableSegment(c, 0);
     this.snapshotId = NO_SNAPSHOT_ID;
   }
 
@@ -206,7 +206,8 @@ public long heapSize() {
    */
   @Override
   public List<KeyValueScanner> getScanners(long readPt) throws IOException {
-    return Collections.<KeyValueScanner> singletonList(new MemStoreScanner(this, readPt));
+    return Collections.<KeyValueScanner> singletonList(
+        new MemStoreScanner(getComparator(), getListOfScanners(readPt)));
   }
 
   @Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java
new file mode 100644
index 000000000000..ccf1f0794fa7
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java
@@ -0,0 +1,53 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.Comparator;
+import org.apache.hadoop.hbase.Cell;
+
+/**
+ * CellArrayMap is a simple array of Cells and can be allocated only on-heap.
+ * In contrast, CellChunkMap can also be allocated off-heap.
+ * As with all Java arrays, CellArrayMap's array holds references pointing to Cell objects.
+ */
+public class CellArrayMap extends CellFlatMap {
+
+  private final Cell[] block;
+
+  /* The Cells array is created only when the CellArrayMap is created; all sub-CellFlatMaps
+   * use boundary indexes. The given Cell array must be ordered. */
+  public CellArrayMap(Comparator<? super Cell> comparator, Cell[] b, int min, int max, boolean d) {
+    super(comparator, min, max, d);
+    this.block = b;
+  }
+
+  /* To be used by the base class only, to create a sub-CellFlatMap */
+  @Override
+  protected CellFlatMap createSubCellFlatMap(Comparator<? super Cell> comparator, int min, int max,
+      boolean d) {
+    return new CellArrayMap(comparator, this.block, min, max, d);
+  }
+
+  @Override
+  protected Cell getCell(int i) {
+    if (i < minCellIdx || i >= maxCellIdx) return null;
+    return block[i];
+  }
+}
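A rough usage sketch of the new class (illustrative only, not from the patch): it builds a CellArrayMap over a small pre-sorted Cell array and reads its boundary keys. It assumes HBase server classes on the classpath, and assumes CellComparator.COMPARATOR is the comparator name available on this code line; adjust to whatever comparator your branch exposes.

// Illustrative sketch only: wrap a pre-sorted Cell[] in a CellArrayMap.
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class CellArrayMapSketch {
  public static void main(String[] args) {
    // two cells, already in row order (the CellArrayMap requires sorted input)
    Cell[] cells = new Cell[] {
        new KeyValue(Bytes.toBytes("r1"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v1")),
        new KeyValue(Bytes.toBytes("r2"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v2"))
    };
    // bounds [0, cells.length) cover the whole array; descending=false
    CellArrayMap map = new CellArrayMap(CellComparator.COMPARATOR, cells, 0, cells.length, false);
    System.out.println("first=" + map.firstKey() + " last=" + map.lastKey() + " size=" + map.size());
  }
}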
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkMap.java
new file mode 100644
index 000000000000..2880c4f16cf8
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkMap.java
@@ -0,0 +1,99 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.Comparator;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.util.Bytes;
+
+
+/**
+ * CellChunkMap is a byte array holding everything needed to access a Cell, where
+ * the Cell itself is stored in another, deeper byte array.
+ * Per Cell we keep a reference to that deeper byte array B, the offset in bytes in B (integer),
+ * and the length in bytes in B (integer). To store the reference to the byte array we use the
+ * Chunk's index given by the MSLAB (also an integer).
+ *
+ * The CellChunkMap memory layout relevant to a deeper byte array B:
+ *
+ * <----------------- first Cell ---------------------> <-------------- second Cell --- ...
+ * ------------------------------------------------------------------------------------- ...
+ * | integer = 4 bytes  | integer = 4 bytes | integer = 4 bytes | integer = 4 bytes  |
+ * | reference to B     | offset in B where | length of Cell's  | reference to maybe | ...
+ * | holding Cell data  | Cell's data starts| data in B         | another byte array |
+ * ------------------------------------------------------------------------------------- ...
+ */
+public class CellChunkMap extends CellFlatMap {
+  // TODO: once the Chunk class is out of the HeapMemStoreLAB class we are going to use
+  // MemStoreLAB and not HeapMemStoreLAB
+  private final HeapMemStoreLAB.Chunk[] chunks;
+  private final HeapMemStoreLAB memStoreLAB;
+  private final int numOfCellsInsideChunk;
+
+  public static final int BYTES_IN_CELL = 3*(Integer.SIZE / Byte.SIZE); // each Cell requires 3 integers
+
+  /* C-tor for an increasing map starting from index zero.
+   * The given Cell array on the given Chunk array must be ordered. */
+  public CellChunkMap(Comparator<? super Cell> comparator, HeapMemStoreLAB memStoreLAB,
+      HeapMemStoreLAB.Chunk[] chunks, int max, int chunkSize) {
+    super(comparator, 0, max, false);
+    this.chunks = chunks;
+    this.memStoreLAB = memStoreLAB;
+    this.numOfCellsInsideChunk = chunkSize / BYTES_IN_CELL;
+  }
+
+  /* The given Cell array on the given Chunk array must be ordered. */
+  public CellChunkMap(Comparator<? super Cell> comparator, HeapMemStoreLAB memStoreLAB,
+      HeapMemStoreLAB.Chunk[] chunks, int min, int max, int chunkSize, boolean d) {
+    super(comparator, min, max, d);
+    this.chunks = chunks;
+    this.memStoreLAB = memStoreLAB;
+    this.numOfCellsInsideChunk = chunkSize / BYTES_IN_CELL;
+  }
+
+  /* To be used by the base class only, to create a sub-CellFlatMap */
+  @Override
+  protected CellFlatMap createSubCellFlatMap(Comparator<? super Cell> comparator, int min, int max,
+      boolean d) {
+    return new CellChunkMap(comparator, this.memStoreLAB, this.chunks, min, max,
+        this.numOfCellsInsideChunk * BYTES_IN_CELL, d);
+  }
+
+  @Override
+  protected Cell getCell(int i) {
+    if (i < minCellIdx || i >= maxCellIdx) return null;
+
+    // find the correct metadata chunk
+    int chunkIndex = (i / numOfCellsInsideChunk);
+    byte[] block = chunks[chunkIndex].getData();
+    i = i - chunkIndex * numOfCellsInsideChunk;
+
+    // find the cell-reference inside the chunk
+    int offsetInBytes = i * BYTES_IN_CELL;
+    int chunkId = Bytes.toInt(block, offsetInBytes);
+    int offsetOfCell = Bytes.toInt(block, offsetInBytes + (Integer.SIZE / Byte.SIZE));
+    int lengthOfCell = Bytes.toInt(block, offsetInBytes + 2*(Integer.SIZE / Byte.SIZE));
+    byte[] chunk = memStoreLAB.translateIdToChunk(chunkId).getData();
+
+    Cell result = new KeyValue(chunk, offsetOfCell, lengthOfCell);
+    return result;
+  }
+}
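To make the reference layout concrete, the index arithmetic that getCell() performs can be shown as a stand-alone sketch in plain Java. Illustrative only; the 4096-byte chunk size is an assumption for the example, not the patch's default.

// Stand-alone sketch of CellChunkMap's index arithmetic: given a global cell
// index, locate the metadata chunk and the byte offset of its 3-int record.
public class CellChunkMathSketch {
  static final int BYTES_IN_CELL = 3 * Integer.BYTES; // chunkId, offset, length

  public static void main(String[] args) {
    int chunkSize = 4096;                             // assumed MSLAB chunk size
    int cellsPerChunk = chunkSize / BYTES_IN_CELL;    // 341 cell-references per chunk
    int i = 1000;                                     // global cell index
    int chunkIndex = i / cellsPerChunk;               // which metadata chunk holds it
    int offsetInBytes = (i - chunkIndex * cellsPerChunk) * BYTES_IN_CELL;
    System.out.println("cell " + i + " -> chunk " + chunkIndex + ", offset " + offsetInBytes);
  }
}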
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java
new file mode 100644
index 000000000000..a3b589d431e3
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java
@@ -0,0 +1,463 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.Cell;
+
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.concurrent.ConcurrentNavigableMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.Set;
+
+
+/**
+ * CellFlatMap stores a constant number of elements and is immutable after its creation stage.
+ * Being immutable, the CellFlatMap can be implemented as an array.
+ * The actual array can be on- or off-heap, and is implemented in the concrete classes
+ * derived from CellFlatMap.
+ * The CellFlatMap uses no synchronization primitives; it is assumed to be created by a
+ * single thread, and can afterwards be read by multiple threads.
+ *
+ * The "flat" in the name means that the memory layout of the Map is a sequential array, and
+ * thus requires less memory than a ConcurrentSkipListMap.
+ */
+public abstract class CellFlatMap implements ConcurrentNavigableMap<Cell, Cell> {
+
+  private final Comparator<? super Cell> comparator;
+  protected int minCellIdx = 0;   // the index of the minimal cell (for sub-sets)
+  protected int maxCellIdx = 0;   // the index of the maximal cell (for sub-sets)
+  private boolean descending = false;
+
+  /* C-tor */
+  public CellFlatMap(Comparator<? super Cell> comparator, int min, int max, boolean d) {
+    this.comparator = comparator;
+    this.minCellIdx = min;
+    this.maxCellIdx = max;
+    this.descending = d;
+  }
+
+  /* Used for sub-CellFlatMap creation, implemented by the derived class */
+  protected abstract CellFlatMap createSubCellFlatMap(Comparator<? super Cell> comparator, int min,
+      int max, boolean descending);
+
+  /* Returns the i-th cell in the cell block */
+  protected abstract Cell getCell(int i);
+
+  /**
+   * Binary search for a given key between the given boundaries of the array.
+   * A positive returned number is the index of the found key.
+   * A negative returned number means the key was not found; the absolute value of the
+   * result is the possible insertion index for the searched key: (-1 * insertion point).
+   * @param needle The key to look for in all of the entries
+   * @return the index of the key, or (-1 * insertion point) when the key is not found
+   */
+  private int find(Cell needle) {
+    int begin = minCellIdx;
+    int end = maxCellIdx - 1;
+
+    while (begin <= end) {
+      int mid = begin + ((end - begin) / 2);
+      Cell midCell = getCell(mid);
+      int compareRes = comparator.compare(midCell, needle);
+
+      if (compareRes == 0) {
+        return mid;  // 0 means equal. We found the key
+      } else if (compareRes < 0) {
+        // midCell is less than needle, so we need to look farther up
+        begin = mid + 1;
+      } else {
+        // midCell is greater than needle, so we need to look down
+        end = mid - 1;
+      }
+    }
+
+    return (-1 * begin);
+  }
+
+  /* Get the index of the key, taking into consideration whether
+   * the key should be inclusive or exclusive */
+  private int getValidIndex(Cell key, boolean inclusive) {
+    int index = find(key);
+    if (inclusive && index >= 0) {
+      index = (descending) ? index - 1 : index + 1;
+    }
+    return Math.abs(index);
+  }
+
+  @Override
+  public Comparator<? super Cell> comparator() {
+    return comparator;
+  }
+
+  @Override
+  public int size() {
+    return maxCellIdx - minCellIdx;
+  }
+
+  @Override
+  public boolean isEmpty() {
+    return (size() == 0);
+  }
+
+
+  // ---------------- Sub-Maps ----------------
+  @Override
+  public ConcurrentNavigableMap<Cell, Cell> subMap(Cell fromKey, boolean fromInclusive,
+      Cell toKey, boolean toInclusive) {
+    int toIndex = getValidIndex(toKey, toInclusive);
+    int fromIndex = getValidIndex(fromKey, !fromInclusive);
+
+    if (fromIndex > toIndex) {
+      throw new IllegalArgumentException("inconsistent range");
+    }
+    return createSubCellFlatMap(comparator, fromIndex, toIndex, descending);
+  }
+
+  @Override
+  public ConcurrentNavigableMap<Cell, Cell> headMap(Cell toKey, boolean inclusive) {
+    int index = getValidIndex(toKey, inclusive);
+    return createSubCellFlatMap(comparator, minCellIdx, index, descending);
+  }
+
+  @Override
+  public ConcurrentNavigableMap<Cell, Cell> tailMap(Cell fromKey, boolean inclusive) {
+    int index = getValidIndex(fromKey, !inclusive);
+    return createSubCellFlatMap(comparator, index, maxCellIdx, descending);
+  }
+
+  @Override
+  public ConcurrentNavigableMap<Cell, Cell> descendingMap() {
+    return createSubCellFlatMap(comparator, minCellIdx, maxCellIdx, true);
+  }
+
+  @Override
+  public ConcurrentNavigableMap<Cell, Cell> subMap(Cell k1, Cell k2) {
+    return this.subMap(k1, true, k2, true);
+  }
+
+  @Override
+  public ConcurrentNavigableMap<Cell, Cell> headMap(Cell k) {
+    return this.headMap(k, true);
+  }
+
+  @Override
+  public ConcurrentNavigableMap<Cell, Cell> tailMap(Cell k) {
+    return this.tailMap(k, true);
+  }
+
+
+  // -------------------------------- Key getters --------------------------------
+  @Override
+  public Cell firstKey() {
+    if (isEmpty()) {
+      return null;
+    }
+    return descending ? getCell(maxCellIdx - 1) : getCell(minCellIdx);
+  }
+
+  @Override
+  public Cell lastKey() {
+    if (isEmpty()) {
+      return null;
+    }
+    return descending ? getCell(minCellIdx) : getCell(maxCellIdx - 1);
+  }
+
+  @Override
+  public Cell lowerKey(Cell k) {
+    if (isEmpty()) {
+      return null;
+    }
+    int index = find(k);
+    // if index >= 0 there is a key exactly equal to k
+    index = (index >= 0) ? index - 1 : -(index);
+    return (index < minCellIdx || index >= maxCellIdx) ? null : getCell(index);
+  }
+
+  @Override
+  public Cell floorKey(Cell k) {
+    if (isEmpty()) {
+      return null;
+    }
+    int index = find(k);
+    index = (index >= 0) ? index : -(index);
+    return (index < minCellIdx || index >= maxCellIdx) ? null : getCell(index);
+  }
+
+  @Override
+  public Cell ceilingKey(Cell k) {
+    if (isEmpty()) {
+      return null;
+    }
+    int index = find(k);
+    index = (index >= 0) ? index : -(index) + 1;
+    return (index < minCellIdx || index >= maxCellIdx) ? null : getCell(index);
+  }
+
+  @Override
+  public Cell higherKey(Cell k) {
+    if (isEmpty()) {
+      return null;
+    }
+    int index = find(k);
+    index = (index >= 0) ? index + 1 : -(index) + 1;
+    return (index < minCellIdx || index >= maxCellIdx) ? null : getCell(index);
+  }
+
+  @Override
+  public boolean containsKey(Object o) {
+    int index = find((Cell) o);
+    return (index >= 0);
+  }
+
+  @Override
+  public boolean containsValue(Object o) { // use containsKey(Object o) instead
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Cell get(Object o) {
+    int index = find((Cell) o);
+    return (index >= 0) ? getCell(index) : null;
+  }
+
+  // -------------------------------- Entry getters --------------------------------
+  // all interfaces returning Entries are unsupported because we are dealing only with the keys
+  @Override
+  public Map.Entry<Cell, Cell> lowerEntry(Cell k) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Map.Entry<Cell, Cell> higherEntry(Cell k) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Map.Entry<Cell, Cell> ceilingEntry(Cell k) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Map.Entry<Cell, Cell> floorEntry(Cell k) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Map.Entry<Cell, Cell> firstEntry() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Map.Entry<Cell, Cell> lastEntry() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Map.Entry<Cell, Cell> pollFirstEntry() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Map.Entry<Cell, Cell> pollLastEntry() {
+    throw new UnsupportedOperationException();
+  }
+
+
+  // -------------------------------- Updates --------------------------------
+  // All updating methods below are unsupported.
+  // The assumption is that an array of Cells is allocated externally,
+  // filled up with Cells, and provided at construction time.
+  // After that the structure is immutable.
+  @Override
+  public Cell put(Cell k, Cell v) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void clear() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Cell remove(Object o) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean replace(Cell k, Cell v, Cell v1) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void putAll(Map<? extends Cell, ? extends Cell> map) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Cell putIfAbsent(Cell k, Cell v) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean remove(Object o, Object o1) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Cell replace(Cell k, Cell v) {
+    throw new UnsupportedOperationException();
+  }
+
+
+  // -------------------------------- Sub-Sets --------------------------------
+  @Override
+  public NavigableSet<Cell> navigableKeySet() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public NavigableSet<Cell> descendingKeySet() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public NavigableSet<Cell> keySet() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Collection<Cell> values() {
+    return new CellFlatMapCollection();
+  }
+
+  @Override
+  public Set<Map.Entry<Cell, Cell>> entrySet() {
+    throw new UnsupportedOperationException();
+  }
+
+  // -------------------------------- Iterator over Cells --------------------------------
+  private final class CellFlatMapIterator implements Iterator<Cell> {
+    int index;
+
+    private CellFlatMapIterator() {
+      index = descending ? maxCellIdx - 1 : minCellIdx;
+    }
+
+    @Override
+    public boolean hasNext() {
+      return descending ? (index >= minCellIdx) : (index < maxCellIdx);
+    }
+
+    @Override
+    public Cell next() {
+      Cell result = getCell(index);
+      if (descending) {
+        index--;
+      } else {
+        index++;
+      }
+      return result;
+    }
+
+    @Override
+    public void remove() {
+      throw new UnsupportedOperationException();
+    }
+  }
+
+  // -------------------------------- Collection --------------------------------
+  private final class CellFlatMapCollection implements Collection<Cell> {
+
+    @Override
+    public int size() {
+      return CellFlatMap.this.size();
+    }
+
+    @Override
+    public boolean isEmpty() {
+      return CellFlatMap.this.isEmpty();
+    }
+
+    @Override
+    public void clear() {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public boolean contains(Object o) {
+      return containsKey(o);
+    }
+
+    @Override
+    public Iterator<Cell> iterator() {
+      return new CellFlatMapIterator();
+    }
+
+    @Override
+    public Object[] toArray() {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public <T> T[] toArray(T[] ts) {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public boolean add(Cell k) {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public boolean remove(Object o) {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public boolean containsAll(Collection<?> collection) {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public boolean addAll(Collection<? extends Cell> collection) {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public boolean removeAll(Collection<?> collection) {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public boolean retainAll(Collection<?> collection) {
+      throw new UnsupportedOperationException();
+    }
+  }
+}
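The find() contract above uses a plainly negated insertion point rather than the -(insertionPoint)-1 convention of Arrays.binarySearch, which makes the result ambiguous when the insertion point is index 0. A stand-alone plain-Java sketch of that contract, for illustration only:

// Plain-Java sketch of the find() contract used by CellFlatMap: a binary
// search returning the index when found, and (-1 * insertionPoint) otherwise.
public class FlatFindSketch {
  static int find(int[] sorted, int needle, int min, int max) {
    int begin = min, end = max - 1;
    while (begin <= end) {
      int mid = begin + (end - begin) / 2;
      if (sorted[mid] == needle) return mid;
      if (sorted[mid] < needle) begin = mid + 1; else end = mid - 1;
    }
    return -1 * begin; // negated insertion point; ambiguous when begin == 0
  }

  public static void main(String[] args) {
    int[] a = {10, 20, 30, 40};
    System.out.println(find(a, 30, 0, a.length)); // 2  (found at index 2)
    System.out.println(find(a, 25, 0, a.length)); // -2 (would insert at 2)
  }
}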
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index 709f5027db71..04a4f91ba52b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -19,8 +19,6 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -32,10 +30,7 @@
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.executor.ExecutorService;
@@ -59,8 +54,16 @@ public class CompactingMemStore extends AbstractMemStore {
 
   public final static long DEEP_OVERHEAD_PER_PIPELINE_ITEM = ClassSize.align(
       ClassSize.TIMERANGE_TRACKER +
-      ClassSize.CELL_SKIPLIST_SET + ClassSize.CONCURRENT_SKIPLISTMAP);
+      ClassSize.CELL_SET + ClassSize.CONCURRENT_SKIPLISTMAP);
+  public final static long DEEP_OVERHEAD_PER_PIPELINE_FLAT_ARRAY_ITEM = ClassSize.align(
+      ClassSize.TIMERANGE_TRACKER +
+      ClassSize.CELL_SET + ClassSize.CELL_ARRAY_MAP);
   public final static double IN_MEMORY_FLUSH_THRESHOLD_FACTOR = 0.9;
+  public final static double COMPACTION_TRIGGER_REMAIN_FACTOR = 1.0;
+  public final static boolean COMPACTION_PRE_CHECK = false;
+
+  static final String COMPACTING_MEMSTORE_TYPE_KEY = "hbase.hregion.compacting.memstore.type";
+  static final int COMPACTING_MEMSTORE_TYPE_DEFAULT = 1;
 
   private static final Log LOG = LogFactory.getLog(CompactingMemStore.class);
   private HStore store;
@@ -73,6 +76,17 @@ public class CompactingMemStore extends AbstractMemStore {
   // A flag for tests only
   private final AtomicBoolean allowCompaction = new AtomicBoolean(true);
 
+  /**
+   * Types of CompactingMemStore
+   */
+  public enum Type {
+    COMPACT_TO_SKIPLIST_MAP,
+    COMPACT_TO_ARRAY_MAP,
+    COMPACT_TO_CHUNK_MAP
+  }
+
+  private Type type = Type.COMPACT_TO_SKIPLIST_MAP;
+
   public CompactingMemStore(Configuration conf, CellComparator c,
       HStore store, RegionServicesForStores regionServices) throws IOException {
     super(conf, c);
@@ -81,6 +95,31 @@ public CompactingMemStore(Configuration conf, CellComparator c,
     this.pipeline = new CompactionPipeline(getRegionServices());
     this.compactor = new MemStoreCompactor();
     initFlushSizeLowerBound(conf);
+    int t = conf.getInt(COMPACTING_MEMSTORE_TYPE_KEY, COMPACTING_MEMSTORE_TYPE_DEFAULT);
+    switch (t) {
+    case 1: type = Type.COMPACT_TO_SKIPLIST_MAP;
+      LOG.info("Creating CompactingMemStore that is going to compact to the SkipList data"
+          + " structure: region " + getRegionServices().getRegionInfo().getRegionNameAsString()
+          + " and store: " + getFamilyName());
+      break;
+    case 2: type = Type.COMPACT_TO_ARRAY_MAP;
+      LOG.info("Creating CompactingMemStore that is going to compact to the CellArray data"
+          + " structure: region " + getRegionServices().getRegionInfo().getRegionNameAsString()
+          + " and store: " + getFamilyName());
+      break;
+    case 3: type = Type.COMPACT_TO_CHUNK_MAP;
+      LOG.info("Creating CompactingMemStore that is going to compact to the ChunkMap data"
+          + " structure: region " + getRegionServices().getRegionInfo().getRegionNameAsString()
+          + " and store: " + getFamilyName());
+      break;
+    }
+  }
+
+  // C-tor for testing
+  public CompactingMemStore(Configuration conf, CellComparator c,
+      HStore store, RegionServicesForStores regionServices, Type type) throws IOException {
+    this(conf, c, store, regionServices);
+    this.type = type;
   }
 
   private void initFlushSizeLowerBound(Configuration conf) {
@@ -332,6 +371,7 @@ private void pushActiveToPipeline(MutableSegment active) {
 
   private void pushTailToSnapshot() {
     ImmutableSegment tail = pipeline.pullTail();
+
     if (!tail.isEmpty()) {
       setSnapshot(tail);
       long size = getSegmentSize(tail);
@@ -364,9 +404,12 @@ public void process() throws IOException {
     }
   }
 
-  /**
-   * The ongoing MemStore Compaction manager, dispatches a solo running compaction
-   * and interrupts the compaction if requested.
+  /** ----------------------------------------------------------------------
+   * The ongoing MemStore compaction manager; it dispatches a solo-running compaction and
+   * interrupts the compaction if requested. The compaction is interrupted and stopped by
+   * CompactingMemStore, for example when another compaction needs to be started.
+   * Prior to compaction the MemStoreCompactor evaluates
+   * the compacting ratio and aborts the compaction if it is not worthwhile.
    * The MemStoreScanner is used to traverse the compaction pipeline. The MemStoreScanner
    * is included in internal store scanner, where all compaction logic is implemented.
   * Threads safety: It is assumed that the compaction pipeline is immutable,
@@ -374,47 +417,26 @@ public void process() throws IOException {
    */
   private class MemStoreCompactor {
 
-    private MemStoreScanner scanner;            // scanner for pipeline only
-    // scanner on top of MemStoreScanner that uses ScanQueryMatcher
-    private StoreScanner compactingScanner;
-
-    // smallest read point for any ongoing MemStore scan
-    private long smallestReadPoint;
-
-    // a static version of the segment list from the pipeline
+    // a snapshot of the compaction pipeline segment list
     private VersionedSegmentsList versionedList;
+
+    // a flag raised when compaction is requested to stop
     private final AtomicBoolean isInterrupted = new AtomicBoolean(false);
 
+    // the limit on the size of the batches to be later provided to MemStoreCompactorIterator
+    private final int compactionKVMax = getConfiguration().getInt(
+        HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT);
 
-    /**
-     * ----------------------------------------------------------------------
+    /** ----------------------------------------------------------------------
      * The request to dispatch the compaction asynchronous task.
      * The method returns true if compaction was successfully dispatched, or false if there
-     *
      * is already an ongoing compaction (or pipeline is empty).
      */
     public boolean startCompact() throws IOException {
       if (pipeline.isEmpty()) return false;      // no compaction on empty pipeline
-
-      List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>();
-      // get the list of segments from the pipeline
+      // get a snapshot of the list of the segments from the pipeline;
+      // this local copy of the list is marked with a specific version
       versionedList = pipeline.getVersionedList();
-      // the list is marked with specific version
-
-      // create the list of scanners with maximally possible read point, meaning that
-      // all KVs are going to be returned by the pipeline traversing
-      for (Segment segment : versionedList.getStoreSegments()) {
-        scanners.add(segment.getSegmentScanner(Long.MAX_VALUE));
-      }
-      scanner =
-          new MemStoreScanner(CompactingMemStore.this, scanners, Long.MAX_VALUE,
-              MemStoreScanner.Type.COMPACT_FORWARD);
-
-      smallestReadPoint = store.getSmallestReadPoint();
-      compactingScanner = createScanner(store);
-
-      LOG.info("Starting the MemStore in-memory compaction for store " +
-          store.getColumnFamilyName());
-
+      LOG.info(
+          "Starting the MemStore in-memory compaction for store: " + store.getColumnFamilyName());
       doCompact();
       return true;
     }
@@ -435,10 +457,6 @@ public void stopCompact() {
      */
     private void releaseResources() {
       isInterrupted.set(false);
-      scanner.close();
-      scanner = null;
-      compactingScanner.close();
-      compactingScanner = null;
       versionedList = null;
     }
 
@@ -448,17 +466,28 @@ private void releaseResources() {
      * There is at most one thread per memstore instance.
      */
     private void doCompact() {
-
-      ImmutableSegment result = SegmentFactory.instance()  // create the scanner
-          .createImmutableSegment(getConfiguration(), getComparator(),
-              CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_ITEM);
-
-      // the compaction processing
+      int cellsAfterComp = versionedList.getNumOfCells();
       try {
-        // Phase I: create the compacted MutableCellSetSegment
-        compactSegments(result);
-
-        // Phase II: swap the old compaction pipeline
+        // Phase I (optional): estimate the compaction expedience - EVALUATE COMPACTION
+        if (COMPACTION_PRE_CHECK) {
+          cellsAfterComp = countCellsForCompaction();
+
+          if (!isInterrupted.get() && (cellsAfterComp
+              > COMPACTION_TRIGGER_REMAIN_FACTOR * versionedList.getNumOfCells())) {
+            // too many cells "survive" the possible compaction; we do not want to compact!
+            LOG.debug("In-Memory compaction does not pay off - storing the flattened segment"
+                + " for store: " + getFamilyName());
+            // Look for a Segment in the pipeline with a SkipList index, to make it flat
+            pipeline.flattenYoungestSegment(versionedList.getVersion());
+            return;
+          }
+        }
+        // Phase II: create the new compacted ImmutableSegment - START COMPACTION
+        ImmutableSegment result = null;
+        if (!isInterrupted.get()) {
+          result = compact(cellsAfterComp);
+        }
+        // Phase III: swap the old compaction pipeline - END COMPACTION
         if (!isInterrupted.get()) {
           pipeline.swap(versionedList, result);
           // update the wal so it can be truncated and not get too long
@@ -467,64 +496,78 @@
       } catch (Exception e) {
         LOG.debug("Interrupting the MemStore in-memory compaction for store " + getFamilyName());
         Thread.currentThread().interrupt();
-        return;
       } finally {
         releaseResources();
         inMemoryFlushInProgress.set(false);
       }
     }
 
-    /**
-     * Creates the scanner for compacting the pipeline.
-     *
-     * @return the scanner
+    /** ----------------------------------------------------------------------
+     * The compaction is the creation of the relevant ImmutableSegment, based on
+     * the MemStoreCompactorIterator
      */
-    private StoreScanner createScanner(Store store) throws IOException {
+    private ImmutableSegment compact(int numOfCells)
+        throws IOException {
 
-      Scan scan = new Scan();
-      scan.setMaxVersions(); //Get all available versions
+      LOG.info(
+          "Starting in-memory compaction of type: " + type + ". Before compaction we have "
+              + numOfCells + " cells in the entire compaction pipeline");
 
-      StoreScanner internalScanner =
-          new StoreScanner(store, store.getScanInfo(), scan, Collections.singletonList(scanner),
-              ScanType.COMPACT_RETAIN_DELETES, smallestReadPoint, HConstants.OLDEST_TIMESTAMP);
+      ImmutableSegment result = null;
+      MemStoreCompactorIterator iterator =
+          new MemStoreCompactorIterator(versionedList.getStoreSegments(), getComparator(),
+              compactionKVMax, store);
+      try {
+        switch (type) {
+        case COMPACT_TO_SKIPLIST_MAP:
+          result = SegmentFactory.instance()
+              .createImmutableSegment(getConfiguration(), getComparator(), iterator);
+          break;
+        case COMPACT_TO_ARRAY_MAP:
+          result = SegmentFactory.instance()
+              .createImmutableSegment(
+                  getConfiguration(), getComparator(), iterator, numOfCells,
+                  ImmutableSegment.Type.ARRAY_MAP_BASED);
+          break;
+        case COMPACT_TO_CHUNK_MAP:
+//          org.junit.Assert.assertTrue("\n<<<< Compacting to CellChunkMap set for " + numOfCells
+//              + " cells. \n", false);
+          result = SegmentFactory.instance()
+              .createImmutableSegment(
+                  getConfiguration(), getComparator(), iterator, numOfCells,
+                  ImmutableSegment.Type.CHUNK_MAP_BASED);
+          break;
+        default: throw new RuntimeException("Unknown type " + type); // sanity check
+        }
+      } finally {
+        iterator.close();
+      }
 
-      return internalScanner;
+      return result;
     }
 
-    /**
-     * Updates the given single Segment using the internal store scanner,
-     * who in turn uses ScanQueryMatcher
+    /** ----------------------------------------------------------------------
+     * COUNT THE CELLS, TO ESTIMATE THE EFFICIENCY OF THE FUTURE COMPACTION
      */
-    private void compactSegments(Segment result) throws IOException {
-
-      List<Cell> kvs = new ArrayList<Cell>();
-      // get the limit to the size of the groups to be returned by compactingScanner
-      int compactionKVMax = getConfiguration().getInt(
-          HConstants.COMPACTION_KV_MAX,
-          HConstants.COMPACTION_KV_MAX_DEFAULT);
-
-      ScannerContext scannerContext =
-          ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
-
-      boolean hasMore;
-      do {
-        hasMore = compactingScanner.next(kvs, scannerContext);
-        if (!kvs.isEmpty()) {
-          for (Cell c : kvs) {
-            // The scanner is doing all the elimination logic
-            // now we just copy it to the new segment
-            KeyValue kv = KeyValueUtil.ensureKeyValue(c);
-            Cell newKV = result.maybeCloneWithAllocator(kv);
-            result.internalAdd(newKV);
-          }
-          kvs.clear();
-        }
-      } while (hasMore && (!isInterrupted.get()));
-    }
+    private int countCellsForCompaction() throws IOException {
 
-  }
+      int cnt = 0;
+      MemStoreCompactorIterator iterator =
+          new MemStoreCompactorIterator(versionedList.getStoreSegments(), getComparator(),
+              compactionKVMax, store);
+
+      try {
+        while (iterator.next() != null) {
+          cnt++;
+        }
+      } finally {
+        iterator.close();
+      }
+
+      return cnt;
+    }
+  } // end of the MemStoreCompactor class
 
   //----------------------------------------------------------------------
   //methods for tests
@@ -559,10 +602,10 @@ Cell getNextRow(final Cell cell) {
     return lowest;
   }
 
-  // debug method
-  private void debug() {
+  // debug method, also for testing
+  public void debug() {
     String msg = "active size="+getActive().getSize();
-    msg += " threshold="+IN_MEMORY_FLUSH_THRESHOLD_FACTOR*flushSizeLowerBound;
+    msg += " threshold=" + IN_MEMORY_FLUSH_THRESHOLD_FACTOR * flushSizeLowerBound;
     msg += " allow compaction is "+ (allowCompaction.get() ? "true" : "false");
     msg += " inMemoryFlushInProgress is "+ (inMemoryFlushInProgress.get() ? "true" : "false");
     LOG.debug(msg);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
index 165406528f64..ede973ecfdd2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
@@ -98,7 +98,7 @@ public boolean swap(VersionedSegmentsList versionedList, ImmutableSegment segment
           +"Just before the swap the number of segments in pipeline is:"
           +versionedList.getStoreSegments().size()
           +", and the number of cells in new segment is:"+segment.getCellsCount());
-      swapSuffix(suffix,segment);
+      swapSuffix(suffix, segment);
     }
     if(region != null) {
       // update the global memstore size counter
@@ -112,6 +112,40 @@ public boolean swap(VersionedSegmentsList versionedList, ImmutableSegment segment
     return true;
   }
 
+  /**
+   * If the caller holds the current version, go over the pipeline and try to flatten each
+   * segment.
+   * Flattening replaces the ConcurrentSkipListMap-based CellSet with a CellArrayMap-based one.
+   * Flattening a segment that is not initially based on ConcurrentSkipListMap has no effect.
+   * Returns after one segment has been successfully flattened.
+   *
+   * @return true iff a segment was successfully flattened
+   */
+  public boolean flattenYoungestSegment(long requesterVersion) {
+
+    if (requesterVersion != version) {
+      LOG.info("Segment flattening failed, because versions do not match");
+      return false;
+    }
+
+    synchronized (pipeline) {
+      if (requesterVersion != version) {
+        LOG.info("Segment flattening failed, because versions do not match");
+        return false;
+      }
+
+      for (ImmutableSegment s : pipeline) {
+        if (s.flatten()) {
+          LOG.info("Compaction pipeline segment " + s + " was flattened.");
+          return true;
+        }
+      }
+    }
+    // do not update the global memstore size counter and do not increase the version,
+    // because all the cells remain in place
+    return false;
+  }
+
   public boolean isEmpty() {
     return pipeline.isEmpty();
   }
@@ -165,7 +199,6 @@ private boolean validateSuffixList(LinkedList<ImmutableSegment> suffix) {
       // empty suffix is always valid
       return true;
     }
-
     Iterator<ImmutableSegment> pipelineBackwardIterator = pipeline.descendingIterator();
     Iterator<ImmutableSegment> suffixBackwardIterator = suffix.descendingIterator();
     ImmutableSegment suffixCurrent;
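The double-checked version guard in flattenYoungestSegment() can be isolated into a small, plain-Java sketch (illustrative only; all names here are hypothetical): check the version optimistically, then re-check under the lock, so that a concurrent swap() (which bumps the version) invalidates the stale request instead of racing with it.

// Plain-Java sketch of the version-guarded update pattern used above.
public class VersionGuardSketch {
  private long version = 0;
  private final Object lock = new Object();

  boolean guardedUpdate(long requesterVersion, Runnable update) {
    if (requesterVersion != version) {
      return false;               // cheap optimistic check, no locking
    }
    synchronized (lock) {
      if (requesterVersion != version) {
        return false;             // lost the race to a concurrent version bump
      }
      update.run();               // safe: the version still matches under the lock
      return true;
    }
  }

  public static void main(String[] args) {
    VersionGuardSketch g = new VersionGuardSketch();
    System.out.println(g.guardedUpdate(0, () -> {})); // true
    System.out.println(g.guardedUpdate(7, () -> {})); // false: stale version
  }
}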

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemStoreLAB.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemStoreLAB.java
index f22a6e5c1ac8..6ca0f372c0e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemStoreLAB.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemStoreLAB.java
@@ -27,10 +27,12 @@
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.util.ByteRange;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.SimpleMutableByteRange;
 
 import com.google.common.base.Preconditions;
 
+
 /**
  * A memstore-local allocation buffer.
  * <p>
@@ -88,9 +90,8 @@ public HeapMemStoreLAB(Configuration conf) {
     this.chunkPool = MemStoreChunkPool.getPool(conf);
     // if we don't exclude allocations >CHUNK_SIZE, we'd infiniteloop on one!
-    Preconditions.checkArgument(
-      maxAlloc <= chunkSize,
-      MAX_ALLOC_KEY + " must be less than " + CHUNK_SIZE_KEY);
+    Preconditions.checkArgument(maxAlloc <= chunkSize,
+        MAX_ALLOC_KEY + " must be less than " + CHUNK_SIZE_KEY);
   }
 
   /**
@@ -102,11 +103,16 @@ public HeapMemStoreLAB(Configuration conf) {
   @Override
   public ByteRange allocateBytes(int size) {
     Preconditions.checkArgument(size >= 0, "negative size");
+    return allocateBytesWithID(size).getFirst();
+  }
+
+  public Pair<ByteRange, Integer> allocateBytesWithID(int size) {
+    Preconditions.checkArgument(size >= 0, "negative size");
     // Callers should satisfy large allocations directly from JVM since they
     // don't cause fragmentation as badly.
     if (size > maxAlloc) {
-      return null;
+      return new Pair<>(null, 0);
     }
 
     while (true) {
@@ -117,7 +123,7 @@ public ByteRange allocateBytes(int size) {
       if (allocOffset != -1) {
         // We succeeded - this is the common case - small alloc
         // from a big buffer
-        return new SimpleMutableByteRange(c.data, allocOffset, size);
+        return new Pair<>(new SimpleMutableByteRange(c.data, allocOffset, size), c.getId());
       }
       // not enough space!
@@ -181,6 +187,7 @@ private void tryRetireChunk(Chunk c) {
    * allocate a new one from the JVM.
    */
   private Chunk getOrMakeChunk() {
+
     while (true) {
       // Try to get the chunk
       Chunk c = curChunk.get();
@@ -191,11 +198,18 @@ private Chunk getOrMakeChunk() {
       // No current chunk, so we want to allocate one. We race
       // against other allocators to CAS in an uninitialized chunk
       // (which is cheap to allocate)
-      c = (chunkPool != null) ? chunkPool.getChunk() : new Chunk(chunkSize);
+      if (chunkPool != null) {
+        c = chunkPool.getChunk();
+      } else {
+        // HBASE-14921: the placeholder ID 555 is here until it is decided whether the
+        // ChunkPool is always on
+        c = new Chunk(chunkSize, 555);
+        c.init();
+      }
+
       if (curChunk.compareAndSet(null, c)) {
         // we won race - now we need to actually do the expensive
         // allocation step
-        c.init();
+        this.chunkQueue.add(c);
         return c;
       } else if (chunkPool != null) {
@@ -206,6 +220,32 @@ private Chunk getOrMakeChunk() {
     }
   }
 
+  /**
+   * Given a chunk ID, return a reference to the relevant chunk
+   * @return a chunk
+   */
+  public Chunk translateIdToChunk(int id) {
+    return chunkPool.translateIdToChunk(id);
+  }
+
+  /**
+   * Get the ID of the Chunk from which the last allocation took its bytes
+   * @return the current chunk's ID
+   */
+  public int getCurrentChunkId() {
+    return curChunk.get().getId();
+  }
+
+  /**
+   * Used instead of allocateBytes() when a new full chunk is needed
+   * @return a chunk
+   */
+  public Chunk allocateChunk() {
+    Chunk c = chunkPool.getChunk();
+    this.chunkQueue.add(c);
+    return c;
+  }
+
   /**
    * A chunk of memory out of which allocations are sliced.
    */
@@ -227,12 +267,18 @@ static class Chunk {
     /** Size of chunk in bytes */
     private final int size;
 
+    /* A unique identifier of the chunk inside the MemStoreChunkPool;
+     * the chunk's ID serves as a replacement for a direct pointer/reference */
+    private final int id;
+
     /**
      * Create an uninitialized chunk. Note that memory is not allocated yet, so
     * this is cheap.
      * @param size in bytes
      */
-    Chunk(int size) {
+    Chunk(int size, int id) {
+      this.id = id;
       this.size = size;
     }
 
@@ -252,13 +298,13 @@ public void init() {
         assert failInit; // should be true.
         throw e;
       }
+
       // Mark that it's ready for use
       boolean initted = nextFreeOffset.compareAndSet(
           UNINITIALIZED, 0);
       // We should always succeed the above CAS since only one thread
       // calls init()!
-      Preconditions.checkState(initted,
-          "Multiple threads tried to init same chunk");
+      Preconditions.checkState(initted, "Multiple threads tried to init same chunk");
     }
 
     /**
@@ -311,5 +357,13 @@ public String toString() {
         " allocs=" + allocCount.get() + "waste=" +
         (data.length - nextFreeOffset.get());
     }
+
+    public int getId() {
+      return id;
+    }
+
+    public byte[] getData() {
+      return data;
+    }
   }
 }
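To show how a caller is expected to consume the new allocate-with-ID API, here is a short sketch. Names follow the patch, but the sketch itself is illustrative: obtaining the lab instance and the serialized cell bytes is elided and assumed to come from the surrounding memstore code.

// Sketch only: copy a cell's bytes into MSLAB memory while remembering the
// backing chunk's ID, so a CellChunkMap-style index can later re-resolve it.
import org.apache.hadoop.hbase.util.ByteRange;
import org.apache.hadoop.hbase.util.Pair;

public class AllocateWithIdSketch {
  static void copyWithId(HeapMemStoreLAB lab, byte[] cellBytes) {
    Pair<ByteRange, Integer> alloc = lab.allocateBytesWithID(cellBytes.length);
    if (alloc.getFirst() == null) {
      return; // too large for MSLAB; caller falls back to a plain JVM allocation
    }
    ByteRange range = alloc.getFirst();
    int chunkId = alloc.getSecond();   // later resolvable via translateIdToChunk(chunkId)
    System.arraycopy(cellBytes, 0, range.getBytes(), range.getOffset(), cellBytes.length);
    // the 3-int record (chunkId, range.getOffset(), cellBytes.length) is exactly
    // what CellChunkMap stores per cell
  }
}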
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
index 077c27095877..086b7d716338 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
@@ -18,8 +18,21 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.CollectionBackedScanner;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.SimpleMutableByteRange;
+import org.apache.hadoop.hbase.util.ByteRange;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import java.io.IOException;
 
 /**
  * ImmutableSegment is an abstract class that extends the API supported by a {@link Segment},
@@ -30,10 +43,84 @@
 @InterfaceAudience.Private
 public class ImmutableSegment extends Segment {
 
+  /**
+   * Types of ImmutableSegment
+   */
+  public enum Type {
+    SKIPLIST_MAP_BASED,
+    ARRAY_MAP_BASED,
+    CHUNK_MAP_BASED
+  }
+
+  private Type type = Type.SKIPLIST_MAP_BASED;
+
+  // whether it is based on CellFlatMap or on ConcurrentSkipListMap
+  private boolean isFlat() {
+    return (type == Type.ARRAY_MAP_BASED) || (type == Type.CHUNK_MAP_BASED);
+  }
+
+  ///////////////////// CONSTRUCTORS /////////////////////
+  /**------------------------------------------------------------------------
+   * Copy C-tor to be used when a new ImmutableSegment is being built from a Mutable one.
+   * This C-tor should be used when an active MutableSegment is pushed into the compaction
+   * pipeline and becomes an ImmutableSegment.
+   */
   protected ImmutableSegment(Segment segment) {
     super(segment);
+    type = Type.SKIPLIST_MAP_BASED;
   }
 
+  /**------------------------------------------------------------------------
+   * C-tor to be used when the new ImmutableSegment is a result of compaction of a list
+   * of older ImmutableSegments.
+   * The given iterator returns the Cells that "survived" the compaction.
+   * According to the given Type parameter, the new ImmutableSegment is built based on
+   * CellArrayMap or CellChunkMap.
+   */
+  protected ImmutableSegment(
+      final Configuration conf, CellComparator comparator, MemStoreCompactorIterator iterator,
+      MemStoreLAB memStoreLAB, int numOfCells, Type type) {
+
+    super(null, comparator, memStoreLAB,
+        CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_FLAT_ARRAY_ITEM,
+        (type == Type.ARRAY_MAP_BASED) ?
+            ClassSize.CELL_ARRAY_MAP_ENTRY : ClassSize.CELL_CHUNK_MAP_ENTRY);
+
+    CellSet cs = null;   // build the CellSet, either Cell-array or byte-array based
+    if (type == Type.ARRAY_MAP_BASED) {
+      cs = createCellArrayMapSet(numOfCells, iterator);
+    } else {
+//      org.junit.Assert.assertTrue("\n<<<< Creating CellChunkMap set for " + numOfCells
+//          + " cells. \n", false);
+      cs = createCellChunkMapSet(numOfCells, iterator, conf);
+    }
+    this.setCellSet(null, cs);   // update the CellSet of the new Segment
+    this.type = type;
+  }
+
+  /**------------------------------------------------------------------------
+   * C-tor to be used when the new SKIP-LIST BASED ImmutableSegment is a result of compaction
+   * of a list of older ImmutableSegments.
+   * The given iterator returns the Cells that "survived" the compaction.
+   */
+  protected ImmutableSegment(
+      CellComparator comparator, MemStoreCompactorIterator iterator, MemStoreLAB memStoreLAB) {
+
+    super(new CellSet(comparator), comparator, memStoreLAB,
+        CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_ITEM,
+        ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
+
+    while (iterator.hasNext()) {
+      Cell c = iterator.next();
+      // The scanner is doing all the elimination logic;
+      // now we just copy it to the new segment
+      KeyValue kv = KeyValueUtil.ensureKeyValue(c);
+      Cell newKV = maybeCloneWithAllocator(kv);
+      internalAdd(newKV);
+    }
+    type = Type.SKIPLIST_MAP_BASED;
+  }
+
+  ///////////////////// PUBLIC METHODS /////////////////////
   /**
    * Builds a special scanner for the MemStoreSnapshot object that is different than the
    * general segment scanner.
@@ -43,4 +130,148 @@
   public KeyValueScanner getKeyValueScanner() {
     return new CollectionBackedScanner(getCellSet(), getComparator());
   }
+
+  /**------------------------------------------------------------------------
+   * Changes the CellSet of this ImmutableSegment from one based on ConcurrentSkipListMap to
+   * one based on CellArrayMap.
+   * If this ImmutableSegment is not based on ConcurrentSkipListMap, this is a NOP.
+   * For now the change from ConcurrentSkipListMap to CellChunkMap is not supported, because
+   * it requires the Cell to know on which Chunk it is placed.
+   *
+   * Synchronization of the CellSet replacement:
+   * The reference to the CellSet is an AtomicReference, and is updated only when the
+   * ImmutableSegment is constructed (single thread) or flattened. The flattening happens as
+   * part of a single thread of compaction, but to be on the safe side the initial CellSet is
+   * locally saved before the flattening and then replaced using a CAS instruction.
+   */
+  public boolean flatten() {
+    if (isFlat()) return false;
+    CellSet oldCellSet = getCellSet();
+    int numOfCells = getCellsCount();
+
+    // each Cell is now represented either in CellArrayMap or in CellChunkMap
+    constantCellMetaDataSize = ClassSize.CELL_ARRAY_MAP_ENTRY;
+
+    // adjust the meta-data size: first decrease all meta-data sizes related to the SkipList
+    incSize(
+        -(ClassSize.CONCURRENT_SKIPLISTMAP + numOfCells * ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY));
+    // then add the size of the CellArrayMap and the meta-data overhead per Cell
+    incSize(ClassSize.CELL_ARRAY_MAP + numOfCells * constantCellMetaDataSize);
+
+    // build the new CellArrayMap-based CellSet
+    CellSet newCellSet = recreateCellArrayMapSet(numOfCells);
+    setCellSet(oldCellSet, newCellSet);
+    return true;
+  }
+
+  ///////////////////// PRIVATE METHODS /////////////////////
+  /*------------------------------------------------------------------------*/
+  // Create a CellSet based on CellArrayMap from the compacting iterator
+  private CellSet createCellArrayMapSet(int numOfCells, MemStoreCompactorIterator iterator) {
+
+    Cell[] cells = new Cell[numOfCells];   // build the Cell Array
+    int i = 0;
+    while (iterator.hasNext()) {
+      Cell c = iterator.next();
+      // The scanner behind the iterator is doing all the elimination logic;
+      // now we just copy it to the new segment (also an MSLAB copy)
+      KeyValue kv = KeyValueUtil.ensureKeyValue(c);
+      cells[i++] = maybeCloneWithAllocator(kv);
+      // last parameter true, because in compaction we count both Heap (Data) and MetaData size
+      updateMetaInfo(c, true);
+    }
+    // build the immutable CellSet
+    CellArrayMap cam = new CellArrayMap(getComparator(), cells, 0, i, false);
+    return new CellSet(cam);
+  }
+
+  /*------------------------------------------------------------------------*/
+  // Create a CellSet based on CellArrayMap from the current
+  // ConcurrentSkipListMap based CellSet
+  private CellSet recreateCellArrayMapSet(int numOfCells) {
+
+    Cell[] cells = new Cell[numOfCells];   // build the Cell Array
+    Cell curCell;
+    int idx = 0;
+    // create this segment's scanner with the maximal possible read point, to go over all Cells
+    SegmentScanner segmentScanner = this.getSegmentScanner(Long.MAX_VALUE);
+
+    try {
+      while ((curCell = segmentScanner.next()) != null) {
+        cells[idx++] = curCell;
+      }
+    } catch (IOException ie) {
+      throw new IllegalStateException(ie);
+    }
+    // build the immutable CellSet
+    CellArrayMap cam = new CellArrayMap(getComparator(), cells, 0, idx, false);
+    return new CellSet(cam);
+  }
+
+  /*------------------------------------------------------------------------*/
+  // Create a CellSet based on CellChunkMap from the compacting iterator;
+  // we do not consider cells bigger than chunks
+  private CellSet createCellChunkMapSet(
+      int numOfCells, MemStoreCompactorIterator iterator, final Configuration conf) {
+
+    // calculate how many chunks we will need for the metadata
+    int chunkSize = conf.getInt(HeapMemStoreLAB.CHUNK_SIZE_KEY, HeapMemStoreLAB.CHUNK_SIZE_DEFAULT);
+    int numOfCellsInChunk = chunkSize / CellChunkMap.BYTES_IN_CELL;
+    // round up, so a last partially filled chunk is counted as well
+    int numberOfChunks = (numOfCells + numOfCellsInChunk - 1) / numOfCellsInChunk;
+
+    // all Chunks (for metadata and for data) are allocated from the current segment's MSLAB
+    // TODO: when Chunk moves out of HeapMemStoreLAB we can use MemStoreLAB here
+    // and not HeapMemStoreLAB
+    HeapMemStoreLAB ms = (HeapMemStoreLAB) getMemStoreLAB();
+    HeapMemStoreLAB.Chunk[] chunks = new HeapMemStoreLAB.Chunk[numberOfChunks]; // metadata chunks
+    int currentChunkIdx = 0;
+    chunks[currentChunkIdx] = ms.allocateChunk();
+    int offsetInCurrentChunk = 0;
+
+//    org.junit.Assert.assertTrue("\n<<<< Creating CellChunkMap set for " + numOfCells
+//        + " cells. The calculated chunk size is " + chunkSize + " bytes. " + "We need "
+//        + numberOfChunks + " chunks. " + "\n", false);
+
+    while (iterator.hasNext()) {
+      Cell c = iterator.next();
+
+      if (offsetInCurrentChunk + CellChunkMap.BYTES_IN_CELL > chunkSize) {
+        // continue to the next metadata chunk
+        currentChunkIdx++;
+        chunks[currentChunkIdx] = ms.allocateChunk();
+        offsetInCurrentChunk = 0;
+      }
+
+      // The scanner behind the iterator is doing all the elimination logic;
+      // now we just copy it to the new segment (also an MSLAB copy)
+      KeyValue kv = KeyValueUtil.ensureKeyValue(c);
+      offsetInCurrentChunk =
+          cloneAndReference(kv, chunks[currentChunkIdx].getData(), offsetInCurrentChunk);
+      // last parameter true, because in compaction we count both Heap (Data) and MetaData size
+      updateMetaInfo(c, true);
+    }
+
+    CellChunkMap ccm = new CellChunkMap(getComparator(), (HeapMemStoreLAB) getMemStoreLAB(),
+        chunks, 0, numOfCells, chunkSize, false);
+    return new CellSet(ccm);
+  }
+
+  /*------------------------------------------------------------------------*/
+  // For a given cell, allocate space and write the cell onto the data chunk,
+  // then write the cell-reference onto the metadata chunk
+  private int cloneAndReference(Cell cell, byte[] referencesByteArray, int offsetForReference) {
+    HeapMemStoreLAB ms = (HeapMemStoreLAB) getMemStoreLAB();
+    int len = KeyValueUtil.length(cell);
+    int offset = offsetForReference;
+    // we assume the Cell length is not bigger than a Chunk
+
+    // allocate
+    Pair<ByteRange, Integer> tuple = ms.allocateBytesWithID(len);
+    ByteRange alloc = tuple.getFirst();
+    int chunkId = tuple.getSecond();
+    KeyValueUtil.appendToByteArray(cell, alloc.getBytes(), alloc.getOffset());
+
+    // write the reference
+    offset = Bytes.putInt(referencesByteArray, offset, chunkId);           // the chunk id
+    offset = Bytes.putInt(referencesByteArray, offset, alloc.getOffset()); // offset
+    offset = Bytes.putInt(referencesByteArray, offset, len);               // length
+    return offset;
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java
index 628506059eef..2da8aa4d0ad3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java
@@ -18,13 +18,18 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import com.google.common.annotations.VisibleForTesting;
 import java.lang.management.ManagementFactory;
 import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Executors;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -76,17 +81,23 @@ public class MemStoreChunkPool {
   private static final int statThreadPeriod = 60 * 5;
   private AtomicLong createdChunkCount = new AtomicLong();
   private AtomicLong reusedChunkCount = new AtomicLong();
+  // HBASE-14921: chunk IDs are handed out starting from 1
+  private AtomicInteger chunkIDs = new AtomicInteger(1);
+
+  // The mapping between each chunk allocated by the MemStoreChunkPool and
+  // an integer representing the chunk's ID (ID 0 is forbidden)
+  private final ConcurrentMap<Integer, Chunk> chunksMap = new ConcurrentHashMap<Integer, Chunk>();
 
   MemStoreChunkPool(Configuration conf, int chunkSize, int maxCount,
       int initialCount) {
     this.maxCount = maxCount;
     this.chunkSize = chunkSize;
     this.reclaimedChunks = new LinkedBlockingQueue<Chunk>();
+
     for (int i = 0; i < initialCount; i++) {
-      Chunk chunk = new Chunk(chunkSize);
-      chunk.init();
+      Chunk chunk = allocateChunk();
       reclaimedChunks.add(chunk);
     }
+
     final String n = Thread.currentThread().getName();
     scheduleThreadPool = Executors.newScheduledThreadPool(1,
         new ThreadFactoryBuilder().setNameFormat(n+"-MemStoreChunkPool Statistics")
@@ -103,10 +114,11 @@ public class MemStoreChunkPool {
   Chunk getChunk() {
     Chunk chunk = reclaimedChunks.poll();
     if (chunk == null) {
-      chunk = new Chunk(chunkSize);
+      chunk = allocateChunk();
       createdChunkCount.incrementAndGet();
     } else {
       chunk.reset();
+      chunk.init();
      reusedChunkCount.incrementAndGet();
     }
     return chunk;
@@ -125,6 +137,14 @@ void putbackChunks(BlockingQueue<Chunk> chunks) {
     chunks.drainTo(reclaimedChunks, maxNumToPutback);
   }
 
+  /**
+   * Given a chunk ID, return a reference to the relevant chunk
+   * @return a chunk
+   */
+  Chunk translateIdToChunk(int id) {
+    return chunksMap.get(id);
+  }
+
   /**
    * Add the chunk to the pool, if the pool has achieved the max size, it will
    * skip it
@@ -141,11 +161,27 @@ int getPoolSize() {
     return this.reclaimedChunks.size();
   }
 
+  @VisibleForTesting
+  void clearChunks() {
+    this.reclaimedChunks.clear();
+  }
+
   /*
    * Only used in testing
    */
-  void clearChunks() {
-    this.reclaimedChunks.clear();
+  ConcurrentMap<Integer, Chunk> getChunksMap() {
+    return this.chunksMap;
+  }
+
+  /*
+   * Allocate and register a Chunk
+   */
+  private Chunk allocateChunk() {
+    int newId = chunkIDs.getAndAdd(1);   // the ID of the new chunk
+    Chunk chunk = new Chunk(chunkSize, newId);
+    chunksMap.put(newId, chunk);
+    chunk.init();
+    return chunk;
   }
 
   private static class StatisticsThread extends Thread {
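The ID-to-chunk registry added to MemStoreChunkPool follows a simple pattern that can be sketched stand-alone in plain Java. The names mirror the patch, but this is an illustration, not the pool itself: every allocated chunk gets a unique positive ID (0 is reserved as forbidden) and is registered in a concurrent map so that IDs can later be resolved back to chunks.

// Pure-Java sketch of the chunk-ID registry pattern.
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

public class ChunkRegistrySketch {
  static final class Chunk {
    final int id;
    final byte[] data;
    Chunk(int size, int id) { this.id = id; this.data = new byte[size]; }
  }

  private final AtomicInteger nextId = new AtomicInteger(1); // ID 0 is forbidden
  private final ConcurrentMap<Integer, Chunk> chunksMap = new ConcurrentHashMap<>();

  Chunk allocateChunk(int size) {
    int id = nextId.getAndIncrement();
    Chunk c = new Chunk(size, id);
    chunksMap.put(id, c);              // register before handing the chunk out
    return c;
  }

  Chunk translateIdToChunk(int id) {
    return chunksMap.get(id);
  }

  public static void main(String[] args) {
    ChunkRegistrySketch pool = new ChunkRegistrySketch();
    Chunk c = pool.allocateChunk(4096);
    System.out.println(pool.translateIdToChunk(c.id) == c); // true
  }
}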
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorIterator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorIterator.java new file mode 100644 index 000000000000..56c261934ddb --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorIterator.java @@ -0,0 +1,154 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Scan; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; + +/** + * The MemStoreCompactorIterator is designed to perform one iteration over a given list of + * segments. For another iteration a new instance of MemStoreCompactorIterator needs to be + * created. The iterator is not thread-safe; at most one instance may exist at any point in time. + */ +@InterfaceAudience.Private +public class MemStoreCompactorIterator implements Iterator<Cell> { + + private List<Cell> kvs = new ArrayList<Cell>(); + + // scanner over the full or partial pipeline (a heap of segment scanners); + // we need to keep this scanner in order to close it at the end + private KeyValueScanner scanner; + + // scanner on top of the pipeline scanner that uses ScanQueryMatcher + private StoreScanner compactingScanner; + + private final ScannerContext scannerContext; + + private boolean hasMore; + private Iterator<Cell> kvsIterator; + + // Constructor + public MemStoreCompactorIterator(LinkedList<ImmutableSegment> segments, + CellComparator comparator, int compactionKVMax, HStore store) throws IOException { + + this.scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); + + // list of scanners over the segments in the pipeline, as of compaction start + List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>(); + + // create the list of scanners with the maximally possible read point, meaning that + // all KVs are going to be returned by the pipeline traversal + for (Segment segment : segments) { + scanners.add(segment.getSegmentScanner(store.getSmallestReadPoint())); + } + + scanner = new MemStoreScanner(comparator, scanners, MemStoreScanner.Type.COMPACT_FORWARD); + + // reinitialize the compacting scanner for each instance of the iterator + compactingScanner = createScanner(store, scanner); + + hasMore = compactingScanner.next(kvs, scannerContext); + kvsIterator = kvs.iterator(); // an empty batch yields an empty iterator, never null + } + + @Override + public boolean hasNext() { + if (!kvsIterator.hasNext()) { + // refillKVS() should be invoked only if !kvsIterator.hasNext() + if (!refillKVS()) { + return false; + } + } + return hasMore; + } + + @Override + public Cell next() { + if (!kvsIterator.hasNext()) { + // refillKVS() should be invoked only if !kvsIterator.hasNext() + if (!refillKVS()) return null; + } + return (!hasMore) ? null : kvsIterator.next(); + } + + public void close() { + compactingScanner.close(); + compactingScanner = null; + scanner.close(); + scanner = null; + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + + /** + * Creates the scanner for compacting the pipeline.
+ * + * @return the scanner + */ + private StoreScanner createScanner(Store store, KeyValueScanner scanner) + throws IOException { + + Scan scan = new Scan(); + scan.setMaxVersions(); // get all available versions + StoreScanner internalScanner = + new StoreScanner(store, store.getScanInfo(), scan, Collections.singletonList(scanner), + ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(), + HConstants.OLDEST_TIMESTAMP); + + return internalScanner; + } + + private boolean refillKVS() { + kvs.clear(); // clear the previous batch, first initialized in the constructor + if (!hasMore) { // nothing more is expected from the compactingScanner + return false; + } + + try { // try to get the next batch of KVs + hasMore = compactingScanner.next(kvs, scannerContext); + } catch (IOException ie) { + throw new IllegalStateException(ie); + } + + if (!kvs.isEmpty()) { // did the scanner return a new non-empty batch? + kvsIterator = kvs.iterator(); + return true; + } + return false; + } +}
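The intended call pattern for the iterator, as a sketch; versionedList, comparator, compactionKVMax and store are assumed to be owned by the surrounding compactor, and the copy step stands in for segment-specific logic such as cloneAndReference above:

    MemStoreCompactorIterator iterator = new MemStoreCompactorIterator(
        versionedList.getStoreSegments(), comparator, compactionKVMax, store);
    try {
      while (iterator.hasNext()) {
        Cell c = iterator.next(); // already filtered by the ScanQueryMatcher underneath
        // copy c into the result segment here
      }
    } finally {
      iterator.close(); // releases the pipeline scanner and the compacting scanner
    }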
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreScanner.java index dfcec25e1b10..61f00c079d9f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreScanner.java @@ -23,6 +23,7 @@ import java.util.List; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; @@ -55,12 +56,11 @@ static public enum Type { // or according to the first usage private Type type = Type.UNDEFINED; - private long readPoint; // remember the initial version of the scanners list List<KeyValueScanner> scanners; - // pointer back to the relevant MemStore - // is needed for shouldSeek() method - private AbstractMemStore backwardReferenceToMemStore; + + private final CellComparator comparator; + /** * Constructor. @@ -68,44 +68,40 @@ static public enum Type { * After constructor only one heap is going to be initialized for entire lifespan * of the MemStoreScanner. A specific scanner can only be one directional! * - * @param ms Pointer back to the MemStore - * @param readPoint Read point below which we can safely remove duplicate KVs - * @param type The scan type COMPACT_FORWARD should be used for compaction + * @param comparator Cell Comparator + * @param scanners List of scanners, from which the heap will be built + * @param type The scan type; COMPACT_FORWARD should be used for compaction */ - public MemStoreScanner(AbstractMemStore ms, long readPoint, Type type) throws IOException { - this(ms, ms.getListOfScanners(readPoint), readPoint, type); - } - - /* Constructor used only when the scan usage is unknown - and need to be defined according to the first move */ - public MemStoreScanner(AbstractMemStore ms, long readPt) throws IOException { - this(ms, readPt, Type.UNDEFINED); - } - - public MemStoreScanner(AbstractMemStore ms, List scanners, long readPoint, - Type type) throws IOException { + public MemStoreScanner(CellComparator comparator, List<KeyValueScanner> scanners, Type type) + throws IOException { super(); - this.readPoint = readPoint; this.type = type; switch (type) { - case UNDEFINED: - case USER_SCAN_FORWARD: - case COMPACT_FORWARD: - this.forwardHeap = new KeyValueHeap(scanners, ms.getComparator()); - break; - case USER_SCAN_BACKWARD: - this.backwardHeap = new ReversedKeyValueHeap(scanners, ms.getComparator()); - break; - default: - throw new IllegalArgumentException("Unknown scanner type in MemStoreScanner"); + case UNDEFINED: + case USER_SCAN_FORWARD: + case COMPACT_FORWARD: + this.forwardHeap = new KeyValueHeap(scanners, comparator); + break; + case USER_SCAN_BACKWARD: + this.backwardHeap = new ReversedKeyValueHeap(scanners, comparator); + break; + default: + throw new IllegalArgumentException("Unknown scanner type in MemStoreScanner"); } - this.backwardReferenceToMemStore = ms; + this.comparator = comparator; this.scanners = scanners; if (Trace.isTracing() && Trace.currentSpan() != null) { Trace.currentSpan().addTimelineAnnotation("Creating MemStoreScanner"); } } + + /* Constructor used only when the scan usage is unknown + and needs to be defined according to the first move */ + public MemStoreScanner(CellComparator comparator, List<KeyValueScanner> scanners) + throws IOException { + this(comparator, scanners, Type.UNDEFINED); + }
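Because the heap is built exactly once in the constructor, a caller either fixes the traversal direction up front or defers the decision with UNDEFINED. A sketch of the two styles, with comparator and scanners assumed to be in scope:

    // a compaction always reads forward, so it pins the type eagerly
    MemStoreScanner compactionScanner =
        new MemStoreScanner(comparator, scanners, MemStoreScanner.Type.COMPACT_FORWARD);
    // a user scan may not know its direction yet; the first seek decides it
    MemStoreScanner userScanner = new MemStoreScanner(comparator, scanners); // Type.UNDEFINED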
+ /** * Returns the cell from the top-most scanner without advancing the iterator. * The backward traversal is assumed, only if specified explicitly @@ -292,7 +288,7 @@ private boolean restartBackwardHeap(Cell cell) throws IOException { res |= scan.seekToPreviousRow(cell); } this.backwardHeap = - new ReversedKeyValueHeap(scanners, backwardReferenceToMemStore.getComparator()); + new ReversedKeyValueHeap(scanners, comparator); return res; } @@ -322,7 +318,7 @@ private boolean initBackwardHeapIfNeeded(Cell cell, boolean toLast) throws IOExc } } this.backwardHeap = - new ReversedKeyValueHeap(scanners, backwardReferenceToMemStore.getComparator()); + new ReversedKeyValueHeap(scanners, comparator); type = Type.USER_SCAN_BACKWARD; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java index aef70e710c75..da31167ea0aa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.util.ClassSize; /** * A mutable segment in memstore, specifically the active segment. @@ -30,7 +31,7 @@ public class MutableSegment extends Segment { protected MutableSegment(CellSet cellSet, CellComparator comparator, MemStoreLAB memStoreLAB, long size) { - super(cellSet, comparator, memStoreLAB, size); + super(cellSet, comparator, memStoreLAB, size, ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY); } /** @@ -48,7 +49,7 @@ public long add(Cell cell) { public long rollback(Cell cell) { Cell found = getCellSet().get(cell); if (found != null && found.getSequenceId() == cell.getSequenceId()) { - long sz = AbstractMemStore.heapSizeChange(cell, true); + long sz = heapSizeChange(cell, true); getCellSet().remove(cell); incSize(-sz); return sz; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java index 34d0a51a27bb..05928f1d9f82 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java @@ -21,15 +21,14 @@ import java.util.Iterator; import java.util.SortedSet; import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; import org.apache.commons.logging.Log; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellComparator; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.ByteRange; +import org.apache.hadoop.hbase.util.ClassSize; /** * This is an abstraction of a segment maintained in a memstore, e.g., the active @@ -41,33 +40,37 @@ @InterfaceAudience.Private public abstract class Segment { - private volatile CellSet cellSet; + private AtomicReference<CellSet> cellSet = new AtomicReference<CellSet>(); private final CellComparator comparator; private long minSequenceId; private volatile MemStoreLAB memStoreLAB; private final AtomicLong size; private final TimeRangeTracker timeRangeTracker; + protected long constantCellMetaDataSize; protected volatile boolean tagsPresent; - protected
Segment(CellSet cellSet, CellComparator comparator, MemStoreLAB memStoreLAB, long - size) { - this.cellSet = cellSet; + protected Segment( + CellSet cellSet, CellComparator comparator, MemStoreLAB memStoreLAB, long size, + long constantCellSize) { + this.cellSet.set(cellSet); this.comparator = comparator; this.minSequenceId = Long.MAX_VALUE; this.memStoreLAB = memStoreLAB; this.size = new AtomicLong(size); this.timeRangeTracker = new TimeRangeTracker(); this.tagsPresent = false; + this.constantCellMetaDataSize = constantCellSize; } protected Segment(Segment segment) { - this.cellSet = segment.getCellSet(); + this.cellSet.set(segment.getCellSet()); this.comparator = segment.getComparator(); this.minSequenceId = segment.getMinSequenceId(); this.memStoreLAB = segment.getMemStoreLAB(); this.size = new AtomicLong(segment.getSize()); this.timeRangeTracker = segment.getTimeRangeTracker(); this.tagsPresent = segment.isTagsPresent(); + this.constantCellMetaDataSize = segment.getConstantCellMetaDataSize(); } /** @@ -178,6 +181,17 @@ public Segment setSize(long size) { return this; } + /** + * Sets the CellSet of the segment. Used only by flat immutable segments, to install the + * immutable CellSet after it is created in the immutable segment constructor + * @return this object + */ + protected Segment setCellSet(CellSet cellSetOld, CellSet cellSetNew) { + this.cellSet.compareAndSet(cellSetOld, cellSetNew); + return this; + } + /** * Returns the heap size of the segment * @return the heap size of the segment @@ -227,7 +241,7 @@ public int compareRows(Cell left, Cell right) { * @return a set of all cells in the segment */ protected CellSet getCellSet() { - return cellSet; + return cellSet.get(); } /** @@ -240,22 +254,23 @@ protected CellComparator getComparator() { protected long internalAdd(Cell cell) { boolean succ = getCellSet().add(cell); - long s = AbstractMemStore.heapSizeChange(cell, succ); - updateMetaInfo(cell, s); + long s = updateMetaInfo(cell, succ); return s; } - protected void updateMetaInfo(Cell toAdd, long s) { - getTimeRangeTracker().includeTimestamp(toAdd); + protected long updateMetaInfo(Cell cellToAdd, boolean succ) { + long s = heapSizeChange(cellToAdd, succ); + getTimeRangeTracker().includeTimestamp(cellToAdd); size.addAndGet(s); - minSequenceId = Math.min(minSequenceId, toAdd.getSequenceId()); + minSequenceId = Math.min(minSequenceId, cellToAdd.getSequenceId()); // In no tags case this NoTagsKeyValue.getTagsLength() is a cheap call. // When we use ACL CP or Visibility CP which deals with Tags during // mutation, the TagRewriteCell.getTagsLength() is a cheaper call. We do not // parse the byte[] to identify the tags length. - if(toAdd.getTagsLength() > 0) { + if (cellToAdd.getTagsLength() > 0) { tagsPresent = true; } + return s; } /** @@ -267,7 +282,7 @@ protected SortedSet<Cell> tailSet(Cell firstCell) { return getCellSet().tailSet(firstCell); } - private MemStoreLAB getMemStoreLAB() { + protected MemStoreLAB getMemStoreLAB() { return memStoreLAB; } @@ -291,4 +306,20 @@ public String toString() { return res; } + /* + * Calculates how the MemStore size has changed. Includes the overhead of the + * backing Map. + * @param cell + * @param notPresent true if the cell was NOT present in the set + * @return change in size + */ + protected long heapSizeChange(final Cell cell, final boolean notPresent) { + return notPresent ? + ClassSize.align(constantCellMetaDataSize + CellUtil.estimatedHeapSizeOf(cell)) : 0; + } + + public long getConstantCellMetaDataSize() { + return this.constantCellMetaDataSize; + } }
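heapSizeChange is where the flat maps pay off: the data portion of a cell costs the same everywhere, and only the per-entry index overhead (constantCellMetaDataSize) differs between segment flavors. A worked sketch using the entry constants this patch adds to ClassSize, for some given Cell cell:

    long data = CellUtil.estimatedHeapSizeOf(cell); // map-independent part
    long inSkipList = ClassSize.align(ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + data);
    long inCellArrayMap = ClassSize.align(ClassSize.CELL_ARRAY_MAP_ENTRY + data);
    long inCellChunkMap = ClassSize.align(ClassSize.CELL_CHUNK_MAP_ENTRY + data); // three ints only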
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java index 7ac80ae27749..6f923618b6b3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java @@ -18,11 +18,15 @@ */ package org.apache.hadoop.hbase.regionserver; +import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.ReflectionUtils; +import java.io.IOException; + /** * A singleton store segment factory. * Generate concrete store segments. @@ -40,28 +44,44 @@ public static SegmentFactory instance() { return instance; } + // create a skip-list-based (non-flat) immutable segment by compacting old immutable segments + public ImmutableSegment createImmutableSegment(final Configuration conf, - final CellComparator comparator, long size) { + final CellComparator comparator, MemStoreCompactorIterator iterator) { MemStoreLAB memStoreLAB = getMemStoreLAB(conf); - MutableSegment segment = generateMutableSegment(conf, comparator, memStoreLAB, size); - return createImmutableSegment(segment); + return new ImmutableSegment(comparator, iterator, memStoreLAB); } - public ImmutableSegment createImmutableSegment(CellComparator comparator, - long size) { + // create an empty immutable segment + public ImmutableSegment createImmutableSegment(CellComparator comparator, long size) { MutableSegment segment = generateMutableSegment(null, comparator, null, size); return createImmutableSegment(segment); } + + // create an immutable segment from a mutable one + public ImmutableSegment createImmutableSegment(MutableSegment segment) { return new ImmutableSegment(segment); } + + // create a mutable segment + public MutableSegment createMutableSegment(final Configuration conf, CellComparator comparator, long size) { MemStoreLAB memStoreLAB = getMemStoreLAB(conf); return generateMutableSegment(conf, comparator, memStoreLAB, size); } + // create a new flat immutable segment by compacting old immutable segments + public ImmutableSegment createImmutableSegment(final Configuration conf, + final CellComparator comparator, MemStoreCompactorIterator iterator, int numOfCells, + ImmutableSegment.Type segmentType) throws IOException { + Preconditions.checkArgument(segmentType != ImmutableSegment.Type.SKIPLIST_MAP_BASED, + "wrong immutable segment type"); + MemStoreLAB memStoreLAB = getMemStoreLAB(conf); + return new ImmutableSegment(conf, comparator, iterator, memStoreLAB, numOfCells, segmentType); + } + //****** private methods to instantiate concrete store segments **********// private MutableSegment generateMutableSegment( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java index 9d7a7230a4da..505ccf4ce2c5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java
@@ -38,8 +38,7 @@ public class VersionedSegmentsList { private final LinkedList<ImmutableSegment> storeSegments; private final long version; - public VersionedSegmentsList( - LinkedList<ImmutableSegment> storeSegments, long version) { + public VersionedSegmentsList(LinkedList<ImmutableSegment> storeSegments, long version) { this.storeSegments = storeSegments; this.version = version; } @@ -51,4 +50,13 @@ public LinkedList<ImmutableSegment> getStoreSegments() { public long getVersion() { return version; } + + public int getNumOfCells() { + int totalCells = 0; + for (ImmutableSegment s : storeSegments) { + totalCells += s.getCellsCount(); + } + return totalCells; + } + } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java index 5c79d7257f05..7d4b52ef11ff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java @@ -240,7 +240,7 @@ public void testNativeSizes() throws IOException { // CellSet cl = CellSet.class; expected = ClassSize.estimateBase(cl, false); - actual = ClassSize.CELL_SKIPLIST_SET; + actual = ClassSize.CELL_SET; if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java new file mode 100644 index 000000000000..e80c030b1fd5 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java @@ -0,0 +1,193 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.hadoop.hbase.regionserver; + +import junit.framework.TestCase; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.experimental.categories.Category; + +import java.util.Iterator; +import java.util.NavigableMap; +import java.util.SortedSet; +import static org.junit.Assert.assertTrue; + +@Category({RegionServerTests.class, SmallTests.class}) +public class TestCellFlatSet extends TestCase { + + private static final int NUM_OF_CELLS = 4; + + private Cell[] cells; + private CellArrayMap cbOnHeap; + private CellChunkMap cbOffHeap; + + private final static Configuration conf = new Configuration(); + private HeapMemStoreLAB mslab; + + protected void setUp() throws Exception { + super.setUp(); + + // create an array of Cells to pass to the CellFlatMap under the CellSet + final byte[] one = Bytes.toBytes(15); + final byte[] two = Bytes.toBytes(25); + final byte[] three = Bytes.toBytes(35); + final byte[] four = Bytes.toBytes(45); + + final byte[] f = Bytes.toBytes("f"); + final byte[] q = Bytes.toBytes("q"); + final byte[] v = Bytes.toBytes(4); + + final KeyValue kv1 = new KeyValue(one, f, q, 10, v); + final KeyValue kv2 = new KeyValue(two, f, q, 20, v); + final KeyValue kv3 = new KeyValue(three, f, q, 30, v); + final KeyValue kv4 = new KeyValue(four, f, q, 40, v); + + cells = new Cell[] {kv1, kv2, kv3, kv4}; + cbOnHeap = new CellArrayMap(CellComparator.COMPARATOR, cells, 0, NUM_OF_CELLS, false); + + conf.setBoolean(SegmentFactory.USEMSLAB_KEY, true); + conf.setFloat(MemStoreChunkPool.CHUNK_POOL_MAXSIZE_KEY, 0.2f); + MemStoreChunkPool.chunkPoolDisabled = false; + mslab = new HeapMemStoreLAB(conf); + + HeapMemStoreLAB.Chunk[] c = shallowCellsToBuffer(kv1, kv2, kv3, kv4); + int chunkSize = conf.getInt(HeapMemStoreLAB.CHUNK_SIZE_KEY, HeapMemStoreLAB.CHUNK_SIZE_DEFAULT); + cbOffHeap = new CellChunkMap(CellComparator.COMPARATOR, mslab, + c, 0, NUM_OF_CELLS, chunkSize, false); + } + + /* Create and test a CellSet based on CellArrayMap */ + public void testCellBlocksOnHeap() throws Exception { + CellSet cs = new CellSet(cbOnHeap); + testCellBlocks(cs); + testIterators(cs); + } + + /* Create and test a CellSet based on CellChunkMap */ + public void testCellBlocksOffHeap() throws Exception { + CellSet cs = new CellSet(cbOffHeap); + testCellBlocks(cs); + testIterators(cs); + } + + /* Generic basic test for immutable CellSet */ + private void testCellBlocks(CellSet cs) throws Exception { + final byte[] oneAndHalf = Bytes.toBytes(20); + final byte[] f = Bytes.toBytes("f"); + final byte[] q = Bytes.toBytes("q"); + final byte[] v = Bytes.toBytes(4); + final KeyValue outerCell = new KeyValue(oneAndHalf, f, q, 10, v); + + assertEquals(NUM_OF_CELLS, cs.size()); // check size + assertFalse(cs.contains(outerCell)); // check the outer cell + + assertTrue(cs.contains(cells[0])); // check existence of the first + Cell first = cs.first(); + assertTrue(cells[0].equals(first)); + + assertTrue(cs.contains(cells[NUM_OF_CELLS - 1])); // check the last + Cell last = cs.last(); + assertTrue(cells[NUM_OF_CELLS - 1].equals(last)); + + SortedSet<Cell> tail = cs.tailSet(cells[1]); // check tail and head sizes + assertEquals(NUM_OF_CELLS - 1, tail.size()); + SortedSet<Cell> head = cs.headSet(cells[1]); + assertEquals(1, head.size()); + + SortedSet<Cell> tailOuter = cs.tailSet(outerCell); // check a tail starting from the outer cell +
assertEquals(NUM_OF_CELLS - 1, tailOuter.size()); + + Cell tailFirst = tail.first(); + assertTrue(cells[1].equals(tailFirst)); + Cell tailLast = tail.last(); + assertTrue(cells[NUM_OF_CELLS - 1].equals(tailLast)); + + Cell headFirst = head.first(); + assertTrue(cells[0].equals(headFirst)); + Cell headLast = head.last(); + assertTrue(cells[0].equals(headLast)); + } + + /* Generic iterators test for immutable CellSet */ + private void testIterators(CellSet cs) throws Exception { + + // Assert that we have NUM_OF_CELLS values and that they are in order + int count = 0; + for (Cell kv : cs) { + assertEquals("\n\n-------------------------------------------------------------------\n" + + "Comparing iteration number " + (count + 1) + ": the returned cell: " + kv + + ", the expected Cell in the CellBlocksMap: " + cells[count] + + "\n-------------------------------------------------------------------\n", + cells[count], kv); + count++; + } + assertEquals(NUM_OF_CELLS, count); + + // Test the descending iterator + count = 0; + for (Iterator<Cell> i = cs.descendingIterator(); i.hasNext();) { + Cell kv = i.next(); + assertEquals(cells[NUM_OF_CELLS - (count + 1)], kv); + count++; + } + assertEquals(NUM_OF_CELLS, count); + } + + /* Create a chunk of shallow cell-references pointing to the deep cells' data */ + private HeapMemStoreLAB.Chunk[] shallowCellsToBuffer(Cell... kvs) { + HeapMemStoreLAB.Chunk chunkD = mslab.allocateChunk(); // deep chunk holds the cell data + HeapMemStoreLAB.Chunk chunkS = mslab.allocateChunk(); // shallow chunk holds the references + + byte[] deepBuffer = chunkD.getData(); + byte[] shallowBuffer = chunkS.getData(); + int offset = 0; + int pos = 0; + for (Cell kv : kvs) { + KeyValueUtil.appendToByteArray(kv, deepBuffer, offset); // write deep cell data + pos = Bytes.putInt(shallowBuffer, pos, chunkD.getId()); // deep chunk index + pos = Bytes.putInt(shallowBuffer, pos, offset); // offset + pos = Bytes.putInt(shallowBuffer, pos, KeyValueUtil.length(kv)); // length + offset += KeyValueUtil.length(kv); + } + return new HeapMemStoreLAB.Chunk[] {chunkS}; + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java index f84de134521e..0dacb7ef5d55 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java @@ -61,15 +61,15 @@
public class TestCompactingMemStore extends TestDefaultMemStore { private static final Log LOG = LogFactory.getLog(TestCompactingMemStore.class); - private static MemStoreChunkPool chunkPool; - private HRegion region; - private RegionServicesForStores regionServicesForStores; - private HStore store; + protected static MemStoreChunkPool chunkPool; + protected HRegion region; + protected RegionServicesForStores regionServicesForStores; + protected HStore store; ////////////////////////////////////////////////////////////////////////////// // Helpers ////////////////////////////////////////////////////////////////////////////// - private static byte[] makeQualifier(final int i1, final int i2) { + protected static byte[] makeQualifier(final int i1, final int i2) { return Bytes.toBytes(Integer.toString(i1) + ";" + Integer.toString(i2)); } @@ -81,6 +81,12 @@ public void tearDown() throws Exception { @Override public void setUp() throws Exception { + compactingSetUp(); + this.memstore = new CompactingMemStore(HBaseConfiguration.create(), CellComparator.COMPARATOR, + store, regionServicesForStores); + } + + protected void compactingSetUp() throws Exception { super.internalSetUp(); Configuration conf = new Configuration(); conf.setBoolean(SegmentFactory.USEMSLAB_KEY, true); @@ -91,13 +97,11 @@ public void setUp() throws Exception { this.region = hbaseUtility.createTestRegion("foobar", hcd); this.regionServicesForStores = region.getRegionServicesForStores(); this.store = new HStore(region, hcd, conf); - this.memstore = new CompactingMemStore(HBaseConfiguration.create(), CellComparator.COMPARATOR, - store, regionServicesForStores); + chunkPool = MemStoreChunkPool.getPool(conf); assertTrue(chunkPool != null); } - /** * A simple test which verifies the 3 possible states when scanning across snapshot. * @@ -609,6 +613,11 @@ public void testCompaction2Buckets() throws IOException { while (((CompactingMemStore)memstore).isMemStoreFlushingInMemory()) { Threads.sleep(1000); } + int counter = 0; + for ( Segment s : memstore.getListOfSegments()) { + counter += s.getCellsCount(); + } + assertEquals(3, counter); assertEquals(0, memstore.getSnapshot().getCellsCount()); assertEquals(528, regionServicesForStores.getGlobalMemstoreTotalSize()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java new file mode 100644 index 000000000000..e2ae2534f7e4 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java @@ -0,0 +1,242 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdge; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.Threads; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryMXBean; +import java.util.ArrayList; +import java.util.List; + +/** + * Compacting memstore test case, where the pipeline is flattened into a + * CellArrayMap-based immutable segment + */ +@Category({RegionServerTests.class, MediumTests.class}) +public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore { + + private static final Log LOG = LogFactory.getLog(TestCompactingToCellArrayMapMemStore.class); + + ////////////////////////////////////////////////////////////////////////////// + // Helpers + ////////////////////////////////////////////////////////////////////////////// + + @Override public void tearDown() throws Exception { + chunkPool.clearChunks(); + } + + @Override public void setUp() throws Exception { + compactingSetUp(); + this.memstore = + new CompactingMemStore(HBaseConfiguration.create(), CellComparator.COMPARATOR, store, + regionServicesForStores, CompactingMemStore.Type.COMPACT_TO_ARRAY_MAP); + } + + ////////////////////////////////////////////////////////////////////////////// + // Compaction tests + ////////////////////////////////////////////////////////////////////////////// + public void testCompaction1Bucket() throws IOException { + int counter = 0; + String[] keys1 = { "A", "A", "B", "C" }; // A1, A2, B3, C4 + + // test 1 bucket + addRowsByKeys(memstore, keys1); + assertEquals(704, regionServicesForStores.getGlobalMemstoreTotalSize()); + assertEquals(4, memstore.getActive().getCellsCount()); + long size = memstore.getFlushableSize(); + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact + while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) { + Threads.sleep(10); + } + assertEquals(0, memstore.getSnapshot().getCellsCount()); + assertEquals(440, regionServicesForStores.getGlobalMemstoreTotalSize()); + for (Segment s : memstore.getListOfSegments()) { + counter += s.getCellsCount(); + } + assertEquals(3, counter); + size = memstore.getFlushableSize(); + MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot + region.addAndGetGlobalMemstoreSize(-size); // simulate flusher + ImmutableSegment s = memstore.getSnapshot(); + assertEquals(3, s.getCellsCount()); + assertEquals(0, regionServicesForStores.getGlobalMemstoreTotalSize()); + + memstore.clearSnapshot(snapshot.getId()); + } + + public void testCompaction2Buckets() throws IOException { + + String[] keys1 = { "A", "A", "B", "C" }; + String[] keys2 = { "A", "B", "D" }; + + addRowsByKeys(memstore, keys1); + assertEquals(704, regionServicesForStores.getGlobalMemstoreTotalSize()); + long size = memstore.getFlushableSize(); + + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact + while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) { + Threads.sleep(1000); + } + int counter = 0; + for (Segment s : memstore.getListOfSegments()) { + counter += s.getCellsCount(); + } + assertEquals(3, counter); + assertEquals(0, memstore.getSnapshot().getCellsCount()); + assertEquals(440, regionServicesForStores.getGlobalMemstoreTotalSize()); + + addRowsByKeys(memstore, keys2); + assertEquals(968, regionServicesForStores.getGlobalMemstoreTotalSize()); + + size = memstore.getFlushableSize(); + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact + int i = 0; + while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) { + Threads.sleep(10); + i++; // advance the guard counter, otherwise the timeout check below can never fire + if (i > 10000000) { + ((CompactingMemStore) memstore).debug(); + assertTrue("\n\n<<< Infinite loop! :( \n", false); + } + } + assertEquals(0, memstore.getSnapshot().getCellsCount()); + counter = 0; + for (Segment s : memstore.getListOfSegments()) { + counter += s.getCellsCount(); + } + assertEquals(4, counter); + assertEquals(592, regionServicesForStores.getGlobalMemstoreTotalSize()); + + size = memstore.getFlushableSize(); + MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot + region.addAndGetGlobalMemstoreSize(-size); // simulate flusher + ImmutableSegment s = memstore.getSnapshot(); + assertEquals(4, s.getCellsCount()); + assertEquals(0, regionServicesForStores.getGlobalMemstoreTotalSize()); + + memstore.clearSnapshot(snapshot.getId()); + } + + public void testCompaction3Buckets() throws IOException { + + String[] keys1 = { "A", "A", "B", "C" }; + String[] keys2 = { "A", "B", "D" }; + String[] keys3 = { "D", "B", "B" }; + + addRowsByKeys(memstore, keys1); + assertEquals(704, region.getMemstoreSize()); + + long size = memstore.getFlushableSize(); + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact + while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) { + Threads.sleep(10); + } + assertEquals(0, memstore.getSnapshot().getCellsCount()); + assertEquals(440, regionServicesForStores.getGlobalMemstoreTotalSize()); + + addRowsByKeys(memstore, keys2); + assertEquals(968, regionServicesForStores.getGlobalMemstoreTotalSize()); + + ((CompactingMemStore) memstore).disableCompaction(); + size = memstore.getFlushableSize(); + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline without compaction + assertEquals(0, memstore.getSnapshot().getCellsCount()); + assertEquals(968, regionServicesForStores.getGlobalMemstoreTotalSize()); + + addRowsByKeys(memstore, keys3); + assertEquals(1496, regionServicesForStores.getGlobalMemstoreTotalSize()); + + ((CompactingMemStore) memstore).enableCompaction(); + size = memstore.getFlushableSize(); + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact + while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) { + Threads.sleep(10); + } + assertEquals(0, memstore.getSnapshot().getCellsCount()); + assertEquals(592, regionServicesForStores.getGlobalMemstoreTotalSize()); + + size = memstore.getFlushableSize(); + MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot + region.addAndGetGlobalMemstoreSize(-size); // simulate flusher + ImmutableSegment s = memstore.getSnapshot(); + assertEquals(4, s.getCellsCount()); + assertEquals(0, regionServicesForStores.getGlobalMemstoreTotalSize()); + + memstore.clearSnapshot(snapshot.getId()); + } + + private void addRowsByKeys(final AbstractMemStore hmc, String[] keys) { + byte[] fam = Bytes.toBytes("testfamily"); + byte[] qf = Bytes.toBytes("testqualifier"); + for (int i = 0; i < keys.length; i++) { + long timestamp = System.currentTimeMillis(); + Threads.sleep(1); // to make sure each kv gets a different ts + byte[] row = Bytes.toBytes(keys[i]); + byte[] val = Bytes.toBytes(keys[i] + i); + KeyValue kv = new KeyValue(row, fam, qf, timestamp, val); + hmc.add(kv); + LOG.debug("added kv: " + kv.getKeyString() + ", timestamp: " + kv.getTimestamp()); + long size = AbstractMemStore.heapSizeChange(kv, true); + regionServicesForStores.addAndGetGlobalMemstoreSize(size); + } + } + + private class EnvironmentEdgeForMemstoreTest implements EnvironmentEdge { + long t = 1234; + + @Override public long currentTime() { + return t; + } + + public void setCurrentTimeMillis(long t) { + this.t = t; + } + } + +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellChunkMapMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellChunkMapMemStore.java new file mode 100644 index 000000000000..f3d7170c59a3 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellChunkMapMemStore.java @@ -0,0 +1,236 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdge; +import org.apache.hadoop.hbase.util.Threads; +import org.junit.experimental.categories.Category; + +import java.io.IOException; + +/** + * Compacting memstore test case, where the pipeline is flattened into a + * CellChunkMap-based immutable segment + */ +@Category({RegionServerTests.class, MediumTests.class}) +public class TestCompactingToCellChunkMapMemStore extends TestCompactingMemStore { + + private static final Log LOG = LogFactory.getLog(TestCompactingToCellChunkMapMemStore.class); + + ////////////////////////////////////////////////////////////////////////////// + // Helpers + ////////////////////////////////////////////////////////////////////////////// + + @Override public void tearDown() throws Exception { + chunkPool.clearChunks(); + } + + @Override public void setUp() throws Exception { + compactingSetUp(); + this.memstore = + new CompactingMemStore(HBaseConfiguration.create(), CellComparator.COMPARATOR, store, + regionServicesForStores, CompactingMemStore.Type.COMPACT_TO_CHUNK_MAP); + } + + ////////////////////////////////////////////////////////////////////////////// + // Compaction tests + ////////////////////////////////////////////////////////////////////////////// + public void testCompaction1Bucket() throws IOException { + int counter = 0; + String[] keys1 = { "A", "A", "B", "C" }; // A1, A2, B3, C4 + + // test 1 bucket + addRowsByKeys(memstore, keys1); + assertEquals(704, regionServicesForStores.getGlobalMemstoreTotalSize()); + assertEquals(4, memstore.getActive().getCellsCount()); + long size = memstore.getFlushableSize(); + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact + while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) { + Threads.sleep(10); + } + assertEquals(0, memstore.getSnapshot().getCellsCount()); + assertEquals(440, regionServicesForStores.getGlobalMemstoreTotalSize()); + for (Segment s : memstore.getListOfSegments()) { + counter += s.getCellsCount(); + } + assertEquals(3, counter); + size = memstore.getFlushableSize(); + MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot + region.addAndGetGlobalMemstoreSize(-size); // simulate flusher + ImmutableSegment s = memstore.getSnapshot(); + assertEquals(3, s.getCellsCount()); + assertEquals(0, regionServicesForStores.getGlobalMemstoreTotalSize()); + + memstore.clearSnapshot(snapshot.getId()); + } + + public void testCompaction2Buckets() throws IOException { + + String[] keys1 = { "A", "A", "B", "C" }; + String[] keys2 = { "A", "B", "D" }; + + addRowsByKeys(memstore, keys1); + assertEquals(704, regionServicesForStores.getGlobalMemstoreTotalSize()); + long size = memstore.getFlushableSize(); + + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact + while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) { + Threads.sleep(1000); + } + int counter = 0; + for (Segment s : memstore.getListOfSegments()) { + counter += s.getCellsCount(); + } + assertEquals(3, counter); + assertEquals(0, memstore.getSnapshot().getCellsCount()); + assertEquals(440, regionServicesForStores.getGlobalMemstoreTotalSize()); + + addRowsByKeys(memstore, keys2); + assertEquals(968, regionServicesForStores.getGlobalMemstoreTotalSize()); + + size = memstore.getFlushableSize(); + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact + int i = 0; + while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) { + Threads.sleep(10); + i++; // advance the guard counter, otherwise the timeout check below can never fire + if (i > 10000000) { + ((CompactingMemStore) memstore).debug(); + assertTrue("\n\n<<< Infinite loop! :( \n", false); + } + } + assertEquals(0, memstore.getSnapshot().getCellsCount()); + counter = 0; + for (Segment s : memstore.getListOfSegments()) { + counter += s.getCellsCount(); + } + assertEquals(4, counter); + assertEquals(592, regionServicesForStores.getGlobalMemstoreTotalSize()); + + size = memstore.getFlushableSize(); + MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot + region.addAndGetGlobalMemstoreSize(-size); // simulate flusher + ImmutableSegment s = memstore.getSnapshot(); + assertEquals(4, s.getCellsCount()); + assertEquals(0, regionServicesForStores.getGlobalMemstoreTotalSize()); + + memstore.clearSnapshot(snapshot.getId()); + } + + public void testCompaction3Buckets() throws IOException { + + String[] keys1 = { "A", "A", "B", "C" }; + String[] keys2 = { "A", "B", "D" }; + String[] keys3 = { "D", "B", "B" }; + + addRowsByKeys(memstore, keys1); + assertEquals(704, region.getMemstoreSize()); + + long size = memstore.getFlushableSize(); + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact + while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) { + Threads.sleep(10); + } + assertEquals(0, memstore.getSnapshot().getCellsCount()); + assertEquals(440, regionServicesForStores.getGlobalMemstoreTotalSize()); + + addRowsByKeys(memstore, keys2); + assertEquals(968, regionServicesForStores.getGlobalMemstoreTotalSize()); + + ((CompactingMemStore) memstore).disableCompaction(); + size = memstore.getFlushableSize(); + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline without compaction + assertEquals(0, memstore.getSnapshot().getCellsCount()); + assertEquals(968, regionServicesForStores.getGlobalMemstoreTotalSize()); + + addRowsByKeys(memstore, keys3); + assertEquals(1496, regionServicesForStores.getGlobalMemstoreTotalSize()); + + ((CompactingMemStore) memstore).enableCompaction(); + size = memstore.getFlushableSize(); + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact + while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) { + Threads.sleep(10); + } + assertEquals(0, memstore.getSnapshot().getCellsCount()); + assertEquals(592, regionServicesForStores.getGlobalMemstoreTotalSize()); + + size = memstore.getFlushableSize(); + MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot + region.addAndGetGlobalMemstoreSize(-size); // simulate flusher + ImmutableSegment s = memstore.getSnapshot(); + assertEquals(4, s.getCellsCount()); + assertEquals(0, regionServicesForStores.getGlobalMemstoreTotalSize()); + + memstore.clearSnapshot(snapshot.getId()); + } + + private void addRowsByKeys(final AbstractMemStore hmc, String[] keys) { + byte[] fam = Bytes.toBytes("testfamily"); + byte[] qf = Bytes.toBytes("testqualifier"); + for (int i = 0; i < keys.length; i++) { + long timestamp = System.currentTimeMillis(); + Threads.sleep(1); // to make sure each kv gets a different ts + byte[] row = Bytes.toBytes(keys[i]); + byte[] val = Bytes.toBytes(keys[i] + i); + KeyValue kv = new KeyValue(row, fam, qf, timestamp, val); + hmc.add(kv); + LOG.debug("added kv: " + kv.getKeyString() + ", timestamp: " + kv.getTimestamp()); + long size = AbstractMemStore.heapSizeChange(kv, true); + regionServicesForStores.addAndGetGlobalMemstoreSize(size); + } + } + + private class EnvironmentEdgeForMemstoreTest implements EnvironmentEdge { + long t = 1234; + + @Override public long currentTime() { + return t; + } + + public void setCurrentTimeMillis(long t) { + this.t = t; + } + } + +}