From f72afc8fe3496464491a3caaae502ec2868e1f82 Mon Sep 17 00:00:00 2001 From: Charles Connell Date: Fri, 12 Sep 2025 10:52:49 -0400 Subject: [PATCH 01/92] HBASE-29573: Fully load QuotaCache instead of reading individual rows on demand (#7282) Signed-off by: Ray Mattingly --- .../hadoop/hbase/quotas/QuotaTableUtil.java | 31 -- .../hadoop/hbase/quotas/QuotaCache.java | 297 +++++++----------- .../hadoop/hbase/quotas/QuotaState.java | 38 +-- .../apache/hadoop/hbase/quotas/QuotaUtil.java | 163 +++++----- .../hadoop/hbase/quotas/UserQuotaState.java | 22 +- .../hbase/quotas/TestAtomicReadQuota.java | 1 - .../quotas/TestBlockBytesScannedQuota.java | 1 - .../quotas/TestClusterScopeQuotaThrottle.java | 1 - .../hbase/quotas/TestDefaultAtomicQuota.java | 1 - .../quotas/TestDefaultHandlerUsageQuota.java | 1 - .../hadoop/hbase/quotas/TestDefaultQuota.java | 7 +- .../hadoop/hbase/quotas/TestQuotaCache.java | 40 +-- .../hadoop/hbase/quotas/TestQuotaCache2.java | 130 ++++++++ .../hadoop/hbase/quotas/TestQuotaState.java | 58 +--- .../hbase/quotas/TestQuotaThrottle.java | 1 - .../hbase/quotas/TestQuotaUserOverride.java | 1 - .../quotas/TestThreadHandlerUsageQuota.java | 8 +- 17 files changed, 362 insertions(+), 439 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaCache2.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java index 1afb15c0ac61..4bdf5e5af049 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java @@ -206,37 +206,6 @@ private static Quotas getQuotas(final Connection connection, final byte[] rowKey return quotasFromData(result.getValue(QUOTA_FAMILY_INFO, qualifier)); } - public static Get makeGetForTableQuotas(final TableName table) { - Get get = new Get(getTableRowKey(table)); - get.addFamily(QUOTA_FAMILY_INFO); - return get; - } - - public static Get makeGetForNamespaceQuotas(final String namespace) { - Get get = new Get(getNamespaceRowKey(namespace)); - get.addFamily(QUOTA_FAMILY_INFO); - return get; - } - - public static Get makeGetForRegionServerQuotas(final String regionServer) { - Get get = new Get(getRegionServerRowKey(regionServer)); - get.addFamily(QUOTA_FAMILY_INFO); - return get; - } - - public static Get makeGetForUserQuotas(final String user, final Iterable tables, - final Iterable namespaces) { - Get get = new Get(getUserRowKey(user)); - get.addColumn(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); - for (final TableName table : tables) { - get.addColumn(QUOTA_FAMILY_INFO, getSettingsQualifierForUserTable(table)); - } - for (final String ns : namespaces) { - get.addColumn(QUOTA_FAMILY_INFO, getSettingsQualifierForUserNamespace(ns)); - } - return get; - } - public static Scan makeScan(final QuotaFilter filter) { Scan scan = new Scan(); scan.addFamily(QUOTA_FAMILY_INFO); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java index fb1b6e4b0d96..6b1585c58550 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java @@ -19,30 +19,23 @@ import java.io.IOException; import java.time.Duration; -import java.util.ArrayList; import java.util.EnumSet; -import java.util.List; +import 
java.util.HashMap; import java.util.Map; import java.util.Optional; -import java.util.Set; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetrics.Option; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.RegionStatesCount; import org.apache.hadoop.hbase.ipc.RpcCall; import org.apache.hadoop.hbase.ipc.RpcServer; -import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.security.UserGroupInformation; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -73,18 +66,15 @@ public class QuotaCache implements Stoppable { public static final String QUOTA_USER_REQUEST_ATTRIBUTE_OVERRIDE_KEY = "hbase.quota.user.override.key"; private static final int REFRESH_DEFAULT_PERIOD = 43_200_000; // 12 hours - private static final int EVICT_PERIOD_FACTOR = 5; - // for testing purpose only, enforce the cache to be always refreshed - static boolean TEST_FORCE_REFRESH = false; - // for testing purpose only, block cache refreshes to reliably verify state - static boolean TEST_BLOCK_REFRESH = false; + private final Object initializerLock = new Object(); + private volatile boolean initialized = false; + + private volatile Map namespaceQuotaCache = new HashMap<>(); + private volatile Map tableQuotaCache = new HashMap<>(); + private volatile Map userQuotaCache = new HashMap<>(); + private volatile Map regionServerQuotaCache = new HashMap<>(); - private final ConcurrentMap namespaceQuotaCache = new ConcurrentHashMap<>(); - private final ConcurrentMap tableQuotaCache = new ConcurrentHashMap<>(); - private final ConcurrentMap userQuotaCache = new ConcurrentHashMap<>(); - private final ConcurrentMap regionServerQuotaCache = - new ConcurrentHashMap<>(); private volatile boolean exceedThrottleQuotaEnabled = false; // factors used to divide cluster scope quota into machine scope quota private volatile double machineQuotaFactor = 1; @@ -96,57 +86,6 @@ public class QuotaCache implements Stoppable { private QuotaRefresherChore refreshChore; private boolean stopped = true; - private final Fetcher userQuotaStateFetcher = new Fetcher<>() { - @Override - public Get makeGet(final String user) { - final Set namespaces = QuotaCache.this.namespaceQuotaCache.keySet(); - final Set tables = QuotaCache.this.tableQuotaCache.keySet(); - return QuotaUtil.makeGetForUserQuotas(user, tables, namespaces); - } - - @Override - public Map fetchEntries(final List gets) throws IOException { - return QuotaUtil.fetchUserQuotas(rsServices.getConnection(), gets, tableMachineQuotaFactors, - machineQuotaFactor); - } - }; - - private final Fetcher regionServerQuotaStateFetcher = new Fetcher<>() { - @Override - public Get makeGet(final String regionServer) { - return QuotaUtil.makeGetForRegionServerQuotas(regionServer); - } - - @Override - public Map fetchEntries(final List gets) throws IOException { - return QuotaUtil.fetchRegionServerQuotas(rsServices.getConnection(), gets); - } - }; - - private final 
Fetcher tableQuotaStateFetcher = new Fetcher<>() { - @Override - public Get makeGet(final TableName table) { - return QuotaUtil.makeGetForTableQuotas(table); - } - - @Override - public Map fetchEntries(final List gets) throws IOException { - return QuotaUtil.fetchTableQuotas(rsServices.getConnection(), gets, tableMachineQuotaFactors); - } - }; - - private final Fetcher namespaceQuotaStateFetcher = new Fetcher<>() { - @Override - public Get makeGet(final String namespace) { - return QuotaUtil.makeGetForNamespaceQuotas(namespace); - } - - @Override - public Map fetchEntries(final List gets) throws IOException { - return QuotaUtil.fetchNamespaceQuotas(rsServices.getConnection(), gets, machineQuotaFactor); - } - }; - public QuotaCache(final RegionServerServices rsServices) { this.rsServices = rsServices; this.userOverrideRequestAttributeKey = @@ -158,10 +97,8 @@ public void start() throws IOException { Configuration conf = rsServices.getConfiguration(); // Refresh the cache every 12 hours, and every time a quota is changed, and every time a - // configuration - // reload is triggered. Periodic reloads are kept to a minimum to avoid flooding the - // RegionServer - // holding the hbase:quota table with requests. + // configuration reload is triggered. Periodic reloads are kept to a minimum to avoid + // flooding the RegionServer holding the hbase:quota table with requests. int period = conf.getInt(REFRESH_CONF_KEY, REFRESH_DEFAULT_PERIOD); refreshChore = new QuotaRefresherChore(conf, period, this); rsServices.getChoreService().scheduleChore(refreshChore); @@ -181,6 +118,34 @@ public boolean isStopped() { return stopped; } + private void ensureInitialized() { + if (!initialized) { + synchronized (initializerLock) { + if (!initialized) { + refreshChore.chore(); + initialized = true; + } + } + } + } + + private Map fetchUserQuotaStateEntries() throws IOException { + return QuotaUtil.fetchUserQuotas(rsServices.getConnection(), tableMachineQuotaFactors, + machineQuotaFactor); + } + + private Map fetchRegionServerQuotaStateEntries() throws IOException { + return QuotaUtil.fetchRegionServerQuotas(rsServices.getConnection()); + } + + private Map fetchTableQuotaStateEntries() throws IOException { + return QuotaUtil.fetchTableQuotas(rsServices.getConnection(), tableMachineQuotaFactors); + } + + private Map fetchNamespaceQuotaStateEntries() throws IOException { + return QuotaUtil.fetchNamespaceQuotas(rsServices.getConnection(), machineQuotaFactor); + } + /** * Returns the limiter associated to the specified user/table. 
* @param ugi the user to limit @@ -201,12 +166,13 @@ public QuotaLimiter getUserLimiter(final UserGroupInformation ugi, final TableNa */ public UserQuotaState getUserQuotaState(final UserGroupInformation ugi) { String user = getQuotaUserName(ugi); - if (!userQuotaCache.containsKey(user)) { - userQuotaCache.put(user, - QuotaUtil.buildDefaultUserQuotaState(rsServices.getConfiguration(), 0L)); - fetch("user", userQuotaCache, userQuotaStateFetcher); + ensureInitialized(); + // local reference because the chore thread may assign to userQuotaCache + Map cache = userQuotaCache; + if (!cache.containsKey(user)) { + cache.put(user, QuotaUtil.buildDefaultUserQuotaState(rsServices.getConfiguration())); } - return userQuotaCache.get(user); + return cache.get(user); } /** @@ -215,11 +181,13 @@ public UserQuotaState getUserQuotaState(final UserGroupInformation ugi) { * @return the limiter associated to the specified table */ public QuotaLimiter getTableLimiter(final TableName table) { - if (!tableQuotaCache.containsKey(table)) { - tableQuotaCache.put(table, new QuotaState()); - fetch("table", tableQuotaCache, tableQuotaStateFetcher); + ensureInitialized(); + // local reference because the chore thread may assign to tableQuotaCache + Map cache = tableQuotaCache; + if (!cache.containsKey(table)) { + cache.put(table, new QuotaState()); } - return tableQuotaCache.get(table).getGlobalLimiter(); + return cache.get(table).getGlobalLimiter(); } /** @@ -228,11 +196,13 @@ public QuotaLimiter getTableLimiter(final TableName table) { * @return the limiter associated to the specified namespace */ public QuotaLimiter getNamespaceLimiter(final String namespace) { - if (!namespaceQuotaCache.containsKey(namespace)) { - namespaceQuotaCache.put(namespace, new QuotaState()); - fetch("namespace", namespaceQuotaCache, namespaceQuotaStateFetcher); + ensureInitialized(); + // local reference because the chore thread may assign to namespaceQuotaCache + Map cache = namespaceQuotaCache; + if (!cache.containsKey(namespace)) { + cache.put(namespace, new QuotaState()); } - return namespaceQuotaCache.get(namespace).getGlobalLimiter(); + return cache.get(namespace).getGlobalLimiter(); } /** @@ -241,41 +211,19 @@ public QuotaLimiter getNamespaceLimiter(final String namespace) { * @return the limiter associated to the specified region server */ public QuotaLimiter getRegionServerQuotaLimiter(final String regionServer) { - if (!regionServerQuotaCache.containsKey(regionServer)) { - regionServerQuotaCache.put(regionServer, new QuotaState()); - fetch("regionServer", regionServerQuotaCache, regionServerQuotaStateFetcher); + ensureInitialized(); + // local reference because the chore thread may assign to regionServerQuotaCache + Map cache = regionServerQuotaCache; + if (!cache.containsKey(regionServer)) { + cache.put(regionServer, new QuotaState()); } - return regionServerQuotaCache.get(regionServer).getGlobalLimiter(); + return cache.get(regionServer).getGlobalLimiter(); } protected boolean isExceedThrottleQuotaEnabled() { return exceedThrottleQuotaEnabled; } - private void fetch(final String type, final Map quotasMap, - final Fetcher fetcher) { - // Find the quota entries to update - List gets = quotasMap.keySet().stream().map(fetcher::makeGet).collect(Collectors.toList()); - - // fetch and update the quota entries - if (!gets.isEmpty()) { - try { - for (Map.Entry entry : fetcher.fetchEntries(gets).entrySet()) { - V quotaInfo = quotasMap.putIfAbsent(entry.getKey(), entry.getValue()); - if (quotaInfo != null) { - 
quotaInfo.update(entry.getValue()); - } - - if (LOG.isTraceEnabled()) { - LOG.trace("Loading {} key={} quotas={}", type, entry.getKey(), quotaInfo); - } - } - } catch (IOException e) { - LOG.warn("Unable to read {} from quota table", type, e); - } - } - } - /** * Applies a request attribute user override if available, otherwise returns the UGI's short * username @@ -306,18 +254,22 @@ void forceSynchronousCacheRefresh() { refreshChore.chore(); } + /** visible for testing */ Map getNamespaceQuotaCache() { return namespaceQuotaCache; } + /** visible for testing */ Map getRegionServerQuotaCache() { return regionServerQuotaCache; } + /** visible for testing */ Map getTableQuotaCache() { return tableQuotaCache; } + /** visible for testing */ Map getUserQuotaCache() { return userQuotaCache; } @@ -354,38 +306,44 @@ public synchronized boolean triggerNow() { } @Override - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "GC_UNRELATED_TYPES", - justification = "I do not understand why the complaints, it looks good to me -- FIX") protected void chore() { - while (TEST_BLOCK_REFRESH) { - LOG.info("TEST_BLOCK_REFRESH=true, so blocking QuotaCache refresh until it is false"); - try { - Thread.sleep(10); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } + updateQuotaFactors(); + + try { + Map newUserQuotaCache = new HashMap<>(fetchUserQuotaStateEntries()); + updateNewCacheFromOld(userQuotaCache, newUserQuotaCache); + userQuotaCache = newUserQuotaCache; + } catch (IOException e) { + LOG.error("Error while fetching user quotas", e); } - // Prefetch online tables/namespaces - for (TableName table : ((HRegionServer) QuotaCache.this.rsServices).getOnlineTables()) { - if (table.isSystemTable()) { - continue; - } - QuotaCache.this.tableQuotaCache.computeIfAbsent(table, key -> new QuotaState()); - final String ns = table.getNamespaceAsString(); + try { + Map newRegionServerQuotaCache = + new HashMap<>(fetchRegionServerQuotaStateEntries()); + updateNewCacheFromOld(regionServerQuotaCache, newRegionServerQuotaCache); + regionServerQuotaCache = newRegionServerQuotaCache; + } catch (IOException e) { + LOG.error("Error while fetching region server quotas", e); + } - QuotaCache.this.namespaceQuotaCache.computeIfAbsent(ns, key -> new QuotaState()); + try { + Map newTableQuotaCache = + new HashMap<>(fetchTableQuotaStateEntries()); + updateNewCacheFromOld(tableQuotaCache, newTableQuotaCache); + tableQuotaCache = newTableQuotaCache; + } catch (IOException e) { + LOG.error("Error while refreshing table quotas", e); } - QuotaCache.this.regionServerQuotaCache - .computeIfAbsent(QuotaTableUtil.QUOTA_REGION_SERVER_ROW_KEY, key -> new QuotaState()); + try { + Map newNamespaceQuotaCache = + new HashMap<>(fetchNamespaceQuotaStateEntries()); + updateNewCacheFromOld(namespaceQuotaCache, newNamespaceQuotaCache); + namespaceQuotaCache = newNamespaceQuotaCache; + } catch (IOException e) { + LOG.error("Error while refreshing namespace quotas", e); + } - updateQuotaFactors(); - fetchAndEvict("namespace", QuotaCache.this.namespaceQuotaCache, namespaceQuotaStateFetcher); - fetchAndEvict("table", QuotaCache.this.tableQuotaCache, tableQuotaStateFetcher); - fetchAndEvict("user", QuotaCache.this.userQuotaCache, userQuotaStateFetcher); - fetchAndEvict("regionServer", QuotaCache.this.regionServerQuotaCache, - regionServerQuotaStateFetcher); fetchExceedThrottleQuota(); } @@ -398,48 +356,6 @@ private void fetchExceedThrottleQuota() { } } - private void fetchAndEvict(final String type, - final ConcurrentMap 
quotasMap, final Fetcher fetcher) { - long now = EnvironmentEdgeManager.currentTime(); - long evictPeriod = getPeriod() * EVICT_PERIOD_FACTOR; - // Find the quota entries to update - List gets = new ArrayList<>(); - List toRemove = new ArrayList<>(); - for (Map.Entry entry : quotasMap.entrySet()) { - long lastQuery = entry.getValue().getLastQuery(); - if (lastQuery > 0 && (now - lastQuery) >= evictPeriod) { - toRemove.add(entry.getKey()); - } else { - gets.add(fetcher.makeGet(entry.getKey())); - } - } - - for (final K key : toRemove) { - if (LOG.isTraceEnabled()) { - LOG.trace("evict " + type + " key=" + key); - } - quotasMap.remove(key); - } - - // fetch and update the quota entries - if (!gets.isEmpty()) { - try { - for (Map.Entry entry : fetcher.fetchEntries(gets).entrySet()) { - V quotaInfo = quotasMap.putIfAbsent(entry.getKey(), entry.getValue()); - if (quotaInfo != null) { - quotaInfo.update(entry.getValue()); - } - - if (LOG.isTraceEnabled()) { - LOG.trace("refresh " + type + " key=" + entry.getKey() + " quotas=" + quotaInfo); - } - } - } catch (IOException e) { - LOG.warn("Unable to read " + type + " from quota table", e); - } - } - } - /** * Update quota factors which is used to divide cluster scope quota into machine scope quota For * user/namespace/user over namespace quota, use [1 / RSNum] as machine factor. For table/user @@ -515,6 +431,20 @@ private void updateMachineQuotaFactors(int rsSize) { } } + /** visible for testing */ + static void updateNewCacheFromOld(Map oldCache, + Map newCache) { + for (Map.Entry entry : oldCache.entrySet()) { + K key = entry.getKey(); + if (newCache.containsKey(key)) { + V newState = newCache.get(key); + V oldState = entry.getValue(); + oldState.update(newState); + newCache.put(key, oldState); + } + } + } + static class RefreshableExpiringValueCache { private final String name; private final LoadingCache> cache; @@ -555,9 +485,4 @@ static interface ThrowingSupplier { T get() throws Exception; } - interface Fetcher { - Get makeGet(Key key); - - Map fetchEntries(List gets) throws IOException; - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaState.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaState.java index 7c9445e15587..61aa9d7f068f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaState.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaState.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.quotas; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -32,33 +31,14 @@ justification = "FindBugs seems confused; says globalLimiter and lastUpdate " + "are mostly synchronized...but to me it looks like they are totally synchronized") public class QuotaState { - protected long lastUpdate = 0; - protected long lastQuery = 0; - protected QuotaLimiter globalLimiter = NoopQuotaLimiter.get(); - public QuotaState() { - this(0); - } - - public QuotaState(final long updateTs) { - lastUpdate = updateTs; - } - - public synchronized long getLastUpdate() { - return lastUpdate; - } - - public synchronized long getLastQuery() { - return lastQuery; - } - @Override public synchronized String toString() { StringBuilder builder = new StringBuilder(); - builder.append("QuotaState(ts=" + getLastUpdate()); + builder.append("QuotaState("); if (isBypass()) { - builder.append(" bypass"); + builder.append("bypass"); } else { if (globalLimiter != 
NoopQuotaLimiter.get()) { // builder.append(" global-limiter"); @@ -85,6 +65,11 @@ public synchronized void setQuotas(final Quotas quotas) { } } + /** visible for testing */ + void setGlobalLimiter(QuotaLimiter globalLimiter) { + this.globalLimiter = globalLimiter; + } + /** * Perform an update of the quota info based on the other quota info object. (This operation is * executed by the QuotaCache) @@ -97,7 +82,6 @@ public synchronized void update(final QuotaState other) { } else { globalLimiter = QuotaLimiterFactory.update(globalLimiter, other.globalLimiter); } - lastUpdate = other.lastUpdate; } /** @@ -105,15 +89,7 @@ public synchronized void update(final QuotaState other) { * @return the quota limiter */ public synchronized QuotaLimiter getGlobalLimiter() { - lastQuery = EnvironmentEdgeManager.currentTime(); return globalLimiter; } - /** - * Return the limiter associated with this quota without updating internal last query stats - * @return the quota limiter - */ - synchronized QuotaLimiter getGlobalLimiterWithoutUpdatingLastQuery() { - return globalLimiter; - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java index 687522783832..d49ce248916c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java @@ -38,12 +38,13 @@ import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -330,59 +331,56 @@ private static void deleteQuotas(final Connection connection, final byte[] rowKe } public static Map fetchUserQuotas(final Connection connection, - final List gets, Map tableMachineQuotaFactors, double factor) - throws IOException { - long nowTs = EnvironmentEdgeManager.currentTime(); - Result[] results = doGet(connection, gets); - - Map userQuotas = new HashMap<>(results.length); - for (int i = 0; i < results.length; ++i) { - byte[] key = gets.get(i).getRow(); - assert isUserRowKey(key); - String user = getUserFromRowKey(key); - - if (results[i].isEmpty()) { - userQuotas.put(user, buildDefaultUserQuotaState(connection.getConfiguration(), nowTs)); - continue; - } - - final UserQuotaState quotaInfo = new UserQuotaState(nowTs); - userQuotas.put(user, quotaInfo); - - assert Bytes.equals(key, results[i].getRow()); - - try { - parseUserResult(user, results[i], new UserQuotasVisitor() { - @Override - public void visitUserQuotas(String userName, String namespace, Quotas quotas) { - quotas = updateClusterQuotaToMachineQuota(quotas, factor); - quotaInfo.setQuotas(namespace, quotas); + Map tableMachineQuotaFactors, double factor) throws IOException { + Map userQuotas = new HashMap<>(); + try (Table table = connection.getTable(QUOTA_TABLE_NAME)) { + Scan scan = new Scan(); + scan.addFamily(QUOTA_FAMILY_INFO); + 
scan.setStartStopRowForPrefixScan(QUOTA_USER_ROW_KEY_PREFIX); + try (ResultScanner resultScanner = table.getScanner(scan)) { + for (Result result : resultScanner) { + byte[] key = result.getRow(); + assert isUserRowKey(key); + String user = getUserFromRowKey(key); + + final UserQuotaState quotaInfo = new UserQuotaState(); + userQuotas.put(user, quotaInfo); + + try { + parseUserResult(user, result, new UserQuotasVisitor() { + @Override + public void visitUserQuotas(String userName, String namespace, Quotas quotas) { + quotas = updateClusterQuotaToMachineQuota(quotas, factor); + quotaInfo.setQuotas(namespace, quotas); + } + + @Override + public void visitUserQuotas(String userName, TableName table, Quotas quotas) { + quotas = updateClusterQuotaToMachineQuota(quotas, + tableMachineQuotaFactors.containsKey(table) + ? tableMachineQuotaFactors.get(table) + : 1); + quotaInfo.setQuotas(table, quotas); + } + + @Override + public void visitUserQuotas(String userName, Quotas quotas) { + quotas = updateClusterQuotaToMachineQuota(quotas, factor); + quotaInfo.setQuotas(quotas); + } + }); + } catch (IOException e) { + LOG.error("Unable to parse user '" + user + "' quotas", e); + userQuotas.remove(user); } - - @Override - public void visitUserQuotas(String userName, TableName table, Quotas quotas) { - quotas = updateClusterQuotaToMachineQuota(quotas, - tableMachineQuotaFactors.containsKey(table) - ? tableMachineQuotaFactors.get(table) - : 1); - quotaInfo.setQuotas(table, quotas); - } - - @Override - public void visitUserQuotas(String userName, Quotas quotas) { - quotas = updateClusterQuotaToMachineQuota(quotas, factor); - quotaInfo.setQuotas(quotas); - } - }); - } catch (IOException e) { - LOG.error("Unable to parse user '" + user + "' quotas", e); - userQuotas.remove(user); + } } } + return userQuotas; } - protected static UserQuotaState buildDefaultUserQuotaState(Configuration conf, long nowTs) { + protected static UserQuotaState buildDefaultUserQuotaState(Configuration conf) { QuotaProtos.Throttle.Builder throttleBuilder = QuotaProtos.Throttle.newBuilder(); buildDefaultTimedQuota(conf, QUOTA_DEFAULT_USER_MACHINE_READ_NUM) @@ -406,7 +404,7 @@ protected static UserQuotaState buildDefaultUserQuotaState(Configuration conf, l buildDefaultTimedQuota(conf, QUOTA_DEFAULT_USER_MACHINE_REQUEST_HANDLER_USAGE_MS) .ifPresent(throttleBuilder::setReqHandlerUsageMs); - UserQuotaState state = new UserQuotaState(nowTs); + UserQuotaState state = new UserQuotaState(); QuotaProtos.Quotas defaultQuotas = QuotaProtos.Quotas.newBuilder().setThrottle(throttleBuilder.build()).build(); state.setQuotas(defaultQuotas); @@ -423,8 +421,11 @@ private static Optional buildDefaultTimedQuota(Configuration conf, S } public static Map fetchTableQuotas(final Connection connection, - final List gets, Map tableMachineFactors) throws IOException { - return fetchGlobalQuotas("table", connection, gets, new KeyFromRow() { + Map tableMachineFactors) throws IOException { + Scan scan = new Scan(); + scan.addFamily(QUOTA_FAMILY_INFO); + scan.setStartStopRowForPrefixScan(QUOTA_TABLE_ROW_KEY_PREFIX); + return fetchGlobalQuotas("table", scan, connection, new KeyFromRow() { @Override public TableName getKeyFromRow(final byte[] row) { assert isTableRowKey(row); @@ -439,8 +440,11 @@ public double getFactor(TableName tableName) { } public static Map fetchNamespaceQuotas(final Connection connection, - final List gets, double factor) throws IOException { - return fetchGlobalQuotas("namespace", connection, gets, new KeyFromRow() { + double factor) throws 
IOException { + Scan scan = new Scan(); + scan.addFamily(QUOTA_FAMILY_INFO); + scan.setStartStopRowForPrefixScan(QUOTA_NAMESPACE_ROW_KEY_PREFIX); + return fetchGlobalQuotas("namespace", scan, connection, new KeyFromRow() { @Override public String getKeyFromRow(final byte[] row) { assert isNamespaceRowKey(row); @@ -454,9 +458,12 @@ public double getFactor(String s) { }); } - public static Map fetchRegionServerQuotas(final Connection connection, - final List gets) throws IOException { - return fetchGlobalQuotas("regionServer", connection, gets, new KeyFromRow() { + public static Map fetchRegionServerQuotas(final Connection connection) + throws IOException { + Scan scan = new Scan(); + scan.addFamily(QUOTA_FAMILY_INFO); + scan.setStartStopRowForPrefixScan(QUOTA_REGION_SERVER_ROW_KEY_PREFIX); + return fetchGlobalQuotas("regionServer", scan, connection, new KeyFromRow() { @Override public String getKeyFromRow(final byte[] row) { assert isRegionServerRowKey(row); @@ -470,32 +477,34 @@ public double getFactor(String s) { }); } - public static Map fetchGlobalQuotas(final String type, - final Connection connection, final List gets, final KeyFromRow kfr) throws IOException { - long nowTs = EnvironmentEdgeManager.currentTime(); - Result[] results = doGet(connection, gets); + public static Map fetchGlobalQuotas(final String type, final Scan scan, + final Connection connection, final KeyFromRow kfr) throws IOException { - Map globalQuotas = new HashMap<>(results.length); - for (int i = 0; i < results.length; ++i) { - byte[] row = gets.get(i).getRow(); - K key = kfr.getKeyFromRow(row); + Map globalQuotas = new HashMap<>(); + try (Table table = connection.getTable(QUOTA_TABLE_NAME)) { + try (ResultScanner resultScanner = table.getScanner(scan)) { + for (Result result : resultScanner) { - QuotaState quotaInfo = new QuotaState(nowTs); - globalQuotas.put(key, quotaInfo); + byte[] row = result.getRow(); + K key = kfr.getKeyFromRow(row); - if (results[i].isEmpty()) continue; - assert Bytes.equals(row, results[i].getRow()); + QuotaState quotaInfo = new QuotaState(); + globalQuotas.put(key, quotaInfo); - byte[] data = results[i].getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); - if (data == null) continue; + byte[] data = result.getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); + if (data == null) { + continue; + } - try { - Quotas quotas = quotasFromData(data); - quotas = updateClusterQuotaToMachineQuota(quotas, kfr.getFactor(key)); - quotaInfo.setQuotas(quotas); - } catch (IOException e) { - LOG.error("Unable to parse " + type + " '" + key + "' quotas", e); - globalQuotas.remove(key); + try { + Quotas quotas = quotasFromData(data); + quotas = updateClusterQuotaToMachineQuota(quotas, kfr.getFactor(key)); + quotaInfo.setQuotas(quotas); + } catch (IOException e) { + LOG.error("Unable to parse {} '{}' quotas", type, key, e); + globalQuotas.remove(key); + } + } } } return globalQuotas; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java index a3ec97994363..877ad195c716 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java @@ -22,7 +22,6 @@ import java.util.Map; import java.util.Set; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import 
org.apache.yetus.audience.InterfaceStability; @@ -42,24 +41,18 @@ public class UserQuotaState extends QuotaState { private Map tableLimiters = null; private boolean bypassGlobals = false; - public UserQuotaState() { - super(); - } - - public UserQuotaState(final long updateTs) { - super(updateTs); - } - @Override public synchronized String toString() { StringBuilder builder = new StringBuilder(); - builder.append("UserQuotaState(ts=" + getLastUpdate()); - if (bypassGlobals) builder.append(" bypass-globals"); + builder.append("UserQuotaState("); + if (bypassGlobals) { + builder.append("bypass-globals"); + } if (isBypass()) { builder.append(" bypass"); } else { - if (getGlobalLimiterWithoutUpdatingLastQuery() != NoopQuotaLimiter.get()) { + if (getGlobalLimiter() != NoopQuotaLimiter.get()) { builder.append(" global-limiter"); } @@ -86,7 +79,7 @@ public synchronized String toString() { /** Returns true if there is no quota information associated to this object */ @Override public synchronized boolean isBypass() { - return !bypassGlobals && getGlobalLimiterWithoutUpdatingLastQuery() == NoopQuotaLimiter.get() + return !bypassGlobals && getGlobalLimiter() == NoopQuotaLimiter.get() && (tableLimiters == null || tableLimiters.isEmpty()) && (namespaceLimiters == null || namespaceLimiters.isEmpty()); } @@ -191,7 +184,6 @@ private static Map updateLimiters(final Map userQuotaState.getLastUpdate() != 0); - long lastUpdate = userQuotaState.getLastUpdate(); - - // refresh should not apply to recently refreshed quota - quotaCache.triggerCacheRefresh(); - Thread.sleep(250); - long newLastUpdate = userQuotaState.getLastUpdate(); - assertEquals(lastUpdate, newLastUpdate); - - quotaCache.triggerCacheRefresh(); - waitMinuteQuota(); - // should refresh after time has passed - TEST_UTIL.waitFor(5_000, () -> lastUpdate != userQuotaState.getLastUpdate()); - } - @Test public void testUserQuotaLookup() throws Exception { QuotaCache quotaCache = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaCache2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaCache2.java new file mode 100644 index 000000000000..2c33b265771a --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaCache2.java @@ -0,0 +1,130 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.quotas; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.util.HashMap; +import java.util.Map; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos; + +/** + * Tests of QuotaCache that don't require a minicluster, unlike in TestQuotaCache + */ +@Category({ RegionServerTests.class, SmallTests.class }) +public class TestQuotaCache2 { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestQuotaCache2.class); + + @Test + public void testPreserveLimiterAvailability() throws Exception { + // establish old cache with a limiter for 100 read bytes per second + QuotaState oldState = new QuotaState(); + Map oldCache = new HashMap<>(); + oldCache.put("my_table", oldState); + QuotaProtos.Throttle throttle1 = QuotaProtos.Throttle.newBuilder() + .setReadSize(QuotaProtos.TimedQuota.newBuilder().setTimeUnit(HBaseProtos.TimeUnit.SECONDS) + .setSoftLimit(100).setScope(QuotaProtos.QuotaScope.MACHINE).build()) + .build(); + QuotaLimiter limiter1 = TimeBasedLimiter.fromThrottle(throttle1); + oldState.setGlobalLimiter(limiter1); + + // consume one byte from the limiter, so 99 will be left + limiter1.consumeRead(1, 1, false); + + // establish new cache, also with a limiter for 100 read bytes per second + QuotaState newState = new QuotaState(); + Map newCache = new HashMap<>(); + newCache.put("my_table", newState); + QuotaProtos.Throttle throttle2 = QuotaProtos.Throttle.newBuilder() + .setReadSize(QuotaProtos.TimedQuota.newBuilder().setTimeUnit(HBaseProtos.TimeUnit.SECONDS) + .setSoftLimit(100).setScope(QuotaProtos.QuotaScope.MACHINE).build()) + .build(); + QuotaLimiter limiter2 = TimeBasedLimiter.fromThrottle(throttle2); + newState.setGlobalLimiter(limiter2); + + // update new cache from old cache + QuotaCache.updateNewCacheFromOld(oldCache, newCache); + + // verify that the 99 available bytes from the limiter was carried over + TimeBasedLimiter updatedLimiter = + (TimeBasedLimiter) newCache.get("my_table").getGlobalLimiter(); + assertEquals(99, updatedLimiter.getReadAvailable()); + } + + @Test + public void testClobberLimiterLimit() throws Exception { + // establish old cache with a limiter for 100 read bytes per second + QuotaState oldState = new QuotaState(); + Map oldCache = new HashMap<>(); + oldCache.put("my_table", oldState); + QuotaProtos.Throttle throttle1 = QuotaProtos.Throttle.newBuilder() + .setReadSize(QuotaProtos.TimedQuota.newBuilder().setTimeUnit(HBaseProtos.TimeUnit.SECONDS) + .setSoftLimit(100).setScope(QuotaProtos.QuotaScope.MACHINE).build()) + .build(); + QuotaLimiter limiter1 = TimeBasedLimiter.fromThrottle(throttle1); + oldState.setGlobalLimiter(limiter1); + + // establish new cache, also with a limiter for 100 read bytes per second + QuotaState newState = new QuotaState(); + Map newCache = new HashMap<>(); + newCache.put("my_table", newState); + QuotaProtos.Throttle throttle2 = QuotaProtos.Throttle.newBuilder() + .setReadSize(QuotaProtos.TimedQuota.newBuilder().setTimeUnit(HBaseProtos.TimeUnit.SECONDS) + 
.setSoftLimit(50).setScope(QuotaProtos.QuotaScope.MACHINE).build()) + .build(); + QuotaLimiter limiter2 = TimeBasedLimiter.fromThrottle(throttle2); + newState.setGlobalLimiter(limiter2); + + // update new cache from old cache + QuotaCache.updateNewCacheFromOld(oldCache, newCache); + + // verify that the 99 available bytes from the limiter was carried over + TimeBasedLimiter updatedLimiter = + (TimeBasedLimiter) newCache.get("my_table").getGlobalLimiter(); + assertEquals(50, updatedLimiter.getReadLimit()); + } + + @Test + public void testForgetsDeletedQuota() { + QuotaState oldState = new QuotaState(); + Map oldCache = new HashMap<>(); + oldCache.put("my_table1", oldState); + + QuotaState newState = new QuotaState(); + Map newCache = new HashMap<>(); + newCache.put("my_table2", newState); + + QuotaCache.updateNewCacheFromOld(oldCache, newCache); + + assertTrue(newCache.containsKey("my_table2")); + assertFalse(newCache.containsKey("my_table1")); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaState.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaState.java index 59b26f3f0d91..ff4b6bc9949b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaState.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaState.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.quotas; -import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -81,67 +80,38 @@ public void testSimpleQuotaStateOperation() { assertThrottleException(quotaInfo.getTableLimiter(tableName), NUM_TABLE_THROTTLE); } - @Test - public void testQuotaStateUpdateBypassThrottle() { - final long LAST_UPDATE = 10; - - UserQuotaState quotaInfo = new UserQuotaState(); - assertEquals(0, quotaInfo.getLastUpdate()); - assertTrue(quotaInfo.isBypass()); - - UserQuotaState otherQuotaState = new UserQuotaState(LAST_UPDATE); - assertEquals(LAST_UPDATE, otherQuotaState.getLastUpdate()); - assertTrue(otherQuotaState.isBypass()); - - quotaInfo.update(otherQuotaState); - assertEquals(LAST_UPDATE, quotaInfo.getLastUpdate()); - assertTrue(quotaInfo.isBypass()); - assertTrue(quotaInfo.getGlobalLimiter() == quotaInfo.getTableLimiter(UNKNOWN_TABLE_NAME)); - assertNoopLimiter(quotaInfo.getTableLimiter(UNKNOWN_TABLE_NAME)); - } - @Test public void testQuotaStateUpdateGlobalThrottle() { final int NUM_GLOBAL_THROTTLE_1 = 3; final int NUM_GLOBAL_THROTTLE_2 = 11; - final long LAST_UPDATE_1 = 10; - final long LAST_UPDATE_2 = 20; - final long LAST_UPDATE_3 = 30; QuotaState quotaInfo = new QuotaState(); - assertEquals(0, quotaInfo.getLastUpdate()); assertTrue(quotaInfo.isBypass()); // Add global throttle - QuotaState otherQuotaState = new QuotaState(LAST_UPDATE_1); + QuotaState otherQuotaState = new QuotaState(); otherQuotaState.setQuotas(buildReqNumThrottle(NUM_GLOBAL_THROTTLE_1)); - assertEquals(LAST_UPDATE_1, otherQuotaState.getLastUpdate()); assertFalse(otherQuotaState.isBypass()); quotaInfo.update(otherQuotaState); - assertEquals(LAST_UPDATE_1, quotaInfo.getLastUpdate()); assertFalse(quotaInfo.isBypass()); assertThrottleException(quotaInfo.getGlobalLimiter(), NUM_GLOBAL_THROTTLE_1); // Update global Throttle - otherQuotaState = new QuotaState(LAST_UPDATE_2); + otherQuotaState = new QuotaState(); otherQuotaState.setQuotas(buildReqNumThrottle(NUM_GLOBAL_THROTTLE_2)); - assertEquals(LAST_UPDATE_2, otherQuotaState.getLastUpdate()); 
assertFalse(otherQuotaState.isBypass()); quotaInfo.update(otherQuotaState); - assertEquals(LAST_UPDATE_2, quotaInfo.getLastUpdate()); assertFalse(quotaInfo.isBypass()); assertThrottleException(quotaInfo.getGlobalLimiter(), NUM_GLOBAL_THROTTLE_2 - NUM_GLOBAL_THROTTLE_1); // Remove global throttle - otherQuotaState = new QuotaState(LAST_UPDATE_3); - assertEquals(LAST_UPDATE_3, otherQuotaState.getLastUpdate()); + otherQuotaState = new QuotaState(); assertTrue(otherQuotaState.isBypass()); quotaInfo.update(otherQuotaState); - assertEquals(LAST_UPDATE_3, quotaInfo.getLastUpdate()); assertTrue(quotaInfo.isBypass()); assertNoopLimiter(quotaInfo.getGlobalLimiter()); } @@ -155,37 +125,29 @@ public void testQuotaStateUpdateTableThrottle() { final int TABLE_A_THROTTLE_2 = 11; final int TABLE_B_THROTTLE = 4; final int TABLE_C_THROTTLE = 5; - final long LAST_UPDATE_1 = 10; - final long LAST_UPDATE_2 = 20; - final long LAST_UPDATE_3 = 30; UserQuotaState quotaInfo = new UserQuotaState(); - assertEquals(0, quotaInfo.getLastUpdate()); assertTrue(quotaInfo.isBypass()); // Add A B table limiters - UserQuotaState otherQuotaState = new UserQuotaState(LAST_UPDATE_1); + UserQuotaState otherQuotaState = new UserQuotaState(); otherQuotaState.setQuotas(tableNameA, buildReqNumThrottle(TABLE_A_THROTTLE_1)); otherQuotaState.setQuotas(tableNameB, buildReqNumThrottle(TABLE_B_THROTTLE)); - assertEquals(LAST_UPDATE_1, otherQuotaState.getLastUpdate()); assertFalse(otherQuotaState.isBypass()); quotaInfo.update(otherQuotaState); - assertEquals(LAST_UPDATE_1, quotaInfo.getLastUpdate()); assertFalse(quotaInfo.isBypass()); assertThrottleException(quotaInfo.getTableLimiter(tableNameA), TABLE_A_THROTTLE_1); assertThrottleException(quotaInfo.getTableLimiter(tableNameB), TABLE_B_THROTTLE); assertNoopLimiter(quotaInfo.getTableLimiter(tableNameC)); // Add C, Remove B, Update A table limiters - otherQuotaState = new UserQuotaState(LAST_UPDATE_2); + otherQuotaState = new UserQuotaState(); otherQuotaState.setQuotas(tableNameA, buildReqNumThrottle(TABLE_A_THROTTLE_2)); otherQuotaState.setQuotas(tableNameC, buildReqNumThrottle(TABLE_C_THROTTLE)); - assertEquals(LAST_UPDATE_2, otherQuotaState.getLastUpdate()); assertFalse(otherQuotaState.isBypass()); quotaInfo.update(otherQuotaState); - assertEquals(LAST_UPDATE_2, quotaInfo.getLastUpdate()); assertFalse(quotaInfo.isBypass()); assertThrottleException(quotaInfo.getTableLimiter(tableNameA), TABLE_A_THROTTLE_2 - TABLE_A_THROTTLE_1); @@ -193,12 +155,10 @@ public void testQuotaStateUpdateTableThrottle() { assertNoopLimiter(quotaInfo.getTableLimiter(tableNameB)); // Remove table limiters - otherQuotaState = new UserQuotaState(LAST_UPDATE_3); - assertEquals(LAST_UPDATE_3, otherQuotaState.getLastUpdate()); + otherQuotaState = new UserQuotaState(); assertTrue(otherQuotaState.isBypass()); quotaInfo.update(otherQuotaState); - assertEquals(LAST_UPDATE_3, quotaInfo.getLastUpdate()); assertTrue(quotaInfo.isBypass()); assertNoopLimiter(quotaInfo.getTableLimiter(UNKNOWN_TABLE_NAME)); } @@ -207,20 +167,16 @@ public void testQuotaStateUpdateTableThrottle() { public void testTableThrottleWithBatch() { final TableName TABLE_A = TableName.valueOf("TableA"); final int TABLE_A_THROTTLE_1 = 3; - final long LAST_UPDATE_1 = 10; UserQuotaState quotaInfo = new UserQuotaState(); - assertEquals(0, quotaInfo.getLastUpdate()); assertTrue(quotaInfo.isBypass()); // Add A table limiters - UserQuotaState otherQuotaState = new UserQuotaState(LAST_UPDATE_1); + UserQuotaState otherQuotaState = new UserQuotaState(); 
otherQuotaState.setQuotas(TABLE_A, buildReqNumThrottle(TABLE_A_THROTTLE_1)); - assertEquals(LAST_UPDATE_1, otherQuotaState.getLastUpdate()); assertFalse(otherQuotaState.isBypass()); quotaInfo.update(otherQuotaState); - assertEquals(LAST_UPDATE_1, quotaInfo.getLastUpdate()); assertFalse(quotaInfo.isBypass()); QuotaLimiter limiter = quotaInfo.getTableLimiter(TABLE_A); try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java index 391b7a088691..4309c3640dde 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java @@ -88,7 +88,6 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true); TEST_UTIL.startMiniCluster(1); TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME); - QuotaCache.TEST_FORCE_REFRESH = true; tables = new Table[TABLE_NAMES.length]; for (int i = 0; i < TABLE_NAMES.length; ++i) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaUserOverride.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaUserOverride.java index 54931d471228..2c8420ef932e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaUserOverride.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaUserOverride.java @@ -65,7 +65,6 @@ public static void setUpBeforeClass() throws Exception { CUSTOM_OVERRIDE_KEY); TEST_UTIL.startMiniCluster(NUM_SERVERS); TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME); - QuotaCache.TEST_FORCE_REFRESH = true; TEST_UTIL.createTable(TABLE_NAME, FAMILY); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestThreadHandlerUsageQuota.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestThreadHandlerUsageQuota.java index 5c446b6d7c25..cf82a0ed52b7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestThreadHandlerUsageQuota.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestThreadHandlerUsageQuota.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.quotas; +import static org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.triggerUserCacheRefresh; import static org.junit.Assert.assertTrue; import java.io.IOException; @@ -74,7 +75,6 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.createTable(TABLE_NAME, FAMILY); TEST_UTIL.waitTableAvailable(TABLE_NAME); - QuotaCache.TEST_FORCE_REFRESH = true; TEST_UTIL.flush(TABLE_NAME); } @@ -104,11 +104,12 @@ public void testHandlerUsageThrottleForWrites() throws Exception { } } - private void configureThrottle() throws IOException { + private void configureThrottle() throws Exception { try (Admin admin = TEST_UTIL.getAdmin()) { admin.setQuota(QuotaSettingsFactory.throttleUser(getUserName(), - ThrottleType.REQUEST_HANDLER_USAGE_MS, 10000, TimeUnit.SECONDS)); + ThrottleType.REQUEST_HANDLER_USAGE_MS, 1, TimeUnit.SECONDS)); } + triggerUserCacheRefresh(TEST_UTIL, false, TABLE_NAME); } private void unthrottleUser() throws Exception { @@ -116,6 +117,7 @@ private void unthrottleUser() throws Exception { admin.setQuota(QuotaSettingsFactory.unthrottleUserByThrottleType(getUserName(), ThrottleType.REQUEST_HANDLER_USAGE_MS)); } + triggerUserCacheRefresh(TEST_UTIL, true, TABLE_NAME); } private static String 
getUserName() throws IOException { From ffed09d96bbaccdc83e1d7df66d640cf10b2f191 Mon Sep 17 00:00:00 2001 From: Ruanhui <32773751+frostruan@users.noreply.github.com> Date: Fri, 12 Sep 2025 23:09:34 +0800 Subject: [PATCH 02/92] HBASE-26974 Introduce a LogRollProcedure (#5408) Co-authored-by: huiruan Signed-off-by: Duo Zhang --- .../backup/impl/FullTableBackupClient.java | 7 +- .../backup/impl/IncrementalBackupManager.java | 10 +- .../hadoop/hbase/backup/util/BackupUtils.java | 51 +++++ .../hadoop/hbase/backup/TestBackupBase.java | 7 +- .../hadoop/hbase/backup/TestBackupMerge.java | 19 +- .../org/apache/hadoop/hbase/client/Admin.java | 10 + .../hbase/client/AdminOverAsyncAdmin.java | 5 + .../hadoop/hbase/client/AsyncAdmin.java | 9 + .../hadoop/hbase/client/AsyncHBaseAdmin.java | 5 + .../hbase/client/RawAsyncHBaseAdmin.java | 138 +++++++++++--- .../shaded/protobuf/RequestConverter.java | 6 + .../apache/hadoop/hbase/util/FutureUtils.java | 2 +- .../procedure2/RemoteProcedureDispatcher.java | 2 +- .../src/main/protobuf/HBase.proto | 4 + .../main/protobuf/server/master/Master.proto | 12 ++ .../server/master/MasterProcedure.proto | 18 ++ .../server/master/RegionServerStatus.proto | 1 + .../main/protobuf/server/region/Admin.proto | 1 - .../hadoop/hbase/executor/EventType.java | 8 +- .../hadoop/hbase/executor/ExecutorType.java | 3 +- .../apache/hadoop/hbase/master/HMaster.java | 24 ++- .../hbase/master/MasterRpcServices.java | 19 +- .../hadoop/hbase/master/MasterServices.java | 6 + .../hadoop/hbase/master/ServerManager.java | 8 + .../assignment/RegionRemoteProcedureBase.java | 2 +- .../assignment/RegionTransitionProcedure.java | 2 +- .../procedure/FlushRegionProcedure.java | 2 +- .../master/procedure/LogRollProcedure.java | 178 ++++++++++++++++++ .../procedure/LogRollRemoteProcedure.java | 113 +++++++++++ .../procedure/ServerProcedureInterface.java | 5 + .../hbase/master/procedure/ServerQueue.java | 1 + .../procedure/ServerRemoteProcedure.java | 3 +- .../procedure/SnapshotRegionProcedure.java | 2 +- .../procedure2/BaseRSProcedureCallable.java | 7 +- .../hbase/procedure2/RSProcedureCallable.java | 2 +- .../regionserver/FlushRegionCallable.java | 3 +- .../hbase/regionserver/HRegionServer.java | 11 +- .../hbase/regionserver/LogRollCallable.java | 84 +++++++++ .../hbase/regionserver/RSRpcServices.java | 2 +- .../regionserver/ReloadQuotasCallable.java | 3 +- .../RemoteProcedureResultReporter.java | 7 +- .../regionserver/SnapshotRegionCallable.java | 3 +- .../regionserver/SnapshotVerifyCallable.java | 3 +- .../hbase/regionserver/SplitWALCallable.java | 3 +- .../handler/RSProcedureHandler.java | 8 +- .../ClaimReplicationQueueCallable.java | 3 +- .../regionserver/RefreshPeerCallable.java | 3 +- .../ReplaySyncReplicationWALCallable.java | 3 +- .../SwitchRpcThrottleRemoteCallable.java | 3 +- .../hadoop/hbase/wal/AbstractWALRoller.java | 2 +- .../hbase/master/MockNoopMasterServices.java | 5 + .../procedure/TestLogRollProcedure.java | 104 ++++++++++ .../procedure/TestServerRemoteProcedure.java | 3 +- .../TestRegisterPeerWorkerWhenRestarting.java | 4 +- .../hbase/rsgroup/VerifyingRSGroupAdmin.java | 5 + hbase-shell/src/main/ruby/hbase/admin.rb | 6 + hbase-shell/src/main/ruby/shell.rb | 1 + .../main/ruby/shell/commands/wal_roll_all.rb | 37 ++++ .../hbase/thrift2/client/ThriftAdmin.java | 4 + 59 files changed, 901 insertions(+), 101 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/LogRollProcedure.java create mode 100644 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/LogRollRemoteProcedure.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRollCallable.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestLogRollProcedure.java create mode 100644 hbase-shell/src/main/ruby/shell/commands/wal_roll_all.rb diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java index f21ced9bf2ff..2293fd4f8149 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java @@ -25,7 +25,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.hadoop.hbase.TableName; @@ -36,7 +35,6 @@ import org.apache.hadoop.hbase.backup.BackupRequest; import org.apache.hadoop.hbase.backup.BackupRestoreFactory; import org.apache.hadoop.hbase.backup.BackupType; -import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; import org.apache.hadoop.hbase.backup.util.BackupUtils; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; @@ -158,10 +156,7 @@ public void execute() throws IOException { // snapshots for the same reason as the log rolls. List bulkLoadsToDelete = backupManager.readBulkloadRows(tableList); - Map props = new HashMap<>(); - props.put("backupRoot", backupInfo.getBackupRootDir()); - admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, - LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props); + BackupUtils.logRoll(conn, backupInfo.getBackupRootDir(), conf); newTimestamps = backupManager.readRegionServerLastLogRollResult(); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java index c92c0747e83c..20884edf836e 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.hadoop.conf.Configuration; @@ -29,9 +28,7 @@ import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; import org.apache.hadoop.hbase.backup.util.BackupUtils; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore; import org.apache.hadoop.hbase.util.CommonFSUtils; @@ -84,13 +81,8 @@ public Map getIncrBackupLogFileMap() throws IOException { } LOG.info("Execute roll log procedure for incremental backup ..."); - HashMap props = new HashMap<>(); - props.put("backupRoot", backupInfo.getBackupRootDir()); + BackupUtils.logRoll(conn, backupInfo.getBackupRootDir(), conf); - try (Admin admin = conn.getAdmin()) { - admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, - 
LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props); - } newTimestamps = readRegionServerLastLogRollResult(); logList = getLogFilesForNewBackup(previousTimestampMins, newTimestamps, conf, savedStartCode); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java index 15159ed73e46..183cc2054f1a 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java @@ -49,6 +49,8 @@ import org.apache.hadoop.hbase.backup.RestoreRequest; import org.apache.hadoop.hbase.backup.impl.BackupManifest; import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.RegionInfo; @@ -65,6 +67,7 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Splitter; +import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; import org.apache.hbase.thirdparty.com.google.common.collect.Iterators; @@ -770,4 +773,52 @@ public static String findMostRecentBackupId(String[] backupIds) { return BackupRestoreConstants.BACKUPID_PREFIX + recentTimestamp; } + /** + * roll WAL writer for all region servers and record the newest log roll result + */ + public static void logRoll(Connection conn, String backupRootDir, Configuration conf) + throws IOException { + boolean legacy = conf.getBoolean("hbase.backup.logroll.legacy.used", false); + if (legacy) { + logRollV1(conn, backupRootDir); + } else { + logRollV2(conn, backupRootDir); + } + } + + private static void logRollV1(Connection conn, String backupRootDir) throws IOException { + try (Admin admin = conn.getAdmin()) { + admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, + LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, + ImmutableMap.of("backupRoot", backupRootDir)); + } + } + + private static void logRollV2(Connection conn, String backupRootDir) throws IOException { + BackupSystemTable backupSystemTable = new BackupSystemTable(conn); + HashMap lastLogRollResult = + backupSystemTable.readRegionServerLastLogRollResult(backupRootDir); + try (Admin admin = conn.getAdmin()) { + Map newLogRollResult = admin.rollAllWALWriters(); + + for (Map.Entry entry : newLogRollResult.entrySet()) { + ServerName serverName = entry.getKey(); + long newHighestWALFilenum = entry.getValue(); + + String address = serverName.getAddress().toString(); + Long lastHighestWALFilenum = lastLogRollResult.get(address); + if (lastHighestWALFilenum != null && lastHighestWALFilenum > newHighestWALFilenum) { + LOG.warn("Won't update last roll log result for server {}: current = {}, new = {}", + serverName, lastHighestWALFilenum, newHighestWALFilenum); + } else { + backupSystemTable.writeRegionServerLastLogRollResult(address, newHighestWALFilenum, + backupRootDir); + if (LOG.isDebugEnabled()) { + LOG.debug("updated last roll log result for {} from {} to {}", serverName, + lastHighestWALFilenum, newHighestWALFilenum); + } + } + } + } + } } diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java index b5f58508441b..a14fce59faf2 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -45,7 +44,6 @@ import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient; import org.apache.hadoop.hbase.backup.impl.IncrementalBackupManager; import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupClient; -import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; import org.apache.hadoop.hbase.backup.util.BackupUtils; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; @@ -239,10 +237,7 @@ public void execute() throws IOException { // the snapshot. LOG.info("Execute roll log procedure for full backup ..."); - Map props = new HashMap<>(); - props.put("backupRoot", backupInfo.getBackupRootDir()); - admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, - LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props); + BackupUtils.logRoll(conn, backupInfo.getBackupRootDir(), conf); failStageIf(Stage.stage_2); newTimestamps = backupManager.readRegionServerLastLogRollResult(); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java index 38204f68e31a..b91976325447 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java @@ -23,6 +23,7 @@ import java.io.File; import java.util.List; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; import org.apache.hadoop.hbase.backup.util.BackupUtils; @@ -70,17 +71,17 @@ public void TestIncBackupMergeRestore() throws Exception { // #2 - insert some data to table1 Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS); - LOG.debug("writing " + ADD_ROWS + " rows to " + table1); + LOG.debug("writing {} rows to {}", ADD_ROWS, table1); - Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS); + Assert.assertEquals(HBaseTestingUtil.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS); t1.close(); - LOG.debug("written " + ADD_ROWS + " rows to " + table1); + LOG.debug("written {} rows to {}", ADD_ROWS, table1); Table t2 = insertIntoTable(conn, table2, famName, 1, ADD_ROWS); - Assert.assertEquals(TEST_UTIL.countRows(t2), NB_ROWS_IN_BATCH + ADD_ROWS); + Assert.assertEquals(HBaseTestingUtil.countRows(t2), NB_ROWS_IN_BATCH + ADD_ROWS); t2.close(); - LOG.debug("written " + ADD_ROWS + " rows to " + table2); + LOG.debug("written {} rows to {}", ADD_ROWS, table2); // #3 - incremental backup for multiple tables tables = Lists.newArrayList(table1, table2); @@ -112,15 +113,15 @@ public void TestIncBackupMergeRestore() throws Exception { tablesRestoreIncMultiple, tablesMapIncMultiple, true)); Table hTable = conn.getTable(table1_restore); - LOG.debug("After incremental restore: " + hTable.getDescriptor()); - int countRows = TEST_UTIL.countRows(hTable, famName); - LOG.debug("f1 has " 
+ countRows + " rows"); + LOG.debug("After incremental restore: {}", hTable.getDescriptor()); + int countRows = HBaseTestingUtil.countRows(hTable, famName); + LOG.debug("f1 has {} rows", countRows); Assert.assertEquals(NB_ROWS_IN_BATCH + 2 * ADD_ROWS, countRows); hTable.close(); hTable = conn.getTable(table2_restore); - Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH + 2 * ADD_ROWS); + Assert.assertEquals(HBaseTestingUtil.countRows(hTable), NB_ROWS_IN_BATCH + 2 * ADD_ROWS); hTable.close(); admin.close(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 75dd2ef07b38..43a004a471cc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -1404,6 +1404,16 @@ Future abortProcedureAsync(long procId, boolean mayInterruptIfRunning) */ void rollWALWriter(ServerName serverName) throws IOException, FailedLogCloseException; + /** + * Roll log writer for all RegionServers. Note that unlike + * {@link Admin#rollWALWriter(ServerName)}, this method is synchronous, which means it will block + * until all RegionServers have completed the log roll, or a RegionServer fails due to an + * exception that retry will not work. + * @return server and the highest wal filenum of server before performing log roll + * @throws IOException if a remote or network exception occurs + */ + Map rollAllWALWriters() throws IOException; + /** * Helper that delegates to getClusterMetrics().getMasterCoprocessorNames(). * @return an array of master coprocessors diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java index c13dfc33e3d2..c866f434e63a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java @@ -635,6 +635,11 @@ public void rollWALWriter(ServerName serverName) throws IOException, FailedLogCl get(admin.rollWALWriter(serverName)); } + @Override + public Map rollAllWALWriters() throws IOException { + return get(admin.rollAllWALWriters()); + } + @Override public CompactionState getCompactionState(TableName tableName) throws IOException { return get(admin.getCompactionState(tableName)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index 331aa4a254af..d808aecc815c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -1270,6 +1270,15 @@ default CompletableFuture getMasterInfoPort() { */ CompletableFuture rollWALWriter(ServerName serverName); + /** + * Roll log writer for all RegionServers. Note that unlike + * {@link Admin#rollWALWriter(ServerName)}, this method is synchronous, which means it will block + * until all RegionServers have completed the log roll, or a RegionServer fails due to an + * exception that retry will not work. + * @return server and the highest wal filenum of server before performing log roll + */ + CompletableFuture> rollAllWALWriters(); + /** * Clear compacting queues on a region server. * @param serverName The servername of the region server. 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index 69f353600036..33ac47c73d69 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -691,6 +691,11 @@ public CompletableFuture rollWALWriter(ServerName serverName) { return wrap(rawAdmin.rollWALWriter(serverName)); } + @Override + public CompletableFuture> rollAllWALWriters() { + return wrap(rawAdmin.rollAllWALWriters()); + } + @Override public CompletableFuture clearCompactionQueues(ServerName serverName, Set queues) { return wrap(rawAdmin.clearCompactionQueues(serverName, queues)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 7cb0e4689510..2373e936726e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -105,6 +105,7 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; import org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel; @@ -149,6 +150,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.LastHighestWalFilenum; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; @@ -263,6 +265,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RollAllWALWritersRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RollAllWALWritersResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest; @@ -497,28 +501,70 @@ public void run(PRESP resp) { return future; } + /** + * short-circuit call for + * {@link RawAsyncHBaseAdmin#procedureCall(Object, MasterRpcCall, Converter, Converter, ProcedureBiConsumer)} + * by ignoring procedure result + */ private CompletableFuture procedureCall(PREQ preq, MasterRpcCall rpcCall, Converter respConverter, - ProcedureBiConsumer consumer) { + ProcedureBiConsumer consumer) { + return procedureCall(preq, rpcCall, respConverter, result -> null, consumer); + } + + /** + * short-circuit 
call for procedureCall(Consumer, Object, MasterRpcCall, Converter, Converter, + * ProcedureBiConsumer) by skip setting priority for request + */ + private CompletableFuture procedureCall(PREQ preq, + MasterRpcCall rpcCall, Converter respConverter, + Converter resultConverter, ProcedureBiConsumer consumer) { return procedureCall(b -> { - }, preq, rpcCall, respConverter, consumer); + }, preq, rpcCall, respConverter, resultConverter, consumer); } + /** + * short-circuit call for procedureCall(TableName, Object, MasterRpcCall, Converter, Converter, + * ProcedureBiConsumer) by ignoring procedure result + */ private CompletableFuture procedureCall(TableName tableName, PREQ preq, MasterRpcCall rpcCall, Converter respConverter, - ProcedureBiConsumer consumer) { - return procedureCall(b -> b.priority(tableName), preq, rpcCall, respConverter, consumer); + ProcedureBiConsumer consumer) { + return procedureCall(tableName, preq, rpcCall, respConverter, result -> null, consumer); + } + + /** + * short-circuit call for procedureCall(Consumer, Object, MasterRpcCall, Converter, Converter, + * ProcedureBiConsumer) by skip setting priority for request + */ + private CompletableFuture procedureCall(TableName tableName, PREQ preq, + MasterRpcCall rpcCall, Converter respConverter, + Converter resultConverter, ProcedureBiConsumer consumer) { + return procedureCall(b -> b.priority(tableName), preq, rpcCall, respConverter, resultConverter, + consumer); } - private CompletableFuture procedureCall( + /** + * @param type of request + * @param type of response + * @param type of procedure call result + * @param prioritySetter prioritySetter set priority by table for request + * @param preq procedure call request + * @param rpcCall procedure rpc call + * @param respConverter extract proc id from procedure call response + * @param resultConverter extract result from procedure call result + * @param consumer action performs on result + * @return procedure call result, null if procedure is void + */ + private CompletableFuture procedureCall( Consumer> prioritySetter, PREQ preq, MasterRpcCall rpcCall, Converter respConverter, - ProcedureBiConsumer consumer) { - MasterRequestCallerBuilder builder = this. newMasterCaller().action((controller, - stub) -> this. call(controller, stub, preq, rpcCall, respConverter)); + Converter resultConverter, ProcedureBiConsumer consumer) { + MasterRequestCallerBuilder builder = this. 
newMasterCaller() + .action((controller, stub) -> this.call(controller, stub, preq, rpcCall, respConverter)); prioritySetter.accept(builder); CompletableFuture procFuture = builder.call(); - CompletableFuture future = waitProcedureResult(procFuture); + CompletableFuture future = waitProcedureResult(procFuture, resultConverter); addListener(future, consumer); return future; } @@ -1935,7 +1981,7 @@ public CompletableFuture appendReplicationPeerTableCFs(String id, return failedFuture(new ReplicationException("tableCfs is null")); } - CompletableFuture future = new CompletableFuture(); + CompletableFuture future = new CompletableFuture<>(); addListener(getReplicationPeerConfig(id), (peerConfig, error) -> { if (!completeExceptionally(future, error)) { ReplicationPeerConfig newPeerConfig = @@ -1957,7 +2003,7 @@ public CompletableFuture removeReplicationPeerTableCFs(String id, return failedFuture(new ReplicationException("tableCfs is null")); } - CompletableFuture future = new CompletableFuture(); + CompletableFuture future = new CompletableFuture<>(); addListener(getReplicationPeerConfig(id), (peerConfig, error) -> { if (!completeExceptionally(future, error)) { ReplicationPeerConfig newPeerConfig = null; @@ -2056,7 +2102,7 @@ public CompletableFuture snapshot(SnapshotDescription snapshotDesc) { private void waitSnapshotFinish(SnapshotDescription snapshot, CompletableFuture future, SnapshotResponse resp) { if (resp.hasProcId()) { - getProcedureResult(resp.getProcId(), future, 0); + getProcedureResult(resp.getProcId(), src -> null, future, 0); addListener(future, new SnapshotProcedureBiConsumer(snapshot.getTableName())); } else { long expectedTimeout = resp.getExpectedTimeout(); @@ -2272,7 +2318,7 @@ private CompletableFuture internalRestoreSnapshot(String snapshotName, Tab .action((controller, stub) -> this. 
call(controller, stub, builder.build(), (s, c, req, done) -> s.restoreSnapshot(c, req, done), (resp) -> resp.getProcId())) - .call()); + .call(), result -> null); } @Override @@ -2684,14 +2730,14 @@ private void verifySplitKeys(byte[][] splitKeys) { } } - private static abstract class ProcedureBiConsumer implements BiConsumer { + private static abstract class ProcedureBiConsumer implements BiConsumer { abstract void onFinished(); abstract void onError(Throwable error); @Override - public void accept(Void v, Throwable error) { + public void accept(T value, Throwable error) { if (error != null) { onError(error); return; @@ -2700,7 +2746,7 @@ public void accept(Void v, Throwable error) { } } - private static abstract class TableProcedureBiConsumer extends ProcedureBiConsumer { + private static abstract class TableProcedureBiConsumer extends ProcedureBiConsumer { protected final TableName tableName; TableProcedureBiConsumer(TableName tableName) { @@ -2725,7 +2771,7 @@ void onError(Throwable error) { } } - private static abstract class NamespaceProcedureBiConsumer extends ProcedureBiConsumer { + private static abstract class NamespaceProcedureBiConsumer extends ProcedureBiConsumer { protected final String namespaceName; NamespaceProcedureBiConsumer(String namespaceName) { @@ -2740,12 +2786,12 @@ String getDescription() { @Override void onFinished() { - LOG.info(getDescription() + " completed"); + LOG.info("{} completed", getDescription()); } @Override void onError(Throwable error) { - LOG.info(getDescription() + " failed with " + error.getMessage()); + LOG.info("{} failed with {}", getDescription(), error.getMessage()); } } @@ -2984,7 +3030,7 @@ String getOperationType() { } } - private static class ReplicationProcedureBiConsumer extends ProcedureBiConsumer { + private static class ReplicationProcedureBiConsumer extends ProcedureBiConsumer { private final String peerId; private final Supplier getOperation; @@ -2999,28 +3045,44 @@ String getDescription() { @Override void onFinished() { - LOG.info(getDescription() + " completed"); + LOG.info("{} completed", getDescription()); } @Override void onError(Throwable error) { - LOG.info(getDescription() + " failed with " + error.getMessage()); + LOG.info("{} failed with {}", getDescription(), error.getMessage()); } } - private CompletableFuture waitProcedureResult(CompletableFuture procFuture) { - CompletableFuture future = new CompletableFuture<>(); + private static final class RollAllWALWritersBiConsumer + extends ProcedureBiConsumer> { + + @Override + void onFinished() { + LOG.info("Rolling all WAL writers completed"); + } + + @Override + void onError(Throwable error) { + LOG.warn("Rolling all WAL writers failed with {}", error.getMessage()); + } + } + + private CompletableFuture waitProcedureResult(CompletableFuture procFuture, + Converter converter) { + CompletableFuture future = new CompletableFuture<>(); addListener(procFuture, (procId, error) -> { if (error != null) { future.completeExceptionally(error); return; } - getProcedureResult(procId, future, 0); + getProcedureResult(procId, converter, future, 0); }); return future; } - private void getProcedureResult(long procId, CompletableFuture future, int retries) { + private void getProcedureResult(long procId, Converter converter, + CompletableFuture future, int retries) { addListener( this. newMasterCaller() .action((controller, stub) -> this. 
call(controller, stub, if (error != null) { LOG.warn("failed to get the procedure result procId={}", procId, ConnectionUtils.translateException(error)); - retryTimer.newTimeout(t -> getProcedureResult(procId, future, retries + 1), + retryTimer.newTimeout(t -> getProcedureResult(procId, converter, future, retries + 1), ConnectionUtils.getPauseTime(pauseNs, retries), TimeUnit.NANOSECONDS); return; } if (response.getState() == GetProcedureResultResponse.State.RUNNING) { - retryTimer.newTimeout(t -> getProcedureResult(procId, future, retries + 1), + retryTimer.newTimeout(t -> getProcedureResult(procId, converter, future, retries + 1), ConnectionUtils.getPauseTime(pauseNs, retries), TimeUnit.NANOSECONDS); return; } @@ -3045,7 +3107,11 @@ GetProcedureResultResponse> call(controller, stub, IOException ioe = ForeignExceptionUtil.toIOException(response.getException()); future.completeExceptionally(ioe); } else { - future.complete(null); + try { + future.complete(converter.convert(response.getResult())); + } catch (IOException e) { + future.completeExceptionally(e); + } } }); } @@ -3188,6 +3254,20 @@ Void> adminCall(controller, stub, RequestConverter.buildRollWALWriterRequest(), .serverName(serverName).call(); } + @Override + public CompletableFuture> rollAllWALWriters() { + return this + .> procedureCall( + RequestConverter.buildRollAllWALWritersRequest(ng.getNonceGroup(), ng.newNonce()), + (s, c, req, done) -> s.rollAllWALWriters(c, req, done), resp -> resp.getProcId(), + result -> LastHighestWalFilenum.parseFrom(result.toByteArray()).getFileNumMap() + .entrySet().stream().collect(Collectors + .toUnmodifiableMap(e -> ServerName.valueOf(e.getKey()), Map.Entry::getValue)), + new RollAllWALWritersBiConsumer()); + } + @Override public CompletableFuture clearCompactionQueues(ServerName serverName, Set queues) { return this. 
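The same operation through the asynchronous client, as wired up above. A sketch assuming an already-open AsyncConnection; the future completes with the map decoded by the LastHighestWalFilenum result converter shown in rollAllWALWriters().

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;

public class AsyncRollAllWalWritersSketch {
  // `conn` is assumed to come from ConnectionFactory.createAsyncConnection(conf).get().
  static CompletableFuture<Void> rollAll(AsyncConnection conn) {
    AsyncAdmin admin = conn.getAdmin();
    return admin.rollAllWALWriters().thenAccept(
      (Map<ServerName, Long> preRollFilenums) -> preRollFilenums.forEach(
        (server, filenum) -> System.out.println(server + " -> " + filenum)));
  }
}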
newAdminCaller() diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index 3bbfac500ce5..37fdb1ba6fe7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -139,6 +139,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RegionSpecifierAndState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RollAllWALWritersRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest; @@ -860,6 +861,11 @@ public static RollWALWriterRequest buildRollWALWriterRequest() { return RollWALWriterRequest.getDefaultInstance(); } + public static RollAllWALWritersRequest buildRollAllWALWritersRequest(long nonceGroup, + long nonce) { + return RollAllWALWritersRequest.newBuilder().setNonceGroup(nonceGroup).setNonce(nonce).build(); + } + /** * Create a new GetServerInfoRequest * @return a GetServerInfoRequest diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java index 4f8a7320fb40..37292d5feefc 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java @@ -65,7 +65,7 @@ public static void addListener(CompletableFuture future, try { // See this post on stack overflow(shorten since the url is too long), // https://s.apache.org/completionexception - // For a chain of CompleableFuture, only the first child CompletableFuture can get the + // For a chain of CompletableFuture, only the first child CompletableFuture can get the // original exception, others will get a CompletionException, which wraps the original // exception. So here we unwrap it before passing it to the callback action. action.accept(resp, unwrapCompletionException(error)); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java index e6a9d8fb2bdf..6e68ce5f1900 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java @@ -262,7 +262,7 @@ public interface RemoteProcedure { * Called when RS tells the remote procedure is succeeded through the * {@code reportProcedureDone} method. 
*/ - void remoteOperationCompleted(TEnv env); + void remoteOperationCompleted(TEnv env, byte[] remoteResultData); /** * Called when RS tells the remote procedure is failed through the {@code reportProcedureDone} diff --git a/hbase-protocol-shaded/src/main/protobuf/HBase.proto b/hbase-protocol-shaded/src/main/protobuf/HBase.proto index 0fd3d667d4d0..c66ee7eb9791 100644 --- a/hbase-protocol-shaded/src/main/protobuf/HBase.proto +++ b/hbase-protocol-shaded/src/main/protobuf/HBase.proto @@ -289,3 +289,7 @@ message RotateFileData { required int64 timestamp = 1; required bytes data = 2; } + +message LastHighestWalFilenum { + map file_num = 1; +} diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto index a8adaa27453f..768a1d7544ea 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto @@ -799,6 +799,15 @@ message ModifyColumnStoreFileTrackerResponse { message FlushMasterStoreRequest {} message FlushMasterStoreResponse {} +message RollAllWALWritersRequest { + optional uint64 nonce_group = 1 [default = 0]; + optional uint64 nonce = 2 [default = 0]; +} + +message RollAllWALWritersResponse { + optional uint64 proc_id = 1; +} + service MasterService { /** Used by the client to get the number of regions that have received the updated schema */ rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest) @@ -1270,6 +1279,9 @@ service MasterService { rpc FlushTable(FlushTableRequest) returns(FlushTableResponse); + + rpc rollAllWALWriters(RollAllWALWritersRequest) + returns(RollAllWALWritersResponse); } // HBCK Service definitions. diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto index e3b43afd66aa..554d7ec9c410 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto @@ -839,3 +839,21 @@ message ReloadQuotasProcedureStateData { required ServerName target_server = 1; optional ForeignExceptionMessage error = 2; } + +enum LogRollProcedureState { + LOG_ROLL_ROLL_LOG_ON_RS = 1; + LOG_ROLL_COLLECT_RS_HIGHEST_WAL_FILENUM = 2; + LOG_ROLL_UNREGISTER_SERVER_LISTENER = 3; +} + +message LogRollRemoteProcedureStateData { + required ServerName target_server = 1; +} + +message RSLogRollParameter { +} + +message LogRollRemoteProcedureResult { + optional ServerName server_name = 1; + optional uint64 last_highest_wal_filenum = 2; +} diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/RegionServerStatus.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/RegionServerStatus.proto index e68ba8e72869..3d2d8c6ff5fd 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/RegionServerStatus.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/RegionServerStatus.proto @@ -160,6 +160,7 @@ message RemoteProcedureResult { optional ForeignExceptionMessage error = 3; // Master active time as fencing token optional int64 initiating_master_active_time = 4; + optional bytes proc_result_data = 5; } message ReportProcedureDoneRequest { repeated RemoteProcedureResult result = 1; diff --git a/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto b/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto index 230795f27479..30eb328fd3cd 100644 --- 
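A small sketch of the new wire type above, assuming the usual shaded protobuf codegen: LastHighestWalFilenum is a string-keyed map of WAL filenums, built by LogRollProcedure (below) from its children's LogRollRemoteProcedureResult messages and parsed back into ServerName keys by the client-side converter.

import java.util.Map;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.LastHighestWalFilenum;

public class LastHighestWalFilenumSketch {
  public static void main(String[] args) throws Exception {
    // Master side: keyed by ServerName.toString(), one entry per RegionServer.
    LastHighestWalFilenum result = LastHighestWalFilenum.newBuilder()
      .putFileNum("rs1.example.org,16020,1700000000000", 42L) // hypothetical entry
      .build();
    byte[] bytes = result.toByteArray(); // stored as the procedure result

    // Client side: parse the result bytes back into a per-server map.
    Map<String, Long> fileNums = LastHighestWalFilenum.parseFrom(bytes).getFileNumMap();
    fileNums.forEach((name, filenum) ->
      System.out.println(ServerName.valueOf(name) + " -> " + filenum));
  }
}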
a/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto @@ -420,5 +420,4 @@ service AdminService { rpc GetCachedFilesList(GetCachedFilesListRequest) returns(GetCachedFilesListResponse); - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java index fce32333577d..fee132b7a4d3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java @@ -303,7 +303,13 @@ public enum EventType { * RS reload quotas.
* RS_RELOAD_QUOTAS */ - RS_RELOAD_QUOTAS(90, ExecutorType.RS_RELOAD_QUOTAS_OPERATIONS); + RS_RELOAD_QUOTAS(90, ExecutorType.RS_RELOAD_QUOTAS_OPERATIONS), + + /** + * RS log roll.
+ * RS_LOG_ROLL + */ + RS_LOG_ROLL(91, ExecutorType.RS_LOG_ROLL); private final int code; private final ExecutorType executor; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java index 1d689d276aa1..668cd701c0d9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java @@ -56,7 +56,8 @@ public enum ExecutorType { RS_CLAIM_REPLICATION_QUEUE(35), RS_SNAPSHOT_OPERATIONS(36), RS_FLUSH_OPERATIONS(37), - RS_RELOAD_QUOTAS_OPERATIONS(38); + RS_RELOAD_QUOTAS_OPERATIONS(38), + RS_LOG_ROLL(39); ExecutorType(int value) { } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 1cda553a81dc..6f235b2156f3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -160,6 +160,7 @@ import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure; import org.apache.hadoop.hbase.master.procedure.FlushTableProcedure; import org.apache.hadoop.hbase.master.procedure.InitMetaProcedure; +import org.apache.hadoop.hbase.master.procedure.LogRollProcedure; import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler; @@ -4201,11 +4202,11 @@ public SpaceQuotaSnapshotNotifier getSpaceQuotaSnapshotNotifier() { return (RemoteProcedure) procedure; } - public void remoteProcedureCompleted(long procId) { + public void remoteProcedureCompleted(long procId, byte[] remoteResultData) { LOG.debug("Remote procedure done, pid={}", procId); RemoteProcedure procedure = getRemoteProcedure(procId); if (procedure != null) { - procedure.remoteOperationCompleted(procedureExecutor.getEnvironment()); + procedure.remoteOperationCompleted(procedureExecutor.getEnvironment(), remoteResultData); } } @@ -4539,7 +4540,7 @@ public long flushTable(TableName tableName, List columnFamilies, long no @Override protected void run() throws IOException { getMaster().getMasterCoprocessorHost().preTableFlush(tableName); - LOG.info(getClientIdAuditPrefix() + " flush " + tableName); + LOG.info("{} flush {}", getClientIdAuditPrefix(), tableName); submitProcedure( new FlushTableProcedure(procedureExecutor.getEnvironment(), tableName, columnFamilies)); getMaster().getMasterCoprocessorHost().postTableFlush(tableName); @@ -4551,4 +4552,21 @@ protected String getDescription() { } }); } + + @Override + public long rollAllWALWriters(long nonceGroup, long nonce) throws IOException { + return MasterProcedureUtil + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() { + LOG.info("{} roll all wal writers", getClientIdAuditPrefix()); + submitProcedure(new LogRollProcedure()); + } + + @Override + protected String getDescription() { + return "RollAllWALWriters"; + } + }); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index fc246d38d513..de911b54ee9a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -321,6 +321,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RegionSpecifierAndState; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RollAllWALWritersRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RollAllWALWritersResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest; @@ -1372,7 +1374,7 @@ public IsSnapshotDoneResponse isSnapshotDone(RpcController controller, @Override public GetProcedureResultResponse getProcedureResult(RpcController controller, GetProcedureResultRequest request) throws ServiceException { - LOG.debug("Checking to see if procedure is done pid=" + request.getProcId()); + LOG.debug("Checking to see if procedure is done pid={}", request.getProcId()); try { server.checkInitialized(); GetProcedureResultResponse.Builder builder = GetProcedureResultResponse.newBuilder(); @@ -2575,7 +2577,9 @@ public ReportProcedureDoneResponse reportProcedureDone(RpcController controller, } request.getResultList().forEach(result -> { if (result.getStatus() == RemoteProcedureResult.Status.SUCCESS) { - server.remoteProcedureCompleted(result.getProcId()); + byte[] remoteResultData = + result.hasProcResultData() ? result.getProcResultData().toByteArray() : null; + server.remoteProcedureCompleted(result.getProcId(), remoteResultData); } else { server.remoteProcedureFailed(result.getProcId(), RemoteProcedureException.fromProto(result.getError())); @@ -3662,4 +3666,15 @@ public FlushTableResponse flushTable(RpcController controller, FlushTableRequest throw new ServiceException(ioe); } } + + @Override + public RollAllWALWritersResponse rollAllWALWriters(RpcController rpcController, + RollAllWALWritersRequest request) throws ServiceException { + try { + long procId = server.rollAllWALWriters(request.getNonceGroup(), request.getNonce()); + return RollAllWALWritersResponse.newBuilder().setProcId(procId).build(); + } catch (IOException ioe) { + throw new ServiceException(ioe); + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index e9c98d624460..0573b1a75628 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -515,4 +515,10 @@ long flushTable(final TableName tableName, final List columnFamilies, * @return procedure Id */ long truncateRegion(RegionInfo regionInfo, long nonceGroup, long nonce) throws IOException; + + /** + * Roll WAL writer for all RegionServers + * @return procedure id + */ + long rollAllWALWriters(long nonceGroup, long nonce) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index 55cfc28bb53a..b99f0448e8f0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -236,6 +236,14 @@ public boolean unregisterListener(final ServerListener listener) { return this.listeners.remove(listener); } + /** + * Removes all of the ServerListeners of this collection that satisfy the given predicate. + * @param filter a predicate which returns true for ServerListener to be removed + */ + public boolean unregisterListenerIf(final Predicate filter) { + return this.listeners.removeIf(filter); + } + /** * Let the server manager know a new regionserver has come online * @param request the startup request diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java index a828b5b668fc..cb3b91ca0e20 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java @@ -103,7 +103,7 @@ public Optional remoteCallBuild(Maste newRemoteOperation(MasterProcedureEnv env); @Override - public void remoteOperationCompleted(MasterProcedureEnv env) { + public void remoteOperationCompleted(MasterProcedureEnv env, byte[] remoteResultData) { // should not be called since we use reportRegionStateTransition to report the result throw new UnsupportedOperationException(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java index e0712f1d2aa3..4cf685f50a0a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java @@ -166,7 +166,7 @@ protected boolean abort(final MasterProcedureEnv env) { } @Override - public void remoteOperationCompleted(MasterProcedureEnv env) { + public void remoteOperationCompleted(MasterProcedureEnv env, byte[] remoteResultData) { // should not be called for region operation until we modified the open/close region procedure throw new UnsupportedOperationException(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushRegionProcedure.java index 7c67f0e3ee90..af482aeff281 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushRegionProcedure.java @@ -149,7 +149,7 @@ public void remoteCallFailed(MasterProcedureEnv env, ServerName serverName, IOEx } @Override - public void remoteOperationCompleted(MasterProcedureEnv env) { + public void remoteOperationCompleted(MasterProcedureEnv env, byte[] remoteResultData) { complete(env, null); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/LogRollProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/LogRollProcedure.java new file mode 100644 index 000000000000..a61b2c4afa55 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/LogRollProcedure.java @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.util.List; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.master.ServerListener; +import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; +import org.apache.hadoop.hbase.procedure2.ProcedureYieldException; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.LastHighestWalFilenum; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.LogRollProcedureState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.LogRollRemoteProcedureResult; + +/** + * The procedure to perform WAL rolling on all of RegionServers. 
+ */ +@InterfaceAudience.Private +public class LogRollProcedure + extends StateMachineProcedure + implements GlobalProcedureInterface { + + private static final Logger LOG = LoggerFactory.getLogger(LogRollProcedure.class); + + public LogRollProcedure() { + } + + @Override + protected Flow executeFromState(MasterProcedureEnv env, LogRollProcedureState state) + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + LOG.info("{} execute state={}", this, state); + + final ServerManager serverManager = env.getMasterServices().getServerManager(); + + try { + switch (state) { + case LOG_ROLL_ROLL_LOG_ON_RS: + // avoid potential new region server missing + serverManager.registerListener(new NewServerWALRoller(env)); + + final List subProcedures = + serverManager.getOnlineServersList().stream().map(LogRollRemoteProcedure::new).toList(); + addChildProcedure(subProcedures.toArray(new LogRollRemoteProcedure[0])); + setNextState(LogRollProcedureState.LOG_ROLL_COLLECT_RS_HIGHEST_WAL_FILENUM); + return Flow.HAS_MORE_STATE; + case LOG_ROLL_COLLECT_RS_HIGHEST_WAL_FILENUM: + // get children procedure + List children = + env.getMasterServices().getMasterProcedureExecutor().getProcedures().stream() + .filter(p -> p instanceof LogRollRemoteProcedure) + .filter(p -> p.getParentProcId() == getProcId()).map(p -> (LogRollRemoteProcedure) p) + .toList(); + LastHighestWalFilenum.Builder builder = LastHighestWalFilenum.newBuilder(); + for (Procedure child : children) { + LogRollRemoteProcedureResult result = + LogRollRemoteProcedureResult.parseFrom(child.getResult()); + builder.putFileNum(ProtobufUtil.toServerName(result.getServerName()).toString(), + result.getLastHighestWalFilenum()); + } + setResult(builder.build().toByteArray()); + setNextState(LogRollProcedureState.LOG_ROLL_UNREGISTER_SERVER_LISTENER); + return Flow.HAS_MORE_STATE; + case LOG_ROLL_UNREGISTER_SERVER_LISTENER: + serverManager.unregisterListenerIf(l -> l instanceof NewServerWALRoller); + return Flow.NO_MORE_STATE; + } + } catch (Exception e) { + setFailure("log-roll", e); + } + return Flow.NO_MORE_STATE; + } + + @Override + public String getGlobalId() { + return getClass().getSimpleName(); + } + + private static final class NewServerWALRoller implements ServerListener { + + private final MasterProcedureEnv env; + + public NewServerWALRoller(MasterProcedureEnv env) { + this.env = env; + } + + @Override + public void serverAdded(ServerName server) { + env.getMasterServices().getMasterProcedureExecutor() + .submitProcedure(new LogRollRemoteProcedure(server)); + } + } + + @Override + protected void rollbackState(MasterProcedureEnv env, LogRollProcedureState state) { + // nothing to rollback + } + + @Override + protected LogRollProcedureState getState(int stateId) { + return LogRollProcedureState.forNumber(stateId); + } + + @Override + protected int getStateId(LogRollProcedureState state) { + return state.getNumber(); + } + + @Override + protected LogRollProcedureState getInitialState() { + return LogRollProcedureState.LOG_ROLL_ROLL_LOG_ON_RS; + } + + @Override + protected boolean abort(MasterProcedureEnv env) { + return false; + } + + @Override + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { + super.serializeStateData(serializer); + + if (getResult() != null && getResult().length > 0) { + serializer.serialize(LastHighestWalFilenum.parseFrom(getResult())); + } else { + serializer.serialize(LastHighestWalFilenum.getDefaultInstance()); + } + } + + @Override + protected void 
deserializeStateData(ProcedureStateSerializer serializer) throws IOException { + super.deserializeStateData(serializer); + + if (getResult() == null) { + LastHighestWalFilenum lastHighestWalFilenum = + serializer.deserialize(LastHighestWalFilenum.class); + if (lastHighestWalFilenum != null) { + if ( + lastHighestWalFilenum.getFileNumMap().isEmpty() + && getCurrentState() == LogRollProcedureState.LOG_ROLL_UNREGISTER_SERVER_LISTENER + ) { + LOG.warn("pid = {}, current state is the last state, but rsHighestWalFilenumMap is " + + "empty, this should not happen. Are all region servers down ?", getProcId()); + } else { + setResult(lastHighestWalFilenum.toByteArray()); + } + } + } + } + + @Override + protected void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/LogRollRemoteProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/LogRollRemoteProcedure.java new file mode 100644 index 000000000000..df8e02ed6010 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/LogRollRemoteProcedure.java @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.util.Optional; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher.ServerOperation; +import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; +import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation; +import org.apache.hadoop.hbase.regionserver.LogRollCallable; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.LogRollRemoteProcedureStateData; + +/** + * The remote procedure to perform WAL rolling on the specific RegionServer without retrying. 
+ */ +@InterfaceAudience.Private +public class LogRollRemoteProcedure extends ServerRemoteProcedure + implements ServerProcedureInterface { + + public LogRollRemoteProcedure() { + } + + public LogRollRemoteProcedure(ServerName targetServer) { + this.targetServer = targetServer; + } + + @Override + protected void rollback(MasterProcedureEnv env) { + throw new UnsupportedOperationException(); + } + + @Override + protected boolean abort(MasterProcedureEnv env) { + return false; + } + + @Override + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { + serializer.serialize(LogRollRemoteProcedureStateData.newBuilder() + .setTargetServer(ProtobufUtil.toServerName(targetServer)).build()); + } + + @Override + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { + LogRollRemoteProcedureStateData data = + serializer.deserialize(LogRollRemoteProcedureStateData.class); + this.targetServer = ProtobufUtil.toServerName(data.getTargetServer()); + } + + @Override + public Optional remoteCallBuild(MasterProcedureEnv env, ServerName serverName) { + return Optional.of(new ServerOperation(this, getProcId(), LogRollCallable.class, + LogRollRemoteProcedureStateData.getDefaultInstance().toByteArray(), + env.getMasterServices().getMasterActiveTime())); + } + + @Override + public ServerName getServerName() { + return targetServer; + } + + @Override + public boolean hasMetaTableRegion() { + return false; + } + + @Override + public ServerOperationType getServerOperationType() { + return ServerOperationType.LOG_ROLL; + } + + @Override + protected boolean complete(MasterProcedureEnv env, Throwable error) { + // do not retry. just returns. + if (error != null) { + LOG.warn("Failed to roll wal for {}", targetServer, error); + return false; + } else { + return true; + } + } + + @Override + public synchronized void remoteOperationCompleted(MasterProcedureEnv env, + byte[] remoteResultData) { + setResult(remoteResultData); + super.remoteOperationCompleted(env, remoteResultData); + } + + @Override + protected void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()).append(" targetServer=").append(targetServer); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerProcedureInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerProcedureInterface.java index e73b23a3f965..b7ff6db67dbb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerProcedureInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerProcedureInterface.java @@ -62,6 +62,11 @@ public enum ServerOperationType { * Re-read the hbase:quotas table and update {@link QuotaCache}. */ RELOAD_QUOTAS, + + /** + * send roll log request to region server and handle the response + */ + LOG_ROLL } /** Returns Name of this server instance. 
*/ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerQueue.java index 57912f419039..55920bd47b38 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerQueue.java @@ -44,6 +44,7 @@ public boolean requireExclusiveLock(Procedure proc) { case CLAIM_REPLICATION_QUEUE_REMOTE: case VERIFY_SNAPSHOT: case RELOAD_QUOTAS: + case LOG_ROLL: return false; default: break; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerRemoteProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerRemoteProcedure.java index 0c89b6396417..563961d765e5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerRemoteProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerRemoteProcedure.java @@ -123,7 +123,8 @@ public synchronized void remoteCallFailed(MasterProcedureEnv env, ServerName ser } @Override - public synchronized void remoteOperationCompleted(MasterProcedureEnv env) { + public synchronized void remoteOperationCompleted(MasterProcedureEnv env, + byte[] remoteResultData) { state = MasterProcedureProtos.ServerRemoteProcedureState.SERVER_REMOTE_PROCEDURE_REPORT_SUCCEED; remoteOperationDone(env, null); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotRegionProcedure.java index 05621767e7f8..f4df40b168f0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotRegionProcedure.java @@ -108,7 +108,7 @@ public void remoteCallFailed(MasterProcedureEnv env, ServerName serverName, IOEx } @Override - public void remoteOperationCompleted(MasterProcedureEnv env) { + public void remoteOperationCompleted(MasterProcedureEnv env, byte[] remoteResultData) { complete(env, null); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/BaseRSProcedureCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/BaseRSProcedureCallable.java index 68aac1ef6e2d..7ea98d00cc7b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/BaseRSProcedureCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/BaseRSProcedureCallable.java @@ -28,12 +28,11 @@ public abstract class BaseRSProcedureCallable implements RSProcedureCallable { private Exception initError; @Override - public final Void call() throws Exception { + public final byte[] call() throws Exception { if (initError != null) { throw initError; } - doCall(); - return null; + return doCall(); } @Override @@ -46,7 +45,7 @@ public final void init(byte[] parameter, HRegionServer rs) { } } - protected abstract void doCall() throws Exception; + protected abstract byte[] doCall() throws Exception; protected abstract void initParameter(byte[] parameter) throws Exception; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/RSProcedureCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/RSProcedureCallable.java index 635d2b6f87a5..7ed9ff7664b4 100644 --- 
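With RSProcedureCallable now returning byte[], a callable that has nothing to report simply returns null from doCall(), while one that does report serializes its own payload; the bytes travel back in RemoteProcedureResult.proc_result_data and reach the master-side procedure via remoteOperationCompleted. A minimal sketch of the new contract, with a purely illustrative payload and a borrowed event type:

import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable;
import org.apache.hadoop.hbase.util.Bytes;

public class EchoCallable extends BaseRSProcedureCallable { // hypothetical example

  private byte[] parameter;

  @Override
  protected byte[] doCall() throws Exception {
    // Whatever is returned here is reported to the master as proc_result_data;
    // return null when there is nothing to report (as most callables still do).
    return Bytes.add(Bytes.toBytes("echo:"), parameter);
  }

  @Override
  protected void initParameter(byte[] parameter) throws Exception {
    this.parameter = parameter;
  }

  @Override
  public EventType getEventType() {
    // Placeholder only; a real callable uses its own executor event type,
    // e.g. EventType.RS_LOG_ROLL for the LogRollCallable below.
    return EventType.RS_LOG_ROLL;
  }
}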
a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/RSProcedureCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/RSProcedureCallable.java @@ -26,7 +26,7 @@ * A general interface for a sub procedure runs at RS side. */ @InterfaceAudience.Private -public interface RSProcedureCallable extends Callable { +public interface RSProcedureCallable extends Callable { /** * Initialize the callable diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRegionCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRegionCallable.java index 3dd932a1736d..e39317290bbb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRegionCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRegionCallable.java @@ -43,7 +43,7 @@ public class FlushRegionCallable extends BaseRSProcedureCallable { private List columnFamilies; @Override - protected void doCall() throws Exception { + protected byte[] doCall() throws Exception { HRegion region = rs.getRegion(regionInfo.getEncodedName()); if (region == null) { throw new NotServingRegionException("region=" + regionInfo.getRegionNameAsString()); @@ -64,6 +64,7 @@ protected void doCall() throws Exception { LOG.debug("Closing region operation on {}", region); region.closeRegionOperation(); } + return null; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 350baca36f46..cd49ceb753ea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1969,6 +1969,9 @@ executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_SNAPSHOT_OP executorService.startExecutorService( executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_RELOAD_QUOTAS_OPERATIONS) .setCorePoolSize(rsRefreshQuotasThreads)); + final int logRollThreads = conf.getInt("hbase.regionserver.executor.log.roll.threads", 1); + executorService.startExecutorService(executorService.new ExecutorConfig() + .setExecutorType(ExecutorType.RS_LOG_ROLL).setCorePoolSize(logRollThreads)); Threads.setDaemonThreadRunning(this.walRoller, getName() + ".logRoller", uncaughtExceptionHandler); @@ -2203,7 +2206,7 @@ public void stop(final String msg) { */ public void stop(final String msg, final boolean force, final User user) { if (!this.stopped) { - LOG.info("***** STOPPING region server '" + this + "' *****"); + LOG.info("***** STOPPING region server '{}' *****", this); if (this.rsHost != null) { // when forced via abort don't allow CPs to override try { @@ -3551,9 +3554,9 @@ void executeProcedure(long procId, long initiatingMasterActiveTime, .submit(new RSProcedureHandler(this, procId, initiatingMasterActiveTime, callable)); } - public void remoteProcedureComplete(long procId, long initiatingMasterActiveTime, - Throwable error) { - procedureResultReporter.complete(procId, initiatingMasterActiveTime, error); + public void remoteProcedureComplete(long procId, long initiatingMasterActiveTime, Throwable error, + byte[] procResultData) { + procedureResultReporter.complete(procId, initiatingMasterActiveTime, error, procResultData); } void reportProcedureDone(ReportProcedureDoneRequest request) throws IOException { diff --git 
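Two knobs govern the RegionServer side of this feature: the size of the RS_LOG_ROLL executor started above, and the retry count that LogRollCallable (next file) reads through AbstractWALRoller.WAL_ROLL_RETRIES. A sketch with illustrative values only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.wal.AbstractWALRoller;

public class LogRollTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Pool size of the RS_LOG_ROLL executor created in HRegionServer (default 1).
    conf.setInt("hbase.regionserver.executor.log.roll.threads", 2);
    // Attempts LogRollCallable makes before rethrowing the roll failure (default 1).
    conf.setInt(AbstractWALRoller.WAL_ROLL_RETRIES, 3);
    System.out.println(conf.getInt(AbstractWALRoller.WAL_ROLL_RETRIES, 1));
  }
}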
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRollCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRollCallable.java new file mode 100644 index 000000000000..11dc28c2a682 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRollCallable.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import java.io.IOException; +import org.apache.hadoop.hbase.executor.EventType; +import org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable; +import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.wal.AbstractWALRoller; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.LogRollRemoteProcedureResult; + +@InterfaceAudience.Private +public class LogRollCallable extends BaseRSProcedureCallable { + + private static final Logger LOG = LoggerFactory.getLogger(LogRollCallable.class); + + private int maxRollRetry; + + @Override + protected byte[] doCall() throws Exception { + for (int nAttempt = 0; nAttempt < maxRollRetry; nAttempt++) { + try { + Pair filenumPairBefore = getFilenumPair(); + + rs.getWalRoller().requestRollAll(); + rs.getWalRoller().waitUntilWalRollFinished(); + + Pair filenumPairAfter = getFilenumPair(); + LOG.info( + "Before rolling log, highest filenum = {} default WAL filenum = {}, After " + + "rolling log, highest filenum = {} default WAL filenum = {}", + filenumPairBefore.getFirst(), filenumPairBefore.getSecond(), filenumPairAfter.getFirst(), + filenumPairAfter.getSecond()); + return LogRollRemoteProcedureResult.newBuilder() + .setServerName(ProtobufUtil.toServerName(rs.getServerName())) + .setLastHighestWalFilenum(filenumPairBefore.getFirst()).build().toByteArray(); + } catch (Exception e) { + LOG.warn("Failed rolling log on attempt={}", nAttempt, e); + if (nAttempt == maxRollRetry - 1) { + throw e; + } + } + } + return null; + } + + private Pair getFilenumPair() throws IOException { + long highestFilenum = rs.getWALs().stream() + .mapToLong(wal -> ((AbstractFSWAL) wal).getFilenum()).max().orElse(-1L); + long defaultWALFilenum = ((AbstractFSWAL) rs.getWAL(null)).getFilenum(); + return Pair.newPair(highestFilenum, defaultWALFilenum); + } + + @Override + protected void initParameter(byte[] parameter) throws Exception { + this.maxRollRetry = rs.getConfiguration().getInt(AbstractWALRoller.WAL_ROLL_RETRIES, 1); + } + + @Override + public EventType getEventType() { + return EventType.RS_LOG_ROLL; + } 
+} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index bd232addcec5..d325c67a82af 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -3966,7 +3966,7 @@ private void executeProcedures(RemoteProcedureRequest request) { LOG.warn("Failed to instantiating remote procedure {}, pid={}", request.getProcClass(), request.getProcId(), e); server.remoteProcedureComplete(request.getProcId(), request.getInitiatingMasterActiveTime(), - e); + e, null); return; } callable.init(request.getProcData().toByteArray(), server); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReloadQuotasCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReloadQuotasCallable.java index e134dfda7ac8..de23db37856a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReloadQuotasCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReloadQuotasCallable.java @@ -29,9 +29,10 @@ public class ReloadQuotasCallable extends BaseRSProcedureCallable { private static final Logger LOG = LoggerFactory.getLogger(ReloadQuotasCallable.class); @Override - protected void doCall() throws Exception { + protected byte[] doCall() throws Exception { LOG.info("Reloading quotas"); rs.getRegionServerRpcQuotaManager().reload(); + return null; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java index 21016fe59dd0..7fcf363a919c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java @@ -28,6 +28,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RemoteProcedureResult; @@ -51,7 +52,8 @@ public RemoteProcedureResultReporter(HRegionServer server) { this.server = server; } - public void complete(long procId, long initiatingMasterActiveTime, Throwable error) { + public void complete(long procId, long initiatingMasterActiveTime, Throwable error, + byte[] procReturnValue) { RemoteProcedureResult.Builder builder = RemoteProcedureResult.newBuilder().setProcId(procId) .setInitiatingMasterActiveTime(initiatingMasterActiveTime); if (error != null) { @@ -62,6 +64,9 @@ public void complete(long procId, long initiatingMasterActiveTime, Throwable err LOG.debug("Successfully complete execution of pid={}", procId); builder.setStatus(RemoteProcedureResult.Status.SUCCESS); } + if (procReturnValue != null) { + builder.setProcResultData(ByteString.copyFrom(procReturnValue)); + } results.add(builder.build()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotRegionCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotRegionCallable.java index 0693aee87508..7158671efb1b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotRegionCallable.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotRegionCallable.java @@ -41,7 +41,7 @@ public class SnapshotRegionCallable extends BaseRSProcedureCallable { private ForeignExceptionDispatcher monitor; @Override - protected void doCall() throws Exception { + protected byte[] doCall() throws Exception { HRegion region = rs.getRegion(regionInfo.getEncodedName()); if (region == null) { throw new NotServingRegionException( @@ -78,6 +78,7 @@ protected void doCall() throws Exception { LOG.debug("Closing snapshot operation on {}", region); region.closeRegionOperation(Region.Operation.SNAPSHOT); } + return null; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotVerifyCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotVerifyCallable.java index db7908d81be8..76a3c1cf84e9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotVerifyCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotVerifyCallable.java @@ -32,8 +32,9 @@ public class SnapshotVerifyCallable extends BaseRSProcedureCallable { private RegionInfo region; @Override - protected void doCall() throws Exception { + protected byte[] doCall() throws Exception { rs.getRsSnapshotVerifier().verifyRegion(snapshot, region); + return null; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitWALCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitWALCallable.java index 151c865db794..e6ae50f6e9ab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitWALCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitWALCallable.java @@ -79,7 +79,7 @@ public static class ErrorWALSplitException extends HBaseIOException { } @Override - protected void doCall() throws Exception { + protected byte[] doCall() throws Exception { // grab a lock splitWALLock = splitWALLocks.acquireLock(walPath); try { @@ -97,6 +97,7 @@ protected void doCall() throws Exception { } finally { splitWALLock.unlock(); } + return null; } public String getWalPath() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RSProcedureHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RSProcedureHandler.java index 6eacc6b78e6a..3e150144f2c0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RSProcedureHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RSProcedureHandler.java @@ -51,14 +51,16 @@ public RSProcedureHandler(HRegionServer rs, long procId, long initiatingMasterAc @Override public void process() { Throwable error = null; + byte[] procResultData = null; try { MDC.put("pid", Long.toString(procId)); - callable.call(); + procResultData = callable.call(); } catch (Throwable t) { - LOG.error("pid=" + this.procId, t); + LOG.error("pid={}", this.procId, t); error = t; } finally { - ((HRegionServer) server).remoteProcedureComplete(procId, initiatingMasterActiveTime, error); + ((HRegionServer) server).remoteProcedureComplete(procId, initiatingMasterActiveTime, error, + procResultData); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ClaimReplicationQueueCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ClaimReplicationQueueCallable.java index 2b7e14f9f7aa..73fa29766186 
100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ClaimReplicationQueueCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ClaimReplicationQueueCallable.java @@ -39,9 +39,10 @@ public EventType getEventType() { } @Override - protected void doCall() throws Exception { + protected byte[] doCall() throws Exception { PeerProcedureHandler handler = rs.getReplicationSourceService().getPeerProcedureHandler(); handler.claimReplicationQueue(queueId); + return null; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java index 094a61dcdd1f..5d4454c14484 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java @@ -43,7 +43,7 @@ public class RefreshPeerCallable extends BaseRSProcedureCallable { private int stage; @Override - protected void doCall() throws Exception { + protected byte[] doCall() throws Exception { LOG.info("Received a peer change event, peerId=" + peerId + ", type=" + type); PeerProcedureHandler handler = rs.getReplicationSourceService().getPeerProcedureHandler(); switch (type) { @@ -68,6 +68,7 @@ protected void doCall() throws Exception { default: throw new IllegalArgumentException("Unknown peer modification type: " + type); } + return null; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java index 427fe80b0c36..ed368e18981d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java @@ -69,7 +69,7 @@ public class ReplaySyncReplicationWALCallable extends BaseRSProcedureCallable { private final KeyLocker peersLock = new KeyLocker<>(); @Override - protected void doCall() throws Exception { + protected byte[] doCall() throws Exception { LOG.info("Received a replay sync replication wals {} event, peerId={}", wals, peerId); if (rs.getReplicationSinkService() != null) { Lock peerLock = peersLock.acquireLock(wals.get(0)); @@ -81,6 +81,7 @@ protected void doCall() throws Exception { peerLock.unlock(); } } + return null; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SwitchRpcThrottleRemoteCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SwitchRpcThrottleRemoteCallable.java index d09c821b9edc..fd35464e686c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SwitchRpcThrottleRemoteCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SwitchRpcThrottleRemoteCallable.java @@ -34,8 +34,9 @@ public class SwitchRpcThrottleRemoteCallable extends BaseRSProcedureCallable { private boolean rpcThrottleEnabled; @Override - protected void doCall() throws Exception { + protected byte[] doCall() throws Exception { rs.getRegionServerRpcQuotaManager().switchRpcThrottle(rpcThrottleEnabled); + return null; } @Override diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java index c900333af9eb..5e6457211344 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java @@ -69,7 +69,7 @@ public abstract class AbstractWALRoller extends Thread impl * Configure for the max count of log rolling retry. The real retry count is also limited by the * timeout of log rolling via {@link #WAL_ROLL_WAIT_TIMEOUT} */ - protected static final String WAL_ROLL_RETRIES = "hbase.regionserver.logroll.retries"; + public static final String WAL_ROLL_RETRIES = "hbase.regionserver.logroll.retries"; protected final ConcurrentMap wals = new ConcurrentHashMap<>(); protected final T abortable; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index e78ca7d0cdb7..daaa2e5c2b99 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -568,4 +568,9 @@ public long flushTable(TableName tableName, List columnFamilies, long no long nonce) throws IOException { return 0; } + + @Override + public long rollAllWALWriters(long nonceGroup, long nonce) throws IOException { + return 0; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestLogRollProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestLogRollProcedure.java new file mode 100644 index 000000000000..1b587097ddad --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestLogRollProcedure.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.procedure; + +import static org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.DISPATCH_DELAY_CONF_KEY; +import static org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.DISPATCH_MAX_QUEUE_SIZE_CONF_KEY; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.SingleProcessHBaseCluster; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; + +@Category(MediumTests.class) +public class TestLogRollProcedure { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestLogRollProcedure.class); + + @Rule + public TestName name = new TestName(); + + private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + + private Configuration conf; + + @Before + public void setUp() throws Exception { + conf = TEST_UTIL.getConfiguration(); + conf.set(DISPATCH_DELAY_CONF_KEY, "2000"); + conf.set(DISPATCH_MAX_QUEUE_SIZE_CONF_KEY, "128"); + TEST_UTIL.startMiniCluster(2); + } + + @After + public void tearDown() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testSimpleLogRoll() throws IOException { + HRegionServer rs = TEST_UTIL.getHBaseCluster().getRegionServer(0); + long fileNumBefore = ((AbstractFSWAL) rs.getWAL(null)).getFilenum(); + + TEST_UTIL.getAdmin().rollAllWALWriters(); + + long fileNumAfter = ((AbstractFSWAL) rs.getWAL(null)).getFilenum(); + assertTrue(fileNumAfter > fileNumBefore); + } + + @Test + public void testMasterRestarts() throws IOException { + SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); + HRegionServer rs = cluster.getRegionServer(0); + long fileNumBefore = ((AbstractFSWAL) rs.getWAL(null)).getFilenum(); + + LogRollProcedure procedure = new LogRollProcedure(); + long procId = cluster.getMaster().getMasterProcedureExecutor().submitProcedure(procedure); + + TEST_UTIL.waitFor(60000, () -> cluster.getMaster().getMasterProcedureExecutor().getProcedures() + .stream().anyMatch(p -> p instanceof LogRollRemoteProcedure)); + ServerName serverName = cluster.getMaster().getServerName(); + cluster.killMaster(serverName); + cluster.waitForMasterToStop(serverName, 30000); + cluster.startMaster(); + cluster.waitForActiveAndReadyMaster(); + + ProcedureExecutor exec = cluster.getMaster().getMasterProcedureExecutor(); + TEST_UTIL.waitFor(30000, () -> exec.isRunning() && exec.isFinished(procId)); + + long fileNumAfter = ((AbstractFSWAL) rs.getWAL(null)).getFilenum(); + + assertTrue(fileNumAfter > fileNumBefore); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerRemoteProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerRemoteProcedure.java index 1500a3c00cd3..f828f5ce1ba6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerRemoteProcedure.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerRemoteProcedure.java @@ -188,7 +188,8 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws } @Override - public synchronized void remoteOperationCompleted(MasterProcedureEnv env) { + public synchronized void remoteOperationCompleted(MasterProcedureEnv env, + byte[] remoteResultData) { complete(env, null); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestRegisterPeerWorkerWhenRestarting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestRegisterPeerWorkerWhenRestarting.java index 1c4abd15eaf0..c0a37c20e88b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestRegisterPeerWorkerWhenRestarting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestRegisterPeerWorkerWhenRestarting.java @@ -57,14 +57,14 @@ public HMasterForTest(Configuration conf) throws IOException { } @Override - public void remoteProcedureCompleted(long procId) { + public void remoteProcedureCompleted(long procId, byte[] data) { if ( FAIL && getMasterProcedureExecutor() .getProcedure(procId) instanceof SyncReplicationReplayWALRemoteProcedure ) { throw new RuntimeException("Inject error"); } - super.remoteProcedureCompleted(procId); + super.remoteProcedureCompleted(procId, data); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java index 35c868413e19..4d592b49d0d3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java @@ -526,6 +526,11 @@ public void rollWALWriter(ServerName serverName) throws IOException, FailedLogCl admin.rollWALWriter(serverName); } + @Override + public Map rollAllWALWriters() throws IOException { + return admin.rollAllWALWriters(); + } + public CompactionState getCompactionState(TableName tableName) throws IOException { return admin.getCompactionState(tableName); } diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index 5ceaf2a08c72..93cc312338c9 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -179,6 +179,12 @@ def wal_roll(server_name) # TODO: remove older hlog_roll version alias hlog_roll wal_roll + #---------------------------------------------------------------------------------------------- + # Requests all region servers to roll wal writer + def wal_roll_all + @admin.rollAllWALWriters + end + #---------------------------------------------------------------------------------------------- # Requests a table or region split def split(table_or_region_name, split_point = nil) diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb index 46b38dd96b89..6be3854b8a57 100644 --- a/hbase-shell/src/main/ruby/shell.rb +++ b/hbase-shell/src/main/ruby/shell.rb @@ -467,6 +467,7 @@ def self.exception_handler(hide_traceback) unassign zk_dump wal_roll + wal_roll_all hbck_chore_run catalogjanitor_run catalogjanitor_switch diff --git a/hbase-shell/src/main/ruby/shell/commands/wal_roll_all.rb b/hbase-shell/src/main/ruby/shell/commands/wal_roll_all.rb new file mode 100644 index 000000000000..13d764495653 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/wal_roll_all.rb @@ 
-0,0 +1,37 @@ +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +module Shell + module Commands + class WalRollAll < Command + def help + <<-EOF +Request all region servers to roll their WAL writers. Note that this method is synchronous, +which means it will block until all RegionServers have completed the log roll, +or a RegionServer fails with an exception that cannot be resolved by retrying. Here is how +you would run the command in the hbase shell: + hbase> wal_roll_all +EOF + end + + def command + admin.wal_roll_all + end + end + end +end diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java index 0eff84bba7c8..a0d73dcca21c 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java @@ -826,7 +826,11 @@ public String getLocks() { @Override public void rollWALWriter(ServerName serverName) { throw new NotImplementedException("rollWALWriter not supported in ThriftAdmin"); + } + @Override + public Map rollAllWALWriters() { + throw new NotImplementedException("rollAllWALWriters not supported in ThriftAdmin"); } @Override From 89416ce17b7f9a2d16a5ad37fc89d0c4da4b50d9 Mon Sep 17 00:00:00 2001 From: Ruanhui <32773751+frostruan@users.noreply.github.com> Date: Mon, 15 Sep 2025 09:47:30 +0800 Subject: [PATCH 03/92] HBASE-27355 Separate meta read requests from master and client (#7261) Co-authored-by: huiruan Signed-off-by: Duo Zhang Reviewed-by: Aman Poonia --- .../hadoop/hbase/MetaTableAccessor.java | 37 ++++++++---- .../hbase/ipc/MetaRWQueueRpcExecutor.java | 25 +++++++- .../hadoop/hbase/ipc/RWQueueRpcExecutor.java | 4 ++ .../apache/hadoop/hbase/ipc/RpcExecutor.java | 6 +- .../RSAnnotationReadingPriorityFunction.java | 6 +- .../hbase/ipc/TestSimpleRpcScheduler.java | 60 ++++++++++--------- 6 files changed, 93 insertions(+), 45 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index 98750d38a7c3..05b049e27dbc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.filter.RowFilter; import org.apache.hadoop.hbase.filter.SubstringComparator; import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.regionserver.RSAnnotationReadingPriorityFunction; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.ExceptionUtil; @@ -185,6 +186,7 @@
public static HRegionLocation getRegionLocation(Connection connection, byte[] re } Get get = new Get(row); get.addFamily(HConstants.CATALOG_FAMILY); + get.setPriority(RSAnnotationReadingPriorityFunction.INTERNAL_READ_QOS); Result r; try (Table t = getMetaHTable(connection)) { r = t.get(get); @@ -213,6 +215,7 @@ public static Result getCatalogFamilyRow(Connection connection, RegionInfo ri) throws IOException { Get get = new Get(CatalogFamilyFormat.getMetaKeyForRegion(ri)); get.addFamily(HConstants.CATALOG_FAMILY); + get.setPriority(RSAnnotationReadingPriorityFunction.INTERNAL_READ_QOS); try (Table t = getMetaHTable(connection)) { return t.get(get); } @@ -226,11 +229,7 @@ public static Result getCatalogFamilyRow(Connection connection, RegionInfo ri) */ public static Result getRegionResult(Connection connection, RegionInfo regionInfo) throws IOException { - Get get = new Get(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo)); - get.addFamily(HConstants.CATALOG_FAMILY); - try (Table t = getMetaHTable(connection)) { - return t.get(get); - } + return getCatalogFamilyRow(connection, regionInfo); } /** @@ -341,6 +340,7 @@ private static Scan getMetaScan(Configuration conf, int rowUpperLimit) { scan.setReadType(Scan.ReadType.PREAD); } scan.setCaching(scannerCaching); + scan.setPriority(RSAnnotationReadingPriorityFunction.INTERNAL_READ_QOS); return scan; } @@ -368,7 +368,7 @@ public static List> getTableRegionsAndLocations( final boolean excludeOfflinedSplitParents) throws IOException { if (tableName != null && tableName.equals(TableName.META_TABLE_NAME)) { throw new IOException( - "This method can't be used to locate meta regions;" + " use MetaTableLocator instead"); + "This method can't be used to locate meta regions; use MetaTableLocator instead"); } // Make a version of CollectingVisitor that collects RegionInfo and ServerAddress ClientMetaTableAccessor.CollectRegionLocationsVisitor visitor = @@ -385,10 +385,10 @@ public static void fullScanMetaAndPrint(Connection connection) throws IOExceptio if (r == null || r.isEmpty()) { return true; } - LOG.info("fullScanMetaAndPrint.Current Meta Row: " + r); + LOG.info("fullScanMetaAndPrint.Current Meta Row: {}", r); TableState state = CatalogFamilyFormat.getTableState(r); if (state != null) { - LOG.info("fullScanMetaAndPrint.Table State={}" + state); + LOG.info("fullScanMetaAndPrint.Table State={}", state); } else { RegionLocations locations = CatalogFamilyFormat.getRegionLocations(r); if (locations == null) { @@ -461,6 +461,15 @@ public static void scanMeta(Connection connection, @Nullable final byte[] startR scanMeta(connection, startRow, stopRow, type, null, maxRows, visitor); } + /** + * Performs a scan of META table. + * @param connection connection we're using + * @param startRow Where to start the scan. Pass null if want to begin scan at first row. + * @param stopRow Where to stop the scan. Pass null if want to scan all rows from the start one + * @param type scanned part of meta + * @param maxRows maximum rows to return + * @param visitor Visitor invoked against each row. 
+ */ public static void scanMeta(Connection connection, @Nullable final byte[] startRow, @Nullable final byte[] stopRow, QueryType type, @Nullable Filter filter, int maxRows, final ClientMetaTableAccessor.Visitor visitor) throws IOException { @@ -481,9 +490,11 @@ public static void scanMeta(Connection connection, @Nullable final byte[] startR } if (LOG.isTraceEnabled()) { - LOG.trace("Scanning META" + " starting at row=" + Bytes.toStringBinary(startRow) - + " stopping at row=" + Bytes.toStringBinary(stopRow) + " for max=" + rowUpperLimit - + " with caching=" + scan.getCaching()); + LOG.trace( + "Scanning META starting at row={} stopping at row={} for max={} with caching={} " + + "priority={}", + Bytes.toStringBinary(startRow), Bytes.toStringBinary(stopRow), rowUpperLimit, + scan.getCaching(), scan.getPriority()); } int currentRow = 0; @@ -912,7 +923,7 @@ private static void updateLocation(Connection connection, RegionInfo regionInfo, addRegionInfo(put, regionInfo); addLocation(put, sn, openSeqNum, regionInfo.getReplicaId()); putToMetaTable(connection, put); - LOG.info("Updated row {} with server=", regionInfo.getRegionNameAsString(), sn); + LOG.info("Updated row {} with server = {}", regionInfo.getRegionNameAsString(), sn); } public static Put addRegionInfo(final Put p, final RegionInfo hri) throws IOException { @@ -937,7 +948,7 @@ public static Put addLocation(Put p, ServerName sn, long openSeqNum, int replica .add(builder.clear().setRow(p.getRow()).setFamily(HConstants.CATALOG_FAMILY) .setQualifier(CatalogFamilyFormat.getStartCodeColumn(replicaId)) .setTimestamp(p.getTimestamp()).setType(Cell.Type.Put) - .setValue(Bytes.toBytes(sn.getStartcode())).build()) + .setValue(Bytes.toBytes(sn.getStartCode())).build()) .add(builder.clear().setRow(p.getRow()).setFamily(HConstants.CATALOG_FAMILY) .setQualifier(CatalogFamilyFormat.getSeqNumColumn(replicaId)).setTimestamp(p.getTimestamp()) .setType(Type.Put).setValue(Bytes.toBytes(openSeqNum)).build()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetaRWQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetaRWQueueRpcExecutor.java index a86e6554b1cc..97c3a8765256 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetaRWQueueRpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetaRWQueueRpcExecutor.java @@ -19,6 +19,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.regionserver.RSAnnotationReadingPriorityFunction; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -32,7 +33,10 @@ public class MetaRWQueueRpcExecutor extends RWQueueRpcExecutor { "hbase.ipc.server.metacallqueue.read.ratio"; public static final String META_CALL_QUEUE_SCAN_SHARE_CONF_KEY = "hbase.ipc.server.metacallqueue.scan.ratio"; - public static final float DEFAULT_META_CALL_QUEUE_READ_SHARE = 0.9f; + public static final String META_CALL_QUEUE_HANDLER_FACTOR_CONF_KEY = + "hbase.ipc.server.metacallqueue.handler.factor"; + public static final float DEFAULT_META_CALL_QUEUE_READ_SHARE = 0.8f; + private static final float DEFAULT_META_CALL_QUEUE_SCAN_SHARE = 0.2f; public MetaRWQueueRpcExecutor(final String name, final int handlerCount, final int maxQueueLength, final PriorityFunction priority, final Configuration conf, final Abortable abortable) { @@ -46,6 +50,23 @@ protected float getReadShare(final Configuration conf) { @Override protected float getScanShare(final 
Configuration conf) { - return conf.getFloat(META_CALL_QUEUE_SCAN_SHARE_CONF_KEY, 0); + return conf.getFloat(META_CALL_QUEUE_SCAN_SHARE_CONF_KEY, DEFAULT_META_CALL_QUEUE_SCAN_SHARE); + } + + @Override + public boolean dispatch(CallRunner callTask) { + RpcCall call = callTask.getRpcCall(); + int level = call.getHeader().getPriority(); + final boolean toWriteQueue = isWriteRequest(call.getHeader(), call.getParam()); + // dispatch client system read request to read handlers + // dispatch internal system read request to scan handlers + final boolean toScanQueue = + getNumScanQueues() > 0 && level == RSAnnotationReadingPriorityFunction.INTERNAL_READ_QOS; + return dispatchTo(toWriteQueue, toScanQueue, callTask); + } + + @Override + protected float getCallQueueHandlerFactor(Configuration conf) { + return conf.getFloat(META_CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0.5f); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java index 298a9fc3aeb2..70a7b74b8e2a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java @@ -297,4 +297,8 @@ private void propagateBalancerConfigChange(QueueBalancer balancer, Configuration ((ConfigurationObserver) balancer).onConfigurationChange(conf); } } + + protected int getNumScanQueues() { + return numScanQueues; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java index 7e5bdfcc7d6f..15c9afe030c2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java @@ -130,7 +130,7 @@ public RpcExecutor(final String name, final int handlerCount, final String callQ this.conf = conf; this.abortable = abortable; - float callQueuesHandlersFactor = this.conf.getFloat(CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0.1f); + float callQueuesHandlersFactor = getCallQueueHandlerFactor(conf); if ( Float.compare(callQueuesHandlersFactor, 1.0f) > 0 || Float.compare(0.0f, callQueuesHandlersFactor) > 0 @@ -468,4 +468,8 @@ public void onConfigurationChange(Configuration conf) { } } } + + protected float getCallQueueHandlerFactor(Configuration conf) { + return conf.getFloat(CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0.1f); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSAnnotationReadingPriorityFunction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSAnnotationReadingPriorityFunction.java index 1197f7b5359c..94c76cf55a4b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSAnnotationReadingPriorityFunction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSAnnotationReadingPriorityFunction.java @@ -46,7 +46,8 @@ * Priority function specifically for the region server. 
*/ @InterfaceAudience.Private -class RSAnnotationReadingPriorityFunction extends AnnotationReadingPriorityFunction { +public class RSAnnotationReadingPriorityFunction + extends AnnotationReadingPriorityFunction { private static final Logger LOG = LoggerFactory.getLogger(RSAnnotationReadingPriorityFunction.class); @@ -54,6 +55,9 @@ class RSAnnotationReadingPriorityFunction extends AnnotationReadingPriorityFunct /** Used to control the scan delay, currently sqrt(numNextCall * weight) */ public static final String SCAN_VTIME_WEIGHT_CONF_KEY = "hbase.ipc.server.scan.vtime.weight"; + // QOS for internal meta read requests + public static final int INTERNAL_READ_QOS = 250; + @SuppressWarnings("unchecked") private final Class[] knownArgumentClasses = new Class[] { GetRegionInfoRequest.class, GetStoreFileRequest.class, CloseRegionRequest.class, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java index 19aa46a0d626..eed7d98d7358 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandlerImpl; +import org.apache.hadoop.hbase.regionserver.RSAnnotationReadingPriorityFunction; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RPCTests; import org.apache.hadoop.hbase.util.Bytes; @@ -108,7 +109,7 @@ public void testBasic() throws IOException, InterruptedException { RpcScheduler scheduler = new SimpleRpcScheduler(conf, 10, 0, 0, qosFunction, 0); scheduler.init(CONTEXT); scheduler.start(); - CallRunner task = createMockTask(); + CallRunner task = createMockTask(HConstants.NORMAL_QOS); task.setStatus(new MonitoredRPCHandlerImpl("test")); scheduler.dispatch(task); verify(task, timeout(10000)).run(); @@ -163,7 +164,7 @@ public void testCallQueueInfo() throws IOException, InterruptedException { int totalCallMethods = 10; for (int i = totalCallMethods; i > 0; i--) { - CallRunner task = createMockTask(); + CallRunner task = createMockTask(HConstants.NORMAL_QOS); task.setStatus(new MonitoredRPCHandlerImpl("test")); scheduler.dispatch(task); } @@ -185,9 +186,9 @@ public void testCallQueueInfo() throws IOException, InterruptedException { @Test public void testHandlerIsolation() throws IOException, InterruptedException { - CallRunner generalTask = createMockTask(); - CallRunner priorityTask = createMockTask(); - CallRunner replicationTask = createMockTask(); + CallRunner generalTask = createMockTask(HConstants.NORMAL_QOS); + CallRunner priorityTask = createMockTask(HConstants.HIGH_QOS + 1); + CallRunner replicationTask = createMockTask(HConstants.REPLICATION_QOS); List tasks = ImmutableList.of(generalTask, priorityTask, replicationTask); Map qos = ImmutableMap.of(generalTask, 0, priorityTask, HConstants.HIGH_QOS + 1, replicationTask, HConstants.REPLICATION_QOS); @@ -227,10 +228,12 @@ public Void answer(InvocationOnMock invocationOnMock) throws Throwable { assertEquals(3, ImmutableSet.copyOf(handlerThreads.values()).size()); } - private CallRunner createMockTask() { + private CallRunner createMockTask(int priority) { ServerCall call = mock(ServerCall.class); CallRunner task = mock(CallRunner.class); + RequestHeader header = 
RequestHeader.newBuilder().setPriority(priority).build(); when(task.getRpcCall()).thenReturn(call); + when(call.getHeader()).thenReturn(header); return task; } @@ -707,7 +710,7 @@ public void testFastPathBalancedQueueRpcExecutorWithQueueLength0() throws Except @Test public void testMetaRWScanQueues() throws Exception { Configuration schedConf = HBaseConfiguration.create(); - schedConf.setFloat(RpcExecutor.CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 1.0f); + schedConf.setFloat(MetaRWQueueRpcExecutor.META_CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 1.0f); schedConf.setFloat(MetaRWQueueRpcExecutor.META_CALL_QUEUE_READ_SHARE_CONF_KEY, 0.7f); schedConf.setFloat(MetaRWQueueRpcExecutor.META_CALL_QUEUE_SCAN_SHARE_CONF_KEY, 0.5f); @@ -728,36 +731,37 @@ public void testMetaRWScanQueues() throws Exception { when(putCall.getHeader()).thenReturn(putHead); when(putCall.getParam()).thenReturn(putCall.param); - CallRunner getCallTask = mock(CallRunner.class); - ServerCall getCall = mock(ServerCall.class); - RequestHeader getHead = RequestHeader.newBuilder().setMethodName("get").build(); - when(getCallTask.getRpcCall()).thenReturn(getCall); - when(getCall.getHeader()).thenReturn(getHead); - - CallRunner scanCallTask = mock(CallRunner.class); - ServerCall scanCall = mock(ServerCall.class); - scanCall.param = ScanRequest.newBuilder().build(); - RequestHeader scanHead = RequestHeader.newBuilder().setMethodName("scan").build(); - when(scanCallTask.getRpcCall()).thenReturn(scanCall); - when(scanCall.getHeader()).thenReturn(scanHead); - when(scanCall.getParam()).thenReturn(scanCall.param); + CallRunner clientReadCallTask = mock(CallRunner.class); + ServerCall clientReadCall = mock(ServerCall.class); + RequestHeader clientReadHead = RequestHeader.newBuilder().setMethodName("get").build(); + when(clientReadCallTask.getRpcCall()).thenReturn(clientReadCall); + when(clientReadCall.getHeader()).thenReturn(clientReadHead); + + CallRunner internalReadCallTask = mock(CallRunner.class); + ServerCall internalReadCall = mock(ServerCall.class); + internalReadCall.param = ScanRequest.newBuilder().build(); + RequestHeader masterReadHead = RequestHeader.newBuilder().setMethodName("scan") + .setPriority(RSAnnotationReadingPriorityFunction.INTERNAL_READ_QOS).build(); + when(internalReadCallTask.getRpcCall()).thenReturn(internalReadCall); + when(internalReadCall.getHeader()).thenReturn(masterReadHead); + when(internalReadCall.getParam()).thenReturn(internalReadCall.param); ArrayList work = new ArrayList<>(); doAnswerTaskExecution(putCallTask, work, 1, 1000); - doAnswerTaskExecution(getCallTask, work, 2, 1000); - doAnswerTaskExecution(scanCallTask, work, 3, 1000); + doAnswerTaskExecution(clientReadCallTask, work, 2, 1000); + doAnswerTaskExecution(internalReadCallTask, work, 3, 1000); // There are 3 queues: [puts], [gets], [scans] // so the calls will be interleaved scheduler.dispatch(putCallTask); scheduler.dispatch(putCallTask); scheduler.dispatch(putCallTask); - scheduler.dispatch(getCallTask); - scheduler.dispatch(getCallTask); - scheduler.dispatch(getCallTask); - scheduler.dispatch(scanCallTask); - scheduler.dispatch(scanCallTask); - scheduler.dispatch(scanCallTask); + scheduler.dispatch(clientReadCallTask); + scheduler.dispatch(clientReadCallTask); + scheduler.dispatch(clientReadCallTask); + scheduler.dispatch(internalReadCallTask); + scheduler.dispatch(internalReadCallTask); + scheduler.dispatch(internalReadCallTask); while (work.size() < 6) { Thread.sleep(100); From 0d1ff8aa9bc21b73f2cf624d35fdcea1417de613 Mon Sep 17 00:00:00 2001 From: 
Ruan Hui Date: Wed, 6 Jul 2022 10:59:13 +0800 Subject: [PATCH 04/92] HBASE-27157 Potential race condition in WorkerAssigner (#4577) Close #7299 Co-authored-by: Duo Zhang Signed-off-by: Duo Zhang Signed-off-by: Lijin Bin --- .../hadoop/hbase/master/SplitWALManager.java | 18 +-- .../hadoop/hbase/master/WorkerAssigner.java | 33 ++--- .../procedure/SnapshotVerifyProcedure.java | 3 +- .../master/procedure/SplitWALProcedure.java | 2 +- .../master/snapshot/SnapshotManager.java | 16 +-- .../hbase/master/TestSplitWALManager.java | 136 +++++++++--------- 6 files changed, 100 insertions(+), 108 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java index 18dfc7d493bf..32b2f4d21f29 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java @@ -26,7 +26,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; -import java.util.Optional; import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -35,7 +34,6 @@ import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler; import org.apache.hadoop.hbase.master.procedure.SplitWALProcedure; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureEvent; @@ -153,25 +151,19 @@ List createSplitWALProcedures(List splittingWALs, */ public ServerName acquireSplitWALWorker(Procedure procedure) throws ProcedureSuspendedException { - Optional worker = splitWorkerAssigner.acquire(); - if (worker.isPresent()) { - LOG.debug("Acquired split WAL worker={}", worker.get()); - return worker.get(); - } - splitWorkerAssigner.suspend(procedure); - throw new ProcedureSuspendedException(); + ServerName worker = splitWorkerAssigner.acquire(procedure); + LOG.debug("Acquired split WAL worker={}", worker); + return worker; } /** * After the worker finished the split WAL task, it will release the worker, and wake up all the * suspend procedures in the ProcedureEvent - * @param worker worker which is about to release - * @param scheduler scheduler which is to wake up the procedure event + * @param worker worker which is about to release */ - public void releaseSplitWALWorker(ServerName worker, MasterProcedureScheduler scheduler) { + public void releaseSplitWALWorker(ServerName worker) { LOG.debug("Release split WAL worker={}", worker); splitWorkerAssigner.release(worker); - splitWorkerAssigner.wake(scheduler); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/WorkerAssigner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/WorkerAssigner.java index b6df41acee23..7b1ec80cab4a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/WorkerAssigner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/WorkerAssigner.java @@ -23,9 +23,9 @@ import java.util.Map; import java.util.Optional; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureEvent; +import 
org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; import org.apache.yetus.audience.InterfaceAudience; /** @@ -51,36 +51,37 @@ public WorkerAssigner(MasterServices master, int maxTasks, ProcedureEvent eve } } - public synchronized Optional acquire() { + public synchronized ServerName acquire(Procedure proc) throws ProcedureSuspendedException { List serverList = master.getServerManager().getOnlineServersList(); Collections.shuffle(serverList); Optional worker = serverList.stream() .filter( serverName -> !currentWorkers.containsKey(serverName) || currentWorkers.get(serverName) > 0) .findAny(); - worker.ifPresent(name -> currentWorkers.compute(name, (serverName, - availableWorker) -> availableWorker == null ? maxTasks - 1 : availableWorker - 1)); - return worker; + if (worker.isPresent()) { + ServerName sn = worker.get(); + currentWorkers.compute(sn, (serverName, + availableWorker) -> availableWorker == null ? maxTasks - 1 : availableWorker - 1); + return sn; + } else { + event.suspend(); + event.suspendIfNotReady(proc); + throw new ProcedureSuspendedException(); + } } public synchronized void release(ServerName serverName) { currentWorkers.compute(serverName, (k, v) -> v == null ? null : v + 1); - } - - public void suspend(Procedure proc) { - event.suspend(); - event.suspendIfNotReady(proc); - } - - public void wake(MasterProcedureScheduler scheduler) { if (!event.isReady()) { - event.wake(scheduler); + event.wake(master.getMasterProcedureExecutor().getEnvironment().getProcedureScheduler()); } } @Override - public void serverAdded(ServerName worker) { - this.wake(master.getMasterProcedureExecutor().getEnvironment().getProcedureScheduler()); + public synchronized void serverAdded(ServerName worker) { + if (!event.isReady()) { + event.wake(master.getMasterProcedureExecutor().getEnvironment().getProcedureScheduler()); + } } public synchronized void addUsedWorker(ServerName worker) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotVerifyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotVerifyProcedure.java index a3e126484c34..34a12ed52b1a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotVerifyProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotVerifyProcedure.java @@ -109,8 +109,7 @@ protected synchronized boolean complete(MasterProcedureEnv env, Throwable error) setFailure("verify-snapshot", e); } finally { // release the worker - env.getMasterServices().getSnapshotManager().releaseSnapshotVerifyWorker(this, targetServer, - env.getProcedureScheduler()); + env.getMasterServices().getSnapshotManager().releaseSnapshotVerifyWorker(this, targetServer); } return isProcedureCompleted; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitWALProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitWALProcedure.java index 699834f9c1d7..98c2c0ec6930 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitWALProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitWALProcedure.java @@ -90,7 +90,7 @@ protected Flow executeFromState(MasterProcedureEnv env, MasterProcedureProtos.Sp skipPersistence(); throw new ProcedureSuspendedException(); } - splitWALManager.releaseSplitWALWorker(worker, env.getProcedureScheduler()); + splitWALManager.releaseSplitWALWorker(worker); if (!finished) { 
LOG.warn("Failed to split wal {} by server {}, retry...", walPath, worker); setNextState(MasterProcedureProtos.SplitWALState.ACQUIRE_SPLIT_WAL_WORKER); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index ac9e654fcb34..c86af2bda5e7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -26,7 +26,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executors; @@ -66,7 +65,6 @@ import org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner; import org.apache.hadoop.hbase.master.procedure.CloneSnapshotProcedure; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; -import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler; import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; import org.apache.hadoop.hbase.master.procedure.RestoreSnapshotProcedure; import org.apache.hadoop.hbase.master.procedure.SnapshotProcedure; @@ -1474,20 +1472,14 @@ public boolean snapshotProcedureEnabled() { public ServerName acquireSnapshotVerifyWorker(SnapshotVerifyProcedure procedure) throws ProcedureSuspendedException { - Optional worker = verifyWorkerAssigner.acquire(); - if (worker.isPresent()) { - LOG.debug("{} Acquired verify snapshot worker={}", procedure, worker.get()); - return worker.get(); - } - verifyWorkerAssigner.suspend(procedure); - throw new ProcedureSuspendedException(); + ServerName worker = verifyWorkerAssigner.acquire(procedure); + LOG.debug("{} Acquired verify snapshot worker={}", procedure, worker); + return worker; } - public void releaseSnapshotVerifyWorker(SnapshotVerifyProcedure procedure, ServerName worker, - MasterProcedureScheduler scheduler) { + public void releaseSnapshotVerifyWorker(SnapshotVerifyProcedure procedure, ServerName worker) { LOG.debug("{} Release verify snapshot worker={}", procedure, worker); verifyWorkerAssigner.release(worker); - verifyWorkerAssigner.wake(scheduler); } private void restoreWorkers() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java index ea92f7922794..7e6922b0fc48 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java @@ -17,9 +17,12 @@ */ package org.apache.hadoop.hbase.master; -import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK; -import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER; import static org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType.SPLIT_WAL; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.ArrayList; @@ -33,6 +36,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface; import org.apache.hadoop.hbase.procedure2.Procedure; @@ -46,10 +50,10 @@ import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.junit.After; -import org.junit.Assert; import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; @@ -63,7 +67,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; @Category({ MasterTests.class, LargeTests.class }) - public class TestSplitWALManager { @ClassRule @@ -78,10 +81,11 @@ public class TestSplitWALManager { private byte[] FAMILY; @Before - public void setup() throws Exception { + public void setUp() throws Exception { TEST_UTIL = new HBaseTestingUtil(); - TEST_UTIL.getConfiguration().setBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, false); - TEST_UTIL.getConfiguration().setInt(HBASE_SPLIT_WAL_MAX_SPLITTER, 1); + TEST_UTIL.getConfiguration().setBoolean(HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK, false); + TEST_UTIL.getConfiguration().setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 5); + TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER, 1); TEST_UTIL.startMiniCluster(3); master = TEST_UTIL.getHBaseCluster().getMaster(); splitWALManager = master.getSplitWALManager(); @@ -90,7 +94,7 @@ public void setup() throws Exception { } @After - public void teardown() throws Exception { + public void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @@ -98,57 +102,61 @@ public void teardown() throws Exception { public void testAcquireAndRelease() throws Exception { List testProcedures = new ArrayList<>(); for (int i = 0; i < 4; i++) { - testProcedures - .add(new FakeServerProcedure(TEST_UTIL.getHBaseCluster().getServerHoldingMeta())); + testProcedures.add(new FakeServerProcedure( + ServerName.valueOf("server" + i, 12345, EnvironmentEdgeManager.currentTime()))); } - ServerName server = splitWALManager.acquireSplitWALWorker(testProcedures.get(0)); - Assert.assertNotNull(server); - Assert.assertNotNull(splitWALManager.acquireSplitWALWorker(testProcedures.get(1))); - Assert.assertNotNull(splitWALManager.acquireSplitWALWorker(testProcedures.get(2))); - - Exception e = null; - try { - splitWALManager.acquireSplitWALWorker(testProcedures.get(3)); - } catch (ProcedureSuspendedException suspendException) { - e = suspendException; + ProcedureExecutor procExec = master.getMasterProcedureExecutor(); + procExec.submitProcedure(testProcedures.get(0)); + TEST_UTIL.waitFor(10000, () -> testProcedures.get(0).isWorkerAcquired()); + procExec.submitProcedure(testProcedures.get(1)); + procExec.submitProcedure(testProcedures.get(2)); + TEST_UTIL.waitFor(10000, + () -> testProcedures.get(1).isWorkerAcquired() && testProcedures.get(2).isWorkerAcquired()); + + // should get a ProcedureSuspendedException, so it will try to acquire but can not get a worker + procExec.submitProcedure(testProcedures.get(3)); + TEST_UTIL.waitFor(10000, () -> testProcedures.get(3).isTriedToAcquire()); + for (int i = 0; i < 3; i++) { + Thread.sleep(1000); + assertFalse(testProcedures.get(3).isWorkerAcquired()); } - 
Assert.assertNotNull(e); - Assert.assertTrue(e instanceof ProcedureSuspendedException); - splitWALManager.releaseSplitWALWorker(server, TEST_UTIL.getHBaseCluster().getMaster() - .getMasterProcedureExecutor().getEnvironment().getProcedureScheduler()); - Assert.assertNotNull(splitWALManager.acquireSplitWALWorker(testProcedures.get(3))); + // release a worker, the last procedure should be able to get a worker + testProcedures.get(0).countDown(); + TEST_UTIL.waitFor(10000, () -> testProcedures.get(3).isWorkerAcquired()); + + for (int i = 1; i < 4; i++) { + testProcedures.get(i).countDown(); + } + for (int i = 0; i < 4; i++) { + final int index = i; + TEST_UTIL.waitFor(10000, () -> testProcedures.get(index).isFinished()); + } } @Test public void testAddNewServer() throws Exception { List testProcedures = new ArrayList<>(); for (int i = 0; i < 4; i++) { - testProcedures - .add(new FakeServerProcedure(TEST_UTIL.getHBaseCluster().getServerHoldingMeta())); + testProcedures.add( + new FakeServerProcedure(TEST_UTIL.getHBaseCluster().getRegionServer(1).getServerName())); } ServerName server = splitWALManager.acquireSplitWALWorker(testProcedures.get(0)); - Assert.assertNotNull(server); - Assert.assertNotNull(splitWALManager.acquireSplitWALWorker(testProcedures.get(1))); - Assert.assertNotNull(splitWALManager.acquireSplitWALWorker(testProcedures.get(2))); - - Exception e = null; - try { - splitWALManager.acquireSplitWALWorker(testProcedures.get(3)); - } catch (ProcedureSuspendedException suspendException) { - e = suspendException; - } - Assert.assertNotNull(e); - Assert.assertTrue(e instanceof ProcedureSuspendedException); + assertNotNull(server); + assertNotNull(splitWALManager.acquireSplitWALWorker(testProcedures.get(1))); + assertNotNull(splitWALManager.acquireSplitWALWorker(testProcedures.get(2))); + + assertThrows(ProcedureSuspendedException.class, + () -> splitWALManager.acquireSplitWALWorker(testProcedures.get(3))); JVMClusterUtil.RegionServerThread newServer = TEST_UTIL.getHBaseCluster().startRegionServer(); newServer.waitForServerOnline(); - Assert.assertNotNull(splitWALManager.acquireSplitWALWorker(testProcedures.get(3))); + assertNotNull(splitWALManager.acquireSplitWALWorker(testProcedures.get(3))); } @Test public void testCreateSplitWALProcedures() throws Exception { - TEST_UTIL.createTable(TABLE_NAME, FAMILY, TEST_UTIL.KEYS_FOR_HBA_CREATE_TABLE); + TEST_UTIL.createTable(TABLE_NAME, FAMILY, HBaseTestingUtil.KEYS_FOR_HBA_CREATE_TABLE); // load table TEST_UTIL.loadTable(TEST_UTIL.getConnection().getTable(TABLE_NAME), FAMILY); ProcedureExecutor masterPE = master.getMasterProcedureExecutor(); @@ -158,21 +166,21 @@ public void testCreateSplitWALProcedures() throws Exception { // Test splitting meta wal FileStatus[] wals = TEST_UTIL.getTestFileSystem().listStatus(metaWALDir, MasterWalManager.META_FILTER); - Assert.assertEquals(1, wals.length); + assertEquals(1, wals.length); List testProcedures = splitWALManager.createSplitWALProcedures(Lists.newArrayList(wals[0]), metaServer); - Assert.assertEquals(1, testProcedures.size()); + assertEquals(1, testProcedures.size()); ProcedureTestingUtility.submitAndWait(masterPE, testProcedures.get(0)); - Assert.assertFalse(TEST_UTIL.getTestFileSystem().exists(wals[0].getPath())); + assertFalse(TEST_UTIL.getTestFileSystem().exists(wals[0].getPath())); // Test splitting wal wals = TEST_UTIL.getTestFileSystem().listStatus(metaWALDir, MasterWalManager.NON_META_FILTER); - Assert.assertEquals(1, wals.length); + assertEquals(1, wals.length); testProcedures = 
splitWALManager.createSplitWALProcedures(Lists.newArrayList(wals[0]), metaServer); - Assert.assertEquals(1, testProcedures.size()); + assertEquals(1, testProcedures.size()); ProcedureTestingUtility.submitAndWait(masterPE, testProcedures.get(0)); - Assert.assertFalse(TEST_UTIL.getTestFileSystem().exists(wals[0].getPath())); + assertFalse(TEST_UTIL.getTestFileSystem().exists(wals[0].getPath())); } @Test @@ -192,11 +200,11 @@ public void testAcquireAndReleaseSplitWALWorker() throws Exception { ProcedureTestingUtility.submitProcedure(masterPE, failedProcedure, HConstants.NO_NONCE, HConstants.NO_NONCE); TEST_UTIL.waitFor(20000, () -> failedProcedure.isTriedToAcquire()); - Assert.assertFalse(failedProcedure.isWorkerAcquired()); + assertFalse(failedProcedure.isWorkerAcquired()); // let one procedure finish and release worker testProcedures.get(0).countDown(); TEST_UTIL.waitFor(10000, () -> failedProcedure.isWorkerAcquired()); - Assert.assertTrue(testProcedures.get(0).isSuccess()); + assertTrue(testProcedures.get(0).isSuccess()); } @Test @@ -206,14 +214,14 @@ public void testGetWALsToSplit() throws Exception { TEST_UTIL.loadTable(TEST_UTIL.getConnection().getTable(TABLE_NAME), FAMILY); ServerName metaServer = TEST_UTIL.getHBaseCluster().getServerHoldingMeta(); List metaWals = splitWALManager.getWALsToSplit(metaServer, true); - Assert.assertEquals(1, metaWals.size()); + assertEquals(1, metaWals.size()); List wals = splitWALManager.getWALsToSplit(metaServer, false); - Assert.assertEquals(1, wals.size()); + assertEquals(1, wals.size()); ServerName testServer = TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream() .map(rs -> rs.getRegionServer().getServerName()).filter(rs -> rs != metaServer).findAny() .get(); metaWals = splitWALManager.getWALsToSplit(testServer, true); - Assert.assertEquals(0, metaWals.size()); + assertEquals(0, metaWals.size()); } private void splitLogsTestHelper(HBaseTestingUtil testUtil) throws Exception { @@ -233,9 +241,9 @@ private void splitLogsTestHelper(HBaseTestingUtil testUtil) throws Exception { .map(rs -> rs.getRegionServer().getServerName()).filter(rs -> rs != metaServer).findAny() .get(); List procedures = splitWALManager.splitWALs(testServer, false); - Assert.assertEquals(1, procedures.size()); + assertEquals(1, procedures.size()); ProcedureTestingUtility.submitAndWait(masterPE, procedures.get(0)); - Assert.assertEquals(0, splitWALManager.getWALsToSplit(testServer, false).size()); + assertEquals(0, splitWALManager.getWALsToSplit(testServer, false).size()); // Validate the old WAL file archive dir Path walRootDir = hmaster.getMasterFileSystem().getWALRootDir(); @@ -244,12 +252,12 @@ private void splitLogsTestHelper(HBaseTestingUtil testUtil) throws Exception { int archiveFileCount = walFS.listStatus(walArchivePath).length; procedures = splitWALManager.splitWALs(metaServer, true); - Assert.assertEquals(1, procedures.size()); + assertEquals(1, procedures.size()); ProcedureTestingUtility.submitAndWait(masterPE, procedures.get(0)); - Assert.assertEquals(0, splitWALManager.getWALsToSplit(metaServer, true).size()); - Assert.assertEquals(1, splitWALManager.getWALsToSplit(metaServer, false).size()); + assertEquals(0, splitWALManager.getWALsToSplit(metaServer, true).size()); + assertEquals(1, splitWALManager.getWALsToSplit(metaServer, false).size()); // There should be archiveFileCount + 1 WALs after SplitWALProcedure finish - Assert.assertEquals("Splitted WAL files should be archived", archiveFileCount + 1, + assertEquals("Splitted WAL files should be archived", 
archiveFileCount + 1, walFS.listStatus(walArchivePath).length); } @@ -261,8 +269,8 @@ public void testSplitLogs() throws Exception { @Test public void testSplitLogsWithDifferentWalAndRootFS() throws Exception { HBaseTestingUtil testUtil2 = new HBaseTestingUtil(); - testUtil2.getConfiguration().setBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, false); - testUtil2.getConfiguration().setInt(HBASE_SPLIT_WAL_MAX_SPLITTER, 1); + testUtil2.getConfiguration().setBoolean(HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK, false); + testUtil2.getConfiguration().setInt(HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER, 1); Path dir = TEST_UTIL.getDataTestDirOnTestFS("testWalDir"); testUtil2.getConfiguration().set(CommonFSUtils.HBASE_WAL_DIR, dir.toString()); CommonFSUtils.setWALRootDir(testUtil2.getConfiguration(), dir); @@ -295,7 +303,7 @@ public void testWorkerReloadWhenMasterRestart() throws Exception { ProcedureTestingUtility.submitProcedure(master.getMasterProcedureExecutor(), failedProcedure, HConstants.NO_NONCE, HConstants.NO_NONCE); TEST_UTIL.waitFor(20000, () -> failedProcedure.isTriedToAcquire()); - Assert.assertFalse(failedProcedure.isWorkerAcquired()); + assertFalse(failedProcedure.isWorkerAcquired()); for (int i = 0; i < 3; i++) { testProcedures.get(i).countDown(); } @@ -307,9 +315,9 @@ public static final class FakeServerProcedure implements ServerProcedureInterface { private ServerName serverName; - private ServerName worker; + private volatile ServerName worker; private CountDownLatch barrier = new CountDownLatch(1); - private boolean triedToAcquire = false; + private volatile boolean triedToAcquire = false; public FakeServerProcedure() { } @@ -348,7 +356,7 @@ protected Flow executeFromState(MasterProcedureEnv env, setNextState(MasterProcedureProtos.SplitWALState.RELEASE_SPLIT_WORKER); return Flow.HAS_MORE_STATE; case RELEASE_SPLIT_WORKER: - splitWALManager.releaseSplitWALWorker(worker, env.getProcedureScheduler()); + splitWALManager.releaseSplitWALWorker(worker); return Flow.NO_MORE_STATE; default: throw new UnsupportedOperationException("unhandled state=" + state); From d592404cc34228eb3a98bf5a6442c2c49604afde Mon Sep 17 00:00:00 2001 From: Wellington Ramos Chevreuil Date: Mon, 15 Sep 2025 09:55:15 +0100 Subject: [PATCH 05/92] HBASE-29451 Add Docs section describing BucketCache Time based priority (#7289) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Dávid Paksy Reviewed-by: Kevin Geiszler Reviewed-by: Tak Lon (Stephen) Wu --- src/main/asciidoc/_chapters/architecture.adoc | 221 ++++++++++++++++++ 1 file changed, 221 insertions(+) diff --git a/src/main/asciidoc/_chapters/architecture.adoc b/src/main/asciidoc/_chapters/architecture.adoc index 6c4ec4f30f75..a1fb55818691 100644 --- a/src/main/asciidoc/_chapters/architecture.adoc +++ b/src/main/asciidoc/_chapters/architecture.adoc @@ -1256,6 +1256,227 @@ In 1.0, it should be more straight-forward. Onheap LruBlockCache size is set as a fraction of java heap using `hfile.block.cache.size setting` (not the best name) and BucketCache is set as above in absolute Megabytes. ==== +==== Time Based Priority for BucketCache + +link:https://issues.apache.org/jira/browse/HBASE-28463[HBASE-28463] introduced time based priority +for blocks in BucketCache. It allows for defining +an age threshold at individual column families' configuration, whereby blocks older than this +configured threshold would be targeted first for eviction. 
+ +Blocks from column families that don't define the age threshold wouldn't be evaluated by +the time based priority, and would only be evicted following the LRU eviction logic. + +This feature is mostly useful for use cases where most recent data is more frequently accessed, +and therefore should get higher priority in the cache. Configuring Time Based Priority with the +"age" of most accessed data would then give a finer control over blocks allocation in +the BucketCache than the built-in LRU eviction logic. + +Time Based Priority for BucketCache provides three different strategies for defining data age: + +* Cell timestamps: Uses the timestamp portion of HBase cells for comparing the data age. +* Custom cell qualifiers: Uses a custom-defined date qualifier for comparing the data age. +It uses that value to tier the entire row containing the given qualifier value. +This requires that the custom qualifier be a valid Java long timestamp. +* Custom value provider: Allows for defining a pluggable implementation that +contains the logic for identifying the date value to be used for comparison. +This also provides additional flexibility for different use cases that might have the date stored +in other formats or embedded with other data in various portions of a given row. + +For use cases where priority is determined by the order of record ingestion in HBase +(with the most recent being the most relevant), the built-in cell timestamp offers the most +convenient and efficient method for configuring age-based priority. +See <>. + +Some applications may utilize a custom date column to define the priority of table records. +In such instances, a custom cell qualifier-based priority is advisable. +See <>. + + +Finally, more intricate schemas may incorporate domain-specific logic for defining the age of +each record. The custom value provider facilitates the integration of custom code to implement +the appropriate parsing of the date value that should be used for the priority comparison. +See <>. + +With Time Based Priority for BucketCache, blocks age is evaluated when deciding if a block should +be cached (i.e. during reads, writes, compaction and prefetch), as well as during the cache +freeSpace run (mass eviction), prior to executing the LRU logic. + +Because blocks don't hold any specific meta information other than type, +it's necessary to group blocks of the same "age group" on separate files, using specialized compaction +implementations (see more details in the configuration section below). The time range of all blocks +in each file is then appended at the file meta info section, and is used for evaluating the age of +blocks that should be considered in the Time Based Priority logic. + +[[enable.timebasedpriorityforbucketcache]] +===== Configuring Time Based Priority for BucketCache + +Finding the age of each block involves an extra overhead, therefore the feature is disabled by +default at a global configuration level. + +To enable it, the following configuration should be set on RegionServers' _hbase-site.xml_: + +[source,xml] +---- + + hbase.regionserver.datatiering.enable + true + +---- + +Once enabled globally, it's necessary to define the desired strategy-specific settings at +the individual column family level. + +[[cellts.timebasedpriorityforbucketcache]] +====== Using Cell timestamps for Time Based Priority + +This strategy is the most efficient to run, as it uses the timestamp +portion of each cell containing the data for comparing the age of blocks. 
It requires +DateTieredCompaction for splitting the blocks into separate files according to blocks' ages. + +The example below sets the hot age threshold to one week (in milliseconds) +for the column family 'cf1' in table 'orders': + +[source] +---- +hbase(main):003:0> alter 'orders', {NAME => 'cf1', + CONFIGURATION => {'hbase.hstore.datatiering.type' => 'TIME_RANGE', + 'hbase.hstore.datatiering.hot.age.millis' => '604800000', + 'hbase.hstore.engine.class' => 'org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine', + 'hbase.hstore.blockingStoreFiles' => '60', + 'hbase.hstore.compaction.min' => '2', + 'hbase.hstore.compaction.max' => '60' + } +} +---- + +.Date Tiered Compaction specific tunings +[NOTE] +==== +In the example above, the properties governing the number of windows and period of each window in +the date tiered compaction were not set. With the default settings, the compaction will create +initially four windows of six hours, then four windows of one day each, then another four +windows of four days each and so on until the minimum timestamp among the selected files is covered. +This can create a large number of files, therefore, additional changes to the +'hbase.hstore.blockingStoreFiles', 'hbase.hstore.compaction.min' and 'hbase.hstore.compaction.max' +are recommended. + +Alternatively, consider adjusting the initial window size to the same as the hot age threshold, and +two windows only per tier: + +[source] +---- +hbase(main):003:0> alter 'orders', {NAME => 'cf1', + CONFIGURATION => {'hbase.hstore.datatiering.type' => 'TIME_RANGE', + 'hbase.hstore.datatiering.hot.age.millis' => '604800000', + 'hbase.hstore.engine.class' => 'org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine', + 'hbase.hstore.compaction.date.tiered.base.window.millis' => '604800000', + 'hbase.hstore.compaction.date.tiered.windows.per.tier' => '2' + } +} +---- +==== + +[[customcellqualifier.timebasedpriorityforbucketcache]] +====== Using Custom Cell Qualifiers for Time Based Priority + +This strategy uses a new compaction implementation designed for Time Based Priority. It extends +date tiered compaction, but instead of producing multiple tiers of various time windows, it +simply splits files into two groups: the "cold" group, where all blocks are older than the defined +threshold age, and the "hot" group, where all blocks are newer than the threshold age. + +The example below defines a cell qualifier 'event_date' to be used for comparing the age of blocks +within the custom cell qualifier strategy: + +[source] +---- +hbase(main):003:0> alter 'orders', {NAME => 'cf1', + CONFIGURATION => {'hbase.hstore.datatiering.type' => 'CUSTOM', + 'TIERING_CELL_QUALIFIER' => 'event_date', + 'hbase.hstore.datatiering.hot.age.millis' => '604800000', + 'hbase.hstore.engine.class' => 'org.apache.hadoop.hbase.regionserver.CustomTieredStoreEngine', + 'hbase.hstore.compaction.date.tiered.custom.age.limit.millis' => '604800000' + } +} +---- + +.Time Based Priority x Compaction Age Threshold Configurations +[NOTE] +==== +Note that there are two different configurations for defining the hot age threshold. +This is because the Time Based Priority enforcer operates independently of the compaction +implementation. +==== + +[[customvalueprovider.timebasedpriorityforbucketcache]] +====== Using a Custom value provider for Time Based Priority + +It's also possible to hook in domain-specific logic for defining the data age of each row to be +used for comparing blocks priorities. 
The Custom Time Based Priority framework defines the +`CustomTieredCompactor.TieringValueProvider` interface, which can be implemented to provide the +specific date value to be used by compaction for grouping the blocks according to the threshold age. + +In the following example, the `RowKeyPortionTieringValueProvider` implements the +`getTieringValue` method. This method parses the date from a segment of the row key value, +specifically between positions 14 and 29, using the "yyyyMMddHHmmss" format. +The parsed date is then returned as a long timestamp, which is then used by custom tiered compaction +to group the blocks based on the defined hot age threshold: + +[source,java] +---- +public class RowKeyPortionTieringValueProvider implements CustomTieredCompactor.TieringValueProvider { + private SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMddHHmmss"); + @Override + public void init(Configuration configuration) throws Exception {} + @Override + public long getTieringValue(Cell cell) { + byte[] rowArray = new byte[cell.getRowLength()]; + System.arraycopy(cell.getRowArray(), cell.getRowOffset(), rowArray, 0, cell.getRowLength()); + String datePortion = Bytes.toString(rowArray).substring(14, 29).trim(); + try { + return sdf.parse(datePortion).getTime(); + } catch (ParseException e) { + //handle error + } + return Long.MAX_VALUE; + } +} +---- + +The Tiering Value Provider above can then be configured for Time Based Priority as follows: + +[source] +---- +hbase(main):003:0> alter 'orders', {NAME => 'cf1', + CONFIGURATION => {'hbase.hstore.datatiering.type' => 'CUSTOM', + 'hbase.hstore.custom-tiering-value.provider.class' => + 'org.apache.hbase.client.example.RowKeyPortionTieringValueProvider', + 'hbase.hstore.datatiering.hot.age.millis' => '604800000', + 'hbase.hstore.engine.class' => 'org.apache.hadoop.hbase.regionserver.CustomTieredStoreEngine', + 'hbase.hstore.compaction.date.tiered.custom.age.limit.millis' => '604800000' + } +} +---- + +[NOTE] +==== +Upon enabling Custom Time Based Priority (either the custom qualifier or custom value provider) +in the column family configuration, it is imperative that major compaction be executed twice on +the specified tables to ensure the effective application of the newly configured priorities +within the bucket cache. +==== + + +[NOTE] +==== +Time Based Priority was originally implemented with the cell timestamp strategy only. The original +design covering cell timestamp based strategy is available +link:https://docs.google.com/document/d/1Qd3kvZodBDxHTFCIRtoePgMbvyuUSxeydi2SEWQFQro/edit?tab=t.0#heading=h.gjdgxs[here]. + +The second phase including the two custom strategies mentioned above is detailed in +link:https://docs.google.com/document/d/1uBGIO9IQ-FbSrE5dnUMRtQS23NbCbAmRVDkAOADcU_E/edit?tab=t.0[this separate design doc]. +==== + + ==== Compressed BlockCache link:https://issues.apache.org/jira/browse/HBASE-11331[HBASE-11331] introduced lazy BlockCache decompression, more simply referred to as compressed BlockCache. 
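
As a complement to the shell examples in the Time Based Priority section above, the sketch below
shows how the same `TIME_RANGE` column family settings might be applied programmatically. It is a
minimal illustration, not part of the patch: the `orders`/`cf1` names are the hypothetical ones
from the shell example, the configuration keys are the ones documented above, and it assumes the
standard `Admin`/`ColumnFamilyDescriptorBuilder` client API.

[source,java]
----
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.util.Bytes;

public class TimeRangeTieringConfigExample {
  // Applies the same settings as the `alter 'orders'` shell example above.
  public static void enableTimeRangeTiering(Connection connection) throws Exception {
    TableName table = TableName.valueOf("orders"); // hypothetical table
    byte[] family = Bytes.toBytes("cf1");          // hypothetical column family
    try (Admin admin = connection.getAdmin()) {
      ColumnFamilyDescriptor current = admin.getDescriptor(table).getColumnFamily(family);
      ColumnFamilyDescriptor updated = ColumnFamilyDescriptorBuilder.newBuilder(current)
        .setConfiguration("hbase.hstore.datatiering.type", "TIME_RANGE")
        // hot age threshold: one week, in milliseconds
        .setConfiguration("hbase.hstore.datatiering.hot.age.millis", "604800000")
        .setConfiguration("hbase.hstore.engine.class",
          "org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine")
        .build();
      admin.modifyColumnFamily(table, updated);
    }
  }
}
----

As with the shell route, `hbase.regionserver.datatiering.enable` still has to be set to `true` in
the RegionServers' _hbase-site.xml_ before these per-family settings take effect.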
From 1e06bccf79ede58e86ca7f1f4b9231bb7a53fd72 Mon Sep 17 00:00:00 2001 From: Junegunn Choi Date: Mon, 15 Sep 2025 18:39:15 +0900 Subject: [PATCH 06/92] HBASE-29577 Fix NPE from RegionServerRpcQuotaManager when reloading configuration (#7285) Signed-off-by: Wellington Chevreuil Signed-off-by: Charles Connell --- .../hadoop/hbase/quotas/RegionServerRpcQuotaManager.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java index 958793dcdf00..7a42d0f1aa31 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java @@ -91,7 +91,9 @@ public void stop() { } public void reload() { - quotaCache.forceSynchronousCacheRefresh(); + if (isQuotaEnabled()) { + quotaCache.forceSynchronousCacheRefresh(); + } } @Override From 3ce997c79db056ebcf9f712944db1c2c6d71027a Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Tue, 16 Sep 2025 16:51:12 +0200 Subject: [PATCH 07/92] HBASE-29590 Use hadoop 3.4.2 as default hadooop3 dependency (#7301) Signed-off-by: Nihal Jain Signed-off-by: Duo Zhang --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 170dae40db9c..6e8fab5e8b29 100644 --- a/pom.xml +++ b/pom.xml @@ -855,7 +855,7 @@ 3.5.0 ${compileSource} - 3.4.1 + 3.4.2 ${hadoop-three.version} From c6a0c3b2b7af1253822066ee44fc88c6aff97db8 Mon Sep 17 00:00:00 2001 From: Hernan Romer Date: Tue, 16 Sep 2025 16:24:05 -0400 Subject: [PATCH 08/92] Modern backup failures can cause backup system to lock up (#7288) Co-authored-by: Hernan Gelaf-Romer Signed-off-by: Charles Connell Signed-off-by: Ray Mattingly --- .../hbase/backup/impl/BackupSystemTable.java | 4 +- .../master/TestRestoreBackupSystemTable.java | 84 +++++++++ .../org/apache/hadoop/hbase/client/Admin.java | 3 + .../hbase/client/AdminOverAsyncAdmin.java | 5 + .../hadoop/hbase/client/AsyncAdmin.java | 3 + .../hadoop/hbase/client/AsyncHBaseAdmin.java | 5 + .../hbase/client/RawAsyncHBaseAdmin.java | 25 +++ .../main/protobuf/server/master/Master.proto | 10 ++ .../server/master/MasterProcedure.proto | 7 + .../hbase/master/MasterRpcServices.java | 19 ++ .../RestoreBackupSystemTableProcedure.java | 169 ++++++++++++++++++ .../procedure/TableProcedureInterface.java | 3 +- .../hbase/master/procedure/TableQueue.java | 1 + .../hbase/rsgroup/VerifyingRSGroupAdmin.java | 5 + .../hbase/thrift2/client/ThriftAdmin.java | 5 + 15 files changed, 344 insertions(+), 4 deletions(-) create mode 100644 hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestRestoreBackupSystemTable.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreBackupSystemTableProcedure.java diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java index 61a74450e8d6..f2ddcf5e7573 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java @@ -1403,9 +1403,7 @@ public static void restoreFromSnapshot(Connection conn) throws IOException { try (Admin admin = conn.getAdmin()) { String snapshotName = 
BackupSystemTable.getSnapshotName(conf); if (snapshotExists(admin, snapshotName)) { - admin.disableTable(BackupSystemTable.getTableName(conf)); - admin.restoreSnapshot(snapshotName); - admin.enableTable(BackupSystemTable.getTableName(conf)); + admin.restoreBackupSystemTable(snapshotName); LOG.debug("Done restoring backup system table"); } else { // Snapshot does not exists, i.e completeBackup failed after diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestRestoreBackupSystemTable.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestRestoreBackupSystemTable.java new file mode 100644 index 000000000000..31ded67b4774 --- /dev/null +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestRestoreBackupSystemTable.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup.master; + +import static org.junit.Assert.assertEquals; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ MasterTests.class, MediumTests.class }) +public class TestRestoreBackupSystemTable { + private static final String BACKUP_ROOT = "root"; + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + + @BeforeClass + public static void setUp() throws Exception { + UTIL.startMiniCluster(); + } + + @Test + public void itRestoresFromSnapshot() throws Exception { + BackupSystemTable table = new BackupSystemTable(UTIL.getConnection()); + Set tables = new HashSet<>(); + + tables.add(TableName.valueOf("test1")); + tables.add(TableName.valueOf("test2")); + tables.add(TableName.valueOf("test3")); + + Map rsTimestampMap = new HashMap<>(); + rsTimestampMap.put("rs1:100", 100L); + rsTimestampMap.put("rs2:100", 101L); + rsTimestampMap.put("rs3:100", 103L); + + table.writeRegionServerLogTimestamp(tables, rsTimestampMap, BACKUP_ROOT); + BackupSystemTable.snapshot(UTIL.getConnection()); + + Admin admin = UTIL.getAdmin(); + TableName backupSystemTn = BackupSystemTable.getTableName(UTIL.getConfiguration()); + admin.disableTable(backupSystemTn); + admin.truncateTable(backupSystemTn, true); + + BackupSystemTable.restoreFromSnapshot(UTIL.getConnection()); + Map> results = table.readLogTimestampMap(BACKUP_ROOT); + + 
assertEquals(results.size(), tables.size()); + + for (TableName tableName : tables) { + Map resultMap = results.get(tableName); + assertEquals(resultMap, rsTimestampMap); + } + } + + @AfterClass + public static void tearDown() throws Exception { + UTIL.shutdownMiniCluster(); + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 43a004a471cc..1c08ec3b26fd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -2661,4 +2661,7 @@ List getLogEntries(Set serverNames, String logType, Server * Get the list of cached files */ List getCachedFilesList(ServerName serverName) throws IOException; + + @InterfaceAudience.Private + void restoreBackupSystemTable(String snapshotName) throws IOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java index c866f434e63a..e6bf6c3d28e0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java @@ -1141,4 +1141,9 @@ public void flushMasterStore() throws IOException { public List getCachedFilesList(ServerName serverName) throws IOException { return get(admin.getCachedFilesList(serverName)); } + + @Override + public void restoreBackupSystemTable(String snapshotName) throws IOException { + get(admin.restoreBackupSystemTable(snapshotName)); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index d808aecc815c..ec0556f20ac1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -1871,4 +1871,7 @@ CompletableFuture> getLogEntries(Set serverNames, Str * Get the list of cached files */ CompletableFuture> getCachedFilesList(ServerName serverName); + + @InterfaceAudience.Private + CompletableFuture restoreBackupSystemTable(String snapshotName); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index 33ac47c73d69..b1fb2be13547 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -1010,4 +1010,9 @@ public CompletableFuture flushMasterStore() { public CompletableFuture> getCachedFilesList(ServerName serverName) { return wrap(rawAdmin.getCachedFilesList(serverName)); } + + @Override + public CompletableFuture restoreBackupSystemTable(String snapshotName) { + return wrap(rawAdmin.restoreBackupSystemTable(snapshotName)); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 2373e936726e..710c8c430386 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -2795,6 +2795,19 @@ void onError(Throwable error) { } } + private static class 
RestoreBackupSystemTableProcedureBiConsumer extends ProcedureBiConsumer { + + @Override + void onFinished() { + LOG.info("RestoreBackupSystemTableProcedure completed"); + } + + @Override + void onError(Throwable error) { + LOG.info("RestoreBackupSystemTableProcedure failed with {}", error.getMessage()); + } + } + private static class CreateTableProcedureBiConsumer extends TableProcedureBiConsumer { CreateTableProcedureBiConsumer(TableName tableName) { @@ -4637,4 +4650,16 @@ List> adminCall(controller, stub, request.build(), resp -> resp.getCachedFilesList())) .serverName(serverName).call(); } + + @Override + public CompletableFuture restoreBackupSystemTable(String snapshotName) { + MasterProtos.RestoreBackupSystemTableRequest request = + MasterProtos.RestoreBackupSystemTableRequest.newBuilder().setSnapshotName(snapshotName) + .build(); + return this. procedureCall(request, + MasterService.Interface::restoreBackupSystemTable, + MasterProtos.RestoreBackupSystemTableResponse::getProcId, + new RestoreBackupSystemTableProcedureBiConsumer()); + } } diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto index 768a1d7544ea..6dd6ee723b02 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto @@ -1280,6 +1280,9 @@ service MasterService { rpc FlushTable(FlushTableRequest) returns(FlushTableResponse); + rpc RestoreBackupSystemTable(RestoreBackupSystemTableRequest) + returns(RestoreBackupSystemTableResponse); + rpc rollAllWALWriters(RollAllWALWritersRequest) returns(RollAllWALWritersResponse); } @@ -1369,6 +1372,13 @@ message FixMetaRequest {} message FixMetaResponse {} +message RestoreBackupSystemTableRequest { + required string snapshot_name = 1; +} +message RestoreBackupSystemTableResponse { + optional uint64 proc_id = 1; +} + service HbckService { /** Update state of the table in meta only*/ rpc SetTableStateInMeta(SetTableStateInMetaRequest) diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto index 554d7ec9c410..7e6c6c8e2fc7 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto @@ -840,6 +840,13 @@ message ReloadQuotasProcedureStateData { optional ForeignExceptionMessage error = 2; } +enum RestoreBackupSystemTableState { + RESTORE_BACKUP_SYSTEM_TABLE_PREPARE = 1; + RESTORE_BACKUP_SYSTEM_TABLE_DISABLE = 2; + RESTORE_BACKUP_SYSTEM_TABLE_RESTORE = 3; + RESTORE_BACKUP_SYSTEM_TABLE_ENABLE = 4; +} + enum LogRollProcedureState { LOG_ROLL_ROLL_LOG_ON_RS = 1; LOG_ROLL_COLLECT_RS_HIGHEST_WAL_FILENUM = 2; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index de911b54ee9a..e9e0f970ef8d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -76,6 +76,7 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable; +import 
org.apache.hadoop.hbase.master.procedure.RestoreBackupSystemTableProcedure; import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; import org.apache.hadoop.hbase.master.replication.AbstractPeerNoLockProcedure; import org.apache.hadoop.hbase.mob.MobUtils; @@ -3667,6 +3668,24 @@ public FlushTableResponse flushTable(RpcController controller, FlushTableRequest } } + @Override + public MasterProtos.RestoreBackupSystemTableResponse restoreBackupSystemTable( + RpcController rpcController, + MasterProtos.RestoreBackupSystemTableRequest restoreBackupSystemTableRequest) + throws ServiceException { + try { + String snapshotName = restoreBackupSystemTableRequest.getSnapshotName(); + SnapshotDescription snapshot = server.snapshotManager.getCompletedSnapshots().stream() + .filter(s -> s.getName().equals(snapshotName)).findFirst() + .orElseThrow(() -> new ServiceException("Snapshot %s not found".formatted(snapshotName))); + long pid = server.getMasterProcedureExecutor() + .submitProcedure(new RestoreBackupSystemTableProcedure(snapshot)); + return MasterProtos.RestoreBackupSystemTableResponse.newBuilder().setProcId(pid).build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + @Override public RollAllWALWritersResponse rollAllWALWriters(RpcController rpcController, RollAllWALWritersRequest request) throws ServiceException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreBackupSystemTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreBackupSystemTableProcedure.java new file mode 100644 index 000000000000..af980db6e39e --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreBackupSystemTableProcedure.java @@ -0,0 +1,169 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.util.List; +import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; +import org.apache.hadoop.hbase.procedure2.ProcedureYieldException; +import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RestoreBackupSystemTableState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; + +@InterfaceAudience.Private +public class RestoreBackupSystemTableProcedure + extends AbstractStateMachineTableProcedure { + private static final Logger LOG = + LoggerFactory.getLogger(RestoreBackupSystemTableProcedure.class); + + private final SnapshotDescription snapshot; + private boolean enableOnRollback = false; + + // Necessary for the procedure framework. Do not remove. + public RestoreBackupSystemTableProcedure() { + this(null); + } + + public RestoreBackupSystemTableProcedure(SnapshotDescription snapshot) { + this.snapshot = snapshot; + } + + @Override + public TableName getTableName() { + return TableName.valueOf(snapshot.getTable()); + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.RESTORE_BACKUP_SYSTEM_TABLE; + } + + @Override + protected Flow executeFromState(MasterProcedureEnv env, RestoreBackupSystemTableState state) + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + LOG.info("{} execute state={}", this, state); + + try { + switch (state) { + case RESTORE_BACKUP_SYSTEM_TABLE_PREPARE: + prepare(env); + return moreState(RestoreBackupSystemTableState.RESTORE_BACKUP_SYSTEM_TABLE_DISABLE); + case RESTORE_BACKUP_SYSTEM_TABLE_DISABLE: + TableState tableState = + env.getMasterServices().getTableStateManager().getTableState(getTableName()); + if (tableState.isEnabled()) { + addChildProcedure(createDisableTableProcedure(env)); + } + return moreState(RestoreBackupSystemTableState.RESTORE_BACKUP_SYSTEM_TABLE_RESTORE); + case RESTORE_BACKUP_SYSTEM_TABLE_RESTORE: + addChildProcedure(createRestoreSnapshotProcedure(env)); + return moreState(RestoreBackupSystemTableState.RESTORE_BACKUP_SYSTEM_TABLE_ENABLE); + case RESTORE_BACKUP_SYSTEM_TABLE_ENABLE: + addChildProcedure(createEnableTableProcedure(env)); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (Exception e) { + setFailure("restore-backup-system-table", e); + LOG.warn("unexpected exception while execute {}. 
Mark procedure Failed.", this, e); + return Flow.NO_MORE_STATE; + } + } + + @Override + protected void rollbackState(MasterProcedureEnv env, RestoreBackupSystemTableState state) + throws IOException, InterruptedException { + switch (state) { + case RESTORE_BACKUP_SYSTEM_TABLE_DISABLE, RESTORE_BACKUP_SYSTEM_TABLE_PREPARE: + return; + case RESTORE_BACKUP_SYSTEM_TABLE_RESTORE, RESTORE_BACKUP_SYSTEM_TABLE_ENABLE: + if (enableOnRollback) { + addChildProcedure(createEnableTableProcedure(env)); + } + return; + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } + + @Override + protected RestoreBackupSystemTableState getState(int stateId) { + return RestoreBackupSystemTableState.forNumber(stateId); + } + + @Override + protected int getStateId(RestoreBackupSystemTableState state) { + return state.getNumber(); + } + + @Override + protected RestoreBackupSystemTableState getInitialState() { + return RestoreBackupSystemTableState.RESTORE_BACKUP_SYSTEM_TABLE_PREPARE; + } + + private Flow moreState(RestoreBackupSystemTableState next) { + setNextState(next); + return Flow.HAS_MORE_STATE; + } + + private Procedure[] createDisableTableProcedure(MasterProcedureEnv env) + throws HBaseIOException { + DisableTableProcedure disableTableProcedure = + new DisableTableProcedure(env, getTableName(), true); + return new DisableTableProcedure[] { disableTableProcedure }; + } + + private Procedure[] createEnableTableProcedure(MasterProcedureEnv env) { + EnableTableProcedure enableTableProcedure = new EnableTableProcedure(env, getTableName()); + return new EnableTableProcedure[] { enableTableProcedure }; + } + + private Procedure[] createRestoreSnapshotProcedure(MasterProcedureEnv env) + throws IOException { + TableDescriptor desc = env.getMasterServices().getTableDescriptors().get(getTableName()); + RestoreSnapshotProcedure restoreSnapshotProcedure = + new RestoreSnapshotProcedure(env, desc, snapshot); + return new RestoreSnapshotProcedure[] { restoreSnapshotProcedure }; + } + + private void prepare(MasterProcedureEnv env) throws IOException { + List snapshots = + env.getMasterServices().getSnapshotManager().getCompletedSnapshots(); + boolean exists = snapshots.stream().anyMatch(s -> s.getName().equals(snapshot.getName())); + if (!exists) { + throw new SnapshotDoesNotExistException(ProtobufUtil.createSnapshotDesc(snapshot)); + } + + TableState tableState = + env.getMasterServices().getTableStateManager().getTableState(getTableName()); + if (tableState.isEnabled()) { + enableOnRollback = true; + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java index 00b9776366d5..c5c7ec602eab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java @@ -50,7 +50,8 @@ public enum TableOperationType { REGION_UNASSIGN, REGION_GC, MERGED_REGIONS_GC/* region operations */, - REGION_TRUNCATE + REGION_TRUNCATE, + RESTORE_BACKUP_SYSTEM_TABLE } /** Returns the name of the table the procedure is operating on */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java index be66a28d275e..7be4c4b1810e 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java @@ -54,6 +54,7 @@ static boolean requireTableExclusiveLock(TableProcedureInterface proc) { case DISABLE: case SNAPSHOT: case ENABLE: + case RESTORE_BACKUP_SYSTEM_TABLE: return true; case EDIT: // we allow concurrent edit on the ns family in meta table diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java index 4d592b49d0d3..a59b2966b89d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java @@ -984,6 +984,11 @@ public List getCachedFilesList(ServerName serverName) throws IOException return admin.getCachedFilesList(serverName); } + @Override + public void restoreBackupSystemTable(String snapshotName) throws IOException { + admin.restoreBackupSystemTable(snapshotName); + } + @Override public boolean replicationPeerModificationSwitch(boolean on, boolean drainProcedures) throws IOException { diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java index a0d73dcca21c..3d5a7e502e0a 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java @@ -1359,6 +1359,11 @@ public List getCachedFilesList(ServerName serverName) throws IOException throw new NotImplementedException("getCachedFilesList not supported in ThriftAdmin"); } + @Override + public void restoreBackupSystemTable(String snapshotName) throws IOException { + throw new NotImplementedException("restoreBackupSystemTable not supported in ThriftAdmin"); + } + @Override public boolean replicationPeerModificationSwitch(boolean on, boolean drainProcedures) throws IOException { From 7f7b9e6ef298b16768382081c3ab0163d7f15af0 Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Tue, 16 Sep 2025 16:27:09 -0400 Subject: [PATCH 09/92] Revert "Modern backup failures can cause backup system to lock up (#7288)" (#7307) This reverts commit c6a0c3b2b7af1253822066ee44fc88c6aff97db8. 
--- .../hbase/backup/impl/BackupSystemTable.java | 4 +- .../master/TestRestoreBackupSystemTable.java | 84 --------- .../org/apache/hadoop/hbase/client/Admin.java | 3 - .../hbase/client/AdminOverAsyncAdmin.java | 5 - .../hadoop/hbase/client/AsyncAdmin.java | 3 - .../hadoop/hbase/client/AsyncHBaseAdmin.java | 5 - .../hbase/client/RawAsyncHBaseAdmin.java | 25 --- .../main/protobuf/server/master/Master.proto | 10 -- .../server/master/MasterProcedure.proto | 7 - .../hbase/master/MasterRpcServices.java | 19 -- .../RestoreBackupSystemTableProcedure.java | 169 ------------------ .../procedure/TableProcedureInterface.java | 3 +- .../hbase/master/procedure/TableQueue.java | 1 - .../hbase/rsgroup/VerifyingRSGroupAdmin.java | 5 - .../hbase/thrift2/client/ThriftAdmin.java | 5 - 15 files changed, 4 insertions(+), 344 deletions(-) delete mode 100644 hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestRestoreBackupSystemTable.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreBackupSystemTableProcedure.java diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java index f2ddcf5e7573..61a74450e8d6 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java @@ -1403,7 +1403,9 @@ public static void restoreFromSnapshot(Connection conn) throws IOException { try (Admin admin = conn.getAdmin()) { String snapshotName = BackupSystemTable.getSnapshotName(conf); if (snapshotExists(admin, snapshotName)) { - admin.restoreBackupSystemTable(snapshotName); + admin.disableTable(BackupSystemTable.getTableName(conf)); + admin.restoreSnapshot(snapshotName); + admin.enableTable(BackupSystemTable.getTableName(conf)); LOG.debug("Done restoring backup system table"); } else { // Snapshot does not exists, i.e completeBackup failed after diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestRestoreBackupSystemTable.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestRestoreBackupSystemTable.java deleted file mode 100644 index 31ded67b4774..000000000000 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestRestoreBackupSystemTable.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.backup.master; - -import static org.junit.Assert.assertEquals; - -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; -import org.apache.hadoop.hbase.HBaseTestingUtil; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.testclassification.MasterTests; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category({ MasterTests.class, MediumTests.class }) -public class TestRestoreBackupSystemTable { - private static final String BACKUP_ROOT = "root"; - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); - - @BeforeClass - public static void setUp() throws Exception { - UTIL.startMiniCluster(); - } - - @Test - public void itRestoresFromSnapshot() throws Exception { - BackupSystemTable table = new BackupSystemTable(UTIL.getConnection()); - Set tables = new HashSet<>(); - - tables.add(TableName.valueOf("test1")); - tables.add(TableName.valueOf("test2")); - tables.add(TableName.valueOf("test3")); - - Map rsTimestampMap = new HashMap<>(); - rsTimestampMap.put("rs1:100", 100L); - rsTimestampMap.put("rs2:100", 101L); - rsTimestampMap.put("rs3:100", 103L); - - table.writeRegionServerLogTimestamp(tables, rsTimestampMap, BACKUP_ROOT); - BackupSystemTable.snapshot(UTIL.getConnection()); - - Admin admin = UTIL.getAdmin(); - TableName backupSystemTn = BackupSystemTable.getTableName(UTIL.getConfiguration()); - admin.disableTable(backupSystemTn); - admin.truncateTable(backupSystemTn, true); - - BackupSystemTable.restoreFromSnapshot(UTIL.getConnection()); - Map> results = table.readLogTimestampMap(BACKUP_ROOT); - - assertEquals(results.size(), tables.size()); - - for (TableName tableName : tables) { - Map resultMap = results.get(tableName); - assertEquals(resultMap, rsTimestampMap); - } - } - - @AfterClass - public static void tearDown() throws Exception { - UTIL.shutdownMiniCluster(); - } -} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 1c08ec3b26fd..43a004a471cc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -2661,7 +2661,4 @@ List getLogEntries(Set serverNames, String logType, Server * Get the list of cached files */ List getCachedFilesList(ServerName serverName) throws IOException; - - @InterfaceAudience.Private - void restoreBackupSystemTable(String snapshotName) throws IOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java index e6bf6c3d28e0..c866f434e63a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java @@ -1141,9 +1141,4 @@ public void flushMasterStore() throws IOException { public List getCachedFilesList(ServerName serverName) throws IOException { return get(admin.getCachedFilesList(serverName)); } - - @Override - public void restoreBackupSystemTable(String snapshotName) throws IOException { - 
get(admin.restoreBackupSystemTable(snapshotName)); - } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index ec0556f20ac1..d808aecc815c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -1871,7 +1871,4 @@ CompletableFuture> getLogEntries(Set serverNames, Str * Get the list of cached files */ CompletableFuture> getCachedFilesList(ServerName serverName); - - @InterfaceAudience.Private - CompletableFuture restoreBackupSystemTable(String snapshotName); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index b1fb2be13547..33ac47c73d69 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -1010,9 +1010,4 @@ public CompletableFuture flushMasterStore() { public CompletableFuture> getCachedFilesList(ServerName serverName) { return wrap(rawAdmin.getCachedFilesList(serverName)); } - - @Override - public CompletableFuture restoreBackupSystemTable(String snapshotName) { - return wrap(rawAdmin.restoreBackupSystemTable(snapshotName)); - } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 710c8c430386..2373e936726e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -2795,19 +2795,6 @@ void onError(Throwable error) { } } - private static class RestoreBackupSystemTableProcedureBiConsumer extends ProcedureBiConsumer { - - @Override - void onFinished() { - LOG.info("RestoreBackupSystemTableProcedure completed"); - } - - @Override - void onError(Throwable error) { - LOG.info("RestoreBackupSystemTableProcedure failed with {}", error.getMessage()); - } - } - private static class CreateTableProcedureBiConsumer extends TableProcedureBiConsumer { CreateTableProcedureBiConsumer(TableName tableName) { @@ -4650,16 +4637,4 @@ List> adminCall(controller, stub, request.build(), resp -> resp.getCachedFilesList())) .serverName(serverName).call(); } - - @Override - public CompletableFuture restoreBackupSystemTable(String snapshotName) { - MasterProtos.RestoreBackupSystemTableRequest request = - MasterProtos.RestoreBackupSystemTableRequest.newBuilder().setSnapshotName(snapshotName) - .build(); - return this. 
procedureCall(request, - MasterService.Interface::restoreBackupSystemTable, - MasterProtos.RestoreBackupSystemTableResponse::getProcId, - new RestoreBackupSystemTableProcedureBiConsumer()); - } } diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto index 6dd6ee723b02..768a1d7544ea 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto @@ -1280,9 +1280,6 @@ service MasterService { rpc FlushTable(FlushTableRequest) returns(FlushTableResponse); - rpc RestoreBackupSystemTable(RestoreBackupSystemTableRequest) - returns(RestoreBackupSystemTableResponse); - rpc rollAllWALWriters(RollAllWALWritersRequest) returns(RollAllWALWritersResponse); } @@ -1372,13 +1369,6 @@ message FixMetaRequest {} message FixMetaResponse {} -message RestoreBackupSystemTableRequest { - required string snapshot_name = 1; -} -message RestoreBackupSystemTableResponse { - optional uint64 proc_id = 1; -} - service HbckService { /** Update state of the table in meta only*/ rpc SetTableStateInMeta(SetTableStateInMetaRequest) diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto index 7e6c6c8e2fc7..554d7ec9c410 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto @@ -840,13 +840,6 @@ message ReloadQuotasProcedureStateData { optional ForeignExceptionMessage error = 2; } -enum RestoreBackupSystemTableState { - RESTORE_BACKUP_SYSTEM_TABLE_PREPARE = 1; - RESTORE_BACKUP_SYSTEM_TABLE_DISABLE = 2; - RESTORE_BACKUP_SYSTEM_TABLE_RESTORE = 3; - RESTORE_BACKUP_SYSTEM_TABLE_ENABLE = 4; -} - enum LogRollProcedureState { LOG_ROLL_ROLL_LOG_ON_RS = 1; LOG_ROLL_COLLECT_RS_HIGHEST_WAL_FILENUM = 2; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index e9e0f970ef8d..de911b54ee9a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -76,7 +76,6 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable; -import org.apache.hadoop.hbase.master.procedure.RestoreBackupSystemTableProcedure; import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; import org.apache.hadoop.hbase.master.replication.AbstractPeerNoLockProcedure; import org.apache.hadoop.hbase.mob.MobUtils; @@ -3668,24 +3667,6 @@ public FlushTableResponse flushTable(RpcController controller, FlushTableRequest } } - @Override - public MasterProtos.RestoreBackupSystemTableResponse restoreBackupSystemTable( - RpcController rpcController, - MasterProtos.RestoreBackupSystemTableRequest restoreBackupSystemTableRequest) - throws ServiceException { - try { - String snapshotName = restoreBackupSystemTableRequest.getSnapshotName(); - SnapshotDescription snapshot = server.snapshotManager.getCompletedSnapshots().stream() - .filter(s -> s.getName().equals(snapshotName)).findFirst() - .orElseThrow(() -> new ServiceException("Snapshot %s not 
found".formatted(snapshotName))); - long pid = server.getMasterProcedureExecutor() - .submitProcedure(new RestoreBackupSystemTableProcedure(snapshot)); - return MasterProtos.RestoreBackupSystemTableResponse.newBuilder().setProcId(pid).build(); - } catch (IOException e) { - throw new ServiceException(e); - } - } - @Override public RollAllWALWritersResponse rollAllWALWriters(RpcController rpcController, RollAllWALWritersRequest request) throws ServiceException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreBackupSystemTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreBackupSystemTableProcedure.java deleted file mode 100644 index af980db6e39e..000000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreBackupSystemTableProcedure.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.master.procedure; - -import java.io.IOException; -import java.util.List; -import org.apache.hadoop.hbase.HBaseIOException; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.TableDescriptor; -import org.apache.hadoop.hbase.client.TableState; -import org.apache.hadoop.hbase.procedure2.Procedure; -import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; -import org.apache.hadoop.hbase.procedure2.ProcedureYieldException; -import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RestoreBackupSystemTableState; -import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; - -@InterfaceAudience.Private -public class RestoreBackupSystemTableProcedure - extends AbstractStateMachineTableProcedure { - private static final Logger LOG = - LoggerFactory.getLogger(RestoreBackupSystemTableProcedure.class); - - private final SnapshotDescription snapshot; - private boolean enableOnRollback = false; - - // Necessary for the procedure framework. Do not remove. 
- public RestoreBackupSystemTableProcedure() { - this(null); - } - - public RestoreBackupSystemTableProcedure(SnapshotDescription snapshot) { - this.snapshot = snapshot; - } - - @Override - public TableName getTableName() { - return TableName.valueOf(snapshot.getTable()); - } - - @Override - public TableOperationType getTableOperationType() { - return TableOperationType.RESTORE_BACKUP_SYSTEM_TABLE; - } - - @Override - protected Flow executeFromState(MasterProcedureEnv env, RestoreBackupSystemTableState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { - LOG.info("{} execute state={}", this, state); - - try { - switch (state) { - case RESTORE_BACKUP_SYSTEM_TABLE_PREPARE: - prepare(env); - return moreState(RestoreBackupSystemTableState.RESTORE_BACKUP_SYSTEM_TABLE_DISABLE); - case RESTORE_BACKUP_SYSTEM_TABLE_DISABLE: - TableState tableState = - env.getMasterServices().getTableStateManager().getTableState(getTableName()); - if (tableState.isEnabled()) { - addChildProcedure(createDisableTableProcedure(env)); - } - return moreState(RestoreBackupSystemTableState.RESTORE_BACKUP_SYSTEM_TABLE_RESTORE); - case RESTORE_BACKUP_SYSTEM_TABLE_RESTORE: - addChildProcedure(createRestoreSnapshotProcedure(env)); - return moreState(RestoreBackupSystemTableState.RESTORE_BACKUP_SYSTEM_TABLE_ENABLE); - case RESTORE_BACKUP_SYSTEM_TABLE_ENABLE: - addChildProcedure(createEnableTableProcedure(env)); - return Flow.NO_MORE_STATE; - default: - throw new UnsupportedOperationException("unhandled state=" + state); - } - } catch (Exception e) { - setFailure("restore-backup-system-table", e); - LOG.warn("unexpected exception while execute {}. Mark procedure Failed.", this, e); - return Flow.NO_MORE_STATE; - } - } - - @Override - protected void rollbackState(MasterProcedureEnv env, RestoreBackupSystemTableState state) - throws IOException, InterruptedException { - switch (state) { - case RESTORE_BACKUP_SYSTEM_TABLE_DISABLE, RESTORE_BACKUP_SYSTEM_TABLE_PREPARE: - return; - case RESTORE_BACKUP_SYSTEM_TABLE_RESTORE, RESTORE_BACKUP_SYSTEM_TABLE_ENABLE: - if (enableOnRollback) { - addChildProcedure(createEnableTableProcedure(env)); - } - return; - default: - throw new UnsupportedOperationException("unhandled state=" + state); - } - } - - @Override - protected RestoreBackupSystemTableState getState(int stateId) { - return RestoreBackupSystemTableState.forNumber(stateId); - } - - @Override - protected int getStateId(RestoreBackupSystemTableState state) { - return state.getNumber(); - } - - @Override - protected RestoreBackupSystemTableState getInitialState() { - return RestoreBackupSystemTableState.RESTORE_BACKUP_SYSTEM_TABLE_PREPARE; - } - - private Flow moreState(RestoreBackupSystemTableState next) { - setNextState(next); - return Flow.HAS_MORE_STATE; - } - - private Procedure[] createDisableTableProcedure(MasterProcedureEnv env) - throws HBaseIOException { - DisableTableProcedure disableTableProcedure = - new DisableTableProcedure(env, getTableName(), true); - return new DisableTableProcedure[] { disableTableProcedure }; - } - - private Procedure[] createEnableTableProcedure(MasterProcedureEnv env) { - EnableTableProcedure enableTableProcedure = new EnableTableProcedure(env, getTableName()); - return new EnableTableProcedure[] { enableTableProcedure }; - } - - private Procedure[] createRestoreSnapshotProcedure(MasterProcedureEnv env) - throws IOException { - TableDescriptor desc = env.getMasterServices().getTableDescriptors().get(getTableName()); - RestoreSnapshotProcedure 
restoreSnapshotProcedure = - new RestoreSnapshotProcedure(env, desc, snapshot); - return new RestoreSnapshotProcedure[] { restoreSnapshotProcedure }; - } - - private void prepare(MasterProcedureEnv env) throws IOException { - List snapshots = - env.getMasterServices().getSnapshotManager().getCompletedSnapshots(); - boolean exists = snapshots.stream().anyMatch(s -> s.getName().equals(snapshot.getName())); - if (!exists) { - throw new SnapshotDoesNotExistException(ProtobufUtil.createSnapshotDesc(snapshot)); - } - - TableState tableState = - env.getMasterServices().getTableStateManager().getTableState(getTableName()); - if (tableState.isEnabled()) { - enableOnRollback = true; - } - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java index c5c7ec602eab..00b9776366d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java @@ -50,8 +50,7 @@ public enum TableOperationType { REGION_UNASSIGN, REGION_GC, MERGED_REGIONS_GC/* region operations */, - REGION_TRUNCATE, - RESTORE_BACKUP_SYSTEM_TABLE + REGION_TRUNCATE } /** Returns the name of the table the procedure is operating on */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java index 7be4c4b1810e..be66a28d275e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java @@ -54,7 +54,6 @@ static boolean requireTableExclusiveLock(TableProcedureInterface proc) { case DISABLE: case SNAPSHOT: case ENABLE: - case RESTORE_BACKUP_SYSTEM_TABLE: return true; case EDIT: // we allow concurrent edit on the ns family in meta table diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java index a59b2966b89d..4d592b49d0d3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java @@ -984,11 +984,6 @@ public List getCachedFilesList(ServerName serverName) throws IOException return admin.getCachedFilesList(serverName); } - @Override - public void restoreBackupSystemTable(String snapshotName) throws IOException { - admin.restoreBackupSystemTable(snapshotName); - } - @Override public boolean replicationPeerModificationSwitch(boolean on, boolean drainProcedures) throws IOException { diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java index 3d5a7e502e0a..a0d73dcca21c 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java @@ -1359,11 +1359,6 @@ public List getCachedFilesList(ServerName serverName) throws IOException throw new NotImplementedException("getCachedFilesList not supported in ThriftAdmin"); } - @Override - public void restoreBackupSystemTable(String snapshotName) throws IOException { - 
throw new NotImplementedException("restoreBackupSystemTable not supported in ThriftAdmin"); - } - @Override public boolean replicationPeerModificationSwitch(boolean on, boolean drainProcedures) throws IOException { From 0f11becf47612de1af5c39fcc26ccb626a0dc310 Mon Sep 17 00:00:00 2001 From: Ray Mattingly Date: Tue, 16 Sep 2025 16:30:41 -0400 Subject: [PATCH 10/92] HBASE-29448 Modern backup failures can cause backup system to lock up (#7308) Co-authored-by: Hernan Romer Co-authored-by: Hernan Gelaf-Romer Signed-off-by: Charles Connell Signed-off-by: Ray Mattingly --- .../hbase/backup/impl/BackupSystemTable.java | 4 +- .../master/TestRestoreBackupSystemTable.java | 84 +++++++++ .../org/apache/hadoop/hbase/client/Admin.java | 3 + .../hbase/client/AdminOverAsyncAdmin.java | 5 + .../hadoop/hbase/client/AsyncAdmin.java | 3 + .../hadoop/hbase/client/AsyncHBaseAdmin.java | 5 + .../hbase/client/RawAsyncHBaseAdmin.java | 25 +++ .../main/protobuf/server/master/Master.proto | 10 ++ .../server/master/MasterProcedure.proto | 7 + .../hbase/master/MasterRpcServices.java | 19 ++ .../RestoreBackupSystemTableProcedure.java | 169 ++++++++++++++++++ .../procedure/TableProcedureInterface.java | 3 +- .../hbase/master/procedure/TableQueue.java | 1 + .../hbase/rsgroup/VerifyingRSGroupAdmin.java | 5 + .../hbase/thrift2/client/ThriftAdmin.java | 5 + 15 files changed, 344 insertions(+), 4 deletions(-) create mode 100644 hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestRestoreBackupSystemTable.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreBackupSystemTableProcedure.java diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java index 61a74450e8d6..f2ddcf5e7573 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java @@ -1403,9 +1403,7 @@ public static void restoreFromSnapshot(Connection conn) throws IOException { try (Admin admin = conn.getAdmin()) { String snapshotName = BackupSystemTable.getSnapshotName(conf); if (snapshotExists(admin, snapshotName)) { - admin.disableTable(BackupSystemTable.getTableName(conf)); - admin.restoreSnapshot(snapshotName); - admin.enableTable(BackupSystemTable.getTableName(conf)); + admin.restoreBackupSystemTable(snapshotName); LOG.debug("Done restoring backup system table"); } else { // Snapshot does not exists, i.e completeBackup failed after diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestRestoreBackupSystemTable.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestRestoreBackupSystemTable.java new file mode 100644 index 000000000000..31ded67b4774 --- /dev/null +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestRestoreBackupSystemTable.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup.master; + +import static org.junit.Assert.assertEquals; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ MasterTests.class, MediumTests.class }) +public class TestRestoreBackupSystemTable { + private static final String BACKUP_ROOT = "root"; + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + + @BeforeClass + public static void setUp() throws Exception { + UTIL.startMiniCluster(); + } + + @Test + public void itRestoresFromSnapshot() throws Exception { + BackupSystemTable table = new BackupSystemTable(UTIL.getConnection()); + Set tables = new HashSet<>(); + + tables.add(TableName.valueOf("test1")); + tables.add(TableName.valueOf("test2")); + tables.add(TableName.valueOf("test3")); + + Map rsTimestampMap = new HashMap<>(); + rsTimestampMap.put("rs1:100", 100L); + rsTimestampMap.put("rs2:100", 101L); + rsTimestampMap.put("rs3:100", 103L); + + table.writeRegionServerLogTimestamp(tables, rsTimestampMap, BACKUP_ROOT); + BackupSystemTable.snapshot(UTIL.getConnection()); + + Admin admin = UTIL.getAdmin(); + TableName backupSystemTn = BackupSystemTable.getTableName(UTIL.getConfiguration()); + admin.disableTable(backupSystemTn); + admin.truncateTable(backupSystemTn, true); + + BackupSystemTable.restoreFromSnapshot(UTIL.getConnection()); + Map> results = table.readLogTimestampMap(BACKUP_ROOT); + + assertEquals(results.size(), tables.size()); + + for (TableName tableName : tables) { + Map resultMap = results.get(tableName); + assertEquals(resultMap, rsTimestampMap); + } + } + + @AfterClass + public static void tearDown() throws Exception { + UTIL.shutdownMiniCluster(); + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 43a004a471cc..1c08ec3b26fd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -2661,4 +2661,7 @@ List getLogEntries(Set serverNames, String logType, Server * Get the list of cached files */ List getCachedFilesList(ServerName serverName) throws IOException; + + @InterfaceAudience.Private + void restoreBackupSystemTable(String snapshotName) throws IOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java index c866f434e63a..e6bf6c3d28e0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java 
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java @@ -1141,4 +1141,9 @@ public void flushMasterStore() throws IOException { public List getCachedFilesList(ServerName serverName) throws IOException { return get(admin.getCachedFilesList(serverName)); } + + @Override + public void restoreBackupSystemTable(String snapshotName) throws IOException { + get(admin.restoreBackupSystemTable(snapshotName)); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index d808aecc815c..ec0556f20ac1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -1871,4 +1871,7 @@ CompletableFuture> getLogEntries(Set serverNames, Str * Get the list of cached files */ CompletableFuture> getCachedFilesList(ServerName serverName); + + @InterfaceAudience.Private + CompletableFuture restoreBackupSystemTable(String snapshotName); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index 33ac47c73d69..b1fb2be13547 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -1010,4 +1010,9 @@ public CompletableFuture flushMasterStore() { public CompletableFuture> getCachedFilesList(ServerName serverName) { return wrap(rawAdmin.getCachedFilesList(serverName)); } + + @Override + public CompletableFuture restoreBackupSystemTable(String snapshotName) { + return wrap(rawAdmin.restoreBackupSystemTable(snapshotName)); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 2373e936726e..710c8c430386 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -2795,6 +2795,19 @@ void onError(Throwable error) { } } + private static class RestoreBackupSystemTableProcedureBiConsumer extends ProcedureBiConsumer { + + @Override + void onFinished() { + LOG.info("RestoreBackupSystemTableProcedure completed"); + } + + @Override + void onError(Throwable error) { + LOG.info("RestoreBackupSystemTableProcedure failed with {}", error.getMessage()); + } + } + private static class CreateTableProcedureBiConsumer extends TableProcedureBiConsumer { CreateTableProcedureBiConsumer(TableName tableName) { @@ -4637,4 +4650,16 @@ List> adminCall(controller, stub, request.build(), resp -> resp.getCachedFilesList())) .serverName(serverName).call(); } + + @Override + public CompletableFuture restoreBackupSystemTable(String snapshotName) { + MasterProtos.RestoreBackupSystemTableRequest request = + MasterProtos.RestoreBackupSystemTableRequest.newBuilder().setSnapshotName(snapshotName) + .build(); + return this. 
procedureCall(request, + MasterService.Interface::restoreBackupSystemTable, + MasterProtos.RestoreBackupSystemTableResponse::getProcId, + new RestoreBackupSystemTableProcedureBiConsumer()); + } } diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto index 768a1d7544ea..6dd6ee723b02 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto @@ -1280,6 +1280,9 @@ service MasterService { rpc FlushTable(FlushTableRequest) returns(FlushTableResponse); + rpc RestoreBackupSystemTable(RestoreBackupSystemTableRequest) + returns(RestoreBackupSystemTableResponse); + rpc rollAllWALWriters(RollAllWALWritersRequest) returns(RollAllWALWritersResponse); } @@ -1369,6 +1372,13 @@ message FixMetaRequest {} message FixMetaResponse {} +message RestoreBackupSystemTableRequest { + required string snapshot_name = 1; +} +message RestoreBackupSystemTableResponse { + optional uint64 proc_id = 1; +} + service HbckService { /** Update state of the table in meta only*/ rpc SetTableStateInMeta(SetTableStateInMetaRequest) diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto index 554d7ec9c410..7e6c6c8e2fc7 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto @@ -840,6 +840,13 @@ message ReloadQuotasProcedureStateData { optional ForeignExceptionMessage error = 2; } +enum RestoreBackupSystemTableState { + RESTORE_BACKUP_SYSTEM_TABLE_PREPARE = 1; + RESTORE_BACKUP_SYSTEM_TABLE_DISABLE = 2; + RESTORE_BACKUP_SYSTEM_TABLE_RESTORE = 3; + RESTORE_BACKUP_SYSTEM_TABLE_ENABLE = 4; +} + enum LogRollProcedureState { LOG_ROLL_ROLL_LOG_ON_RS = 1; LOG_ROLL_COLLECT_RS_HIGHEST_WAL_FILENUM = 2; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index de911b54ee9a..e9e0f970ef8d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -76,6 +76,7 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable; +import org.apache.hadoop.hbase.master.procedure.RestoreBackupSystemTableProcedure; import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; import org.apache.hadoop.hbase.master.replication.AbstractPeerNoLockProcedure; import org.apache.hadoop.hbase.mob.MobUtils; @@ -3667,6 +3668,24 @@ public FlushTableResponse flushTable(RpcController controller, FlushTableRequest } } + @Override + public MasterProtos.RestoreBackupSystemTableResponse restoreBackupSystemTable( + RpcController rpcController, + MasterProtos.RestoreBackupSystemTableRequest restoreBackupSystemTableRequest) + throws ServiceException { + try { + String snapshotName = restoreBackupSystemTableRequest.getSnapshotName(); + SnapshotDescription snapshot = server.snapshotManager.getCompletedSnapshots().stream() + .filter(s -> s.getName().equals(snapshotName)).findFirst() + .orElseThrow(() -> new ServiceException("Snapshot %s not 
found".formatted(snapshotName))); + long pid = server.getMasterProcedureExecutor() + .submitProcedure(new RestoreBackupSystemTableProcedure(snapshot)); + return MasterProtos.RestoreBackupSystemTableResponse.newBuilder().setProcId(pid).build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + @Override public RollAllWALWritersResponse rollAllWALWriters(RpcController rpcController, RollAllWALWritersRequest request) throws ServiceException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreBackupSystemTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreBackupSystemTableProcedure.java new file mode 100644 index 000000000000..af980db6e39e --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreBackupSystemTableProcedure.java @@ -0,0 +1,169 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.util.List; +import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; +import org.apache.hadoop.hbase.procedure2.ProcedureYieldException; +import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RestoreBackupSystemTableState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; + +@InterfaceAudience.Private +public class RestoreBackupSystemTableProcedure + extends AbstractStateMachineTableProcedure { + private static final Logger LOG = + LoggerFactory.getLogger(RestoreBackupSystemTableProcedure.class); + + private final SnapshotDescription snapshot; + private boolean enableOnRollback = false; + + // Necessary for the procedure framework. Do not remove. 
+ public RestoreBackupSystemTableProcedure() { + this(null); + } + + public RestoreBackupSystemTableProcedure(SnapshotDescription snapshot) { + this.snapshot = snapshot; + } + + @Override + public TableName getTableName() { + return TableName.valueOf(snapshot.getTable()); + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.RESTORE_BACKUP_SYSTEM_TABLE; + } + + @Override + protected Flow executeFromState(MasterProcedureEnv env, RestoreBackupSystemTableState state) + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + LOG.info("{} execute state={}", this, state); + + try { + switch (state) { + case RESTORE_BACKUP_SYSTEM_TABLE_PREPARE: + prepare(env); + return moreState(RestoreBackupSystemTableState.RESTORE_BACKUP_SYSTEM_TABLE_DISABLE); + case RESTORE_BACKUP_SYSTEM_TABLE_DISABLE: + TableState tableState = + env.getMasterServices().getTableStateManager().getTableState(getTableName()); + if (tableState.isEnabled()) { + addChildProcedure(createDisableTableProcedure(env)); + } + return moreState(RestoreBackupSystemTableState.RESTORE_BACKUP_SYSTEM_TABLE_RESTORE); + case RESTORE_BACKUP_SYSTEM_TABLE_RESTORE: + addChildProcedure(createRestoreSnapshotProcedure(env)); + return moreState(RestoreBackupSystemTableState.RESTORE_BACKUP_SYSTEM_TABLE_ENABLE); + case RESTORE_BACKUP_SYSTEM_TABLE_ENABLE: + addChildProcedure(createEnableTableProcedure(env)); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (Exception e) { + setFailure("restore-backup-system-table", e); + LOG.warn("unexpected exception while execute {}. Mark procedure Failed.", this, e); + return Flow.NO_MORE_STATE; + } + } + + @Override + protected void rollbackState(MasterProcedureEnv env, RestoreBackupSystemTableState state) + throws IOException, InterruptedException { + switch (state) { + case RESTORE_BACKUP_SYSTEM_TABLE_DISABLE, RESTORE_BACKUP_SYSTEM_TABLE_PREPARE: + return; + case RESTORE_BACKUP_SYSTEM_TABLE_RESTORE, RESTORE_BACKUP_SYSTEM_TABLE_ENABLE: + if (enableOnRollback) { + addChildProcedure(createEnableTableProcedure(env)); + } + return; + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } + + @Override + protected RestoreBackupSystemTableState getState(int stateId) { + return RestoreBackupSystemTableState.forNumber(stateId); + } + + @Override + protected int getStateId(RestoreBackupSystemTableState state) { + return state.getNumber(); + } + + @Override + protected RestoreBackupSystemTableState getInitialState() { + return RestoreBackupSystemTableState.RESTORE_BACKUP_SYSTEM_TABLE_PREPARE; + } + + private Flow moreState(RestoreBackupSystemTableState next) { + setNextState(next); + return Flow.HAS_MORE_STATE; + } + + private Procedure[] createDisableTableProcedure(MasterProcedureEnv env) + throws HBaseIOException { + DisableTableProcedure disableTableProcedure = + new DisableTableProcedure(env, getTableName(), true); + return new DisableTableProcedure[] { disableTableProcedure }; + } + + private Procedure[] createEnableTableProcedure(MasterProcedureEnv env) { + EnableTableProcedure enableTableProcedure = new EnableTableProcedure(env, getTableName()); + return new EnableTableProcedure[] { enableTableProcedure }; + } + + private Procedure[] createRestoreSnapshotProcedure(MasterProcedureEnv env) + throws IOException { + TableDescriptor desc = env.getMasterServices().getTableDescriptors().get(getTableName()); + RestoreSnapshotProcedure 
restoreSnapshotProcedure = + new RestoreSnapshotProcedure(env, desc, snapshot); + return new RestoreSnapshotProcedure[] { restoreSnapshotProcedure }; + } + + private void prepare(MasterProcedureEnv env) throws IOException { + List snapshots = + env.getMasterServices().getSnapshotManager().getCompletedSnapshots(); + boolean exists = snapshots.stream().anyMatch(s -> s.getName().equals(snapshot.getName())); + if (!exists) { + throw new SnapshotDoesNotExistException(ProtobufUtil.createSnapshotDesc(snapshot)); + } + + TableState tableState = + env.getMasterServices().getTableStateManager().getTableState(getTableName()); + if (tableState.isEnabled()) { + enableOnRollback = true; + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java index 00b9776366d5..c5c7ec602eab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java @@ -50,7 +50,8 @@ public enum TableOperationType { REGION_UNASSIGN, REGION_GC, MERGED_REGIONS_GC/* region operations */, - REGION_TRUNCATE + REGION_TRUNCATE, + RESTORE_BACKUP_SYSTEM_TABLE } /** Returns the name of the table the procedure is operating on */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java index be66a28d275e..7be4c4b1810e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java @@ -54,6 +54,7 @@ static boolean requireTableExclusiveLock(TableProcedureInterface proc) { case DISABLE: case SNAPSHOT: case ENABLE: + case RESTORE_BACKUP_SYSTEM_TABLE: return true; case EDIT: // we allow concurrent edit on the ns family in meta table diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java index 4d592b49d0d3..a59b2966b89d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java @@ -984,6 +984,11 @@ public List getCachedFilesList(ServerName serverName) throws IOException return admin.getCachedFilesList(serverName); } + @Override + public void restoreBackupSystemTable(String snapshotName) throws IOException { + admin.restoreBackupSystemTable(snapshotName); + } + @Override public boolean replicationPeerModificationSwitch(boolean on, boolean drainProcedures) throws IOException { diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java index a0d73dcca21c..3d5a7e502e0a 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java @@ -1359,6 +1359,11 @@ public List getCachedFilesList(ServerName serverName) throws IOException throw new NotImplementedException("getCachedFilesList not supported in ThriftAdmin"); } + @Override + public void restoreBackupSystemTable(String snapshotName) throws IOException { + 
throw new NotImplementedException("restoreBackupSystemTable not supported in ThriftAdmin"); + } + @Override public boolean replicationPeerModificationSwitch(boolean on, boolean drainProcedures) throws IOException { From 280e8e891d55de8296ea082bb48c52d8f9a32b6e Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Thu, 18 Sep 2025 07:36:11 +0200 Subject: [PATCH 11/92] HBASE-29548 Update ApacheDS to 2.0.0.AM27 and ldap-api to 2.1.7 (#7305) Signed-off-by: Nihal Jain Signed-off-by: Duo Zhang --- hbase-http/pom.xml | 4 ++ .../hadoop/hbase/http/LdapServerTestBase.java | 61 +++++++++++++++---- .../hadoop/hbase/http/TestLdapAdminACL.java | 23 +++---- .../hadoop/hbase/http/TestLdapHttpServer.java | 20 +++--- pom.xml | 4 +- 5 files changed, 74 insertions(+), 38 deletions(-) diff --git a/hbase-http/pom.xml b/hbase-http/pom.xml index c4063428b942..d64e6cd7fa84 100644 --- a/hbase-http/pom.xml +++ b/hbase-http/pom.xml @@ -184,6 +184,10 @@ org.bouncycastle bcprov-jdk15on + + org.bouncycastle + bcpkix-jdk15on + diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/LdapServerTestBase.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/LdapServerTestBase.java index bbf35b8585f6..8856aaa0e205 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/LdapServerTestBase.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/LdapServerTestBase.java @@ -21,34 +21,73 @@ import java.net.HttpURLConnection; import java.net.URL; import org.apache.commons.codec.binary.Base64; -import org.apache.directory.server.core.integ.CreateLdapServerRule; +import org.apache.directory.ldap.client.template.LdapConnectionTemplate; +import org.apache.directory.server.core.api.DirectoryService; +import org.apache.directory.server.core.integ.ApacheDSTestExtension; +import org.apache.directory.server.ldap.LdapServer; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.http.resource.JerseyResource; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.extension.ExtendWith; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Base class for setting up and testing an HTTP server with LDAP authentication. */ +@ExtendWith(ApacheDSTestExtension.class) public class LdapServerTestBase extends HttpServerFunctionalTest { private static final Logger LOG = LoggerFactory.getLogger(LdapServerTestBase.class); - @ClassRule - public static CreateLdapServerRule ldapRule = new CreateLdapServerRule(); - protected static HttpServer server; protected static URL baseUrl; + /** + * The following fields are set by ApacheDSTestExtension. These are normally inherited from + * AbstractLdapTestUnit, but this class already has a parent. We only use ldapServer, but + * declaring that one alone does not work. 
+ */ + + /** The class DirectoryService instance */ + public static DirectoryService classDirectoryService; + + /** The test DirectoryService instance */ + public static DirectoryService methodDirectoryService; + + /** The current DirectoryService instance */ + public static DirectoryService directoryService; + + /** The class LdapServer instance */ + public static LdapServer classLdapServer; + + /** The test LdapServer instance */ + public static LdapServer methodLdapServer; + + /** The current LdapServer instance */ + public static LdapServer ldapServer; + + /** The Ldap connection template */ + public static LdapConnectionTemplate ldapConnectionTemplate; + + /** The current revision */ + public static long revision = 0L; + + /** + * End of fields required by ApacheDSTestExtension + */ + private static final String AUTH_TYPE = "Basic "; + protected static LdapServer getLdapServer() { + return classLdapServer; + } + /** * Sets up the HTTP server with LDAP authentication before any tests are run. * @throws Exception if an error occurs during server setup */ - @BeforeClass + @BeforeAll public static void setupServer() throws Exception { Configuration conf = new Configuration(); setLdapConfigurations(conf); @@ -66,7 +105,7 @@ public static void setupServer() throws Exception { * Stops the HTTP server after all tests are completed. * @throws Exception if an error occurs during server shutdown */ - @AfterClass + @AfterAll public static void stopServer() throws Exception { try { if (null != server) { @@ -90,8 +129,8 @@ protected static void setLdapConfigurations(Configuration conf) { conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, "org.apache.hadoop.hbase.http.lib.AuthenticationFilterInitializer"); conf.set("hadoop.http.authentication.type", "ldap"); - conf.set("hadoop.http.authentication.ldap.providerurl", String.format("ldap://%s:%s", - LdapConstants.LDAP_SERVER_ADDR, ldapRule.getLdapServer().getPort())); + conf.set("hadoop.http.authentication.ldap.providerurl", + String.format("ldap://%s:%s", LdapConstants.LDAP_SERVER_ADDR, getLdapServer().getPort())); conf.set("hadoop.http.authentication.ldap.enablestarttls", "false"); conf.set("hadoop.http.authentication.ldap.basedn", LdapConstants.LDAP_BASE_DN); } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapAdminACL.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapAdminACL.java index 459865509630..900c1fef07b1 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapAdminACL.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapAdminACL.java @@ -17,10 +17,11 @@ */ package org.apache.hadoop.hbase.http; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; import java.net.HttpURLConnection; +import java.util.concurrent.TimeUnit; import org.apache.directory.server.annotations.CreateLdapServer; import org.apache.directory.server.annotations.CreateTransport; import org.apache.directory.server.core.annotations.ApplyLdifs; @@ -29,21 +30,19 @@ import org.apache.directory.server.core.annotations.CreatePartition; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.http.resource.JerseyResource; -import org.apache.hadoop.hbase.testclassification.MiscTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.BeforeClass; -import 
org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Test class for admin ACLs with LDAP authentication on the HttpServer. */ -@Category({ MiscTests.class, SmallTests.class }) +@Tag("org.apache.hadoop.hbase.testclassification.MiscTests") +@Tag("org.apache.hadoop.hbase.testclassification.SmallTests") @CreateLdapServer( transports = { @CreateTransport(protocol = "LDAP", address = LdapConstants.LDAP_SERVER_ADDR), }) @CreateDS(name = "TestLdapAdminACL", allowAnonAccess = true, @@ -55,18 +54,16 @@ "dn: uid=jdoe," + LdapConstants.LDAP_BASE_DN, "cn: John Doe", "sn: Doe", "objectClass: inetOrgPerson", "uid: jdoe", "userPassword: secure123" }) +@Timeout(value = 1, unit = TimeUnit.MINUTES) public class TestLdapAdminACL extends LdapServerTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLdapAdminACL.class); private static final Logger LOG = LoggerFactory.getLogger(TestLdapAdminACL.class); private static final String ADMIN_CREDENTIALS = "bjones:p@ssw0rd"; private static final String NON_ADMIN_CREDENTIALS = "jdoe:secure123"; private static final String WRONG_CREDENTIALS = "bjones:password"; - @BeforeClass + @BeforeAll public static void setupServer() throws Exception { Configuration conf = new Configuration(); setLdapConfigurationWithACLs(conf); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapHttpServer.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapHttpServer.java index bff4dc9d9591..66b3b2924eed 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapHttpServer.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapHttpServer.java @@ -17,27 +17,26 @@ */ package org.apache.hadoop.hbase.http; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; import java.net.HttpURLConnection; +import java.util.concurrent.TimeUnit; import org.apache.directory.server.annotations.CreateLdapServer; import org.apache.directory.server.annotations.CreateTransport; import org.apache.directory.server.core.annotations.ApplyLdifs; import org.apache.directory.server.core.annotations.ContextEntry; import org.apache.directory.server.core.annotations.CreateDS; import org.apache.directory.server.core.annotations.CreatePartition; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.testclassification.MiscTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; /** * Test class for LDAP authentication on the HttpServer. 
*/ -@Category({ MiscTests.class, SmallTests.class }) +@Tag("org.apache.hadoop.hbase.testclassification.MiscTests") +@Tag("org.apache.hadoop.hbase.testclassification.SmallTests") @CreateLdapServer( transports = { @CreateTransport(protocol = "LDAP", address = LdapConstants.LDAP_SERVER_ADDR), }) @CreateDS(name = "TestLdapHttpServer", allowAnonAccess = true, @@ -46,12 +45,9 @@ + "dc: example\n" + "objectClass: top\n" + "objectClass: domain\n\n")) }) @ApplyLdifs({ "dn: uid=bjones," + LdapConstants.LDAP_BASE_DN, "cn: Bob Jones", "sn: Jones", "objectClass: inetOrgPerson", "uid: bjones", "userPassword: p@ssw0rd" }) +@Timeout(value = 1, unit = TimeUnit.MINUTES) public class TestLdapHttpServer extends LdapServerTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLdapHttpServer.class); - private static final String BJONES_CREDENTIALS = "bjones:p@ssw0rd"; private static final String WRONG_CREDENTIALS = "bjones:password"; diff --git a/pom.xml b/pom.xml index 6e8fab5e8b29..75370482fd8e 100644 --- a/pom.xml +++ b/pom.xml @@ -1069,8 +1069,8 @@ none - 2.0.0.AM26 - 2.0.0 + 2.0.0.AM27 + 2.1.7 ${project.build.directory}/META-INF/resources/webjars 5.3.3 From e1c17e5e8c7d11df12b9abf3cf6e224c5c4da8f8 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Thu, 18 Sep 2025 08:40:10 +0200 Subject: [PATCH 12/92] HBASE-29602 Add -Djava.security.manager=allow to JDK18+ surefire JVM flags (#7315) Signed-off-by: Duo Zhang Signed-off-by: Balazs Meszaros --- pom.xml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/pom.xml b/pom.xml index 75370482fd8e..2e6d12507122 100644 --- a/pom.xml +++ b/pom.xml @@ -1052,6 +1052,13 @@ --add-opens java.base/sun.security.x509=ALL-UNNAMED --add-opens java.base/sun.security.util=ALL-UNNAMED --add-opens java.base/java.net=ALL-UNNAMED + + -Djava.security.manager=allow ${hbase-surefire.argLine} @{jacocoArgLine} 1.5.1 @@ -3572,6 +3579,18 @@ + + build-with-jdk18 + + [18,) + + + ${hbase-surefire.jdk17.flags} + ${hbase-surefire.jdk18.flags} + ${hbase-surefire.argLine} + @{jacocoArgLine} + + jenkins.patch From 620f7a3a812cca4c35ced04885611a012153ee67 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Thu, 18 Sep 2025 08:55:15 +0200 Subject: [PATCH 13/92] HBASE-29601 Handle Junit 5 tests in TestCheckTestClasses (#7311) Signed-off-by: Duo Zhang --- .../apache/hadoop/hbase/ClassTestFinder.java | 19 +++++++++++++++++-- .../hadoop/hbase/TestCheckTestClasses.java | 8 ++++++-- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassTestFinder.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassTestFinder.java index 1bc648aeb0b5..dc51187e3cf8 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassTestFinder.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassTestFinder.java @@ -19,9 +19,11 @@ import java.lang.reflect.Method; import java.lang.reflect.Modifier; +import java.util.ArrayList; +import java.util.List; import java.util.regex.Pattern; -import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; import org.junit.runners.Suite; /** @@ -46,6 +48,16 @@ public static Class[] getCategoryAnnotations(Class c) { return new Class[0]; } + public static String[] getTagAnnotations(Class c) { + // TODO handle optional Tags annotation + Tag[] tags = c.getAnnotationsByType(Tag.class); + List values = new ArrayList<>(); + for (Tag tag : tags) { + values.add(tag.value()); + } + return 
values.toArray(new String[values.size()]); + } + /** Filters both test classes and anything in the hadoop-compat modules */ public static class TestFileNameFilter implements FileNameFilter, ResourcePathFilter { private static final Pattern hadoopCompactRe = Pattern.compile("hbase-hadoop\\d?-compat"); @@ -92,7 +104,10 @@ private boolean isTestClass(Class c) { } for (Method met : c.getMethods()) { - if (met.getAnnotation(Test.class) != null) { + if ( + met.getAnnotation(org.junit.Test.class) != null + || met.getAnnotation(org.junit.jupiter.api.Test.class) != null + ) { return true; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCheckTestClasses.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCheckTestClasses.java index 3d3ca12bd82d..c2b007280b4f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCheckTestClasses.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCheckTestClasses.java @@ -45,11 +45,15 @@ public void checkClasses() throws Exception { List> badClasses = new java.util.ArrayList<>(); ClassTestFinder classFinder = new ClassTestFinder(); for (Class c : classFinder.findClasses(false)) { - if (ClassTestFinder.getCategoryAnnotations(c).length == 0) { + if ( + ClassTestFinder.getCategoryAnnotations(c).length == 0 + && ClassTestFinder.getTagAnnotations(c).length == 0 + ) { badClasses.add(c); } } - assertTrue("There are " + badClasses.size() + " test classes without category: " + badClasses, + assertTrue( + "There are " + badClasses.size() + " test classes without category and tag: " + badClasses, badClasses.isEmpty()); } } From 40b1ffc51002f3d43c7ffc0556fc8bc650aea0ce Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Thu, 18 Sep 2025 10:27:09 +0200 Subject: [PATCH 14/92] HBASE-29592 Add hadoop 3.4.2 in client integration tests (#7306) Signed-off-by: Nihal Jain Signed-off-by: Duo Zhang --- dev-support/Jenkinsfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 4bc418017c0a..c550272cc3f8 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -59,8 +59,8 @@ pipeline { ASF_NIGHTLIES_BASE_ORI = "${ASF_NIGHTLIES}/hbase/${JOB_NAME}/${BUILD_NUMBER}" ASF_NIGHTLIES_BASE = "${ASF_NIGHTLIES_BASE_ORI.replaceAll(' ', '%20')}" // These are dependent on the branch - HADOOP3_VERSIONS = "3.3.5,3.3.6,3.4.0,3.4.1" - HADOOP3_DEFAULT_VERSION = "3.4.1" + HADOOP3_VERSIONS = "3.3.5,3.3.6,3.4.0,3.4.1,3.4.2" + HADOOP3_DEFAULT_VERSION = "3.4.2" } parameters { booleanParam(name: 'USE_YETUS_PRERELEASE', defaultValue: false, description: '''Check to use the current HEAD of apache/yetus rather than our configured release. 
From 8799c13cd9713660a13b4d34ac9e37a0a59c4191 Mon Sep 17 00:00:00 2001 From: Sreenivasulu Date: Thu, 18 Sep 2025 14:04:33 +0530 Subject: [PATCH 15/92] HBASE-29587 Set Test category for TestSnapshotProcedureEarlyExpiration (#7292) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Dávid Paksy --- .../procedure/TestSnapshotProcedureEarlyExpiration.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureEarlyExpiration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureEarlyExpiration.java index 0870f16face1..3f1c37ea8f63 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureEarlyExpiration.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureEarlyExpiration.java @@ -34,16 +34,20 @@ import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.RegionSplitter; import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; +import org.junit.experimental.categories.Category; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SnapshotState; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; +@Category({ MasterTests.class, MediumTests.class }) public class TestSnapshotProcedureEarlyExpiration extends TestSnapshotProcedure { @ClassRule public static final HBaseClassTestRule CLASS_RULE = From 8adb7bdb261690553c01045f2c264db3657c9c84 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Thu, 18 Sep 2025 10:38:21 +0200 Subject: [PATCH 16/92] HBASE-29610 Add and use String constants for Junit 5 @Tag annotations (#7322) Signed-off-by: Duo Zhang --- .../apache/hadoop/hbase/testclassification/ClientTests.java | 1 + .../hadoop/hbase/testclassification/CoprocessorTests.java | 1 + .../apache/hadoop/hbase/testclassification/FilterTests.java | 1 + .../apache/hadoop/hbase/testclassification/FlakeyTests.java | 1 + .../org/apache/hadoop/hbase/testclassification/IOTests.java | 1 + .../hadoop/hbase/testclassification/IntegrationTests.java | 1 + .../apache/hadoop/hbase/testclassification/LargeTests.java | 1 + .../hadoop/hbase/testclassification/MapReduceTests.java | 1 + .../apache/hadoop/hbase/testclassification/MasterTests.java | 1 + .../apache/hadoop/hbase/testclassification/MediumTests.java | 1 + .../hadoop/hbase/testclassification/MetricsTests.java | 1 + .../apache/hadoop/hbase/testclassification/MiscTests.java | 1 + .../apache/hadoop/hbase/testclassification/RPCTests.java | 1 + .../hadoop/hbase/testclassification/RSGroupTests.java | 1 + .../hadoop/hbase/testclassification/RegionServerTests.java | 1 + .../hadoop/hbase/testclassification/ReplicationTests.java | 1 + .../apache/hadoop/hbase/testclassification/RestTests.java | 1 + .../hadoop/hbase/testclassification/SecurityTests.java | 1 + .../apache/hadoop/hbase/testclassification/SmallTests.java | 1 + .../hbase/testclassification/VerySlowMapReduceTests.java | 2 ++ .../hbase/testclassification/VerySlowRegionServerTests.java | 
2 ++ .../org/apache/hadoop/hbase/testclassification/ZKTests.java | 1 + .../java/org/apache/hadoop/hbase/http/TestLdapAdminACL.java | 6 ++++-- .../org/apache/hadoop/hbase/http/TestLdapHttpServer.java | 6 ++++-- 24 files changed, 32 insertions(+), 4 deletions(-) diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java index d9bae8490637..b0e259e1f9e2 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java @@ -36,4 +36,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface ClientTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.ClientTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java index a168adec08af..2dc143e944a0 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java @@ -35,4 +35,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface CoprocessorTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.CoprocessorTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java index 84f346baaea2..1b45b583c182 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java @@ -35,4 +35,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface FilterTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.FilterTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java index c23bfa298b36..0cb861979e08 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java @@ -35,4 +35,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface FlakeyTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.FlakeyTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java index 8eee0e6ae4b9..be55b3829e52 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java @@ -36,4 +36,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface IOTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.IOTests"; } diff --git 
a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java index 4e555b73fedb..0003cd1db511 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java @@ -34,4 +34,5 @@ * @see LargeTests */ public interface IntegrationTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.IntegrationTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java index b47e5bab9a46..3a24764e706a 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java @@ -33,4 +33,5 @@ * @see IntegrationTests */ public interface LargeTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.LargeTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java index 0e68ab3c0340..ac5b05e30704 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java @@ -35,4 +35,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface MapReduceTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.MapReduceTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java index 5dcf51b27e59..0ad843493ec1 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java @@ -35,4 +35,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface MasterTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.MasterTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java index d1f836ec0049..548f655c774e 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java @@ -32,4 +32,5 @@ * @see IntegrationTests */ public interface MediumTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.MediumTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java index 27beaacf963e..c6985d6b95cc 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java +++ 
b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java @@ -21,4 +21,5 @@ * Tag a test that covers our metrics handling. */ public interface MetricsTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.MetricsTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java index 695042e801bf..b7b7ad4c3f66 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java @@ -35,4 +35,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface MiscTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.MiscTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java index 929bd6487edf..71a24d5d5dd6 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java @@ -35,4 +35,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface RPCTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.RPCTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RSGroupTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RSGroupTests.java index 050a70762928..4d1ab88a9cf6 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RSGroupTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RSGroupTests.java @@ -21,4 +21,5 @@ * Tag the tests related to rs group feature. 
*/ public interface RSGroupTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.RSGroupTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java index 3439afa76eba..d79691d6fac6 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java @@ -35,4 +35,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface RegionServerTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.RegionServerTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java index df606c960c25..74c65a57982d 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java @@ -35,4 +35,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface ReplicationTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.ReplicationTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java index a648b4c39e03..9a73fde57e2c 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java @@ -35,4 +35,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface RestTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.RestTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java index a4e55ad3aba0..939c25c05ff4 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java @@ -35,4 +35,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface SecurityTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.SecurityTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java index 64d2bce381b6..54e16d7ad1ae 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java @@ -30,4 +30,5 @@ * @see IntegrationTests */ public interface SmallTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.SmallTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java 
b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java index d1f433b9719d..dac933ec78e4 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java @@ -36,4 +36,6 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface VerySlowMapReduceTests { + public static final String TAG = + "org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java index f556979e5b6a..1583de103e38 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java @@ -36,4 +36,6 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface VerySlowRegionServerTests { + public static final String TAG = + "org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java index 9fa0579ed47e..a318b388ef72 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java @@ -22,4 +22,5 @@ * {@code RecoverableZooKeeper}, not for tests which depend on ZooKeeper. */ public interface ZKTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.ZKTests"; } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapAdminACL.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapAdminACL.java index 900c1fef07b1..c4fd208fa7ce 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapAdminACL.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapAdminACL.java @@ -31,6 +31,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.hbase.http.resource.JerseyResource; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @@ -41,8 +43,8 @@ /** * Test class for admin ACLs with LDAP authentication on the HttpServer. 
*/ -@Tag("org.apache.hadoop.hbase.testclassification.MiscTests") -@Tag("org.apache.hadoop.hbase.testclassification.SmallTests") +@Tag(MiscTests.TAG) +@Tag(SmallTests.TAG) @CreateLdapServer( transports = { @CreateTransport(protocol = "LDAP", address = LdapConstants.LDAP_SERVER_ADDR), }) @CreateDS(name = "TestLdapAdminACL", allowAnonAccess = true, diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapHttpServer.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapHttpServer.java index 66b3b2924eed..9faa8dc49fb6 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapHttpServer.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapHttpServer.java @@ -28,6 +28,8 @@ import org.apache.directory.server.core.annotations.ContextEntry; import org.apache.directory.server.core.annotations.CreateDS; import org.apache.directory.server.core.annotations.CreatePartition; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -35,8 +37,8 @@ /** * Test class for LDAP authentication on the HttpServer. */ -@Tag("org.apache.hadoop.hbase.testclassification.MiscTests") -@Tag("org.apache.hadoop.hbase.testclassification.SmallTests") +@Tag(MiscTests.TAG) +@Tag(SmallTests.TAG) @CreateLdapServer( transports = { @CreateTransport(protocol = "LDAP", address = LdapConstants.LDAP_SERVER_ADDR), }) @CreateDS(name = "TestLdapHttpServer", allowAnonAccess = true, From da7325b77d38f5881679675373dd434d8fa1c013 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Thu, 18 Sep 2025 17:06:30 +0800 Subject: [PATCH 17/92] HBASE-29591 Add hadoop 3.4.2 in hadoop check (#7320) Signed-off-by: Istvan Toth --- dev-support/hbase-personality.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh index 46f08276c651..9a5d34cc2138 100755 --- a/dev-support/hbase-personality.sh +++ b/dev-support/hbase-personality.sh @@ -612,17 +612,17 @@ function hadoopcheck_rebuild # TODO remove this on non 2.5 branches ? yetus_info "Setting Hadoop 3 versions to test based on branch-2.5 rules" if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then - hbase_hadoop3_versions="3.2.4 3.3.6 3.4.0" + hbase_hadoop3_versions="3.2.4 3.3.6 3.4.1" else - hbase_hadoop3_versions="3.2.3 3.2.4 3.3.2 3.3.3 3.3.4 3.3.5 3.3.6 3.4.0" + hbase_hadoop3_versions="3.2.3 3.2.4 3.3.2 3.3.3 3.3.4 3.3.5 3.3.6 3.4.0 3.4.1" fi else yetus_info "Setting Hadoop 3 versions to test based on branch-2.6+/master/feature branch rules" # Isn't runnung these tests with the default Hadoop version redundant ? 
if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then - hbase_hadoop3_versions="3.3.6 3.4.0" + hbase_hadoop3_versions="3.3.6 3.4.1" else - hbase_hadoop3_versions="3.3.5 3.3.6 3.4.0" + hbase_hadoop3_versions="3.3.5 3.3.6 3.4.0 3.4.1" fi fi From 04d48ee4903ba88145e21a1c2fd203b1cebb0636 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1vid=20Paksy?= Date: Thu, 18 Sep 2025 15:47:57 +0200 Subject: [PATCH 18/92] HBASE-29609 Upgrade checkstyle and Maven checkstyle plugin (#7321) Signed-off-by: Istvan Toth --- pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pom.xml b/pom.xml index 2e6d12507122..bb4094f93105 100644 --- a/pom.xml +++ b/pom.xml @@ -935,14 +935,14 @@ - 8.29 + 11.0.1 3.1.0 2.41.0 2.4.2 1.0.0 1.8 3.3.0 - 3.1.0 + 3.6.0 2.10 3.0.1 3.4.0 From 42fc87d3ae9193c7119b2385d3ba990af56b55de Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 20 Sep 2025 16:18:21 +0800 Subject: [PATCH 19/92] HBASE-29608 Add test to make sure we do not have copy paste errors in the TAG value (#7324) Signed-off-by: Istvan Toth --- .../hadoop/hbase/TestJUnit5TagConstants.java | 48 +++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 hbase-common/src/test/java/org/apache/hadoop/hbase/TestJUnit5TagConstants.java diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestJUnit5TagConstants.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestJUnit5TagConstants.java new file mode 100644 index 000000000000..10ac18a2b1bf --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestJUnit5TagConstants.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import java.lang.reflect.Field; +import java.util.concurrent.TimeUnit; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +/** + * Verify that the values are all correct. 
+ */ +@Tag(MiscTests.TAG) +@Tag(SmallTests.TAG) +// TODO: this is the timeout for each method, not the whole class +@Timeout(value = 1, unit = TimeUnit.MINUTES) +public class TestJUnit5TagConstants { + + @Test + public void testVerify() throws Exception { + ClassFinder finder = new ClassFinder(getClass().getClassLoader()); + for (Class annoClazz : finder.findClasses(ClientTests.class.getPackageName(), false)) { + Field field = annoClazz.getField("TAG"); + assertEquals(annoClazz.getName(), field.get(null)); + } + } +} From d6e68b130798b14a36af79574ab94d5446892bfc Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 20 Sep 2025 16:33:27 +0800 Subject: [PATCH 20/92] HBASE-29608 Addendum remove jdk9+ only API calls --- .../java/org/apache/hadoop/hbase/TestJUnit5TagConstants.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestJUnit5TagConstants.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestJUnit5TagConstants.java index 10ac18a2b1bf..43607e171817 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestJUnit5TagConstants.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestJUnit5TagConstants.java @@ -40,7 +40,7 @@ public class TestJUnit5TagConstants { @Test public void testVerify() throws Exception { ClassFinder finder = new ClassFinder(getClass().getClassLoader()); - for (Class annoClazz : finder.findClasses(ClientTests.class.getPackageName(), false)) { + for (Class annoClazz : finder.findClasses(ClientTests.class.getPackage().getName(), false)) { Field field = annoClazz.getField("TAG"); assertEquals(annoClazz.getName(), field.get(null)); } From fd7a84fa0a019106c75f7274820d45f77d91c12b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1vid=20Paksy?= Date: Sat, 20 Sep 2025 14:45:37 +0200 Subject: [PATCH 21/92] Revert "HBASE-29609 Upgrade checkstyle and Maven checkstyle plugin (#7321)" (#7332) This reverts commit 04d48ee4903ba88145e21a1c2fd203b1cebb0636. 
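
For context on the JUnit 5 migration carried by the surrounding patches: the TAG constants added to the testclassification interfaces are meant to be referenced from @Tag annotations instead of repeating the fully qualified class name as a string literal, and once the auto-registered extension from HBASE-29576 is in place the class-level timeout is derived from the scale tag, so per-class @Timeout annotations can be dropped. A minimal illustrative sketch of the resulting pattern follows; the class and test method below are made up for illustration and are not part of any patch in this series.

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;

// Tagged with the compile-checked constants; the HBase JUnit 5 extension
// (auto-detected via META-INF/services and the autodetection flag in the pom)
// enforces the scale-based class timeout, so no @Timeout is declared here.
@Tag(MiscTests.TAG)
@Tag(SmallTests.TAG)
public class TestExampleTagged {

  @Test
  public void testSomething() {
    assertEquals(4, 2 + 2);
  }
}

This mirrors how TestLdapAdminACL and TestLdapHttpServer are converted in the patches above.
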
--- pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pom.xml b/pom.xml index bb4094f93105..2e6d12507122 100644 --- a/pom.xml +++ b/pom.xml @@ -935,14 +935,14 @@ - 11.0.1 + 8.29 3.1.0 2.41.0 2.4.2 1.0.0 1.8 3.3.0 - 3.6.0 + 3.1.0 2.10 3.0.1 3.4.0 From 99b7e6cdba1bc66839bbf3a9f34500937921adc6 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Sat, 20 Sep 2025 17:04:24 +0200 Subject: [PATCH 22/92] HBASE-29612 Remove HBaseTestingUtil.forceChangeTaskLogDir (#7326) Co-authored-by: Daniel Roudnitsky Signed-off-by: Duo Zhang --- .../hadoop/hbase/backup/TestBackupBase.java | 4 +- .../hbase/backup/TestBackupHFileCleaner.java | 4 +- .../hbase/backup/TestBackupSmallTests.java | 4 +- .../apache/hadoop/hbase/HBaseTestingUtil.java | 38 +++---------------- 4 files changed, 12 insertions(+), 38 deletions(-) diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java index a14fce59faf2..bfc61010257e 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java @@ -310,6 +310,8 @@ public static void setUpHelper() throws Exception { // Set MultiWAL (with 2 default WAL files per RS) conf1.set(WALFactory.WAL_PROVIDER, provider); TEST_UTIL.startMiniCluster(); + conf1 = TEST_UTIL.getConfiguration(); + TEST_UTIL.startMiniMapReduceCluster(); if (useSecondCluster) { conf2 = HBaseConfiguration.create(conf1); @@ -322,9 +324,7 @@ public static void setUpHelper() throws Exception { CommonFSUtils.setWALRootDir(TEST_UTIL2.getConfiguration(), p); TEST_UTIL2.startMiniCluster(); } - conf1 = TEST_UTIL.getConfiguration(); - TEST_UTIL.startMiniMapReduceCluster(); BACKUP_ROOT_DIR = new Path(new Path(TEST_UTIL.getConfiguration().get("fs.defaultFS")), BACKUP_ROOT_DIR) .toString(); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java index cfceada51a02..9989748746cb 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java @@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; import org.apache.hadoop.hbase.testclassification.MasterTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -46,7 +46,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets; -@Category({ MasterTests.class, SmallTests.class }) +@Category({ MasterTests.class, MediumTests.class }) public class TestBackupHFileCleaner { @ClassRule diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSmallTests.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSmallTests.java index 83cc19578ade..5add9412014f 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSmallTests.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSmallTests.java @@ -22,14 +22,14 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.HBaseClassTestRule; -import 
org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.security.UserGroupInformation; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category(SmallTests.class) +@Category(MediumTests.class) public class TestBackupSmallTests extends TestBackupBase { @ClassRule diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java index 93bde45a9d57..d1b1382c33de 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java @@ -28,7 +28,6 @@ import java.io.OutputStream; import java.io.UncheckedIOException; import java.lang.reflect.Field; -import java.lang.reflect.Modifier; import java.net.BindException; import java.net.DatagramSocket; import java.net.InetAddress; @@ -136,7 +135,6 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.hbase.util.RetryCounter; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WAL; @@ -152,7 +150,6 @@ import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.MiniMRCluster; -import org.apache.hadoop.mapred.TaskLog; import org.apache.hadoop.minikdc.MiniKdc; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -2295,6 +2292,9 @@ public HRegionServer getRSForFirstRegionInTable(TableName tableName) /** * Starts a MiniMRCluster with a default number of TaskTracker's. + * MiniMRCluster caches hadoop.log.dir when first started. It is not possible to start multiple + * MiniMRCluster instances with different log dirs. MiniMRCluster is only to be used from when the + * test is run from a separate VM (i.e not in SmallTests) * @throws IOException When starting the cluster fails. */ public MiniMRCluster startMiniMapReduceCluster() throws IOException { @@ -2305,35 +2305,11 @@ public MiniMRCluster startMiniMapReduceCluster() throws IOException { return mrCluster; } - /** - * Tasktracker has a bug where changing the hadoop.log.dir system property will not change its - * internal static LOG_DIR variable. - */ - private void forceChangeTaskLogDir() { - Field logDirField; - try { - logDirField = TaskLog.class.getDeclaredField("LOG_DIR"); - logDirField.setAccessible(true); - - Field modifiersField = ReflectionUtils.getModifiersField(); - modifiersField.setAccessible(true); - modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL); - - logDirField.set(null, new File(hadoopLogDir, "userlogs")); - } catch (SecurityException e) { - throw new RuntimeException(e); - } catch (NoSuchFieldException e) { - throw new RuntimeException(e); - } catch (IllegalArgumentException e) { - throw new RuntimeException(e); - } catch (IllegalAccessException e) { - throw new RuntimeException(e); - } - } - /** * Starts a MiniMRCluster. Call {@link #setFileSystemURI(String)} to use a different - * filesystem. + * filesystem. MiniMRCluster caches hadoop.log.dir when first started. 
It is not possible to start + * multiple MiniMRCluster instances with different log dirs. MiniMRCluster is only to be used from + * when the test is run from a separate VM (i.e not in SmallTests) * @param servers The number of TaskTracker's to start. * @throws IOException When starting the cluster fails. */ @@ -2345,8 +2321,6 @@ private void startMiniMapReduceCluster(final int servers) throws IOException { setupClusterTestDir(); createDirsAndSetProperties(); - forceChangeTaskLogDir(); - //// hadoop2 specific settings // Tests were failing because this process used 6GB of virtual memory and was getting killed. // we up the VM usable so that processes don't get killed. From 1cd9f29786127f4a6935f4e034d94ea083b12964 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 22 Sep 2025 16:11:48 +0800 Subject: [PATCH 23/92] HBASE-29576 Replicate HBaseClassTestRule functionality for Junit 5 (#7331) Signed-off-by: Istvan Toth --- .../hadoop/hbase/HBaseJupitorExtension.java | 216 ++++++++++++++++++ .../hadoop/hbase/TestJUnit5TagConstants.java | 4 - .../org.junit.jupiter.api.extension.Extension | 16 ++ .../hadoop/hbase/http/TestLdapAdminACL.java | 3 - .../hadoop/hbase/http/TestLdapHttpServer.java | 3 - pom.xml | 1 + 6 files changed, 233 insertions(+), 10 deletions(-) create mode 100644 hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseJupitorExtension.java create mode 100644 hbase-common/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseJupitorExtension.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseJupitorExtension.java new file mode 100644 index 000000000000..867b4c38a73c --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseJupitorExtension.java @@ -0,0 +1,216 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase; + +import static org.junit.jupiter.api.Assertions.fail; + +import java.lang.reflect.Constructor; +import java.lang.reflect.Method; +import java.time.Duration; +import java.time.Instant; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.apache.hadoop.hbase.testclassification.IntegrationTests; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.yetus.audience.InterfaceAudience; +import org.junit.jupiter.api.extension.AfterAllCallback; +import org.junit.jupiter.api.extension.BeforeAllCallback; +import org.junit.jupiter.api.extension.ExtensionContext; +import org.junit.jupiter.api.extension.ExtensionContext.Store; +import org.junit.jupiter.api.extension.InvocationInterceptor; +import org.junit.jupiter.api.extension.ReflectiveInvocationContext; +import org.junit.platform.commons.JUnitException; +import org.junit.platform.commons.util.ExceptionUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; +import org.apache.hbase.thirdparty.com.google.common.collect.Sets; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + +/** + * Class test rule implementation for JUnit5. + *

+ * It ensures that all JUnit5 tests should have at least one of {@link SmallTests}, + * {@link MediumTests}, {@link LargeTests}, {@link IntegrationTests} tags, and set timeout based on + * the tag. + *

+ * It also controls the timeout for the whole test class running, while the timeout annotation in + * JUnit5 can only enforce the timeout for each test method. + *

+ * Finally, it also forbid System.exit call in tests. TODO: need to find a new way as + * SecurityManager has been removed since Java 21. + */ +@InterfaceAudience.Private +public class HBaseJupitorExtension + implements InvocationInterceptor, BeforeAllCallback, AfterAllCallback { + + private static final Logger LOG = LoggerFactory.getLogger(HBaseJupitorExtension.class); + + private static final SecurityManager securityManager = new TestSecurityManager(); + + private static final ExtensionContext.Namespace NAMESPACE = + ExtensionContext.Namespace.create(HBaseJupitorExtension.class); + + private static final Map TAG_TO_TIMEOUT = + ImmutableMap.of(SmallTests.TAG, Duration.ofMinutes(3), MediumTests.TAG, Duration.ofMinutes(6), + LargeTests.TAG, Duration.ofMinutes(13), IntegrationTests.TAG, Duration.ZERO); + + private static final String EXECUTOR = "executor"; + + private static final String DEADLINE = "deadline"; + + public HBaseJupitorExtension() { + super(); + } + + private Duration pickTimeout(ExtensionContext ctx) { + Set timeoutTags = TAG_TO_TIMEOUT.keySet(); + Set timeoutTag = Sets.intersection(timeoutTags, ctx.getTags()); + if (timeoutTag.isEmpty()) { + fail("Test class " + ctx.getDisplayName() + " does not have any of the following scale tags " + + timeoutTags); + } + if (timeoutTag.size() > 1) { + fail("Test class " + ctx.getDisplayName() + " has multiple scale tags " + timeoutTag); + } + return TAG_TO_TIMEOUT.get(Iterables.getOnlyElement(timeoutTag)); + } + + @Override + public void beforeAll(ExtensionContext ctx) throws Exception { + // TODO: remove this usage + System.setSecurityManager(securityManager); + Duration timeout = pickTimeout(ctx); + if (timeout.isZero() || timeout.isNegative()) { + LOG.info("No timeout for {}", ctx.getDisplayName()); + // zero means no timeout + return; + } + Instant deadline = Instant.now().plus(timeout); + LOG.info("Timeout for {} is {}, it should be finished before {}", ctx.getDisplayName(), timeout, + deadline); + ExecutorService executor = + Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat("HBase-Test-" + ctx.getDisplayName() + "-Main-Thread").build()); + Store store = ctx.getStore(NAMESPACE); + store.put(EXECUTOR, executor); + store.put(DEADLINE, deadline); + } + + @Override + public void afterAll(ExtensionContext ctx) throws Exception { + Store store = ctx.getStore(NAMESPACE); + ExecutorService executor = store.remove(EXECUTOR, ExecutorService.class); + if (executor != null) { + executor.shutdownNow(); + } + store.remove(DEADLINE); + // reset secutiry manager + System.setSecurityManager(null); + } + + private T runWithTimeout(Invocation invocation, ExtensionContext ctx) throws Throwable { + Store store = ctx.getStore(NAMESPACE); + ExecutorService executor = store.get(EXECUTOR, ExecutorService.class); + if (executor == null) { + return invocation.proceed(); + } + Instant deadline = store.get(DEADLINE, Instant.class); + Instant now = Instant.now(); + if (!now.isBefore(deadline)) { + fail("Test " + ctx.getDisplayName() + " timed out, deadline is " + deadline); + return null; + } + + Duration remaining = Duration.between(now, deadline); + LOG.info("remaining timeout for {} is {}", ctx.getDisplayName(), remaining); + Future future = executor.submit(() -> { + try { + return invocation.proceed(); + } catch (Throwable t) { + // follow the same pattern with junit5 + throw ExceptionUtils.throwAsUncheckedException(t); + } + }); + try { + return future.get(remaining.toNanos(), TimeUnit.NANOSECONDS); + } catch 
(InterruptedException e) { + Thread.currentThread().interrupt(); + fail("Test " + ctx.getDisplayName() + " interrupted"); + return null; + } catch (ExecutionException e) { + throw ExceptionUtils.throwAsUncheckedException(e.getCause()); + } catch (TimeoutException e) { + + throw new JUnitException( + "Test " + ctx.getDisplayName() + " timed out, deadline is " + deadline, e); + } + } + + @Override + public void interceptBeforeAllMethod(Invocation invocation, + ReflectiveInvocationContext invocationContext, ExtensionContext extensionContext) + throws Throwable { + runWithTimeout(invocation, extensionContext); + } + + @Override + public void interceptBeforeEachMethod(Invocation invocation, + ReflectiveInvocationContext invocationContext, ExtensionContext extensionContext) + throws Throwable { + runWithTimeout(invocation, extensionContext); + } + + @Override + public void interceptTestMethod(Invocation invocation, + ReflectiveInvocationContext invocationContext, ExtensionContext extensionContext) + throws Throwable { + runWithTimeout(invocation, extensionContext); + } + + @Override + public void interceptAfterEachMethod(Invocation invocation, + ReflectiveInvocationContext invocationContext, ExtensionContext extensionContext) + throws Throwable { + runWithTimeout(invocation, extensionContext); + } + + @Override + public void interceptAfterAllMethod(Invocation invocation, + ReflectiveInvocationContext invocationContext, ExtensionContext extensionContext) + throws Throwable { + runWithTimeout(invocation, extensionContext); + } + + @Override + public T interceptTestClassConstructor(Invocation invocation, + ReflectiveInvocationContext> invocationContext, + ExtensionContext extensionContext) throws Throwable { + return runWithTimeout(invocation, extensionContext); + } +} diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestJUnit5TagConstants.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestJUnit5TagConstants.java index 43607e171817..3e30b388ab2e 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestJUnit5TagConstants.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestJUnit5TagConstants.java @@ -20,21 +20,17 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import java.lang.reflect.Field; -import java.util.concurrent.TimeUnit; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Timeout; /** * Verify that the values are all correct. */ @Tag(MiscTests.TAG) @Tag(SmallTests.TAG) -// TODO: this is the timeout for each method, not the whole class -@Timeout(value = 1, unit = TimeUnit.MINUTES) public class TestJUnit5TagConstants { @Test diff --git a/hbase-common/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension b/hbase-common/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension new file mode 100644 index 000000000000..a4143fd934da --- /dev/null +++ b/hbase-common/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +org.apache.hadoop.hbase.HBaseJupitorExtension diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapAdminACL.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapAdminACL.java index c4fd208fa7ce..91a3321bdfce 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapAdminACL.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapAdminACL.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.net.HttpURLConnection; -import java.util.concurrent.TimeUnit; import org.apache.directory.server.annotations.CreateLdapServer; import org.apache.directory.server.annotations.CreateTransport; import org.apache.directory.server.core.annotations.ApplyLdifs; @@ -36,7 +35,6 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -56,7 +54,6 @@ "dn: uid=jdoe," + LdapConstants.LDAP_BASE_DN, "cn: John Doe", "sn: Doe", "objectClass: inetOrgPerson", "uid: jdoe", "userPassword: secure123" }) -@Timeout(value = 1, unit = TimeUnit.MINUTES) public class TestLdapAdminACL extends LdapServerTestBase { private static final Logger LOG = LoggerFactory.getLogger(TestLdapAdminACL.class); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapHttpServer.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapHttpServer.java index 9faa8dc49fb6..c4936513fb36 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapHttpServer.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapHttpServer.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.net.HttpURLConnection; -import java.util.concurrent.TimeUnit; import org.apache.directory.server.annotations.CreateLdapServer; import org.apache.directory.server.annotations.CreateTransport; import org.apache.directory.server.core.annotations.ApplyLdifs; @@ -32,7 +31,6 @@ import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Timeout; /** * Test class for LDAP authentication on the HttpServer. 
@@ -47,7 +45,6 @@ + "dc: example\n" + "objectClass: top\n" + "objectClass: domain\n\n")) }) @ApplyLdifs({ "dn: uid=bjones," + LdapConstants.LDAP_BASE_DN, "cn: Bob Jones", "sn: Jones", "objectClass: inetOrgPerson", "uid: bjones", "userPassword: p@ssw0rd" }) -@Timeout(value = 1, unit = TimeUnit.MINUTES) public class TestLdapHttpServer extends LdapServerTestBase { private static final String BJONES_CREDENTIALS = "bjones:p@ssw0rd"; diff --git a/pom.xml b/pom.xml index 2e6d12507122..18c074a7c249 100644 --- a/pom.xml +++ b/pom.xml @@ -1967,6 +1967,7 @@ listener org.apache.hadoop.hbase.TimedOutTestsListener,org.apache.hadoop.hbase.HBaseClassTestRuleChecker,org.apache.hadoop.hbase.ResourceCheckerJUnitListener + junit.jupiter.extensions.autodetection.enabled=true From 57e3d5e42fd923d39ed5ed64fd781e83819b0f69 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 22 Sep 2025 16:26:48 +0800 Subject: [PATCH 24/92] HBASE-29576 Addendum fix typo Jupitor -> Jupiter --- ...upitorExtension.java => HBaseJupiterExtension.java} | 10 +++------- .../services/org.junit.jupiter.api.extension.Extension | 2 +- 2 files changed, 4 insertions(+), 8 deletions(-) rename hbase-common/src/test/java/org/apache/hadoop/hbase/{HBaseJupitorExtension.java => HBaseJupiterExtension.java} (97%) diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseJupitorExtension.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseJupiterExtension.java similarity index 97% rename from hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseJupitorExtension.java rename to hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseJupiterExtension.java index 867b4c38a73c..ff2ad14fe76b 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseJupitorExtension.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseJupiterExtension.java @@ -66,15 +66,15 @@ * SecurityManager has been removed since Java 21. */ @InterfaceAudience.Private -public class HBaseJupitorExtension +public class HBaseJupiterExtension implements InvocationInterceptor, BeforeAllCallback, AfterAllCallback { - private static final Logger LOG = LoggerFactory.getLogger(HBaseJupitorExtension.class); + private static final Logger LOG = LoggerFactory.getLogger(HBaseJupiterExtension.class); private static final SecurityManager securityManager = new TestSecurityManager(); private static final ExtensionContext.Namespace NAMESPACE = - ExtensionContext.Namespace.create(HBaseJupitorExtension.class); + ExtensionContext.Namespace.create(HBaseJupiterExtension.class); private static final Map TAG_TO_TIMEOUT = ImmutableMap.of(SmallTests.TAG, Duration.ofMinutes(3), MediumTests.TAG, Duration.ofMinutes(6), @@ -84,10 +84,6 @@ public class HBaseJupitorExtension private static final String DEADLINE = "deadline"; - public HBaseJupitorExtension() { - super(); - } - private Duration pickTimeout(ExtensionContext ctx) { Set timeoutTags = TAG_TO_TIMEOUT.keySet(); Set timeoutTag = Sets.intersection(timeoutTags, ctx.getTags()); diff --git a/hbase-common/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension b/hbase-common/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension index a4143fd934da..0cb8a35a1ee8 100644 --- a/hbase-common/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension +++ b/hbase-common/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension @@ -13,4 +13,4 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. -org.apache.hadoop.hbase.HBaseJupitorExtension +org.apache.hadoop.hbase.HBaseJupiterExtension From 0a06e2b3ffdf633363c4a481854eab60e836979d Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Mon, 22 Sep 2025 15:39:51 +0200 Subject: [PATCH 25/92] HBASE-29619 Don't use Java 14+ style case statements in RestoreBackupSystemTableProcedure (#7336) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Dávid Paksy Signed-off-by: Duo Zhang --- .../master/procedure/RestoreBackupSystemTableProcedure.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreBackupSystemTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreBackupSystemTableProcedure.java index af980db6e39e..3a204d42a2c8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreBackupSystemTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreBackupSystemTableProcedure.java @@ -100,9 +100,11 @@ protected Flow executeFromState(MasterProcedureEnv env, RestoreBackupSystemTable protected void rollbackState(MasterProcedureEnv env, RestoreBackupSystemTableState state) throws IOException, InterruptedException { switch (state) { - case RESTORE_BACKUP_SYSTEM_TABLE_DISABLE, RESTORE_BACKUP_SYSTEM_TABLE_PREPARE: + case RESTORE_BACKUP_SYSTEM_TABLE_DISABLE: + case RESTORE_BACKUP_SYSTEM_TABLE_PREPARE: return; - case RESTORE_BACKUP_SYSTEM_TABLE_RESTORE, RESTORE_BACKUP_SYSTEM_TABLE_ENABLE: + case RESTORE_BACKUP_SYSTEM_TABLE_RESTORE: + case RESTORE_BACKUP_SYSTEM_TABLE_ENABLE: if (enableOnRollback) { addChildProcedure(createEnableTableProcedure(env)); } From d108b8e51bcd78495b6884f0293feea1646d12f6 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Mon, 22 Sep 2025 16:14:42 +0200 Subject: [PATCH 26/92] HBASE-29550 Reflection error in TestRSGroupsKillRS with Java 21 (#7327) Signed-off-by: Duo Zhang --- .../apache/hadoop/hbase/trace/TraceUtil.java | 4 +- .../apache/hadoop/hbase/util/VersionInfo.java | 5 ++- .../hbase/rsgroup/TestRSGroupsKillRS.java | 39 ++++++++++--------- 3 files changed, 26 insertions(+), 22 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java index 5b1fb86a351a..260c0064f840 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java @@ -28,8 +28,8 @@ import java.util.concurrent.Callable; import java.util.concurrent.CompletableFuture; import java.util.function.Supplier; -import org.apache.hadoop.hbase.Version; import org.apache.hadoop.hbase.util.FutureUtils; +import org.apache.hadoop.hbase.util.VersionInfo; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -39,7 +39,7 @@ private TraceUtil() { } public static Tracer getGlobalTracer() { - return GlobalOpenTelemetry.getTracer("org.apache.hbase", Version.version); + return GlobalOpenTelemetry.getTracer("org.apache.hbase", VersionInfo.getVersion()); } /** diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java index ba60edb06a08..dfb74e66a9ec 100644 --- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java @@ -40,12 +40,15 @@ public class VersionInfo { // higher than any numbers in the version. private static final int VERY_LARGE_NUMBER = 100000; + // Copying into a non-final member so that it can be changed by reflection for testing + private static String version = Version.version; + /** * Get the hbase version. * @return the hbase version string, eg. "0.6.3-dev" */ public static String getVersion() { - return Version.version; + return version; } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java index b3e30211f24c..3a596a02e0a6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java @@ -22,7 +22,6 @@ import static org.junit.Assert.assertTrue; import java.lang.reflect.Field; -import java.lang.reflect.Modifier; import java.util.ArrayList; import java.util.HashSet; import java.util.List; @@ -47,7 +46,6 @@ import org.apache.hadoop.hbase.testclassification.RSGroupTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil; -import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.hbase.util.VersionInfo; import org.junit.After; import org.junit.AfterClass; @@ -268,24 +266,27 @@ public void testLowerMetaGroupVersion() throws Exception { Address address = servers.iterator().next(); int majorVersion = VersionInfo.getMajorVersion(originVersion); assertTrue(majorVersion >= 1); - String lowerVersion = String.valueOf(majorVersion - 1) + originVersion.split("\\.")[1]; - setFinalStatic(Version.class.getField("version"), lowerVersion); - TEST_UTIL.getMiniHBaseCluster().startRegionServer(address.getHostName(), address.getPort()); - assertEquals(NUM_SLAVES_BASE, - TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size()); - assertTrue(VersionInfo.compareVersion(originVersion, - MASTER.getRegionServerVersion(getServerName(servers.iterator().next()))) > 0); - LOG.debug("wait for META assigned..."); - // SCP finished, which means all regions assigned too. - TEST_UTIL.waitFor(60000, () -> !TEST_UTIL.getHBaseCluster().getMaster().getProcedures().stream() - .filter(p -> (p instanceof ServerCrashProcedure)).findAny().isPresent()); + String lowerVersion = + String.valueOf(majorVersion - 1) + originVersion.substring(originVersion.indexOf(".")); + try { + setVersionInfoVersion(lowerVersion); + TEST_UTIL.getMiniHBaseCluster().startRegionServer(address.getHostName(), address.getPort()); + assertEquals(NUM_SLAVES_BASE, + TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size()); + assertTrue(VersionInfo.compareVersion(originVersion, + MASTER.getRegionServerVersion(getServerName(servers.iterator().next()))) > 0); + LOG.debug("wait for META assigned..."); + // SCP finished, which means all regions assigned too. 
+ TEST_UTIL.waitFor(60000, () -> !TEST_UTIL.getHBaseCluster().getMaster().getProcedures() + .stream().filter(p -> (p instanceof ServerCrashProcedure)).findAny().isPresent()); + } finally { + setVersionInfoVersion(Version.version); + } } - private static void setFinalStatic(Field field, Object newValue) throws Exception { - field.setAccessible(true); - Field modifiersField = ReflectionUtils.getModifiersField(); - modifiersField.setAccessible(true); - modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL); - field.set(null, newValue); + private static void setVersionInfoVersion(String newValue) throws Exception { + Field f = VersionInfo.class.getDeclaredField("version"); + f.setAccessible(true); + f.set(null, newValue); } } From b5cdaab0cbe889564c60c6aa7a607d92e20771dc Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Mon, 22 Sep 2025 16:27:10 +0200 Subject: [PATCH 27/92] HBASE-29615 Update Small tests description wrt reuseForks in docs (#7335) Signed-off-by: Duo Zhang --- .../test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java | 6 ++---- src/main/asciidoc/_chapters/developer.adoc | 5 ++--- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java index d1b1382c33de..e40a74f49770 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java @@ -2293,8 +2293,7 @@ public HRegionServer getRSForFirstRegionInTable(TableName tableName) /** * Starts a MiniMRCluster with a default number of TaskTracker's. * MiniMRCluster caches hadoop.log.dir when first started. It is not possible to start multiple - * MiniMRCluster instances with different log dirs. MiniMRCluster is only to be used from when the - * test is run from a separate VM (i.e not in SmallTests) + * MiniMRCluster instances with different log dirs. * @throws IOException When starting the cluster fails. */ public MiniMRCluster startMiniMapReduceCluster() throws IOException { @@ -2308,8 +2307,7 @@ public MiniMRCluster startMiniMapReduceCluster() throws IOException { /** * Starts a MiniMRCluster. Call {@link #setFileSystemURI(String)} to use a different * filesystem. MiniMRCluster caches hadoop.log.dir when first started. It is not possible to start - * multiple MiniMRCluster instances with different log dirs. MiniMRCluster is only to be used from - * when the test is run from a separate VM (i.e not in SmallTests) + * multiple MiniMRCluster instances with different log dirs. * @param servers The number of TaskTracker's to start. * @throws IOException When starting the cluster fails. */ diff --git a/src/main/asciidoc/_chapters/developer.adoc b/src/main/asciidoc/_chapters/developer.adoc index 0844bafd4868..69773afa1002 100644 --- a/src/main/asciidoc/_chapters/developer.adoc +++ b/src/main/asciidoc/_chapters/developer.adoc @@ -1308,12 +1308,11 @@ to put on your new HBase test case. .Categorizing Tests Small Tests (((SmallTests))):: - _Small_ test cases are executed in a shared JVM and each test suite/test class should + _Small_ test cases are executed in separate JVM and each test suite/test class should run in 15 seconds or less; i.e. a link:https://en.wikipedia.org/wiki/JUnit[junit test fixture], a java object made up of test methods, should finish in under 15 seconds, no matter how many or how few test methods it has. 
These test cases should not use a minicluster as a minicluster starts many services, - most unrelated to what is being tested. Multiple start/stops may leak resources or just overwhelm - the single JVM context. + most unrelated to what is being tested. Medium Tests (((MediumTests))):: _Medium_ test cases are executed in separate JVM and individual test suites or test classes or in From 608c1b98048523fbfd4d870a49b26e14bad89ba7 Mon Sep 17 00:00:00 2001 From: Hernan Romer Date: Wed, 24 Sep 2025 14:51:32 -0400 Subject: [PATCH 28/92] HBASE-28440 Add support for using mapreduce sort in HFileOutputFormat2 (#7294) Co-authored-by: Hernan Gelaf-Romer Signed-off-by: Ray Mattingly --- .../impl/IncrementalTableBackupClient.java | 15 ++- .../mapreduce/MapReduceHFileSplitterJob.java | 36 ++++- .../hbase/mapreduce/HFileOutputFormat2.java | 32 ++++- .../mapreduce/KeyOnlyCellComparable.java | 94 +++++++++++++ .../mapreduce/PreSortedCellsReducer.java | 47 +++++++ .../hadoop/hbase/mapreduce/WALPlayer.java | 38 +++++- .../hadoop/hbase/mapreduce/TestWALPlayer.java | 126 ++++++++++++------ 7 files changed, 327 insertions(+), 61 deletions(-) create mode 100644 hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyOnlyCellComparable.java create mode 100644 hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PreSortedCellsReducer.java diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java index bbc32b2ef8f8..b68ed527833f 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2; import org.apache.hadoop.hbase.mapreduce.WALPlayer; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.SnapshotManifest; @@ -334,6 +335,7 @@ public void execute() throws IOException, ColumnFamilyMismatchException { } protected void incrementalCopyHFiles(String[] files, String backupDest) throws IOException { + boolean diskBasedSortingOriginalValue = HFileOutputFormat2.diskBasedSortingEnabled(conf); try { LOG.debug("Incremental copy HFiles is starting. 
dest=" + backupDest); // set overall backup phase: incremental_copy @@ -348,6 +350,7 @@ protected void incrementalCopyHFiles(String[] files, String backupDest) throws I LOG.debug("Setting incremental copy HFiles job name to : " + jobname); } conf.set(JOB_NAME_CONF_KEY, jobname); + conf.setBoolean(HFileOutputFormat2.DISK_BASED_SORTING_ENABLED_KEY, true); BackupCopyJob copyService = BackupRestoreFactory.getBackupCopyJob(conf); int res = copyService.copy(backupInfo, backupManager, conf, BackupType.INCREMENTAL, strArr); @@ -360,6 +363,8 @@ protected void incrementalCopyHFiles(String[] files, String backupDest) throws I + " finished."); } finally { deleteBulkLoadDirectory(); + conf.setBoolean(HFileOutputFormat2.DISK_BASED_SORTING_ENABLED_KEY, + diskBasedSortingOriginalValue); } } @@ -413,6 +418,9 @@ protected void walToHFiles(List dirPaths, List tableList) throws conf.set(WALPlayer.INPUT_FILES_SEPARATOR_KEY, ";"); conf.setBoolean(WALPlayer.MULTI_TABLES_SUPPORT, true); conf.set(JOB_NAME_CONF_KEY, jobname); + + boolean diskBasedSortingEnabledOriginalValue = HFileOutputFormat2.diskBasedSortingEnabled(conf); + conf.setBoolean(HFileOutputFormat2.DISK_BASED_SORTING_ENABLED_KEY, true); String[] playerArgs = { dirs, StringUtils.join(tableList, ",") }; try { @@ -421,13 +429,16 @@ protected void walToHFiles(List dirPaths, List tableList) throws if (result != 0) { throw new IOException("WAL Player failed"); } - conf.unset(WALPlayer.INPUT_FILES_SEPARATOR_KEY); - conf.unset(JOB_NAME_CONF_KEY); } catch (IOException e) { throw e; } catch (Exception ee) { throw new IOException("Can not convert from directory " + dirs + " (check Hadoop, HBase and WALPlayer M/R job logs) ", ee); + } finally { + conf.setBoolean(HFileOutputFormat2.DISK_BASED_SORTING_ENABLED_KEY, + diskBasedSortingEnabledOriginalValue); + conf.unset(WALPlayer.INPUT_FILES_SEPARATOR_KEY); + conf.unset(JOB_NAME_CONF_KEY); } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java index 7d9430914cb3..85df58e0946e 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java @@ -23,6 +23,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; @@ -34,11 +35,14 @@ import org.apache.hadoop.hbase.mapreduce.CellSortReducer; import org.apache.hadoop.hbase.mapreduce.HFileInputFormat; import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2; +import org.apache.hadoop.hbase.mapreduce.KeyOnlyCellComparable; +import org.apache.hadoop.hbase.mapreduce.PreSortedCellsReducer; import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil; import org.apache.hadoop.hbase.snapshot.SnapshotRegionLocator; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.MapReduceExtendedCell; import org.apache.hadoop.io.NullWritable; +import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; @@ -72,18 +76,28 @@ protected MapReduceHFileSplitterJob(final 
Configuration c) { /** * A mapper that just writes out cells. This one can be used together with {@link CellSortReducer} */ - static class HFileCellMapper extends Mapper { + static class HFileCellMapper extends Mapper, Cell> { + + private boolean diskBasedSortingEnabled = false; @Override public void map(NullWritable key, Cell value, Context context) throws IOException, InterruptedException { - context.write(new ImmutableBytesWritable(CellUtil.cloneRow(value)), - new MapReduceExtendedCell(PrivateCellUtil.ensureExtendedCell(value))); + ExtendedCell extendedCell = PrivateCellUtil.ensureExtendedCell(value); + context.write(wrap(extendedCell), new MapReduceExtendedCell(extendedCell)); } @Override public void setup(Context context) throws IOException { - // do nothing + diskBasedSortingEnabled = + HFileOutputFormat2.diskBasedSortingEnabled(context.getConfiguration()); + } + + private WritableComparable wrap(ExtendedCell cell) { + if (diskBasedSortingEnabled) { + return new KeyOnlyCellComparable(cell); + } + return new ImmutableBytesWritable(CellUtil.cloneRow(cell)); } } @@ -107,13 +121,23 @@ public Job createSubmittableJob(String[] args) throws IOException { true); job.setJarByClass(MapReduceHFileSplitterJob.class); job.setInputFormatClass(HFileInputFormat.class); - job.setMapOutputKeyClass(ImmutableBytesWritable.class); String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY); + boolean diskBasedSortingEnabled = HFileOutputFormat2.diskBasedSortingEnabled(conf); + if (diskBasedSortingEnabled) { + job.setMapOutputKeyClass(KeyOnlyCellComparable.class); + job.setSortComparatorClass(KeyOnlyCellComparable.KeyOnlyCellComparator.class); + } else { + job.setMapOutputKeyClass(ImmutableBytesWritable.class); + } if (hfileOutPath != null) { LOG.debug("add incremental job :" + hfileOutPath + " from " + inputDirs); TableName tableName = TableName.valueOf(tabName); job.setMapperClass(HFileCellMapper.class); - job.setReducerClass(CellSortReducer.class); + if (diskBasedSortingEnabled) { + job.setReducerClass(PreSortedCellsReducer.class); + } else { + job.setReducerClass(CellSortReducer.class); + } Path outputDir = new Path(hfileOutPath); FileOutputFormat.setOutputPath(job, outputDir); job.setMapOutputValueClass(MapReduceExtendedCell.class); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index be68d4575963..7d9da905f424 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; @@ -82,6 +83,7 @@ import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.SequenceFile; import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.OutputCommitter; import org.apache.hadoop.mapreduce.OutputFormat; @@ -170,6 +172,11 @@ protected static byte[] combineTableNameSuffix(byte[] tableName, byte[] suffix) "hbase.mapreduce.hfileoutputformat.extendedcell.enabled"; static final boolean 
EXTENDED_CELL_SERIALIZATION_ENABLED_DEFULT = false; + @InterfaceAudience.Private + public static final String DISK_BASED_SORTING_ENABLED_KEY = + "hbase.mapreduce.hfileoutputformat.disk.based.sorting.enabled"; + private static final boolean DISK_BASED_SORTING_ENABLED_DEFAULT = false; + public static final String REMOTE_CLUSTER_CONF_PREFIX = "hbase.hfileoutputformat.remote.cluster."; public static final String REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY = REMOTE_CLUSTER_CONF_PREFIX + "zookeeper.quorum"; @@ -547,12 +554,19 @@ private static void writePartitions(Configuration conf, Path partitionsPath, // Write the actual file FileSystem fs = partitionsPath.getFileSystem(conf); - SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, partitionsPath, - ImmutableBytesWritable.class, NullWritable.class); + boolean diskBasedSortingEnabled = diskBasedSortingEnabled(conf); + Class keyClass = + diskBasedSortingEnabled ? KeyOnlyCellComparable.class : ImmutableBytesWritable.class; + SequenceFile.Writer writer = + SequenceFile.createWriter(fs, conf, partitionsPath, keyClass, NullWritable.class); try { for (ImmutableBytesWritable startKey : sorted) { - writer.append(startKey, NullWritable.get()); + Writable writable = diskBasedSortingEnabled + ? new KeyOnlyCellComparable(KeyValueUtil.createFirstOnRow(startKey.get())) + : startKey; + + writer.append(writable, NullWritable.get()); } } finally { writer.close(); @@ -599,6 +613,10 @@ public static void configureIncrementalLoad(Job job, TableDescriptor tableDescri configureIncrementalLoad(job, singleTableInfo, HFileOutputFormat2.class); } + public static boolean diskBasedSortingEnabled(Configuration conf) { + return conf.getBoolean(DISK_BASED_SORTING_ENABLED_KEY, DISK_BASED_SORTING_ENABLED_DEFAULT); + } + static void configureIncrementalLoad(Job job, List multiTableInfo, Class> cls) throws IOException { Configuration conf = job.getConfiguration(); @@ -617,7 +635,13 @@ static void configureIncrementalLoad(Job job, List multiTableInfo, // Based on the configured map output class, set the correct reducer to properly // sort the incoming values. // TODO it would be nice to pick one or the other of these formats. - if ( + boolean diskBasedSorting = diskBasedSortingEnabled(conf); + + if (diskBasedSorting) { + job.setMapOutputKeyClass(KeyOnlyCellComparable.class); + job.setSortComparatorClass(KeyOnlyCellComparable.KeyOnlyCellComparator.class); + job.setReducerClass(PreSortedCellsReducer.class); + } else if ( KeyValue.class.equals(job.getMapOutputValueClass()) || MapReduceExtendedCell.class.equals(job.getMapOutputValueClass()) ) { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyOnlyCellComparable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyOnlyCellComparable.java new file mode 100644 index 000000000000..d9b28f8a6895 --- /dev/null +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyOnlyCellComparable.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.mapreduce; + +import java.io.ByteArrayInputStream; +import java.io.DataInput; +import java.io.DataInputStream; +import java.io.DataOutput; +import java.io.IOException; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.io.WritableComparator; +import org.apache.yetus.audience.InterfaceAudience; + +@InterfaceAudience.Private +public class KeyOnlyCellComparable implements WritableComparable { + + static { + WritableComparator.define(KeyOnlyCellComparable.class, new KeyOnlyCellComparator()); + } + + private ExtendedCell cell = null; + + public KeyOnlyCellComparable() { + } + + public KeyOnlyCellComparable(ExtendedCell cell) { + this.cell = cell; + } + + public ExtendedCell getCell() { + return cell; + } + + @Override + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "EQ_COMPARETO_USE_OBJECT_EQUALS", + justification = "This is wrong, yes, but we should be purging Writables, not fixing them") + public int compareTo(KeyOnlyCellComparable o) { + return CellComparator.getInstance().compare(cell, o.cell); + } + + @Override + public void write(DataOutput out) throws IOException { + int keyLen = PrivateCellUtil.estimatedSerializedSizeOfKey(cell); + int valueLen = 0; // We avoid writing value here. So just serialize as if an empty value. + out.writeInt(keyLen + valueLen + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE); + out.writeInt(keyLen); + out.writeInt(valueLen); + PrivateCellUtil.writeFlatKey(cell, out); + out.writeLong(cell.getSequenceId()); + } + + @Override + public void readFields(DataInput in) throws IOException { + cell = KeyValue.create(in); + long seqId = in.readLong(); + cell.setSequenceId(seqId); + } + + public static class KeyOnlyCellComparator extends WritableComparator { + + @Override + public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { + try (DataInputStream d1 = new DataInputStream(new ByteArrayInputStream(b1, s1, l1)); + DataInputStream d2 = new DataInputStream(new ByteArrayInputStream(b2, s2, l2))) { + KeyOnlyCellComparable kv1 = new KeyOnlyCellComparable(); + kv1.readFields(d1); + KeyOnlyCellComparable kv2 = new KeyOnlyCellComparable(); + kv2.readFields(d2); + return compare(kv1, kv2); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } +} diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PreSortedCellsReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PreSortedCellsReducer.java new file mode 100644 index 000000000000..8f4b2953ec0d --- /dev/null +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PreSortedCellsReducer.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.mapreduce; + +import java.io.IOException; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.hadoop.hbase.util.MapReduceExtendedCell; +import org.apache.hadoop.mapreduce.Reducer; +import org.apache.yetus.audience.InterfaceAudience; + +@InterfaceAudience.Private +public class PreSortedCellsReducer + extends Reducer { + + @Override + protected void reduce(KeyOnlyCellComparable keyComparable, Iterable values, Context context) + throws IOException, InterruptedException { + + int index = 0; + ImmutableBytesWritable key = + new ImmutableBytesWritable(CellUtil.cloneRow(keyComparable.getCell())); + for (Cell cell : values) { + context.write(key, new MapReduceExtendedCell(PrivateCellUtil.ensureExtendedCell(cell))); + if (++index % 100 == 0) { + context.setStatus("Wrote " + index + " cells"); + } + } + } +} diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java index 5e2dc0902e0d..9813118e2502 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; @@ -53,6 +54,7 @@ import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALKey; +import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; @@ -96,9 +98,10 @@ protected WALPlayer(final Configuration c) { * A mapper that just writes out KeyValues. This one can be used together with * {@link CellSortReducer} */ - static class WALKeyValueMapper extends Mapper { + static class WALKeyValueMapper extends Mapper, Cell> { private Set tableSet = new HashSet(); private boolean multiTableSupport = false; + private boolean diskBasedSortingEnabled = false; @Override public void map(WALKey key, WALEdit value, Context context) throws IOException { @@ -120,8 +123,8 @@ public void map(WALKey key, WALEdit value, Context context) throws IOException { byte[] outKey = multiTableSupport ? 
Bytes.add(table.getName(), Bytes.toBytes(tableSeparator), CellUtil.cloneRow(cell)) : CellUtil.cloneRow(cell); - context.write(new ImmutableBytesWritable(outKey), - new MapReduceExtendedCell(PrivateCellUtil.ensureExtendedCell(cell))); + ExtendedCell extendedCell = PrivateCellUtil.ensureExtendedCell(cell); + context.write(wrapKey(outKey, extendedCell), new MapReduceExtendedCell(extendedCell)); } } } catch (InterruptedException e) { @@ -135,8 +138,23 @@ public void setup(Context context) throws IOException { Configuration conf = context.getConfiguration(); String[] tables = conf.getStrings(TABLES_KEY); this.multiTableSupport = conf.getBoolean(MULTI_TABLES_SUPPORT, false); + this.diskBasedSortingEnabled = HFileOutputFormat2.diskBasedSortingEnabled(conf); Collections.addAll(tableSet, tables); } + + private WritableComparable wrapKey(byte[] key, ExtendedCell cell) { + if (this.diskBasedSortingEnabled) { + // Important to build a new cell with the updated key to maintain multi-table support + KeyValue kv = new KeyValue(key, 0, key.length, cell.getFamilyArray(), + cell.getFamilyOffset(), cell.getFamilyLength(), cell.getQualifierArray(), + cell.getQualifierOffset(), cell.getQualifierLength(), cell.getTimestamp(), + KeyValue.Type.codeToType(PrivateCellUtil.getTypeByte(cell)), null, 0, 0); + kv.setSequenceId(cell.getSequenceId()); + return new KeyOnlyCellComparable(kv); + } else { + return new ImmutableBytesWritable(key); + } + } } /** @@ -313,7 +331,13 @@ public Job createSubmittableJob(String[] args) throws IOException { job.setJarByClass(WALPlayer.class); job.setInputFormatClass(WALInputFormat.class); - job.setMapOutputKeyClass(ImmutableBytesWritable.class); + boolean diskBasedSortingEnabled = HFileOutputFormat2.diskBasedSortingEnabled(conf); + if (diskBasedSortingEnabled) { + job.setMapOutputKeyClass(KeyOnlyCellComparable.class); + job.setSortComparatorClass(KeyOnlyCellComparable.KeyOnlyCellComparator.class); + } else { + job.setMapOutputKeyClass(ImmutableBytesWritable.class); + } String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY); if (hfileOutPath != null) { @@ -328,7 +352,11 @@ public Job createSubmittableJob(String[] args) throws IOException { List tableNames = getTableNameList(tables); job.setMapperClass(WALKeyValueMapper.class); - job.setReducerClass(CellSortReducer.class); + if (diskBasedSortingEnabled) { + job.setReducerClass(PreSortedCellsReducer.class); + } else { + job.setReducerClass(CellSortReducer.class); + } Path outputDir = new Path(hfileOutPath); FileOutputFormat.setOutputPath(job, outputDir); job.setMapOutputValueClass(MapReduceExtendedCell.class); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java index b39d04802c98..220e9a3793cd 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java @@ -64,6 +64,7 @@ import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALKey; +import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.Mapper.Context; import org.apache.hadoop.util.ToolRunner; @@ -123,19 +124,22 @@ public void testPlayingRecoveredEdit() throws Exception { TEST_UTIL.createTable(tn, TestRecoveredEdits.RECOVEREDEDITS_COLUMNFAMILY); // Copy testing recovered.edits file that is over under 
hbase-server test resources // up into a dir in our little hdfs cluster here. - String hbaseServerTestResourcesEdits = - System.getProperty("test.build.classes") + "/../../../hbase-server/src/test/resources/" - + TestRecoveredEdits.RECOVEREDEDITS_PATH.getName(); - assertTrue(new File(hbaseServerTestResourcesEdits).exists()); - FileSystem dfs = TEST_UTIL.getDFSCluster().getFileSystem(); - // Target dir. - Path targetDir = new Path("edits").makeQualified(dfs.getUri(), dfs.getHomeDirectory()); - assertTrue(dfs.mkdirs(targetDir)); - dfs.copyFromLocalFile(new Path(hbaseServerTestResourcesEdits), targetDir); - assertEquals(0, - ToolRunner.run(new WALPlayer(this.conf), new String[] { targetDir.toString() })); - // I don't know how many edits are in this file for this table... so just check more than 1. - assertTrue(TEST_UTIL.countRows(tn) > 0); + runWithDiskBasedSortingDisabledAndEnabled(() -> { + String hbaseServerTestResourcesEdits = + System.getProperty("test.build.classes") + "/../../../hbase-server/src/test/resources/" + + TestRecoveredEdits.RECOVEREDEDITS_PATH.getName(); + assertTrue(new File(hbaseServerTestResourcesEdits).exists()); + FileSystem dfs = TEST_UTIL.getDFSCluster().getFileSystem(); + // Target dir. + Path targetDir = new Path("edits").makeQualified(dfs.getUri(), dfs.getHomeDirectory()); + assertTrue(dfs.mkdirs(targetDir)); + dfs.copyFromLocalFile(new Path(hbaseServerTestResourcesEdits), targetDir); + assertEquals(0, + ToolRunner.run(new WALPlayer(this.conf), new String[] { targetDir.toString() })); + // I don't know how many edits are in this file for this table... so just check more than 1. + assertTrue(TEST_UTIL.countRows(tn) > 0); + dfs.delete(targetDir, true); + }); } /** @@ -150,7 +154,7 @@ public void testWALPlayerBulkLoadWithOverriddenTimestamps() throws Exception { final byte[] column1 = Bytes.toBytes("c1"); final byte[] column2 = Bytes.toBytes("c2"); final byte[] row = Bytes.toBytes("row"); - Table table = TEST_UTIL.createTable(tableName, family); + final Table table = TEST_UTIL.createTable(tableName, family); long now = EnvironmentEdgeManager.currentTime(); // put a row into the first table @@ -188,28 +192,37 @@ public void testWALPlayerBulkLoadWithOverriddenTimestamps() throws Exception { configuration.setBoolean(WALPlayer.MULTI_TABLES_SUPPORT, true); WALPlayer player = new WALPlayer(configuration); - assertEquals(0, ToolRunner.run(configuration, player, - new String[] { walInputDir, tableName.getNameAsString() })); + final byte[] finalLastVal = lastVal; - Get g = new Get(row); - Result result = table.get(g); - byte[] value = CellUtil.cloneValue(result.getColumnLatestCell(family, column1)); - assertThat(Bytes.toStringBinary(value), equalTo(Bytes.toStringBinary(lastVal))); + runWithDiskBasedSortingDisabledAndEnabled(() -> { + assertEquals(0, ToolRunner.run(configuration, player, + new String[] { walInputDir, tableName.getNameAsString() })); - table = TEST_UTIL.truncateTable(tableName); - g = new Get(row); - result = table.get(g); - assertThat(result.listCells(), nullValue()); + Get g = new Get(row); + Result result = table.get(g); + byte[] value = CellUtil.cloneValue(result.getColumnLatestCell(family, column1)); + assertThat(Bytes.toStringBinary(value), equalTo(Bytes.toStringBinary(finalLastVal))); - BulkLoadHFiles.create(configuration).bulkLoad(tableName, - new Path(outPath, tableName.getNamespaceAsString() + "/" + tableName.getNameAsString())); + TEST_UTIL.truncateTable(tableName); + g = new Get(row); + result = table.get(g); + assertThat(result.listCells(), 
nullValue()); - g = new Get(row); - result = table.get(g); - value = CellUtil.cloneValue(result.getColumnLatestCell(family, column1)); + BulkLoadHFiles.create(configuration).bulkLoad(tableName, + new Path(outPath, tableName.getNamespaceAsString() + "/" + tableName.getNameAsString())); - assertThat(result.listCells(), notNullValue()); - assertThat(Bytes.toStringBinary(value), equalTo(Bytes.toStringBinary(lastVal))); + g = new Get(row); + result = table.get(g); + value = CellUtil.cloneValue(result.getColumnLatestCell(family, column1)); + + assertThat(result.listCells(), notNullValue()); + assertThat(Bytes.toStringBinary(value), equalTo(Bytes.toStringBinary(finalLastVal))); + + // cleanup + Path out = new Path(outPath); + FileSystem fs = out.getFileSystem(configuration); + assertTrue(fs.delete(out, true)); + }); } /** @@ -244,18 +257,21 @@ public void testWALPlayer() throws Exception { Configuration configuration = TEST_UTIL.getConfiguration(); WALPlayer player = new WALPlayer(configuration); - String optionName = "_test_.name"; - configuration.set(optionName, "1000"); - player.setupTime(configuration, optionName); - assertEquals(1000, configuration.getLong(optionName, 0)); - assertEquals(0, ToolRunner.run(configuration, player, - new String[] { walInputDir, tableName1.getNameAsString(), tableName2.getNameAsString() })); - - // verify the WAL was player into table 2 - Get g = new Get(ROW); - Result r = t2.get(g); - assertEquals(1, r.size()); - assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN2)); + + runWithDiskBasedSortingDisabledAndEnabled(() -> { + String optionName = "_test_.name"; + configuration.set(optionName, "1000"); + player.setupTime(configuration, optionName); + assertEquals(1000, configuration.getLong(optionName, 0)); + assertEquals(0, ToolRunner.run(configuration, player, + new String[] { walInputDir, tableName1.getNameAsString(), tableName2.getNameAsString() })); + + // verify the WAL was player into table 2 + Get g = new Get(ROW); + Result r = t2.get(g); + assertEquals(1, r.size()); + assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN2)); + }); } /** @@ -278,7 +294,7 @@ private void testWALKeyValueMapper(final String tableConfigKey) throws Exception WALKey key = mock(WALKey.class); when(key.getTableName()).thenReturn(TableName.valueOf("table")); @SuppressWarnings("unchecked") - Mapper.Context context = mock(Context.class); + Mapper, Cell>.Context context = mock(Context.class); when(context.getConfiguration()).thenReturn(configuration); WALEdit value = mock(WALEdit.class); @@ -335,7 +351,29 @@ public void testMainMethod() throws Exception { System.setErr(oldPrintStream); System.setSecurityManager(SECURITY_MANAGER); } + } + + private static void runWithDiskBasedSortingDisabledAndEnabled(TestMethod method) + throws Exception { + TEST_UTIL.getConfiguration().setBoolean(HFileOutputFormat2.DISK_BASED_SORTING_ENABLED_KEY, + false); + try { + method.run(); + } finally { + TEST_UTIL.getConfiguration().unset(HFileOutputFormat2.DISK_BASED_SORTING_ENABLED_KEY); + } + + TEST_UTIL.getConfiguration().setBoolean(HFileOutputFormat2.DISK_BASED_SORTING_ENABLED_KEY, + true); + try { + method.run(); + } finally { + TEST_UTIL.getConfiguration().unset(HFileOutputFormat2.DISK_BASED_SORTING_ENABLED_KEY); + } + } + private interface TestMethod { + void run() throws Exception; } } From 09600872510f05182e6dba02c584aee6a548366d Mon Sep 17 00:00:00 2001 From: Wellington Ramos Chevreuil Date: Thu, 25 Sep 2025 10:44:48 +0100 Subject: [PATCH 29/92] HBASE-29623 Blocks for CFs with 
BlockCache disabled may still get cached on write or compaction (#7339) Signed-off-by: Peter Somogyi --- .../hadoop/hbase/io/hfile/CacheConfig.java | 66 ++++++++++--------- .../hbase/io/hfile/TestCacheConfig.java | 21 ++++-- 2 files changed, 51 insertions(+), 36 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index 72ca37c0557c..fc8f4d569176 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -132,7 +132,7 @@ public class CacheConfig implements PropagatingConfigurationObserver { private volatile boolean cacheDataOnRead; /** Whether blocks should be flagged as in-memory when being cached */ - private final boolean inMemory; + private boolean inMemory; /** Whether data blocks should be cached when new files are written */ private volatile boolean cacheDataOnWrite; @@ -147,29 +147,29 @@ public class CacheConfig implements PropagatingConfigurationObserver { private volatile boolean evictOnClose; /** Whether data blocks should be stored in compressed and/or encrypted form in the cache */ - private final boolean cacheDataCompressed; + private boolean cacheDataCompressed; /** Whether data blocks should be prefetched into the cache */ - private final boolean prefetchOnOpen; + private boolean prefetchOnOpen; /** * Whether data blocks should be cached when compacted file is written */ - private final boolean cacheCompactedDataOnWrite; + private boolean cacheCompactedDataOnWrite; /** * Determine threshold beyond which we do not cache blocks on compaction */ private long cacheCompactedDataOnWriteThreshold; - private final boolean dropBehindCompaction; + private boolean dropBehindCompaction; // Local reference to the block cache private final BlockCache blockCache; private final ByteBuffAllocator byteBuffAllocator; - private final double heapUsageThreshold; + private double heapUsageThreshold; /** * Create a cache configuration using the specified configuration object and defaults for family @@ -191,32 +191,34 @@ public CacheConfig(Configuration conf, BlockCache blockCache) { */ public CacheConfig(Configuration conf, ColumnFamilyDescriptor family, BlockCache blockCache, ByteBuffAllocator byteBuffAllocator) { - this.cacheDataOnRead = conf.getBoolean(CACHE_DATA_ON_READ_KEY, DEFAULT_CACHE_DATA_ON_READ) - && (family == null ? true : family.isBlockCacheEnabled()); - this.inMemory = family == null ? DEFAULT_IN_MEMORY : family.isInMemory(); - this.cacheDataCompressed = - conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED); - this.dropBehindCompaction = - conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT); - // For the following flags we enable them regardless of per-schema settings - // if they are enabled in the global configuration. - this.cacheDataOnWrite = conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_DATA_ON_WRITE) - || (family == null ? false : family.isCacheDataOnWrite()); - this.cacheIndexesOnWrite = - conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_INDEXES_ON_WRITE) - || (family == null ? false : family.isCacheIndexesOnWrite()); - this.cacheBloomsOnWrite = - conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_BLOOMS_ON_WRITE) - || (family == null ? 
false : family.isCacheBloomsOnWrite()); - this.evictOnClose = conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE) - || (family == null ? false : family.isEvictBlocksOnClose()); - this.prefetchOnOpen = conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN) - || (family == null ? false : family.isPrefetchBlocksOnOpen()); - this.cacheCompactedDataOnWrite = - conf.getBoolean(CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE); - this.cacheCompactedDataOnWriteThreshold = getCacheCompactedBlocksOnWriteThreshold(conf); - this.heapUsageThreshold = - conf.getDouble(PREFETCH_HEAP_USAGE_THRESHOLD, DEFAULT_PREFETCH_HEAP_USAGE_THRESHOLD); + if (family == null || family.isBlockCacheEnabled()) { + this.cacheDataOnRead = conf.getBoolean(CACHE_DATA_ON_READ_KEY, DEFAULT_CACHE_DATA_ON_READ); + this.inMemory = family == null ? DEFAULT_IN_MEMORY : family.isInMemory(); + this.cacheDataCompressed = + conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED); + this.dropBehindCompaction = + conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT); + // For the following flags we enable them regardless of per-schema settings + // if they are enabled in the global configuration. + this.cacheDataOnWrite = + conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_DATA_ON_WRITE) + || (family == null ? false : family.isCacheDataOnWrite()); + this.cacheIndexesOnWrite = + conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_INDEXES_ON_WRITE) + || (family == null ? false : family.isCacheIndexesOnWrite()); + this.cacheBloomsOnWrite = + conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_BLOOMS_ON_WRITE) + || (family == null ? false : family.isCacheBloomsOnWrite()); + this.evictOnClose = conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE) + || (family == null ? false : family.isEvictBlocksOnClose()); + this.prefetchOnOpen = conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN) + || (family == null ? 
false : family.isPrefetchBlocksOnOpen()); + this.cacheCompactedDataOnWrite = conf.getBoolean(CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, + DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE); + this.cacheCompactedDataOnWriteThreshold = getCacheCompactedBlocksOnWriteThreshold(conf); + this.heapUsageThreshold = + conf.getDouble(PREFETCH_HEAP_USAGE_THRESHOLD, DEFAULT_PREFETCH_HEAP_USAGE_THRESHOLD); + } this.blockCache = blockCache; this.byteBuffAllocator = byteBuffAllocator; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java index f03d7fb2c016..50c0b717096a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java @@ -190,12 +190,14 @@ void basicBlockCacheOps(final BlockCache bc, final CacheConfig cc, final boolean @Test public void testDisableCacheDataBlock() throws IOException { + // First tests the default configs behaviour and block cache enabled Configuration conf = HBaseConfiguration.create(); CacheConfig cacheConfig = new CacheConfig(conf); assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA)); assertFalse(cacheConfig.shouldCacheCompressed(BlockCategory.DATA)); assertFalse(cacheConfig.shouldCacheDataCompressed()); assertFalse(cacheConfig.shouldCacheDataOnWrite()); + assertFalse(cacheConfig.shouldCacheCompactedBlocksOnWrite()); assertTrue(cacheConfig.shouldCacheDataOnRead()); assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.INDEX)); assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.META)); @@ -203,10 +205,12 @@ public void testDisableCacheDataBlock() throws IOException { assertFalse(cacheConfig.shouldCacheBloomsOnWrite()); assertFalse(cacheConfig.shouldCacheIndexesOnWrite()); + // Tests block cache enabled and related cache on write flags enabled conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true); conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, true); conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, true); conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, true); + conf.setBoolean(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, true); cacheConfig = new CacheConfig(conf); assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA)); @@ -219,9 +223,12 @@ public void testDisableCacheDataBlock() throws IOException { assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.BLOOM)); assertTrue(cacheConfig.shouldCacheBloomsOnWrite()); assertTrue(cacheConfig.shouldCacheIndexesOnWrite()); + assertTrue(cacheConfig.shouldCacheCompactedBlocksOnWrite()); + // Tests block cache enabled but related cache on read/write properties disabled conf.setBoolean(CacheConfig.CACHE_DATA_ON_READ_KEY, false); conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false); + conf.setBoolean(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, false); cacheConfig = new CacheConfig(conf); assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA)); @@ -229,14 +236,20 @@ public void testDisableCacheDataBlock() throws IOException { assertFalse(cacheConfig.shouldCacheDataCompressed()); assertFalse(cacheConfig.shouldCacheDataOnWrite()); assertFalse(cacheConfig.shouldCacheDataOnRead()); + assertFalse(cacheConfig.shouldCacheCompactedBlocksOnWrite()); assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.INDEX)); assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.META)); 
assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.BLOOM)); assertTrue(cacheConfig.shouldCacheBloomsOnWrite()); assertTrue(cacheConfig.shouldCacheIndexesOnWrite()); - conf.setBoolean(CacheConfig.CACHE_DATA_ON_READ_KEY, true); - conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false); + // Finally tests block cache disabled in the column family but all cache on read/write + // properties enabled in the config. + conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true); + conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, true); + conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, true); + conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, true); + conf.setBoolean(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, true); ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder .newBuilder(Bytes.toBytes("testDisableCacheDataBlock")).setBlockCacheEnabled(false).build(); @@ -250,8 +263,8 @@ public void testDisableCacheDataBlock() throws IOException { assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.INDEX)); assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.META)); assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.BLOOM)); - assertTrue(cacheConfig.shouldCacheBloomsOnWrite()); - assertTrue(cacheConfig.shouldCacheIndexesOnWrite()); + assertFalse(cacheConfig.shouldCacheBloomsOnWrite()); + assertFalse(cacheConfig.shouldCacheIndexesOnWrite()); } @Test From 67420e33f972478102a847c5673f233da1f071db Mon Sep 17 00:00:00 2001 From: Wellington Ramos Chevreuil Date: Thu, 25 Sep 2025 11:32:23 +0100 Subject: [PATCH 30/92] HBASE-29627 Handle any block cache fetching errors when reading a block in HFileReaderImpl (#7341) Signed-off-by: Peter Somogyi --- .../hbase/io/hfile/HFileReaderImpl.java | 26 +++++++++++++++++++ .../hbase/io/hfile/TestHFileReaderImpl.java | 22 ++++++++++++++++ 2 files changed, 48 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index 899a681131f4..8f1bb3be7a5b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -1172,6 +1172,32 @@ public HFileBlock getCachedBlock(BlockCacheKey cacheKey, boolean cacheBlock, boo } return cachedBlock; } + } catch (Exception e) { + if (cachedBlock != null) { + returnAndEvictBlock(cache, cacheKey, cachedBlock); + } + LOG.warn("Failed retrieving block from cache with key {}. " + + "\n Evicting this block from cache and will read it from file system. " + + "\n Exception details: ", cacheKey, e); + if (LOG.isDebugEnabled()) { + LOG.debug("Further tracing details for failed block cache retrieval:" + + "\n Complete File path - {}," + "\n Expected Block Type - {}, Actual Block Type - {}," + + "\n Cache compressed - {}" + "\n Header size (after deserialized from cache) - {}" + + "\n Size with header - {}" + "\n Uncompressed size without header - {} " + + "\n Total byte buffer size - {}" + "\n Encoding code - {}", this.path, + expectedBlockType, (cachedBlock != null ? cachedBlock.getBlockType() : "N/A"), + (expectedBlockType != null + ? cacheConf.shouldCacheCompressed(expectedBlockType.getCategory()) + : "N/A"), + (cachedBlock != null ? cachedBlock.headerSize() : "N/A"), + (cachedBlock != null ? cachedBlock.getOnDiskSizeWithHeader() : "N/A"), + (cachedBlock != null ? 
cachedBlock.getUncompressedSizeWithoutHeader() : "N/A"), + (cachedBlock != null ? cachedBlock.getBufferReadOnly().limit() : "N/A"), + (cachedBlock != null + ? cachedBlock.getBufferReadOnly().getShort(cachedBlock.headerSize()) + : "N/A")); + } + return null; } finally { // Count bytes read as cached block is being returned if (isScanMetricsEnabled && cachedBlock != null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java index c87897de8187..6c84312cf599 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java @@ -18,7 +18,12 @@ package org.apache.hadoop.hbase.io.hfile; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import java.io.IOException; import java.util.concurrent.atomic.AtomicInteger; @@ -116,6 +121,23 @@ public void testRecordBlockSize() throws IOException { } } + @Test + public void testReadWorksWhenCacheCorrupt() throws Exception { + BlockCache mockedCache = mock(BlockCache.class); + when(mockedCache.getBlock(any(), anyBoolean(), anyBoolean(), anyBoolean(), any())) + .thenThrow(new RuntimeException("Injected error")); + Path p = makeNewFile(); + FileSystem fs = TEST_UTIL.getTestFileSystem(); + Configuration conf = TEST_UTIL.getConfiguration(); + HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf, mockedCache), true, conf); + long offset = 0; + while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) { + HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null, null, false); + assertNotNull(block); + offset += block.getOnDiskSizeWithHeader(); + } + } + @Test public void testSeekBefore() throws Exception { Path p = makeNewFile(); From e0cec314c839158eac7dbd080582ebce83718a0b Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Mon, 29 Sep 2025 10:57:46 +0800 Subject: [PATCH 31/92] HBASE-29614 Remove static final field modification in tests around Unsafe (#7337) Signed-off-by: Peng Lu --- .../hbase/util/ByteBufferUtilsTestBase.java | 600 +++++++++ .../hadoop/hbase/util/BytesTestBase.java | 578 ++++++++ .../hbase/util/TestByteBufferUtils.java | 652 +-------- .../util/TestByteBufferUtilsWoUnsafe.java | 43 + .../apache/hadoop/hbase/util/TestBytes.java | 613 +-------- .../hadoop/hbase/util/TestBytesWoUnsafe.java | 41 + hbase-server/pom.xml | 5 + .../hadoop/hbase/TestHBaseTestingUtil.java | 37 - .../hadoop/hbase/TestPortAllocator.java | 67 + .../hbase/client/FromClientSide3TestBase.java | 1188 +++++++++++++++++ .../hbase/client/TestFromClientSide3.java | 1183 +--------------- .../client/TestScannersFromClientSide.java | 2 +- .../hbase/ipc/TestProtobufRpcServiceImpl.java | 66 +- .../security/access/TestRpcAccessChecks.java | 5 +- .../util/TestFromClientSide3WoUnsafe.java | 42 +- 15 files changed, 2629 insertions(+), 2493 deletions(-) create mode 100644 hbase-common/src/test/java/org/apache/hadoop/hbase/util/ByteBufferUtilsTestBase.java create mode 100644 hbase-common/src/test/java/org/apache/hadoop/hbase/util/BytesTestBase.java create mode 100644 
hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtilsWoUnsafe.java create mode 100644 hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytesWoUnsafe.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/TestPortAllocator.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSide3TestBase.java diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/ByteBufferUtilsTestBase.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/ByteBufferUtilsTestBase.java new file mode 100644 index 000000000000..194915475775 --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/ByteBufferUtilsTestBase.java @@ -0,0 +1,600 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.nio.ByteBuff; +import org.apache.hadoop.io.WritableUtils; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class ByteBufferUtilsTestBase { + + private static final Logger LOG = LoggerFactory.getLogger(ByteBufferUtilsTestBase.class); + + private static int MAX_VLONG_LENGTH = 9; + private static Collection testNumbers; + + private byte[] array; + + @BeforeAll + public static void setUpBeforeAll() { + SortedSet a = new TreeSet<>(); + for (int i = 0; i <= 63; ++i) { + long v = -1L << i; + assertTrue(v < 0); + addNumber(a, v); + v = (1L << i) - 1; + assertTrue(v >= 0); + addNumber(a, v); + } + + testNumbers = Collections.unmodifiableSet(a); + LOG.info("Testing variable-length long serialization using: {} (count: {})", testNumbers, + testNumbers.size()); + assertEquals(1753, testNumbers.size()); + assertEquals(Long.MIN_VALUE, 
a.first().longValue()); + assertEquals(Long.MAX_VALUE, a.last().longValue()); + } + + /** + * Create an array with sample data. + */ + @BeforeEach + public void setUp() { + array = new byte[8]; + for (int i = 0; i < array.length; ++i) { + array[i] = (byte) ('a' + i); + } + } + + private static void addNumber(Set a, long l) { + if (l != Long.MIN_VALUE) { + a.add(l - 1); + } + a.add(l); + if (l != Long.MAX_VALUE) { + a.add(l + 1); + } + for (long divisor = 3; divisor <= 10; ++divisor) { + for (long delta = -1; delta <= 1; ++delta) { + a.add(l / divisor + delta); + } + } + } + + @Test + public void testReadWriteVLong() { + for (long l : testNumbers) { + ByteBuffer b = ByteBuffer.allocate(MAX_VLONG_LENGTH); + ByteBufferUtils.writeVLong(b, l); + b.flip(); + assertEquals(l, ByteBufferUtils.readVLong(b)); + b.flip(); + assertEquals(l, ByteBufferUtils.readVLong(ByteBuff.wrap(b))); + } + } + + @Test + public void testReadWriteConsecutiveVLong() { + for (long l : testNumbers) { + ByteBuffer b = ByteBuffer.allocate(2 * MAX_VLONG_LENGTH); + ByteBufferUtils.writeVLong(b, l); + ByteBufferUtils.writeVLong(b, l - 4); + b.flip(); + assertEquals(l, ByteBufferUtils.readVLong(b)); + assertEquals(l - 4, ByteBufferUtils.readVLong(b)); + b.flip(); + assertEquals(l, ByteBufferUtils.readVLong(ByteBuff.wrap(b))); + assertEquals(l - 4, ByteBufferUtils.readVLong(ByteBuff.wrap(b))); + } + } + + @Test + public void testConsistencyWithHadoopVLong() throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DataOutputStream dos = new DataOutputStream(baos); + for (long l : testNumbers) { + baos.reset(); + ByteBuffer b = ByteBuffer.allocate(MAX_VLONG_LENGTH); + ByteBufferUtils.writeVLong(b, l); + String bufStr = Bytes.toStringBinary(b.array(), b.arrayOffset(), b.position()); + WritableUtils.writeVLong(dos, l); + String baosStr = Bytes.toStringBinary(baos.toByteArray()); + assertEquals(baosStr, bufStr); + } + } + + /** + * Test copying to stream from buffer. + */ + @Test + public void testMoveBufferToStream() throws IOException { + final int arrayOffset = 7; + final int initialPosition = 10; + final int endPadding = 5; + byte[] arrayWrapper = new byte[arrayOffset + initialPosition + array.length + endPadding]; + System.arraycopy(array, 0, arrayWrapper, arrayOffset + initialPosition, array.length); + ByteBuffer buffer = + ByteBuffer.wrap(arrayWrapper, arrayOffset, initialPosition + array.length).slice(); + assertEquals(initialPosition + array.length, buffer.limit()); + assertEquals(0, buffer.position()); + buffer.position(initialPosition); + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + ByteBufferUtils.moveBufferToStream(bos, buffer, array.length); + assertArrayEquals(array, bos.toByteArray()); + assertEquals(initialPosition + array.length, buffer.position()); + } + + /** + * Test copying to stream from buffer with offset. + * @throws IOException On test failure. + */ + @Test + public void testCopyToStreamWithOffset() throws IOException { + ByteBuffer buffer = ByteBuffer.wrap(array); + + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + + ByteBufferUtils.copyBufferToStream(bos, buffer, array.length / 2, array.length / 2); + + byte[] returnedArray = bos.toByteArray(); + for (int i = 0; i < array.length / 2; ++i) { + int pos = array.length / 2 + i; + assertEquals(returnedArray[i], array[pos]); + } + } + + /** + * Test copying data from stream. + * @throws IOException On test failure. 
+ */ + @Test + public void testCopyFromStream() throws IOException { + ByteBuffer buffer = ByteBuffer.allocate(array.length); + ByteArrayInputStream bis = new ByteArrayInputStream(array); + DataInputStream dis = new DataInputStream(bis); + + ByteBufferUtils.copyFromStreamToBuffer(buffer, dis, array.length / 2); + ByteBufferUtils.copyFromStreamToBuffer(buffer, dis, array.length - array.length / 2); + for (int i = 0; i < array.length; ++i) { + assertEquals(array[i], buffer.get(i)); + } + } + + /** + * Test copying from buffer. + */ + @Test + public void testCopyFromBuffer() { + ByteBuffer srcBuffer = ByteBuffer.allocate(array.length); + ByteBuffer dstBuffer = ByteBuffer.allocate(array.length); + srcBuffer.put(array); + + ByteBufferUtils.copyFromBufferToBuffer(srcBuffer, dstBuffer, array.length / 2, + array.length / 4); + for (int i = 0; i < array.length / 4; ++i) { + assertEquals(srcBuffer.get(i + array.length / 2), dstBuffer.get(i)); + } + } + + /** + * Test 7-bit encoding of integers. + * @throws IOException On test failure. + */ + @Test + public void testCompressedInt() throws IOException { + testCompressedInt(0); + testCompressedInt(Integer.MAX_VALUE); + testCompressedInt(Integer.MIN_VALUE); + + for (int i = 0; i < 3; i++) { + testCompressedInt((128 << i) - 1); + } + + for (int i = 0; i < 3; i++) { + testCompressedInt((128 << i)); + } + } + + /** + * Test how much bytes we need to store integer. + */ + @Test + public void testIntFitsIn() { + assertEquals(1, ByteBufferUtils.intFitsIn(0)); + assertEquals(1, ByteBufferUtils.intFitsIn(1)); + assertEquals(2, ByteBufferUtils.intFitsIn(1 << 8)); + assertEquals(3, ByteBufferUtils.intFitsIn(1 << 16)); + assertEquals(4, ByteBufferUtils.intFitsIn(-1)); + assertEquals(4, ByteBufferUtils.intFitsIn(Integer.MAX_VALUE)); + assertEquals(4, ByteBufferUtils.intFitsIn(Integer.MIN_VALUE)); + } + + /** + * Test how much bytes we need to store long. + */ + @Test + public void testLongFitsIn() { + assertEquals(1, ByteBufferUtils.longFitsIn(0)); + assertEquals(1, ByteBufferUtils.longFitsIn(1)); + assertEquals(3, ByteBufferUtils.longFitsIn(1L << 16)); + assertEquals(5, ByteBufferUtils.longFitsIn(1L << 32)); + assertEquals(8, ByteBufferUtils.longFitsIn(-1)); + assertEquals(8, ByteBufferUtils.longFitsIn(Long.MIN_VALUE)); + assertEquals(8, ByteBufferUtils.longFitsIn(Long.MAX_VALUE)); + } + + /** + * Test if we are comparing equal bytes. 
+ */ + @Test + public void testArePartEqual() { + byte[] array = new byte[] { 1, 2, 3, 4, 5, 1, 2, 3, 4 }; + ByteBuffer buffer = ByteBuffer.wrap(array); + assertTrue(ByteBufferUtils.arePartsEqual(buffer, 0, 4, 5, 4)); + assertTrue(ByteBufferUtils.arePartsEqual(buffer, 1, 2, 6, 2)); + assertFalse(ByteBufferUtils.arePartsEqual(buffer, 1, 2, 6, 3)); + assertFalse(ByteBufferUtils.arePartsEqual(buffer, 1, 3, 6, 2)); + assertFalse(ByteBufferUtils.arePartsEqual(buffer, 0, 3, 6, 3)); + } + + /** + * Test serializing int to bytes + */ + @Test + public void testPutInt() { + testPutInt(0); + testPutInt(Integer.MAX_VALUE); + + for (int i = 0; i < 3; i++) { + testPutInt((128 << i) - 1); + } + + for (int i = 0; i < 3; i++) { + testPutInt((128 << i)); + } + } + + @Test + public void testToBytes() { + ByteBuffer buffer = ByteBuffer.allocate(5); + buffer.put(new byte[] { 0, 1, 2, 3, 4 }); + assertEquals(5, buffer.position()); + assertEquals(5, buffer.limit()); + byte[] copy = ByteBufferUtils.toBytes(buffer, 2); + assertArrayEquals(new byte[] { 2, 3, 4 }, copy); + assertEquals(5, buffer.position()); + assertEquals(5, buffer.limit()); + } + + @Test + public void testToPrimitiveTypes() { + ByteBuffer buffer = ByteBuffer.allocate(15); + long l = 988L; + int i = 135; + short s = 7; + buffer.putLong(l); + buffer.putShort(s); + buffer.putInt(i); + assertEquals(l, ByteBufferUtils.toLong(buffer, 0)); + assertEquals(s, ByteBufferUtils.toShort(buffer, 8)); + assertEquals(i, ByteBufferUtils.toInt(buffer, 10)); + } + + @Test + public void testCopyFromArrayToBuffer() { + byte[] b = new byte[15]; + b[0] = -1; + long l = 988L; + int i = 135; + short s = 7; + Bytes.putLong(b, 1, l); + Bytes.putShort(b, 9, s); + Bytes.putInt(b, 11, i); + ByteBuffer buffer = ByteBuffer.allocate(14); + ByteBufferUtils.copyFromArrayToBuffer(buffer, b, 1, 14); + buffer.rewind(); + assertEquals(l, buffer.getLong()); + assertEquals(s, buffer.getShort()); + assertEquals(i, buffer.getInt()); + } + + private void testCopyFromSrcToDestWithThreads(Object input, Object output, List lengthes, + List offsets) throws InterruptedException { + assertTrue((input instanceof ByteBuffer) || (input instanceof byte[])); + assertTrue((output instanceof ByteBuffer) || (output instanceof byte[])); + assertEquals(lengthes.size(), offsets.size()); + + final int threads = lengthes.size(); + CountDownLatch latch = new CountDownLatch(1); + List exes = new ArrayList<>(threads); + int oldInputPos = (input instanceof ByteBuffer) ? ((ByteBuffer) input).position() : 0; + int oldOutputPos = (output instanceof ByteBuffer) ? 
((ByteBuffer) output).position() : 0; + for (int i = 0; i != threads; ++i) { + int offset = offsets.get(i); + int length = lengthes.get(i); + exes.add(() -> { + try { + latch.await(); + if (input instanceof ByteBuffer && output instanceof byte[]) { + ByteBufferUtils.copyFromBufferToArray((byte[]) output, (ByteBuffer) input, offset, + offset, length); + } + if (input instanceof byte[] && output instanceof ByteBuffer) { + ByteBufferUtils.copyFromArrayToBuffer((ByteBuffer) output, offset, (byte[]) input, + offset, length); + } + if (input instanceof ByteBuffer && output instanceof ByteBuffer) { + ByteBufferUtils.copyFromBufferToBuffer((ByteBuffer) input, (ByteBuffer) output, offset, + offset, length); + } + } catch (InterruptedException ex) { + throw new RuntimeException(ex); + } + }); + } + ExecutorService service = Executors.newFixedThreadPool(threads); + exes.forEach(service::execute); + latch.countDown(); + service.shutdown(); + assertTrue(service.awaitTermination(5, TimeUnit.SECONDS)); + if (input instanceof ByteBuffer) { + assertEquals(oldInputPos, ((ByteBuffer) input).position()); + } + if (output instanceof ByteBuffer) { + assertEquals(oldOutputPos, ((ByteBuffer) output).position()); + } + String inputString = (input instanceof ByteBuffer) + ? Bytes.toString(Bytes.toBytes((ByteBuffer) input)) + : Bytes.toString((byte[]) input); + String outputString = (output instanceof ByteBuffer) + ? Bytes.toString(Bytes.toBytes((ByteBuffer) output)) + : Bytes.toString((byte[]) output); + assertEquals(inputString, outputString); + } + + @Test + public void testCopyFromSrcToDestWithThreads() throws InterruptedException { + List words = + Arrays.asList(Bytes.toBytes("with"), Bytes.toBytes("great"), Bytes.toBytes("power"), + Bytes.toBytes("comes"), Bytes.toBytes("great"), Bytes.toBytes("responsibility")); + List lengthes = words.stream().map(v -> v.length).collect(Collectors.toList()); + List offsets = new ArrayList<>(words.size()); + for (int i = 0; i != words.size(); ++i) { + offsets.add(words.subList(0, i).stream().mapToInt(v -> v.length).sum()); + } + + int totalSize = words.stream().mapToInt(v -> v.length).sum(); + byte[] fullContent = new byte[totalSize]; + int offset = 0; + for (byte[] w : words) { + offset = Bytes.putBytes(fullContent, offset, w, 0, w.length); + } + + // test copyFromBufferToArray + for (ByteBuffer input : Arrays.asList(ByteBuffer.allocateDirect(totalSize), + ByteBuffer.allocate(totalSize))) { + words.forEach(input::put); + byte[] output = new byte[totalSize]; + testCopyFromSrcToDestWithThreads(input, output, lengthes, offsets); + } + + // test copyFromArrayToBuffer + for (ByteBuffer output : Arrays.asList(ByteBuffer.allocateDirect(totalSize), + ByteBuffer.allocate(totalSize))) { + byte[] input = fullContent; + testCopyFromSrcToDestWithThreads(input, output, lengthes, offsets); + } + + // test copyFromBufferToBuffer + for (ByteBuffer input : Arrays.asList(ByteBuffer.allocateDirect(totalSize), + ByteBuffer.allocate(totalSize))) { + words.forEach(input::put); + for (ByteBuffer output : Arrays.asList(ByteBuffer.allocateDirect(totalSize), + ByteBuffer.allocate(totalSize))) { + testCopyFromSrcToDestWithThreads(input, output, lengthes, offsets); + } + } + } + + @Test + public void testCopyFromBufferToArray() { + ByteBuffer buffer = ByteBuffer.allocate(15); + buffer.put((byte) -1); + long l = 988L; + int i = 135; + short s = 7; + buffer.putShort(s); + buffer.putInt(i); + buffer.putLong(l); + byte[] b = new byte[15]; + ByteBufferUtils.copyFromBufferToArray(b, buffer, 1, 1, 14); + 
assertEquals(s, Bytes.toShort(b, 1)); + assertEquals(i, Bytes.toInt(b, 3)); + assertEquals(l, Bytes.toLong(b, 7)); + } + + @Test + public void testRelativeCopyFromBuffertoBuffer() { + ByteBuffer bb1 = ByteBuffer.allocate(135); + ByteBuffer bb2 = ByteBuffer.allocate(135); + fillBB(bb1, (byte) 5); + ByteBufferUtils.copyFromBufferToBuffer(bb1, bb2); + assertTrue(bb1.position() == bb2.position()); + assertTrue(bb1.limit() == bb2.limit()); + bb1 = ByteBuffer.allocateDirect(135); + bb2 = ByteBuffer.allocateDirect(135); + fillBB(bb1, (byte) 5); + ByteBufferUtils.copyFromBufferToBuffer(bb1, bb2); + assertTrue(bb1.position() == bb2.position()); + assertTrue(bb1.limit() == bb2.limit()); + } + + @Test + public void testCompareTo() { + ByteBuffer bb1 = ByteBuffer.allocate(135); + ByteBuffer bb2 = ByteBuffer.allocate(135); + byte[] b = new byte[71]; + fillBB(bb1, (byte) 5); + fillBB(bb2, (byte) 5); + fillArray(b, (byte) 5); + assertEquals(0, ByteBufferUtils.compareTo(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining())); + assertTrue(ByteBufferUtils.compareTo(bb1, 0, bb1.remaining(), b, 0, b.length) > 0); + bb2.put(134, (byte) 6); + assertTrue(ByteBufferUtils.compareTo(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining()) < 0); + bb2.put(6, (byte) 4); + assertTrue(ByteBufferUtils.compareTo(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining()) > 0); + // Assert reverse comparing BB and bytearray works. + ByteBuffer bb3 = ByteBuffer.allocate(135); + fillBB(bb3, (byte) 0); + byte[] b3 = new byte[135]; + fillArray(b3, (byte) 1); + int result = ByteBufferUtils.compareTo(b3, 0, b3.length, bb3, 0, bb3.remaining()); + assertTrue(result > 0); + result = ByteBufferUtils.compareTo(bb3, 0, bb3.remaining(), b3, 0, b3.length); + assertTrue(result < 0); + byte[] b4 = Bytes.toBytes("123"); + ByteBuffer bb4 = ByteBuffer.allocate(10 + b4.length); + for (int i = 10; i < bb4.capacity(); ++i) { + bb4.put(i, b4[i - 10]); + } + result = ByteBufferUtils.compareTo(b4, 0, b4.length, bb4, 10, b4.length); + assertEquals(0, result); + } + + @Test + public void testEquals() { + byte[] a = Bytes.toBytes("http://A"); + ByteBuffer bb = ByteBuffer.wrap(a); + + assertTrue(ByteBufferUtils.equals(HConstants.EMPTY_BYTE_BUFFER, 0, 0, + HConstants.EMPTY_BYTE_BUFFER, 0, 0)); + + assertFalse(ByteBufferUtils.equals(HConstants.EMPTY_BYTE_BUFFER, 0, 0, bb, 0, a.length)); + + assertFalse(ByteBufferUtils.equals(bb, 0, 0, HConstants.EMPTY_BYTE_BUFFER, 0, a.length)); + + assertTrue(ByteBufferUtils.equals(bb, 0, a.length, bb, 0, a.length)); + + assertTrue(ByteBufferUtils.equals(HConstants.EMPTY_BYTE_BUFFER, 0, 0, + HConstants.EMPTY_BYTE_ARRAY, 0, 0)); + + assertFalse(ByteBufferUtils.equals(HConstants.EMPTY_BYTE_BUFFER, 0, 0, a, 0, a.length)); + + assertFalse(ByteBufferUtils.equals(bb, 0, a.length, HConstants.EMPTY_BYTE_ARRAY, 0, 0)); + + assertTrue(ByteBufferUtils.equals(bb, 0, a.length, a, 0, a.length)); + } + + @Test + public void testFindCommonPrefix() { + ByteBuffer bb1 = ByteBuffer.allocate(135); + ByteBuffer bb2 = ByteBuffer.allocate(135); + ByteBuffer bb3 = ByteBuffer.allocateDirect(135); + byte[] b = new byte[71]; + + fillBB(bb1, (byte) 5); + fillBB(bb2, (byte) 5); + fillBB(bb3, (byte) 5); + fillArray(b, (byte) 5); + + assertEquals(135, + ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining())); + assertEquals(71, ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), b, 0, b.length)); + assertEquals(135, + ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), bb3, 0, bb3.remaining())); + assertEquals(71, 
ByteBufferUtils.findCommonPrefix(bb3, 0, bb3.remaining(), b, 0, b.length)); + + b[13] = 9; + assertEquals(13, ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), b, 0, b.length)); + + bb2.put(134, (byte) 6); + assertEquals(134, + ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining())); + + bb2.put(6, (byte) 4); + assertEquals(6, + ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining())); + } + + // Below are utility methods invoked from test methods + private static void testCompressedInt(int value) throws IOException { + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + ByteBufferUtils.putCompressedInt(bos, value); + ByteArrayInputStream bis = new ByteArrayInputStream(bos.toByteArray()); + int parsedValue = ByteBufferUtils.readCompressedInt(bis); + assertEquals(value, parsedValue); + } + + private static void testPutInt(int value) { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try { + ByteBufferUtils.putInt(baos, value); + } catch (IOException e) { + throw new RuntimeException("Bug in putInt()", e); + } + + ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray()); + DataInputStream dis = new DataInputStream(bais); + try { + assertEquals(dis.readInt(), value); + } catch (IOException e) { + throw new RuntimeException("Bug in test!", e); + } + } + + private static void fillBB(ByteBuffer bb, byte b) { + for (int i = bb.position(); i < bb.limit(); i++) { + bb.put(i, b); + } + } + + private static void fillArray(byte[] bb, byte b) { + for (int i = 0; i < bb.length; i++) { + bb[i] = b; + } + } +} diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/BytesTestBase.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/BytesTestBase.java new file mode 100644 index 000000000000..96df8bc39396 --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/BytesTestBase.java @@ -0,0 +1,578 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.util; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNotSame; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.math.BigDecimal; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Random; +import java.util.concurrent.ThreadLocalRandom; +import org.apache.hadoop.io.WritableUtils; +import org.junit.jupiter.api.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class BytesTestBase { + + private static final Logger LOG = LoggerFactory.getLogger(BytesTestBase.class); + + @Test + public void testShort() throws Exception { + for (short n : Arrays.asList(Short.MIN_VALUE, (short) -100, (short) -1, (short) 0, (short) 1, + (short) 300, Short.MAX_VALUE)) { + byte[] bytes = Bytes.toBytes(n); + assertEquals(Bytes.toShort(bytes, 0, bytes.length), n); + } + } + + @Test + public void testNullHashCode() { + byte[] b = null; + Exception ee = null; + try { + Bytes.hashCode(b); + } catch (Exception e) { + ee = e; + } + assertNotNull(ee); + } + + @Test + public void testAdd() { + byte[] a = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + byte[] b = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; + byte[] c = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 }; + byte[] result1 = Bytes.add(a, b, c); + byte[] result2 = Bytes.add(new byte[][] { a, b, c }); + assertEquals(0, Bytes.compareTo(result1, result2)); + } + + @Test + public void testSplit() { + byte[] lowest = Bytes.toBytes("AAA"); + byte[] middle = Bytes.toBytes("CCC"); + byte[] highest = Bytes.toBytes("EEE"); + byte[][] parts = Bytes.split(lowest, highest, 1); + for (byte[] bytes : parts) { + LOG.info(Bytes.toString(bytes)); + } + assertEquals(3, parts.length); + assertTrue(Bytes.equals(parts[1], middle)); + // Now divide into three parts. Change highest so split is even. + highest = Bytes.toBytes("DDD"); + parts = Bytes.split(lowest, highest, 2); + for (byte[] part : parts) { + LOG.info(Bytes.toString(part)); + } + assertEquals(4, parts.length); + // Assert that 3rd part is 'CCC'. + assertTrue(Bytes.equals(parts[2], middle)); + } + + @Test + public void testSplit2() { + // More split tests. 
+ byte[] lowest = Bytes.toBytes("http://A"); + byte[] highest = Bytes.toBytes("http://z"); + byte[] middle = Bytes.toBytes("http://]"); + byte[][] parts = Bytes.split(lowest, highest, 1); + for (byte[] part : parts) { + LOG.info(Bytes.toString(part)); + } + assertEquals(3, parts.length); + assertTrue(Bytes.equals(parts[1], middle)); + } + + @Test + public void testSplit3() { + // Test invalid split cases + byte[] low = { 1, 1, 1 }; + byte[] high = { 1, 1, 3 }; + + // If swapped, should throw IAE + try { + Bytes.split(high, low, 1); + fail("Should not be able to split if low > high"); + } catch (IllegalArgumentException iae) { + // Correct + } + + // Single split should work + byte[][] parts = Bytes.split(low, high, 1); + for (int i = 0; i < parts.length; i++) { + LOG.info("" + i + " -> " + Bytes.toStringBinary(parts[i])); + } + assertEquals(3, parts.length, "Returned split should have 3 parts but has " + parts.length); + + // If split more than once, use additional byte to split + parts = Bytes.split(low, high, 2); + assertNotNull(parts, "Split with an additional byte"); + assertEquals(parts.length, low.length + 1); + + // Split 0 times should throw IAE + try { + Bytes.split(low, high, 0); + fail("Should not be able to split 0 times"); + } catch (IllegalArgumentException iae) { + // Correct + } + } + + @Test + public void testToInt() { + int[] ints = { -1, 123, Integer.MIN_VALUE, Integer.MAX_VALUE }; + for (int anInt : ints) { + byte[] b = Bytes.toBytes(anInt); + assertEquals(anInt, Bytes.toInt(b)); + byte[] b2 = bytesWithOffset(b); + assertEquals(anInt, Bytes.toInt(b2, 1)); + assertEquals(anInt, Bytes.toInt(b2, 1, Bytes.SIZEOF_INT)); + } + } + + @Test + public void testToLong() { + long[] longs = { -1L, 123L, Long.MIN_VALUE, Long.MAX_VALUE }; + for (long aLong : longs) { + byte[] b = Bytes.toBytes(aLong); + assertEquals(aLong, Bytes.toLong(b)); + byte[] b2 = bytesWithOffset(b); + assertEquals(aLong, Bytes.toLong(b2, 1)); + assertEquals(aLong, Bytes.toLong(b2, 1, Bytes.SIZEOF_LONG)); + } + } + + @Test + public void testToFloat() { + float[] floats = { -1f, 123.123f, Float.MAX_VALUE }; + for (float aFloat : floats) { + byte[] b = Bytes.toBytes(aFloat); + assertEquals(aFloat, Bytes.toFloat(b), 0.0f); + byte[] b2 = bytesWithOffset(b); + assertEquals(aFloat, Bytes.toFloat(b2, 1), 0.0f); + } + } + + @Test + public void testToDouble() { + double[] doubles = { Double.MIN_VALUE, Double.MAX_VALUE }; + for (double aDouble : doubles) { + byte[] b = Bytes.toBytes(aDouble); + assertEquals(aDouble, Bytes.toDouble(b), 0.0); + byte[] b2 = bytesWithOffset(b); + assertEquals(aDouble, Bytes.toDouble(b2, 1), 0.0); + } + } + + @Test + public void testToBigDecimal() { + BigDecimal[] decimals = + { new BigDecimal("-1"), new BigDecimal("123.123"), new BigDecimal("123123123123") }; + for (BigDecimal decimal : decimals) { + byte[] b = Bytes.toBytes(decimal); + assertEquals(decimal, Bytes.toBigDecimal(b)); + byte[] b2 = bytesWithOffset(b); + assertEquals(decimal, Bytes.toBigDecimal(b2, 1, b.length)); + } + } + + private byte[] bytesWithOffset(byte[] src) { + // add one byte in front to test offset + byte[] result = new byte[src.length + 1]; + result[0] = (byte) 0xAA; + System.arraycopy(src, 0, result, 1, src.length); + return result; + } + + @Test + public void testToBytesForByteBuffer() { + byte[] array = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; + ByteBuffer target = ByteBuffer.wrap(array); + target.position(2); + target.limit(7); + + byte[] actual = Bytes.toBytes(target); + byte[] expected = { 0, 1, 2, 3, 4, 5, 6 }; + 
assertArrayEquals(expected, actual); + assertEquals(2, target.position()); + assertEquals(7, target.limit()); + + ByteBuffer target2 = target.slice(); + assertEquals(0, target2.position()); + assertEquals(5, target2.limit()); + + byte[] actual2 = Bytes.toBytes(target2); + byte[] expected2 = { 2, 3, 4, 5, 6 }; + assertArrayEquals(expected2, actual2); + assertEquals(0, target2.position()); + assertEquals(5, target2.limit()); + } + + @Test + public void testGetBytesForByteBuffer() { + byte[] array = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; + ByteBuffer target = ByteBuffer.wrap(array); + target.position(2); + target.limit(7); + + byte[] actual = Bytes.getBytes(target); + byte[] expected = { 2, 3, 4, 5, 6 }; + assertArrayEquals(expected, actual); + assertEquals(2, target.position()); + assertEquals(7, target.limit()); + } + + @Test + public void testReadAsVLong() throws Exception { + long[] longs = { -1L, 123L, Long.MIN_VALUE, Long.MAX_VALUE }; + for (long aLong : longs) { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DataOutputStream output = new DataOutputStream(baos); + WritableUtils.writeVLong(output, aLong); + byte[] long_bytes_no_offset = baos.toByteArray(); + assertEquals(aLong, Bytes.readAsVLong(long_bytes_no_offset, 0)); + byte[] long_bytes_with_offset = bytesWithOffset(long_bytes_no_offset); + assertEquals(aLong, Bytes.readAsVLong(long_bytes_with_offset, 1)); + } + } + + @Test + public void testToStringBinaryForBytes() { + byte[] array = { '0', '9', 'a', 'z', 'A', 'Z', '@', 1 }; + String actual = Bytes.toStringBinary(array); + String expected = "09azAZ@\\x01"; + assertEquals(expected, actual); + + String actual2 = Bytes.toStringBinary(array, 2, 3); + String expected2 = "azA"; + assertEquals(expected2, actual2); + } + + @Test + public void testToStringBinaryForArrayBasedByteBuffer() { + byte[] array = { '0', '9', 'a', 'z', 'A', 'Z', '@', 1 }; + ByteBuffer target = ByteBuffer.wrap(array); + String actual = Bytes.toStringBinary(target); + String expected = "09azAZ@\\x01"; + assertEquals(expected, actual); + } + + @Test + public void testToStringBinaryForReadOnlyByteBuffer() { + byte[] array = { '0', '9', 'a', 'z', 'A', 'Z', '@', 1 }; + ByteBuffer target = ByteBuffer.wrap(array).asReadOnlyBuffer(); + String actual = Bytes.toStringBinary(target); + String expected = "09azAZ@\\x01"; + assertEquals(expected, actual); + } + + @Test + public void testBinarySearch() { + byte[][] arr = { { 1 }, { 3 }, { 5 }, { 7 }, { 9 }, { 11 }, { 13 }, { 15 }, }; + byte[] key1 = { 3, 1 }; + byte[] key2 = { 4, 9 }; + byte[] key2_2 = { 4 }; + byte[] key3 = { 5, 11 }; + byte[] key4 = { 0 }; + byte[] key5 = { 2 }; + + assertEquals(1, Bytes.binarySearch(arr, key1, 0, 1)); + assertEquals(0, Bytes.binarySearch(arr, key1, 1, 1)); + assertEquals(-(2 + 1), Arrays.binarySearch(arr, key2_2, Bytes.BYTES_COMPARATOR)); + assertEquals(-(2 + 1), Bytes.binarySearch(arr, key2, 0, 1)); + assertEquals(4, Bytes.binarySearch(arr, key2, 1, 1)); + assertEquals(2, Bytes.binarySearch(arr, key3, 0, 1)); + assertEquals(5, Bytes.binarySearch(arr, key3, 1, 1)); + assertEquals(-1, Bytes.binarySearch(arr, key4, 0, 1)); + assertEquals(-2, Bytes.binarySearch(arr, key5, 0, 1)); + + // Search for values to the left and to the right of each item in the array. 
+ for (int i = 0; i < arr.length; ++i) { + assertEquals(-(i + 1), Bytes.binarySearch(arr, new byte[] { (byte) (arr[i][0] - 1) }, 0, 1)); + assertEquals(-(i + 2), Bytes.binarySearch(arr, new byte[] { (byte) (arr[i][0] + 1) }, 0, 1)); + } + } + + @Test + public void testToStringBytesBinaryReversible() { + byte[] randomBytes = new byte[1000]; + for (int i = 0; i < 1000; i++) { + Bytes.random(randomBytes); + verifyReversibleForBytes(randomBytes); + } + // some specific cases + verifyReversibleForBytes(new byte[] {}); + verifyReversibleForBytes(new byte[] { '\\', 'x', 'A', 'D' }); + verifyReversibleForBytes(new byte[] { '\\', 'x', 'A', 'D', '\\' }); + } + + private void verifyReversibleForBytes(byte[] originalBytes) { + String convertedString = Bytes.toStringBinary(originalBytes); + byte[] convertedBytes = Bytes.toBytesBinary(convertedString); + if (Bytes.compareTo(originalBytes, convertedBytes) != 0) { + fail("Not reversible for\nbyte[]: " + Arrays.toString(originalBytes) + ",\nStringBinary: " + + convertedString); + } + } + + @Test + public void testStartsWith() { + assertTrue(Bytes.startsWith(Bytes.toBytes("hello"), Bytes.toBytes("h"))); + assertTrue(Bytes.startsWith(Bytes.toBytes("hello"), Bytes.toBytes(""))); + assertTrue(Bytes.startsWith(Bytes.toBytes("hello"), Bytes.toBytes("hello"))); + assertFalse(Bytes.startsWith(Bytes.toBytes("hello"), Bytes.toBytes("helloworld"))); + assertFalse(Bytes.startsWith(Bytes.toBytes(""), Bytes.toBytes("hello"))); + } + + @Test + public void testIncrementBytes() { + assertTrue(checkTestIncrementBytes(10, 1)); + assertTrue(checkTestIncrementBytes(12, 123435445)); + assertTrue(checkTestIncrementBytes(124634654, 1)); + assertTrue(checkTestIncrementBytes(10005460, 5005645)); + assertTrue(checkTestIncrementBytes(1, -1)); + assertTrue(checkTestIncrementBytes(10, -1)); + assertTrue(checkTestIncrementBytes(10, -5)); + assertTrue(checkTestIncrementBytes(1005435000, -5)); + assertTrue(checkTestIncrementBytes(10, -43657655)); + assertTrue(checkTestIncrementBytes(-1, 1)); + assertTrue(checkTestIncrementBytes(-26, 5034520)); + assertTrue(checkTestIncrementBytes(-10657200, 5)); + assertTrue(checkTestIncrementBytes(-12343250, 45376475)); + assertTrue(checkTestIncrementBytes(-10, -5)); + assertTrue(checkTestIncrementBytes(-12343250, -5)); + assertTrue(checkTestIncrementBytes(-12, -34565445)); + assertTrue(checkTestIncrementBytes(-1546543452, -34565445)); + } + + private static boolean checkTestIncrementBytes(long val, long amount) { + byte[] value = Bytes.toBytes(val); + byte[] testValue = { -1, -1, -1, -1, -1, -1, -1, -1 }; + if (value[0] > 0) { + testValue = new byte[Bytes.SIZEOF_LONG]; + } + System.arraycopy(value, 0, testValue, testValue.length - value.length, value.length); + + long incrementResult = Bytes.toLong(Bytes.incrementBytes(value, amount)); + + return (Bytes.toLong(testValue) + amount) == incrementResult; + } + + @Test + public void testFixedSizeString() throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DataOutputStream dos = new DataOutputStream(baos); + Bytes.writeStringFixedSize(dos, "Hello", 5); + Bytes.writeStringFixedSize(dos, "World", 18); + Bytes.writeStringFixedSize(dos, "", 9); + + try { + // Use a long dash which is three bytes in UTF-8. If encoding happens + // using ISO-8859-1, this will fail. 
+ Bytes.writeStringFixedSize(dos, "Too\u2013Long", 9); + fail("Exception expected"); + } catch (IOException ex) { + assertEquals( + "Trying to write 10 bytes (Too\\xE2\\x80\\x93Long) into a field of " + "length 9", + ex.getMessage()); + } + + ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray()); + DataInputStream dis = new DataInputStream(bais); + assertEquals("Hello", Bytes.readStringFixedSize(dis, 5)); + assertEquals("World", Bytes.readStringFixedSize(dis, 18)); + assertEquals("", Bytes.readStringFixedSize(dis, 9)); + } + + @Test + public void testCopy() { + byte[] bytes = Bytes.toBytes("ABCDEFGHIJKLMNOPQRSTUVWXYZ"); + byte[] copy = Bytes.copy(bytes); + assertNotSame(bytes, copy); + assertTrue(Bytes.equals(bytes, copy)); + } + + @Test + public void testToBytesBinaryTrailingBackslashes() { + try { + Bytes.toBytesBinary("abc\\x00\\x01\\"); + } catch (StringIndexOutOfBoundsException ex) { + fail("Illegal string access: " + ex.getMessage()); + } + } + + @Test + public void testToStringBinary_toBytesBinary_Reversable() { + String bytes = Bytes.toStringBinary(Bytes.toBytes(2.17)); + assertEquals(2.17, Bytes.toDouble(Bytes.toBytesBinary(bytes)), 0); + } + + @Test + public void testUnsignedBinarySearch() { + byte[] bytes = new byte[] { 0, 5, 123, 127, -128, -100, -1 }; + assertEquals(1, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) 5)); + assertEquals(3, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) 127)); + assertEquals(4, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) -128)); + assertEquals(5, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) -100)); + assertEquals(6, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) -1)); + assertEquals(-1 - 1, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) 2)); + assertEquals(-6 - 1, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) -5)); + } + + @Test + public void testUnsignedIncrement() { + byte[] a = Bytes.toBytes(0); + int a2 = Bytes.toInt(Bytes.unsignedCopyAndIncrement(a), 0); + assertEquals(1, a2); + + byte[] b = Bytes.toBytes(-1); + byte[] actual = Bytes.unsignedCopyAndIncrement(b); + assertNotSame(b, actual); + byte[] expected = new byte[] { 1, 0, 0, 0, 0 }; + assertArrayEquals(expected, actual); + + byte[] c = Bytes.toBytes(255);// should wrap to the next significant byte + int c2 = Bytes.toInt(Bytes.unsignedCopyAndIncrement(c), 0); + assertEquals(256, c2); + } + + @Test + public void testIndexOf() { + byte[] array = Bytes.toBytes("hello"); + assertEquals(1, Bytes.indexOf(array, (byte) 'e')); + assertEquals(4, Bytes.indexOf(array, (byte) 'o')); + assertEquals(-1, Bytes.indexOf(array, (byte) 'a')); + assertEquals(0, Bytes.indexOf(array, Bytes.toBytes("hel"))); + assertEquals(2, Bytes.indexOf(array, Bytes.toBytes("ll"))); + assertEquals(-1, Bytes.indexOf(array, Bytes.toBytes("hll"))); + } + + @Test + public void testContains() { + byte[] array = Bytes.toBytes("hello world"); + assertTrue(Bytes.contains(array, (byte) 'e')); + assertTrue(Bytes.contains(array, (byte) 'd')); + assertFalse(Bytes.contains(array, (byte) 'a')); + assertTrue(Bytes.contains(array, Bytes.toBytes("world"))); + assertTrue(Bytes.contains(array, Bytes.toBytes("ello"))); + assertFalse(Bytes.contains(array, Bytes.toBytes("owo"))); + } + + @Test + public void testZero() { + byte[] array = Bytes.toBytes("hello"); + Bytes.zero(array); + for (byte b : array) { + assertEquals(0, b); + } + array = Bytes.toBytes("hello world"); + Bytes.zero(array, 2, 7); + assertFalse(array[0] == 0); + 
assertFalse(array[1] == 0); + for (int i = 2; i < 9; i++) { + assertEquals(0, array[i]); + } + for (int i = 9; i < array.length; i++) { + assertFalse(array[i] == 0); + } + } + + @Test + public void testPutBuffer() { + byte[] b = new byte[100]; + for (byte i = 0; i < 100; i++) { + Bytes.putByteBuffer(b, i, ByteBuffer.wrap(new byte[] { i })); + } + for (byte i = 0; i < 100; i++) { + assertEquals(i, b[i]); + } + } + + @Test + public void testToFromHex() { + List<String> testStrings = new ArrayList<>(8); + testStrings.addAll(Arrays.asList("", "00", "A0", "ff", "FFffFFFFFFFFFF", "12", + "0123456789abcdef", "283462839463924623984692834692346ABCDFEDDCA0")); + for (String testString : testStrings) { + byte[] byteData = Bytes.fromHex(testString); + assertEquals(testString.length() / 2, byteData.length); + String result = Bytes.toHex(byteData); + assertTrue(testString.equalsIgnoreCase(result)); + } + + List<byte[]> testByteData = new ArrayList<>(5); + testByteData.addAll(Arrays.asList(new byte[0], new byte[1], new byte[10], + new byte[] { 1, 2, 3, 4, 5 }, new byte[] { (byte) 0xFF })); + Random rand = ThreadLocalRandom.current(); + for (int i = 0; i < 20; i++) { + byte[] bytes = new byte[rand.nextInt(100)]; + Bytes.random(bytes); + testByteData.add(bytes); + } + + for (byte[] testData : testByteData) { + String hexString = Bytes.toHex(testData); + assertEquals(testData.length * 2, hexString.length()); + byte[] result = Bytes.fromHex(hexString); + assertArrayEquals(testData, result); + } + } + + @Test + public void testFindCommonPrefix() throws Exception { + // tests for common prefixes less than 8 bytes in length (i.e. using non-vectorized path) + byte[] hello = Bytes.toBytes("hello"); + byte[] helloWorld = Bytes.toBytes("helloworld"); + + assertEquals(5, + Bytes.findCommonPrefix(hello, helloWorld, hello.length, helloWorld.length, 0, 0)); + assertEquals(5, Bytes.findCommonPrefix(hello, hello, hello.length, hello.length, 0, 0)); + assertEquals(3, Bytes.findCommonPrefix(hello, hello, hello.length - 2, hello.length - 2, 2, 2)); + assertEquals(0, Bytes.findCommonPrefix(hello, hello, 0, 0, 0, 0)); + + // tests for common prefixes greater than 8 bytes in length which may use the vectorized path + byte[] hellohello = Bytes.toBytes("hellohello"); + byte[] hellohellohi = Bytes.toBytes("hellohellohi"); + + assertEquals(10, Bytes.findCommonPrefix(hellohello, hellohellohi, hellohello.length, + hellohellohi.length, 0, 0)); + assertEquals(10, Bytes.findCommonPrefix(hellohellohi, hellohello, hellohellohi.length, + hellohello.length, 0, 0)); + assertEquals(10, + Bytes.findCommonPrefix(hellohello, hellohello, hellohello.length, hellohello.length, 0, 0)); + + hellohello[2] = 0; + assertEquals(2, Bytes.findCommonPrefix(hellohello, hellohellohi, hellohello.length, + hellohellohi.length, 0, 0)); + } +} diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtils.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtils.java index e07e75bffdb2..b451d71ed879 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtils.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtils.java @@ -17,654 +17,12 @@ */ package org.apache.hadoop.hbase.util; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import 
java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.lang.reflect.Field; -import java.lang.reflect.Modifier; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Set; -import java.util.SortedSet; -import java.util.TreeSet; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HBaseCommonTestingUtil; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.nio.ByteBuff; -import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; -import org.apache.hadoop.hbase.unsafe.HBasePlatformDependent; -import org.apache.hadoop.io.WritableUtils; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category({ MiscTests.class, MediumTests.class }) -@RunWith(Parameterized.class) -public class TestByteBufferUtils { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestByteBufferUtils.class); - - private static final String UNSAFE_AVAIL_NAME = "UNSAFE_AVAIL"; - private static final String UNSAFE_UNALIGNED_NAME = "UNSAFE_UNALIGNED"; - private byte[] array; - - @AfterClass - public static void afterClass() throws Exception { - detectAvailabilityOfUnsafe(); - } - - @Parameterized.Parameters - public static Collection parameters() { - return HBaseCommonTestingUtil.BOOLEAN_PARAMETERIZED; - } - - private static void setUnsafe(String fieldName, boolean value) throws Exception { - Field field = ByteBufferUtils.class.getDeclaredField(fieldName); - field.setAccessible(true); - Field modifiersField = ReflectionUtils.getModifiersField(); - modifiersField.setAccessible(true); - int oldModifiers = field.getModifiers(); - modifiersField.setInt(field, oldModifiers & ~Modifier.FINAL); - try { - field.set(null, value); - } finally { - modifiersField.setInt(field, oldModifiers); - } - } - - static void disableUnsafe() throws Exception { - if (ByteBufferUtils.UNSAFE_AVAIL) { - setUnsafe(UNSAFE_AVAIL_NAME, false); - } - if (ByteBufferUtils.UNSAFE_UNALIGNED) { - setUnsafe(UNSAFE_UNALIGNED_NAME, false); - } - assertFalse(ByteBufferUtils.UNSAFE_AVAIL); - assertFalse(ByteBufferUtils.UNSAFE_UNALIGNED); - } - - static void detectAvailabilityOfUnsafe() throws Exception { - if (ByteBufferUtils.UNSAFE_AVAIL != HBasePlatformDependent.isUnsafeAvailable()) { - setUnsafe(UNSAFE_AVAIL_NAME, HBasePlatformDependent.isUnsafeAvailable()); - } - if (ByteBufferUtils.UNSAFE_UNALIGNED != HBasePlatformDependent.unaligned()) { - setUnsafe(UNSAFE_UNALIGNED_NAME, HBasePlatformDependent.unaligned()); - } - assertEquals(ByteBufferUtils.UNSAFE_AVAIL, HBasePlatformDependent.isUnsafeAvailable()); - assertEquals(ByteBufferUtils.UNSAFE_UNALIGNED, HBasePlatformDependent.unaligned()); - } - - public TestByteBufferUtils(boolean useUnsafeIfPossible) throws Exception { - if (useUnsafeIfPossible) { - detectAvailabilityOfUnsafe(); - } else { - disableUnsafe(); - } - } - - /** - * Create an array with sample data. 
- */ - @Before - public void setUp() { - array = new byte[8]; - for (int i = 0; i < array.length; ++i) { - array[i] = (byte) ('a' + i); - } - } - - private static final int MAX_VLONG_LENGTH = 9; - private static final Collection testNumbers; - - private static void addNumber(Set a, long l) { - if (l != Long.MIN_VALUE) { - a.add(l - 1); - } - a.add(l); - if (l != Long.MAX_VALUE) { - a.add(l + 1); - } - for (long divisor = 3; divisor <= 10; ++divisor) { - for (long delta = -1; delta <= 1; ++delta) { - a.add(l / divisor + delta); - } - } - } - - static { - SortedSet a = new TreeSet<>(); - for (int i = 0; i <= 63; ++i) { - long v = -1L << i; - assertTrue(v < 0); - addNumber(a, v); - v = (1L << i) - 1; - assertTrue(v >= 0); - addNumber(a, v); - } - - testNumbers = Collections.unmodifiableSet(a); - System.err.println("Testing variable-length long serialization using: " + testNumbers - + " (count: " + testNumbers.size() + ")"); - assertEquals(1753, testNumbers.size()); - assertEquals(Long.MIN_VALUE, a.first().longValue()); - assertEquals(Long.MAX_VALUE, a.last().longValue()); - } - - @Test - public void testReadWriteVLong() { - for (long l : testNumbers) { - ByteBuffer b = ByteBuffer.allocate(MAX_VLONG_LENGTH); - ByteBufferUtils.writeVLong(b, l); - b.flip(); - assertEquals(l, ByteBufferUtils.readVLong(b)); - b.flip(); - assertEquals(l, ByteBufferUtils.readVLong(ByteBuff.wrap(b))); - } - } - - @Test - public void testReadWriteConsecutiveVLong() { - for (long l : testNumbers) { - ByteBuffer b = ByteBuffer.allocate(2 * MAX_VLONG_LENGTH); - ByteBufferUtils.writeVLong(b, l); - ByteBufferUtils.writeVLong(b, l - 4); - b.flip(); - assertEquals(l, ByteBufferUtils.readVLong(b)); - assertEquals(l - 4, ByteBufferUtils.readVLong(b)); - b.flip(); - assertEquals(l, ByteBufferUtils.readVLong(ByteBuff.wrap(b))); - assertEquals(l - 4, ByteBufferUtils.readVLong(ByteBuff.wrap(b))); - } - } - - @Test - public void testConsistencyWithHadoopVLong() throws IOException { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - DataOutputStream dos = new DataOutputStream(baos); - for (long l : testNumbers) { - baos.reset(); - ByteBuffer b = ByteBuffer.allocate(MAX_VLONG_LENGTH); - ByteBufferUtils.writeVLong(b, l); - String bufStr = Bytes.toStringBinary(b.array(), b.arrayOffset(), b.position()); - WritableUtils.writeVLong(dos, l); - String baosStr = Bytes.toStringBinary(baos.toByteArray()); - assertEquals(baosStr, bufStr); - } - } - - /** - * Test copying to stream from buffer. - */ - @Test - public void testMoveBufferToStream() throws IOException { - final int arrayOffset = 7; - final int initialPosition = 10; - final int endPadding = 5; - byte[] arrayWrapper = new byte[arrayOffset + initialPosition + array.length + endPadding]; - System.arraycopy(array, 0, arrayWrapper, arrayOffset + initialPosition, array.length); - ByteBuffer buffer = - ByteBuffer.wrap(arrayWrapper, arrayOffset, initialPosition + array.length).slice(); - assertEquals(initialPosition + array.length, buffer.limit()); - assertEquals(0, buffer.position()); - buffer.position(initialPosition); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - ByteBufferUtils.moveBufferToStream(bos, buffer, array.length); - assertArrayEquals(array, bos.toByteArray()); - assertEquals(initialPosition + array.length, buffer.position()); - } - - /** - * Test copying to stream from buffer with offset. - * @throws IOException On test failure. 
- */ - @Test - public void testCopyToStreamWithOffset() throws IOException { - ByteBuffer buffer = ByteBuffer.wrap(array); - - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - - ByteBufferUtils.copyBufferToStream(bos, buffer, array.length / 2, array.length / 2); - - byte[] returnedArray = bos.toByteArray(); - for (int i = 0; i < array.length / 2; ++i) { - int pos = array.length / 2 + i; - assertEquals(returnedArray[i], array[pos]); - } - } - - /** - * Test copying data from stream. - * @throws IOException On test failure. - */ - @Test - public void testCopyFromStream() throws IOException { - ByteBuffer buffer = ByteBuffer.allocate(array.length); - ByteArrayInputStream bis = new ByteArrayInputStream(array); - DataInputStream dis = new DataInputStream(bis); - - ByteBufferUtils.copyFromStreamToBuffer(buffer, dis, array.length / 2); - ByteBufferUtils.copyFromStreamToBuffer(buffer, dis, array.length - array.length / 2); - for (int i = 0; i < array.length; ++i) { - assertEquals(array[i], buffer.get(i)); - } - } - - /** - * Test copying from buffer. - */ - @Test - public void testCopyFromBuffer() { - ByteBuffer srcBuffer = ByteBuffer.allocate(array.length); - ByteBuffer dstBuffer = ByteBuffer.allocate(array.length); - srcBuffer.put(array); - - ByteBufferUtils.copyFromBufferToBuffer(srcBuffer, dstBuffer, array.length / 2, - array.length / 4); - for (int i = 0; i < array.length / 4; ++i) { - assertEquals(srcBuffer.get(i + array.length / 2), dstBuffer.get(i)); - } - } - - /** - * Test 7-bit encoding of integers. - * @throws IOException On test failure. - */ - @Test - public void testCompressedInt() throws IOException { - testCompressedInt(0); - testCompressedInt(Integer.MAX_VALUE); - testCompressedInt(Integer.MIN_VALUE); - - for (int i = 0; i < 3; i++) { - testCompressedInt((128 << i) - 1); - } - - for (int i = 0; i < 3; i++) { - testCompressedInt((128 << i)); - } - } - - /** - * Test how much bytes we need to store integer. - */ - @Test - public void testIntFitsIn() { - assertEquals(1, ByteBufferUtils.intFitsIn(0)); - assertEquals(1, ByteBufferUtils.intFitsIn(1)); - assertEquals(2, ByteBufferUtils.intFitsIn(1 << 8)); - assertEquals(3, ByteBufferUtils.intFitsIn(1 << 16)); - assertEquals(4, ByteBufferUtils.intFitsIn(-1)); - assertEquals(4, ByteBufferUtils.intFitsIn(Integer.MAX_VALUE)); - assertEquals(4, ByteBufferUtils.intFitsIn(Integer.MIN_VALUE)); - } - - /** - * Test how much bytes we need to store long. - */ - @Test - public void testLongFitsIn() { - assertEquals(1, ByteBufferUtils.longFitsIn(0)); - assertEquals(1, ByteBufferUtils.longFitsIn(1)); - assertEquals(3, ByteBufferUtils.longFitsIn(1L << 16)); - assertEquals(5, ByteBufferUtils.longFitsIn(1L << 32)); - assertEquals(8, ByteBufferUtils.longFitsIn(-1)); - assertEquals(8, ByteBufferUtils.longFitsIn(Long.MIN_VALUE)); - assertEquals(8, ByteBufferUtils.longFitsIn(Long.MAX_VALUE)); - } - - /** - * Test if we are comparing equal bytes. 
- */ - @Test - public void testArePartEqual() { - byte[] array = new byte[] { 1, 2, 3, 4, 5, 1, 2, 3, 4 }; - ByteBuffer buffer = ByteBuffer.wrap(array); - assertTrue(ByteBufferUtils.arePartsEqual(buffer, 0, 4, 5, 4)); - assertTrue(ByteBufferUtils.arePartsEqual(buffer, 1, 2, 6, 2)); - assertFalse(ByteBufferUtils.arePartsEqual(buffer, 1, 2, 6, 3)); - assertFalse(ByteBufferUtils.arePartsEqual(buffer, 1, 3, 6, 2)); - assertFalse(ByteBufferUtils.arePartsEqual(buffer, 0, 3, 6, 3)); - } - - /** - * Test serializing int to bytes - */ - @Test - public void testPutInt() { - testPutInt(0); - testPutInt(Integer.MAX_VALUE); - - for (int i = 0; i < 3; i++) { - testPutInt((128 << i) - 1); - } - - for (int i = 0; i < 3; i++) { - testPutInt((128 << i)); - } - } - - // Utility methods invoked from test methods - - private void testCompressedInt(int value) throws IOException { - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - ByteBufferUtils.putCompressedInt(bos, value); - ByteArrayInputStream bis = new ByteArrayInputStream(bos.toByteArray()); - int parsedValue = ByteBufferUtils.readCompressedInt(bis); - assertEquals(value, parsedValue); - } - - private void testPutInt(int value) { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - try { - ByteBufferUtils.putInt(baos, value); - } catch (IOException e) { - throw new RuntimeException("Bug in putIn()", e); - } - - ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray()); - DataInputStream dis = new DataInputStream(bais); - try { - assertEquals(dis.readInt(), value); - } catch (IOException e) { - throw new RuntimeException("Bug in test!", e); - } - } - - @Test - public void testToBytes() { - ByteBuffer buffer = ByteBuffer.allocate(5); - buffer.put(new byte[] { 0, 1, 2, 3, 4 }); - assertEquals(5, buffer.position()); - assertEquals(5, buffer.limit()); - byte[] copy = ByteBufferUtils.toBytes(buffer, 2); - assertArrayEquals(new byte[] { 2, 3, 4 }, copy); - assertEquals(5, buffer.position()); - assertEquals(5, buffer.limit()); - } - - @Test - public void testToPrimitiveTypes() { - ByteBuffer buffer = ByteBuffer.allocate(15); - long l = 988L; - int i = 135; - short s = 7; - buffer.putLong(l); - buffer.putShort(s); - buffer.putInt(i); - assertEquals(l, ByteBufferUtils.toLong(buffer, 0)); - assertEquals(s, ByteBufferUtils.toShort(buffer, 8)); - assertEquals(i, ByteBufferUtils.toInt(buffer, 10)); - } - - @Test - public void testCopyFromArrayToBuffer() { - byte[] b = new byte[15]; - b[0] = -1; - long l = 988L; - int i = 135; - short s = 7; - Bytes.putLong(b, 1, l); - Bytes.putShort(b, 9, s); - Bytes.putInt(b, 11, i); - ByteBuffer buffer = ByteBuffer.allocate(14); - ByteBufferUtils.copyFromArrayToBuffer(buffer, b, 1, 14); - buffer.rewind(); - assertEquals(l, buffer.getLong()); - assertEquals(s, buffer.getShort()); - assertEquals(i, buffer.getInt()); - } - - private void testCopyFromSrcToDestWithThreads(Object input, Object output, List lengthes, - List offsets) throws InterruptedException { - assertTrue((input instanceof ByteBuffer) || (input instanceof byte[])); - assertTrue((output instanceof ByteBuffer) || (output instanceof byte[])); - assertEquals(lengthes.size(), offsets.size()); - - final int threads = lengthes.size(); - CountDownLatch latch = new CountDownLatch(1); - List exes = new ArrayList<>(threads); - int oldInputPos = (input instanceof ByteBuffer) ? ((ByteBuffer) input).position() : 0; - int oldOutputPos = (output instanceof ByteBuffer) ? 
((ByteBuffer) output).position() : 0; - for (int i = 0; i != threads; ++i) { - int offset = offsets.get(i); - int length = lengthes.get(i); - exes.add(() -> { - try { - latch.await(); - if (input instanceof ByteBuffer && output instanceof byte[]) { - ByteBufferUtils.copyFromBufferToArray((byte[]) output, (ByteBuffer) input, offset, - offset, length); - } - if (input instanceof byte[] && output instanceof ByteBuffer) { - ByteBufferUtils.copyFromArrayToBuffer((ByteBuffer) output, offset, (byte[]) input, - offset, length); - } - if (input instanceof ByteBuffer && output instanceof ByteBuffer) { - ByteBufferUtils.copyFromBufferToBuffer((ByteBuffer) input, (ByteBuffer) output, offset, - offset, length); - } - } catch (InterruptedException ex) { - throw new RuntimeException(ex); - } - }); - } - ExecutorService service = Executors.newFixedThreadPool(threads); - exes.forEach(service::execute); - latch.countDown(); - service.shutdown(); - assertTrue(service.awaitTermination(5, TimeUnit.SECONDS)); - if (input instanceof ByteBuffer) { - assertEquals(oldInputPos, ((ByteBuffer) input).position()); - } - if (output instanceof ByteBuffer) { - assertEquals(oldOutputPos, ((ByteBuffer) output).position()); - } - String inputString = (input instanceof ByteBuffer) - ? Bytes.toString(Bytes.toBytes((ByteBuffer) input)) - : Bytes.toString((byte[]) input); - String outputString = (output instanceof ByteBuffer) - ? Bytes.toString(Bytes.toBytes((ByteBuffer) output)) - : Bytes.toString((byte[]) output); - assertEquals(inputString, outputString); - } - - @Test - public void testCopyFromSrcToDestWithThreads() throws InterruptedException { - List words = - Arrays.asList(Bytes.toBytes("with"), Bytes.toBytes("great"), Bytes.toBytes("power"), - Bytes.toBytes("comes"), Bytes.toBytes("great"), Bytes.toBytes("responsibility")); - List lengthes = words.stream().map(v -> v.length).collect(Collectors.toList()); - List offsets = new ArrayList<>(words.size()); - for (int i = 0; i != words.size(); ++i) { - offsets.add(words.subList(0, i).stream().mapToInt(v -> v.length).sum()); - } - - int totalSize = words.stream().mapToInt(v -> v.length).sum(); - byte[] fullContent = new byte[totalSize]; - int offset = 0; - for (byte[] w : words) { - offset = Bytes.putBytes(fullContent, offset, w, 0, w.length); - } - - // test copyFromBufferToArray - for (ByteBuffer input : Arrays.asList(ByteBuffer.allocateDirect(totalSize), - ByteBuffer.allocate(totalSize))) { - words.forEach(input::put); - byte[] output = new byte[totalSize]; - testCopyFromSrcToDestWithThreads(input, output, lengthes, offsets); - } - - // test copyFromArrayToBuffer - for (ByteBuffer output : Arrays.asList(ByteBuffer.allocateDirect(totalSize), - ByteBuffer.allocate(totalSize))) { - byte[] input = fullContent; - testCopyFromSrcToDestWithThreads(input, output, lengthes, offsets); - } - - // test copyFromBufferToBuffer - for (ByteBuffer input : Arrays.asList(ByteBuffer.allocateDirect(totalSize), - ByteBuffer.allocate(totalSize))) { - words.forEach(input::put); - for (ByteBuffer output : Arrays.asList(ByteBuffer.allocateDirect(totalSize), - ByteBuffer.allocate(totalSize))) { - testCopyFromSrcToDestWithThreads(input, output, lengthes, offsets); - } - } - } - - @Test - public void testCopyFromBufferToArray() { - ByteBuffer buffer = ByteBuffer.allocate(15); - buffer.put((byte) -1); - long l = 988L; - int i = 135; - short s = 7; - buffer.putShort(s); - buffer.putInt(i); - buffer.putLong(l); - byte[] b = new byte[15]; - ByteBufferUtils.copyFromBufferToArray(b, buffer, 1, 1, 14); - 
assertEquals(s, Bytes.toShort(b, 1)); - assertEquals(i, Bytes.toInt(b, 3)); - assertEquals(l, Bytes.toLong(b, 7)); - } - - @Test - public void testRelativeCopyFromBuffertoBuffer() { - ByteBuffer bb1 = ByteBuffer.allocate(135); - ByteBuffer bb2 = ByteBuffer.allocate(135); - fillBB(bb1, (byte) 5); - ByteBufferUtils.copyFromBufferToBuffer(bb1, bb2); - assertTrue(bb1.position() == bb2.position()); - assertTrue(bb1.limit() == bb2.limit()); - bb1 = ByteBuffer.allocateDirect(135); - bb2 = ByteBuffer.allocateDirect(135); - fillBB(bb1, (byte) 5); - ByteBufferUtils.copyFromBufferToBuffer(bb1, bb2); - assertTrue(bb1.position() == bb2.position()); - assertTrue(bb1.limit() == bb2.limit()); - } - - @Test - public void testCompareTo() { - ByteBuffer bb1 = ByteBuffer.allocate(135); - ByteBuffer bb2 = ByteBuffer.allocate(135); - byte[] b = new byte[71]; - fillBB(bb1, (byte) 5); - fillBB(bb2, (byte) 5); - fillArray(b, (byte) 5); - assertEquals(0, ByteBufferUtils.compareTo(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining())); - assertTrue(ByteBufferUtils.compareTo(bb1, 0, bb1.remaining(), b, 0, b.length) > 0); - bb2.put(134, (byte) 6); - assertTrue(ByteBufferUtils.compareTo(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining()) < 0); - bb2.put(6, (byte) 4); - assertTrue(ByteBufferUtils.compareTo(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining()) > 0); - // Assert reverse comparing BB and bytearray works. - ByteBuffer bb3 = ByteBuffer.allocate(135); - fillBB(bb3, (byte) 0); - byte[] b3 = new byte[135]; - fillArray(b3, (byte) 1); - int result = ByteBufferUtils.compareTo(b3, 0, b3.length, bb3, 0, bb3.remaining()); - assertTrue(result > 0); - result = ByteBufferUtils.compareTo(bb3, 0, bb3.remaining(), b3, 0, b3.length); - assertTrue(result < 0); - byte[] b4 = Bytes.toBytes("123"); - ByteBuffer bb4 = ByteBuffer.allocate(10 + b4.length); - for (int i = 10; i < bb4.capacity(); ++i) { - bb4.put(i, b4[i - 10]); - } - result = ByteBufferUtils.compareTo(b4, 0, b4.length, bb4, 10, b4.length); - assertEquals(0, result); - } - - @Test - public void testEquals() { - byte[] a = Bytes.toBytes("http://A"); - ByteBuffer bb = ByteBuffer.wrap(a); - - assertTrue(ByteBufferUtils.equals(HConstants.EMPTY_BYTE_BUFFER, 0, 0, - HConstants.EMPTY_BYTE_BUFFER, 0, 0)); - - assertFalse(ByteBufferUtils.equals(HConstants.EMPTY_BYTE_BUFFER, 0, 0, bb, 0, a.length)); - - assertFalse(ByteBufferUtils.equals(bb, 0, 0, HConstants.EMPTY_BYTE_BUFFER, 0, a.length)); - - assertTrue(ByteBufferUtils.equals(bb, 0, a.length, bb, 0, a.length)); - - assertTrue(ByteBufferUtils.equals(HConstants.EMPTY_BYTE_BUFFER, 0, 0, - HConstants.EMPTY_BYTE_ARRAY, 0, 0)); - - assertFalse(ByteBufferUtils.equals(HConstants.EMPTY_BYTE_BUFFER, 0, 0, a, 0, a.length)); - - assertFalse(ByteBufferUtils.equals(bb, 0, a.length, HConstants.EMPTY_BYTE_ARRAY, 0, 0)); - - assertTrue(ByteBufferUtils.equals(bb, 0, a.length, a, 0, a.length)); - } - - @Test - public void testFindCommonPrefix() { - ByteBuffer bb1 = ByteBuffer.allocate(135); - ByteBuffer bb2 = ByteBuffer.allocate(135); - ByteBuffer bb3 = ByteBuffer.allocateDirect(135); - byte[] b = new byte[71]; - - fillBB(bb1, (byte) 5); - fillBB(bb2, (byte) 5); - fillBB(bb3, (byte) 5); - fillArray(b, (byte) 5); - - assertEquals(135, - ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining())); - assertEquals(71, ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), b, 0, b.length)); - assertEquals(135, - ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), bb3, 0, bb3.remaining())); - assertEquals(71, 
ByteBufferUtils.findCommonPrefix(bb3, 0, bb3.remaining(), b, 0, b.length)); - - b[13] = 9; - assertEquals(13, ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), b, 0, b.length)); - - bb2.put(134, (byte) 6); - assertEquals(134, - ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining())); - - bb2.put(6, (byte) 4); - assertEquals(6, - ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining())); - } - - private static void fillBB(ByteBuffer bb, byte b) { - for (int i = bb.position(); i < bb.limit(); i++) { - bb.put(i, b); - } - } +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.jupiter.api.Tag; - private static void fillArray(byte[] bb, byte b) { - for (int i = 0; i < bb.length; i++) { - bb[i] = b; - } - } +@Tag(MiscTests.TAG) +@Tag(SmallTests.TAG) +public class TestByteBufferUtils extends ByteBufferUtilsTestBase { } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtilsWoUnsafe.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtilsWoUnsafe.java new file mode 100644 index 000000000000..c02db2142c1c --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtilsWoUnsafe.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.util; + +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.mockito.Mockito.mockStatic; + +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.unsafe.HBasePlatformDependent; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.mockito.MockedStatic; + +@Tag(MiscTests.TAG) +@Tag(SmallTests.TAG) +public class TestByteBufferUtilsWoUnsafe extends ByteBufferUtilsTestBase { + + @BeforeAll + public static void disableUnsafe() { + try (MockedStatic mocked = mockStatic(HBasePlatformDependent.class)) { + mocked.when(HBasePlatformDependent::isUnsafeAvailable).thenReturn(false); + mocked.when(HBasePlatformDependent::unaligned).thenReturn(false); + assertFalse(ByteBufferUtils.UNSAFE_AVAIL); + assertFalse(ByteBufferUtils.UNSAFE_UNALIGNED); + } + } +} diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java index b74348959982..0122e91d7ea9 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java @@ -17,615 +17,12 @@ */ package org.apache.hadoop.hbase.util; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNotSame; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.lang.reflect.Field; -import java.lang.reflect.Modifier; -import java.math.BigDecimal; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Random; -import java.util.concurrent.ThreadLocalRandom; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; -import org.apache.hadoop.hbase.unsafe.HBasePlatformDependent; -import org.apache.hadoop.io.WritableUtils; -import org.junit.Assert; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category({ MiscTests.class, MediumTests.class }) -public class TestBytes { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestBytes.class); - - private static void setUnsafe(boolean value) throws Exception { - Field field = Bytes.class.getDeclaredField("UNSAFE_UNALIGNED"); - field.setAccessible(true); - - Field modifiersField = ReflectionUtils.getModifiersField(); - modifiersField.setAccessible(true); - int oldModifiers = field.getModifiers(); - modifiersField.setInt(field, oldModifiers & ~Modifier.FINAL); - try { - field.set(null, value); - } finally { - modifiersField.setInt(field, oldModifiers); - } - assertEquals(Bytes.UNSAFE_UNALIGNED, value); - } - - @Test - public void testShort() throws Exception { - testShort(false); - } - - @Test - public void testShortUnsafe() throws Exception { - testShort(true); - } - - private static void testShort(boolean unsafe) throws Exception { - setUnsafe(unsafe); - try { - for (short n : 
Arrays.asList(Short.MIN_VALUE, (short) -100, (short) -1, (short) 0, (short) 1, - (short) 300, Short.MAX_VALUE)) { - byte[] bytes = Bytes.toBytes(n); - assertEquals(Bytes.toShort(bytes, 0, bytes.length), n); - } - } finally { - setUnsafe(HBasePlatformDependent.unaligned()); - } - } - - @Test - public void testNullHashCode() { - byte[] b = null; - Exception ee = null; - try { - Bytes.hashCode(b); - } catch (Exception e) { - ee = e; - } - assertNotNull(ee); - } - - @Test - public void testAdd() { - byte[] a = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - byte[] b = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; - byte[] c = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 }; - byte[] result1 = Bytes.add(a, b, c); - byte[] result2 = Bytes.add(new byte[][] { a, b, c }); - assertEquals(0, Bytes.compareTo(result1, result2)); - } - - @Test - public void testSplit() { - byte[] lowest = Bytes.toBytes("AAA"); - byte[] middle = Bytes.toBytes("CCC"); - byte[] highest = Bytes.toBytes("EEE"); - byte[][] parts = Bytes.split(lowest, highest, 1); - for (byte[] bytes : parts) { - System.out.println(Bytes.toString(bytes)); - } - assertEquals(3, parts.length); - assertTrue(Bytes.equals(parts[1], middle)); - // Now divide into three parts. Change highest so split is even. - highest = Bytes.toBytes("DDD"); - parts = Bytes.split(lowest, highest, 2); - for (byte[] part : parts) { - System.out.println(Bytes.toString(part)); - } - assertEquals(4, parts.length); - // Assert that 3rd part is 'CCC'. - assertTrue(Bytes.equals(parts[2], middle)); - } - - @Test - public void testSplit2() { - // More split tests. - byte[] lowest = Bytes.toBytes("http://A"); - byte[] highest = Bytes.toBytes("http://z"); - byte[] middle = Bytes.toBytes("http://]"); - byte[][] parts = Bytes.split(lowest, highest, 1); - for (byte[] part : parts) { - System.out.println(Bytes.toString(part)); - } - assertEquals(3, parts.length); - assertTrue(Bytes.equals(parts[1], middle)); - } - - @Test - public void testSplit3() { - // Test invalid split cases - byte[] low = { 1, 1, 1 }; - byte[] high = { 1, 1, 3 }; - - // If swapped, should throw IAE - try { - Bytes.split(high, low, 1); - fail("Should not be able to split if low > high"); - } catch (IllegalArgumentException iae) { - // Correct - } - - // Single split should work - byte[][] parts = Bytes.split(low, high, 1); - for (int i = 0; i < parts.length; i++) { - System.out.println("" + i + " -> " + Bytes.toStringBinary(parts[i])); - } - assertEquals("Returned split should have 3 parts but has " + parts.length, 3, parts.length); - - // If split more than once, use additional byte to split - parts = Bytes.split(low, high, 2); - assertNotNull("Split with an additional byte", parts); - assertEquals(parts.length, low.length + 1); - - // Split 0 times should throw IAE - try { - Bytes.split(low, high, 0); - fail("Should not be able to split 0 times"); - } catch (IllegalArgumentException iae) { - // Correct - } - } - - @Test - public void testToInt() { - int[] ints = { -1, 123, Integer.MIN_VALUE, Integer.MAX_VALUE }; - for (int anInt : ints) { - byte[] b = Bytes.toBytes(anInt); - assertEquals(anInt, Bytes.toInt(b)); - byte[] b2 = bytesWithOffset(b); - assertEquals(anInt, Bytes.toInt(b2, 1)); - assertEquals(anInt, Bytes.toInt(b2, 1, Bytes.SIZEOF_INT)); - } - } - - @Test - public void testToLong() { - long[] longs = { -1L, 123L, Long.MIN_VALUE, Long.MAX_VALUE }; - for (long aLong : longs) { - byte[] b = Bytes.toBytes(aLong); - assertEquals(aLong, Bytes.toLong(b)); - byte[] b2 = bytesWithOffset(b); - assertEquals(aLong, Bytes.toLong(b2, 1)); - 
assertEquals(aLong, Bytes.toLong(b2, 1, Bytes.SIZEOF_LONG)); - } - } - - @Test - public void testToFloat() { - float[] floats = { -1f, 123.123f, Float.MAX_VALUE }; - for (float aFloat : floats) { - byte[] b = Bytes.toBytes(aFloat); - assertEquals(aFloat, Bytes.toFloat(b), 0.0f); - byte[] b2 = bytesWithOffset(b); - assertEquals(aFloat, Bytes.toFloat(b2, 1), 0.0f); - } - } - - @Test - public void testToDouble() { - double[] doubles = { Double.MIN_VALUE, Double.MAX_VALUE }; - for (double aDouble : doubles) { - byte[] b = Bytes.toBytes(aDouble); - assertEquals(aDouble, Bytes.toDouble(b), 0.0); - byte[] b2 = bytesWithOffset(b); - assertEquals(aDouble, Bytes.toDouble(b2, 1), 0.0); - } - } - - @Test - public void testToBigDecimal() { - BigDecimal[] decimals = - { new BigDecimal("-1"), new BigDecimal("123.123"), new BigDecimal("123123123123") }; - for (BigDecimal decimal : decimals) { - byte[] b = Bytes.toBytes(decimal); - assertEquals(decimal, Bytes.toBigDecimal(b)); - byte[] b2 = bytesWithOffset(b); - assertEquals(decimal, Bytes.toBigDecimal(b2, 1, b.length)); - } - } - - private byte[] bytesWithOffset(byte[] src) { - // add one byte in front to test offset - byte[] result = new byte[src.length + 1]; - result[0] = (byte) 0xAA; - System.arraycopy(src, 0, result, 1, src.length); - return result; - } - - @Test - public void testToBytesForByteBuffer() { - byte[] array = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; - ByteBuffer target = ByteBuffer.wrap(array); - target.position(2); - target.limit(7); - - byte[] actual = Bytes.toBytes(target); - byte[] expected = { 0, 1, 2, 3, 4, 5, 6 }; - assertArrayEquals(expected, actual); - assertEquals(2, target.position()); - assertEquals(7, target.limit()); - - ByteBuffer target2 = target.slice(); - assertEquals(0, target2.position()); - assertEquals(5, target2.limit()); - - byte[] actual2 = Bytes.toBytes(target2); - byte[] expected2 = { 2, 3, 4, 5, 6 }; - assertArrayEquals(expected2, actual2); - assertEquals(0, target2.position()); - assertEquals(5, target2.limit()); - } - - @Test - public void testGetBytesForByteBuffer() { - byte[] array = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; - ByteBuffer target = ByteBuffer.wrap(array); - target.position(2); - target.limit(7); - - byte[] actual = Bytes.getBytes(target); - byte[] expected = { 2, 3, 4, 5, 6 }; - assertArrayEquals(expected, actual); - assertEquals(2, target.position()); - assertEquals(7, target.limit()); - } - - @Test - public void testReadAsVLong() throws Exception { - long[] longs = { -1L, 123L, Long.MIN_VALUE, Long.MAX_VALUE }; - for (long aLong : longs) { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - DataOutputStream output = new DataOutputStream(baos); - WritableUtils.writeVLong(output, aLong); - byte[] long_bytes_no_offset = baos.toByteArray(); - assertEquals(aLong, Bytes.readAsVLong(long_bytes_no_offset, 0)); - byte[] long_bytes_with_offset = bytesWithOffset(long_bytes_no_offset); - assertEquals(aLong, Bytes.readAsVLong(long_bytes_with_offset, 1)); - } - } - - @Test - public void testToStringBinaryForBytes() { - byte[] array = { '0', '9', 'a', 'z', 'A', 'Z', '@', 1 }; - String actual = Bytes.toStringBinary(array); - String expected = "09azAZ@\\x01"; - assertEquals(expected, actual); - - String actual2 = Bytes.toStringBinary(array, 2, 3); - String expected2 = "azA"; - assertEquals(expected2, actual2); - } - - @Test - public void testToStringBinaryForArrayBasedByteBuffer() { - byte[] array = { '0', '9', 'a', 'z', 'A', 'Z', '@', 1 }; - ByteBuffer target = ByteBuffer.wrap(array); - String actual = 
Bytes.toStringBinary(target); - String expected = "09azAZ@\\x01"; - assertEquals(expected, actual); - } - - @Test - public void testToStringBinaryForReadOnlyByteBuffer() { - byte[] array = { '0', '9', 'a', 'z', 'A', 'Z', '@', 1 }; - ByteBuffer target = ByteBuffer.wrap(array).asReadOnlyBuffer(); - String actual = Bytes.toStringBinary(target); - String expected = "09azAZ@\\x01"; - assertEquals(expected, actual); - } - - @Test - public void testBinarySearch() { - byte[][] arr = { { 1 }, { 3 }, { 5 }, { 7 }, { 9 }, { 11 }, { 13 }, { 15 }, }; - byte[] key1 = { 3, 1 }; - byte[] key2 = { 4, 9 }; - byte[] key2_2 = { 4 }; - byte[] key3 = { 5, 11 }; - byte[] key4 = { 0 }; - byte[] key5 = { 2 }; - - assertEquals(1, Bytes.binarySearch(arr, key1, 0, 1)); - assertEquals(0, Bytes.binarySearch(arr, key1, 1, 1)); - assertEquals(-(2 + 1), Arrays.binarySearch(arr, key2_2, Bytes.BYTES_COMPARATOR)); - assertEquals(-(2 + 1), Bytes.binarySearch(arr, key2, 0, 1)); - assertEquals(4, Bytes.binarySearch(arr, key2, 1, 1)); - assertEquals(2, Bytes.binarySearch(arr, key3, 0, 1)); - assertEquals(5, Bytes.binarySearch(arr, key3, 1, 1)); - assertEquals(-1, Bytes.binarySearch(arr, key4, 0, 1)); - assertEquals(-2, Bytes.binarySearch(arr, key5, 0, 1)); - - // Search for values to the left and to the right of each item in the array. - for (int i = 0; i < arr.length; ++i) { - assertEquals(-(i + 1), Bytes.binarySearch(arr, new byte[] { (byte) (arr[i][0] - 1) }, 0, 1)); - assertEquals(-(i + 2), Bytes.binarySearch(arr, new byte[] { (byte) (arr[i][0] + 1) }, 0, 1)); - } - } - - @Test - public void testToStringBytesBinaryReversible() { - byte[] randomBytes = new byte[1000]; - for (int i = 0; i < 1000; i++) { - Bytes.random(randomBytes); - verifyReversibleForBytes(randomBytes); - } - // some specific cases - verifyReversibleForBytes(new byte[] {}); - verifyReversibleForBytes(new byte[] { '\\', 'x', 'A', 'D' }); - verifyReversibleForBytes(new byte[] { '\\', 'x', 'A', 'D', '\\' }); - } - - private void verifyReversibleForBytes(byte[] originalBytes) { - String convertedString = Bytes.toStringBinary(originalBytes); - byte[] convertedBytes = Bytes.toBytesBinary(convertedString); - if (Bytes.compareTo(originalBytes, convertedBytes) != 0) { - fail("Not reversible for\nbyte[]: " + Arrays.toString(originalBytes) + ",\nStringBinary: " - + convertedString); - } - } - - @Test - public void testStartsWith() { - assertTrue(Bytes.startsWith(Bytes.toBytes("hello"), Bytes.toBytes("h"))); - assertTrue(Bytes.startsWith(Bytes.toBytes("hello"), Bytes.toBytes(""))); - assertTrue(Bytes.startsWith(Bytes.toBytes("hello"), Bytes.toBytes("hello"))); - assertFalse(Bytes.startsWith(Bytes.toBytes("hello"), Bytes.toBytes("helloworld"))); - assertFalse(Bytes.startsWith(Bytes.toBytes(""), Bytes.toBytes("hello"))); - } - - @Test - public void testIncrementBytes() { - assertTrue(checkTestIncrementBytes(10, 1)); - assertTrue(checkTestIncrementBytes(12, 123435445)); - assertTrue(checkTestIncrementBytes(124634654, 1)); - assertTrue(checkTestIncrementBytes(10005460, 5005645)); - assertTrue(checkTestIncrementBytes(1, -1)); - assertTrue(checkTestIncrementBytes(10, -1)); - assertTrue(checkTestIncrementBytes(10, -5)); - assertTrue(checkTestIncrementBytes(1005435000, -5)); - assertTrue(checkTestIncrementBytes(10, -43657655)); - assertTrue(checkTestIncrementBytes(-1, 1)); - assertTrue(checkTestIncrementBytes(-26, 5034520)); - assertTrue(checkTestIncrementBytes(-10657200, 5)); - assertTrue(checkTestIncrementBytes(-12343250, 45376475)); - assertTrue(checkTestIncrementBytes(-10, 
-5)); - assertTrue(checkTestIncrementBytes(-12343250, -5)); - assertTrue(checkTestIncrementBytes(-12, -34565445)); - assertTrue(checkTestIncrementBytes(-1546543452, -34565445)); - } - - private static boolean checkTestIncrementBytes(long val, long amount) { - byte[] value = Bytes.toBytes(val); - byte[] testValue = { -1, -1, -1, -1, -1, -1, -1, -1 }; - if (value[0] > 0) { - testValue = new byte[Bytes.SIZEOF_LONG]; - } - System.arraycopy(value, 0, testValue, testValue.length - value.length, value.length); - - long incrementResult = Bytes.toLong(Bytes.incrementBytes(value, amount)); - - return (Bytes.toLong(testValue) + amount) == incrementResult; - } - - @Test - public void testFixedSizeString() throws IOException { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - DataOutputStream dos = new DataOutputStream(baos); - Bytes.writeStringFixedSize(dos, "Hello", 5); - Bytes.writeStringFixedSize(dos, "World", 18); - Bytes.writeStringFixedSize(dos, "", 9); - - try { - // Use a long dash which is three bytes in UTF-8. If encoding happens - // using ISO-8859-1, this will fail. - Bytes.writeStringFixedSize(dos, "Too\u2013Long", 9); - fail("Exception expected"); - } catch (IOException ex) { - assertEquals( - "Trying to write 10 bytes (Too\\xE2\\x80\\x93Long) into a field of " + "length 9", - ex.getMessage()); - } - - ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray()); - DataInputStream dis = new DataInputStream(bais); - assertEquals("Hello", Bytes.readStringFixedSize(dis, 5)); - assertEquals("World", Bytes.readStringFixedSize(dis, 18)); - assertEquals("", Bytes.readStringFixedSize(dis, 9)); - } - - @Test - public void testCopy() { - byte[] bytes = Bytes.toBytes("ABCDEFGHIJKLMNOPQRSTUVWXYZ"); - byte[] copy = Bytes.copy(bytes); - assertNotSame(bytes, copy); - assertTrue(Bytes.equals(bytes, copy)); - } - - @Test - public void testToBytesBinaryTrailingBackslashes() { - try { - Bytes.toBytesBinary("abc\\x00\\x01\\"); - } catch (StringIndexOutOfBoundsException ex) { - fail("Illegal string access: " + ex.getMessage()); - } - } - - @Test - public void testToStringBinary_toBytesBinary_Reversable() { - String bytes = Bytes.toStringBinary(Bytes.toBytes(2.17)); - assertEquals(2.17, Bytes.toDouble(Bytes.toBytesBinary(bytes)), 0); - } - - @Test - public void testUnsignedBinarySearch() { - byte[] bytes = new byte[] { 0, 5, 123, 127, -128, -100, -1 }; - Assert.assertEquals(1, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) 5)); - Assert.assertEquals(3, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) 127)); - Assert.assertEquals(4, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) -128)); - Assert.assertEquals(5, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) -100)); - Assert.assertEquals(6, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) -1)); - Assert.assertEquals(-1 - 1, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) 2)); - Assert.assertEquals(-6 - 1, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) -5)); - } - - @Test - public void testUnsignedIncrement() { - byte[] a = Bytes.toBytes(0); - int a2 = Bytes.toInt(Bytes.unsignedCopyAndIncrement(a), 0); - Assert.assertEquals(1, a2); - - byte[] b = Bytes.toBytes(-1); - byte[] actual = Bytes.unsignedCopyAndIncrement(b); - Assert.assertNotSame(b, actual); - byte[] expected = new byte[] { 1, 0, 0, 0, 0 }; - assertArrayEquals(expected, actual); - - byte[] c = Bytes.toBytes(255);// should wrap to the next significant byte - int c2 = 
Bytes.toInt(Bytes.unsignedCopyAndIncrement(c), 0); - Assert.assertEquals(256, c2); - } - - @Test - public void testIndexOf() { - byte[] array = Bytes.toBytes("hello"); - assertEquals(1, Bytes.indexOf(array, (byte) 'e')); - assertEquals(4, Bytes.indexOf(array, (byte) 'o')); - assertEquals(-1, Bytes.indexOf(array, (byte) 'a')); - assertEquals(0, Bytes.indexOf(array, Bytes.toBytes("hel"))); - assertEquals(2, Bytes.indexOf(array, Bytes.toBytes("ll"))); - assertEquals(-1, Bytes.indexOf(array, Bytes.toBytes("hll"))); - } - - @Test - public void testContains() { - byte[] array = Bytes.toBytes("hello world"); - assertTrue(Bytes.contains(array, (byte) 'e')); - assertTrue(Bytes.contains(array, (byte) 'd')); - assertFalse(Bytes.contains(array, (byte) 'a')); - assertTrue(Bytes.contains(array, Bytes.toBytes("world"))); - assertTrue(Bytes.contains(array, Bytes.toBytes("ello"))); - assertFalse(Bytes.contains(array, Bytes.toBytes("owo"))); - } - - @Test - public void testZero() { - byte[] array = Bytes.toBytes("hello"); - Bytes.zero(array); - for (byte b : array) { - assertEquals(0, b); - } - array = Bytes.toBytes("hello world"); - Bytes.zero(array, 2, 7); - assertFalse(array[0] == 0); - assertFalse(array[1] == 0); - for (int i = 2; i < 9; i++) { - assertEquals(0, array[i]); - } - for (int i = 9; i < array.length; i++) { - assertFalse(array[i] == 0); - } - } - - @Test - public void testPutBuffer() { - byte[] b = new byte[100]; - for (byte i = 0; i < 100; i++) { - Bytes.putByteBuffer(b, i, ByteBuffer.wrap(new byte[] { i })); - } - for (byte i = 0; i < 100; i++) { - Assert.assertEquals(i, b[i]); - } - } - - @Test - public void testToFromHex() { - List testStrings = new ArrayList<>(8); - testStrings.addAll(Arrays.asList("", "00", "A0", "ff", "FFffFFFFFFFFFF", "12", - "0123456789abcdef", "283462839463924623984692834692346ABCDFEDDCA0")); - for (String testString : testStrings) { - byte[] byteData = Bytes.fromHex(testString); - Assert.assertEquals(testString.length() / 2, byteData.length); - String result = Bytes.toHex(byteData); - Assert.assertTrue(testString.equalsIgnoreCase(result)); - } - - List testByteData = new ArrayList<>(5); - testByteData.addAll(Arrays.asList(new byte[0], new byte[1], new byte[10], - new byte[] { 1, 2, 3, 4, 5 }, new byte[] { (byte) 0xFF })); - Random rand = ThreadLocalRandom.current(); - for (int i = 0; i < 20; i++) { - byte[] bytes = new byte[rand.nextInt(100)]; - Bytes.random(bytes); - testByteData.add(bytes); - } - - for (byte[] testData : testByteData) { - String hexString = Bytes.toHex(testData); - Assert.assertEquals(testData.length * 2, hexString.length()); - byte[] result = Bytes.fromHex(hexString); - assertArrayEquals(testData, result); - } - } - - @Test - public void testFindCommonPrefix() throws Exception { - testFindCommonPrefix(false); - } - - @Test - public void testFindCommonPrefixUnsafe() throws Exception { - testFindCommonPrefix(true); - } - - private static void testFindCommonPrefix(boolean unsafe) throws Exception { - setUnsafe(unsafe); - try { - // tests for common prefixes less than 8 bytes in length (i.e. 
using non-vectorized path) - byte[] hello = Bytes.toBytes("hello"); - byte[] helloWorld = Bytes.toBytes("helloworld"); - - assertEquals(5, - Bytes.findCommonPrefix(hello, helloWorld, hello.length, helloWorld.length, 0, 0)); - assertEquals(5, Bytes.findCommonPrefix(hello, hello, hello.length, hello.length, 0, 0)); - assertEquals(3, - Bytes.findCommonPrefix(hello, hello, hello.length - 2, hello.length - 2, 2, 2)); - assertEquals(0, Bytes.findCommonPrefix(hello, hello, 0, 0, 0, 0)); - - // tests for common prefixes greater than 8 bytes in length which may use the vectorized path - byte[] hellohello = Bytes.toBytes("hellohello"); - byte[] hellohellohi = Bytes.toBytes("hellohellohi"); +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.jupiter.api.Tag; - assertEquals(10, Bytes.findCommonPrefix(hellohello, hellohellohi, hellohello.length, - hellohellohi.length, 0, 0)); - assertEquals(10, Bytes.findCommonPrefix(hellohellohi, hellohello, hellohellohi.length, - hellohello.length, 0, 0)); - assertEquals(10, - Bytes.findCommonPrefix(hellohello, hellohello, hellohello.length, hellohello.length, 0, 0)); +@Tag(MiscTests.TAG) +@Tag(SmallTests.TAG) +public class TestBytes extends BytesTestBase { - hellohello[2] = 0; - assertEquals(2, Bytes.findCommonPrefix(hellohello, hellohellohi, hellohello.length, - hellohellohi.length, 0, 0)); - } finally { - setUnsafe(HBasePlatformDependent.unaligned()); - } - } } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytesWoUnsafe.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytesWoUnsafe.java new file mode 100644 index 000000000000..8aacab4b8514 --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytesWoUnsafe.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.util; + +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.mockito.Mockito.mockStatic; + +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.unsafe.HBasePlatformDependent; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.mockito.MockedStatic; + +@Tag(MiscTests.TAG) +@Tag(SmallTests.TAG) +public class TestBytesWoUnsafe extends BytesTestBase { + + @BeforeAll + public static void disableUnsafe() { + try (MockedStatic mocked = mockStatic(HBasePlatformDependent.class)) { + mocked.when(HBasePlatformDependent::unaligned).thenReturn(false); + assertFalse(Bytes.UNSAFE_UNALIGNED); + } + } +} diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index 1abee9db3ba0..2ddcf0415e66 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -305,6 +305,11 @@ mockito-core test + + org.mockito + mockito-inline + test + org.slf4j jcl-over-slf4j diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtil.java index e9f060be65ea..dee082ab08fa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtil.java @@ -21,13 +21,9 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; import java.io.File; import java.util.List; -import java.util.Random; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; @@ -48,9 +44,6 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -import org.mockito.Mockito; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -412,38 +405,8 @@ public void testTestDir() throws Exception { assertTrue(hbt.cleanupTestDir()); } - @Test - public void testResolvePortConflict() throws Exception { - // raises port conflict between 1st call and 2nd call of randomPort() by mocking Random object - Random random = mock(Random.class); - when(random.nextInt(anyInt())).thenAnswer(new Answer() { - int[] numbers = { 1, 1, 2 }; - int count = 0; - - @Override - public Integer answer(InvocationOnMock invocation) { - int ret = numbers[count]; - count++; - return ret; - } - }); - - HBaseTestingUtil.PortAllocator.AvailablePortChecker portChecker = - mock(HBaseTestingUtil.PortAllocator.AvailablePortChecker.class); - when(portChecker.available(anyInt())).thenReturn(true); - - HBaseTestingUtil.PortAllocator portAllocator = - new HBaseTestingUtil.PortAllocator(random, portChecker); - - int port1 = portAllocator.randomFreePort(); - int port2 = portAllocator.randomFreePort(); - assertNotEquals(port1, port2); - Mockito.verify(random, Mockito.times(3)).nextInt(anyInt()); - } - @Test public void testOverridingOfDefaultPorts() throws Exception { - // confirm that default port properties being overridden to random Configuration defaultConfig = HBaseConfiguration.create(); defaultConfig.setInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPortAllocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPortAllocator.java new file mode 100644 index 000000000000..e92b08938fa6 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPortAllocator.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.Random; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +@Tag(MiscTests.TAG) +@Tag(SmallTests.TAG) +public class TestPortAllocator { + + @Test + public void testResolvePortConflict() throws Exception { + // raises port conflict between 1st call and 2nd call of randomPort() by mocking Random object + Random random = mock(Random.class); + when(random.nextInt(anyInt())).thenAnswer(new Answer() { + int[] numbers = { 1, 1, 2 }; + int count = 0; + + @Override + public Integer answer(InvocationOnMock invocation) { + int ret = numbers[count]; + count++; + return ret; + } + }); + + HBaseTestingUtil.PortAllocator.AvailablePortChecker portChecker = + mock(HBaseTestingUtil.PortAllocator.AvailablePortChecker.class); + when(portChecker.available(anyInt())).thenReturn(true); + + HBaseTestingUtil.PortAllocator portAllocator = + new HBaseTestingUtil.PortAllocator(random, portChecker); + + int port1 = portAllocator.randomFreePort(); + int port2 = portAllocator.randomFreePort(); + assertNotEquals(port1, port2); + verify(random, Mockito.times(3)).nextInt(anyInt()); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSide3TestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSide3TestBase.java new file mode 100644 index 000000000000..518fe56bac49 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSide3TestBase.java @@ -0,0 +1,1188 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.Random; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.RegionMetrics; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.RegionObserver; +import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; +import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; +import org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.ipc.RpcClientFactory; +import org.apache.hadoop.hbase.ipc.ServerRpcController; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; +import org.apache.hadoop.hbase.regionserver.RegionScanner; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos; + +public class FromClientSide3TestBase { + + private static final Logger LOG = LoggerFactory.getLogger(FromClientSide3TestBase.class); + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + + private static int WAITTABLE_MILLIS; + private static byte[] FAMILY; + private static int SLAVES; + private static byte[] ROW; + private static byte[] ANOTHERROW; + 
private static byte[] QUALIFIER; + private static byte[] VALUE; + private static byte[] COL_QUAL; + private static byte[] VAL_BYTES; + private static byte[] ROW_BYTES; + + private TableName tableName; + + protected static void startCluster() throws Exception { + WAITTABLE_MILLIS = 10000; + FAMILY = Bytes.toBytes("testFamily"); + SLAVES = 3; + ROW = Bytes.toBytes("testRow"); + ANOTHERROW = Bytes.toBytes("anotherrow"); + QUALIFIER = Bytes.toBytes("testQualifier"); + VALUE = Bytes.toBytes("testValue"); + COL_QUAL = Bytes.toBytes("f1"); + VAL_BYTES = Bytes.toBytes("v1"); + ROW_BYTES = Bytes.toBytes("r1"); + TEST_UTIL.startMiniCluster(SLAVES); + } + + @AfterAll + public static void shutdownCluster() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @BeforeEach + public void setUp(TestInfo testInfo) throws Exception { + tableName = TableName.valueOf(testInfo.getTestMethod().get().getName()); + } + + @AfterEach + public void tearDown() throws Exception { + for (TableDescriptor htd : TEST_UTIL.getAdmin().listTableDescriptors()) { + LOG.info("Tear down, remove table=" + htd.getTableName()); + TEST_UTIL.deleteTable(htd.getTableName()); + } + } + + private void randomCFPuts(Table table, byte[] row, byte[] family, int nPuts) throws Exception { + Put put = new Put(row); + Random rand = ThreadLocalRandom.current(); + for (int i = 0; i < nPuts; i++) { + byte[] qualifier = Bytes.toBytes(rand.nextInt()); + byte[] value = Bytes.toBytes(rand.nextInt()); + put.addColumn(family, qualifier, value); + } + table.put(put); + } + + private void performMultiplePutAndFlush(Admin admin, Table table, byte[] row, byte[] family, + int nFlushes, int nPuts) throws Exception { + for (int i = 0; i < nFlushes; i++) { + randomCFPuts(table, row, family, nPuts); + admin.flush(table.getName()); + } + } + + private static List toList(ResultScanner scanner) { + try { + List cells = new ArrayList<>(); + for (Result r : scanner) { + cells.addAll(r.listCells()); + } + return cells; + } finally { + scanner.close(); + } + } + + @Test + public void testScanAfterDeletingSpecifiedRow() throws IOException, InterruptedException { + try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + byte[] row = Bytes.toBytes("SpecifiedRow"); + byte[] value0 = Bytes.toBytes("value_0"); + byte[] value1 = Bytes.toBytes("value_1"); + Put put = new Put(row); + put.addColumn(FAMILY, QUALIFIER, VALUE); + table.put(put); + Delete d = new Delete(row); + table.delete(d); + put = new Put(row); + put.addColumn(FAMILY, null, value0); + table.put(put); + put = new Put(row); + put.addColumn(FAMILY, null, value1); + table.put(put); + List cells = toList(table.getScanner(new Scan())); + assertEquals(1, cells.size()); + assertEquals("value_1", Bytes.toString(CellUtil.cloneValue(cells.get(0)))); + + cells = toList(table.getScanner(new Scan().addFamily(FAMILY))); + assertEquals(1, cells.size()); + assertEquals("value_1", Bytes.toString(CellUtil.cloneValue(cells.get(0)))); + + cells = toList(table.getScanner(new Scan().addColumn(FAMILY, QUALIFIER))); + assertEquals(0, cells.size()); + + TEST_UTIL.getAdmin().flush(tableName); + cells = toList(table.getScanner(new Scan())); + assertEquals(1, cells.size()); + assertEquals("value_1", Bytes.toString(CellUtil.cloneValue(cells.get(0)))); + + cells = toList(table.getScanner(new Scan().addFamily(FAMILY))); + assertEquals(1, cells.size()); + assertEquals("value_1", Bytes.toString(CellUtil.cloneValue(cells.get(0)))); + + cells = 
toList(table.getScanner(new Scan().addColumn(FAMILY, QUALIFIER))); + assertEquals(0, cells.size()); + } + } + + @Test + public void testScanAfterDeletingSpecifiedRowV2() throws IOException, InterruptedException { + try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + byte[] row = Bytes.toBytes("SpecifiedRow"); + byte[] qual0 = Bytes.toBytes("qual0"); + byte[] qual1 = Bytes.toBytes("qual1"); + long now = EnvironmentEdgeManager.currentTime(); + Delete d = new Delete(row, now); + table.delete(d); + + Put put = new Put(row); + put.addColumn(FAMILY, null, now + 1, VALUE); + table.put(put); + + put = new Put(row); + put.addColumn(FAMILY, qual1, now + 2, qual1); + table.put(put); + + put = new Put(row); + put.addColumn(FAMILY, qual0, now + 3, qual0); + table.put(put); + + Result r = table.get(new Get(row)); + assertEquals(3, r.size(), r.toString()); + assertEquals("testValue", Bytes.toString(CellUtil.cloneValue(r.rawCells()[0]))); + assertEquals("qual0", Bytes.toString(CellUtil.cloneValue(r.rawCells()[1]))); + assertEquals("qual1", Bytes.toString(CellUtil.cloneValue(r.rawCells()[2]))); + + TEST_UTIL.getAdmin().flush(tableName); + r = table.get(new Get(row)); + assertEquals(3, r.size()); + assertEquals("testValue", Bytes.toString(CellUtil.cloneValue(r.rawCells()[0]))); + assertEquals("qual0", Bytes.toString(CellUtil.cloneValue(r.rawCells()[1]))); + assertEquals("qual1", Bytes.toString(CellUtil.cloneValue(r.rawCells()[2]))); + } + } + + private int getStoreFileCount(Admin admin, ServerName serverName, RegionInfo region) + throws IOException { + for (RegionMetrics metrics : admin.getRegionMetrics(serverName, region.getTable())) { + if (Bytes.equals(region.getRegionName(), metrics.getRegionName())) { + return metrics.getStoreFileCount(); + } + } + return 0; + } + + // override the config settings at the CF level and ensure priority + @Test + public void testAdvancedConfigOverride() throws Exception { + /* + * Overall idea: (1) create 3 store files and issue a compaction. config's compaction.min == 3, + * so should work. (2) Increase the compaction.min toggle in the HTD to 5 and modify table. If + * we use the HTD value instead of the default config value, adding 3 files and issuing a + * compaction SHOULD NOT work (3) Decrease the compaction.min toggle in the HCD to 2 and modify + * table. The CF schema should override the Table schema and now cause a minor compaction. + */ + TEST_UTIL.getConfiguration().setInt("hbase.hstore.compaction.min", 3); + + try (Table table = TEST_UTIL.createTable(tableName, FAMILY, 10)) { + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + Admin admin = TEST_UTIL.getAdmin(); + + // Create 3 store files. + byte[] row = Bytes.toBytes(ThreadLocalRandom.current().nextInt()); + performMultiplePutAndFlush(admin, table, row, FAMILY, 3, 100); + + try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) { + // Verify we have multiple store files. + HRegionLocation loc = locator.getRegionLocation(row, true); + assertTrue(getStoreFileCount(admin, loc.getServerName(), loc.getRegion()) > 1); + + // Issue a compaction request + admin.compact(tableName); + + // poll wait for the compactions to happen + for (int i = 0; i < 10 * 1000 / 40; ++i) { + // The number of store files after compaction should be lesser. 
+ loc = locator.getRegionLocation(row, true); + if (!loc.getRegion().isOffline()) { + if (getStoreFileCount(admin, loc.getServerName(), loc.getRegion()) <= 1) { + break; + } + } + Thread.sleep(40); + } + // verify the compactions took place and that we didn't just time out + assertTrue(getStoreFileCount(admin, loc.getServerName(), loc.getRegion()) <= 1); + + // change the compaction.min config option for this table to 5 + LOG.info("hbase.hstore.compaction.min should now be 5"); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(table.getDescriptor()) + .setValue("hbase.hstore.compaction.min", String.valueOf(5)).build(); + admin.modifyTable(htd); + LOG.info("alter status finished"); + + // Create 3 more store files. + performMultiplePutAndFlush(admin, table, row, FAMILY, 3, 10); + + // Issue a compaction request + admin.compact(tableName); + + // This time, the compaction request should not happen + Thread.sleep(10 * 1000); + loc = locator.getRegionLocation(row, true); + int sfCount = getStoreFileCount(admin, loc.getServerName(), loc.getRegion()); + assertTrue(sfCount > 1); + + // change an individual CF's config option to 2 & online schema update + LOG.info("hbase.hstore.compaction.min should now be 2"); + htd = TableDescriptorBuilder.newBuilder(htd) + .modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(htd.getColumnFamily(FAMILY)) + .setValue("hbase.hstore.compaction.min", String.valueOf(2)).build()) + .build(); + admin.modifyTable(htd); + LOG.info("alter status finished"); + + // Issue a compaction request + admin.compact(tableName); + + // poll wait for the compactions to happen + for (int i = 0; i < 10 * 1000 / 40; ++i) { + loc = locator.getRegionLocation(row, true); + try { + if (getStoreFileCount(admin, loc.getServerName(), loc.getRegion()) < sfCount) { + break; + } + } catch (Exception e) { + LOG.debug("Waiting for region to come online: " + + Bytes.toStringBinary(loc.getRegion().getRegionName())); + } + Thread.sleep(40); + } + + // verify the compaction took place and that we didn't just time out + assertTrue(getStoreFileCount(admin, loc.getServerName(), loc.getRegion()) < sfCount); + + // Finally, ensure that we can remove a custom config value after we made it + LOG.info("Removing CF config value"); + LOG.info("hbase.hstore.compaction.min should now be 5"); + htd = TableDescriptorBuilder.newBuilder(htd) + .modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(htd.getColumnFamily(FAMILY)) + .setValue("hbase.hstore.compaction.min", null).build()) + .build(); + admin.modifyTable(htd); + LOG.info("alter status finished"); + assertNull(table.getDescriptor().getColumnFamily(FAMILY) + .getValue(Bytes.toBytes("hbase.hstore.compaction.min"))); + } + } + } + + @Test + public void testHTableBatchWithEmptyPut() throws IOException, InterruptedException { + try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + List actions = new ArrayList<>(); + Object[] results = new Object[2]; + // create an empty Put + Put put1 = new Put(ROW); + actions.add(put1); + + Put put2 = new Put(ANOTHERROW); + put2.addColumn(FAMILY, QUALIFIER, VALUE); + actions.add(put2); + + table.batch(actions, results); + fail("Empty Put should have failed the batch call"); + } catch (IllegalArgumentException iae) { + } + } + + // Test Table.batch with large amount of mutations against the same key. + // It used to trigger read lock's "Maximum lock count exceeded" Error. 
+ @Test + public void testHTableWithLargeBatch() throws IOException, InterruptedException { + int sixtyFourK = 64 * 1024; + List actions = new ArrayList<>(); + Object[] results = new Object[(sixtyFourK + 1) * 2]; + + try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + + for (int i = 0; i < sixtyFourK + 1; i++) { + Put put1 = new Put(ROW); + put1.addColumn(FAMILY, QUALIFIER, VALUE); + actions.add(put1); + + Put put2 = new Put(ANOTHERROW); + put2.addColumn(FAMILY, QUALIFIER, VALUE); + actions.add(put2); + } + + table.batch(actions, results); + } + } + + @Test + public void testBatchWithRowMutation() throws Exception { + LOG.info("Starting testBatchWithRowMutation"); + byte[][] QUALIFIERS = new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b") }; + + try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + + RowMutations arm = RowMutations + .of(Collections.singletonList(new Put(ROW).addColumn(FAMILY, QUALIFIERS[0], VALUE))); + Object[] batchResult = new Object[1]; + table.batch(Arrays.asList(arm), batchResult); + + Get g = new Get(ROW); + Result r = table.get(g); + assertEquals(0, Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[0]))); + + arm = RowMutations.of(Arrays.asList(new Put(ROW).addColumn(FAMILY, QUALIFIERS[1], VALUE), + new Delete(ROW).addColumns(FAMILY, QUALIFIERS[0]))); + table.batch(Arrays.asList(arm), batchResult); + r = table.get(g); + assertEquals(0, Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[1]))); + assertNull(r.getValue(FAMILY, QUALIFIERS[0])); + + // Test that we get the correct remote exception for RowMutations from batch() + try { + arm = RowMutations.of(Collections.singletonList( + new Put(ROW).addColumn(new byte[] { 'b', 'o', 'g', 'u', 's' }, QUALIFIERS[0], VALUE))); + table.batch(Arrays.asList(arm), batchResult); + fail("Expected RetriesExhaustedWithDetailsException with NoSuchColumnFamilyException"); + } catch (RetriesExhaustedException e) { + String msg = e.getMessage(); + assertTrue(msg.contains("NoSuchColumnFamilyException")); + } + } + } + + @Test + public void testBatchWithCheckAndMutate() throws Exception { + try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { + byte[] row1 = Bytes.toBytes("row1"); + byte[] row2 = Bytes.toBytes("row2"); + byte[] row3 = Bytes.toBytes("row3"); + byte[] row4 = Bytes.toBytes("row4"); + byte[] row5 = Bytes.toBytes("row5"); + byte[] row6 = Bytes.toBytes("row6"); + byte[] row7 = Bytes.toBytes("row7"); + + table + .put(Arrays.asList(new Put(row1).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")), + new Put(row2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")), + new Put(row3).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")), + new Put(row4).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")), + new Put(row5).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")), + new Put(row6).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes(10L)), + new Put(row7).addColumn(FAMILY, Bytes.toBytes("G"), Bytes.toBytes("g")))); + + CheckAndMutate checkAndMutate1 = + CheckAndMutate.newBuilder(row1).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) + .build(new RowMutations(row1) + .add(new Put(row1).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("g"))) + .add(new Delete(row1).addColumns(FAMILY, Bytes.toBytes("A"))) + .add(new Increment(row1).addColumn(FAMILY, Bytes.toBytes("C"), 3L)) + 
.add(new Append(row1).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))); + Get get = new Get(row2).addColumn(FAMILY, Bytes.toBytes("B")); + RowMutations mutations = + new RowMutations(row3).add(new Delete(row3).addColumns(FAMILY, Bytes.toBytes("C"))) + .add(new Put(row3).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("f"))) + .add(new Increment(row3).addColumn(FAMILY, Bytes.toBytes("A"), 5L)) + .add(new Append(row3).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b"))); + CheckAndMutate checkAndMutate2 = + CheckAndMutate.newBuilder(row4).ifEquals(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("a")) + .build(new Put(row4).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("h"))); + Put put = new Put(row5).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("f")); + CheckAndMutate checkAndMutate3 = + CheckAndMutate.newBuilder(row6).ifEquals(FAMILY, Bytes.toBytes("F"), Bytes.toBytes(10L)) + .build(new Increment(row6).addColumn(FAMILY, Bytes.toBytes("F"), 1)); + CheckAndMutate checkAndMutate4 = + CheckAndMutate.newBuilder(row7).ifEquals(FAMILY, Bytes.toBytes("G"), Bytes.toBytes("g")) + .build(new Append(row7).addColumn(FAMILY, Bytes.toBytes("G"), Bytes.toBytes("g"))); + + List actions = Arrays.asList(checkAndMutate1, get, mutations, checkAndMutate2, put, + checkAndMutate3, checkAndMutate4); + Object[] results = new Object[actions.size()]; + table.batch(actions, results); + + CheckAndMutateResult checkAndMutateResult = (CheckAndMutateResult) results[0]; + assertTrue(checkAndMutateResult.isSuccess()); + assertEquals(3L, + Bytes.toLong(checkAndMutateResult.getResult().getValue(FAMILY, Bytes.toBytes("C")))); + assertEquals("d", + Bytes.toString(checkAndMutateResult.getResult().getValue(FAMILY, Bytes.toBytes("D")))); + + assertEquals("b", Bytes.toString(((Result) results[1]).getValue(FAMILY, Bytes.toBytes("B")))); + + Result result = (Result) results[2]; + assertTrue(result.getExists()); + assertEquals(5L, Bytes.toLong(result.getValue(FAMILY, Bytes.toBytes("A")))); + assertEquals("b", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B")))); + + checkAndMutateResult = (CheckAndMutateResult) results[3]; + assertFalse(checkAndMutateResult.isSuccess()); + assertNull(checkAndMutateResult.getResult()); + + assertTrue(((Result) results[4]).isEmpty()); + + checkAndMutateResult = (CheckAndMutateResult) results[5]; + assertTrue(checkAndMutateResult.isSuccess()); + assertEquals(11, + Bytes.toLong(checkAndMutateResult.getResult().getValue(FAMILY, Bytes.toBytes("F")))); + + checkAndMutateResult = (CheckAndMutateResult) results[6]; + assertTrue(checkAndMutateResult.isSuccess()); + assertEquals("gg", + Bytes.toString(checkAndMutateResult.getResult().getValue(FAMILY, Bytes.toBytes("G")))); + + result = table.get(new Get(row1)); + assertEquals("g", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B")))); + assertNull(result.getValue(FAMILY, Bytes.toBytes("A"))); + assertEquals(3L, Bytes.toLong(result.getValue(FAMILY, Bytes.toBytes("C")))); + assertEquals("d", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("D")))); + + result = table.get(new Get(row3)); + assertNull(result.getValue(FAMILY, Bytes.toBytes("C"))); + assertEquals("f", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("F")))); + assertNull(Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("C")))); + assertEquals(5L, Bytes.toLong(result.getValue(FAMILY, Bytes.toBytes("A")))); + assertEquals("b", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B")))); + + result = table.get(new Get(row4)); + assertEquals("d", 
Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("D")))); + + result = table.get(new Get(row5)); + assertEquals("f", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("E")))); + + result = table.get(new Get(row6)); + assertEquals(11, Bytes.toLong(result.getValue(FAMILY, Bytes.toBytes("F")))); + + result = table.get(new Get(row7)); + assertEquals("gg", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("G")))); + } + } + + @Test + public void testHTableExistsMethodSingleRegionSingleGet() + throws IOException, InterruptedException { + try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + + // Test with a single region table. + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, VALUE); + + Get get = new Get(ROW); + + boolean exist = table.exists(get); + assertFalse(exist); + + table.put(put); + + exist = table.exists(get); + assertTrue(exist); + } + } + + @Test + public void testHTableExistsMethodSingleRegionMultipleGets() + throws IOException, InterruptedException { + try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, VALUE); + table.put(put); + + List gets = new ArrayList<>(); + gets.add(new Get(ROW)); + gets.add(new Get(ANOTHERROW)); + + boolean[] results = table.exists(gets); + assertTrue(results[0]); + assertFalse(results[1]); + } + } + + @Test + public void testHTableExistsBeforeGet() throws IOException, InterruptedException { + try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, VALUE); + table.put(put); + + Get get = new Get(ROW); + + boolean exist = table.exists(get); + assertEquals(true, exist); + + Result result = table.get(get); + assertEquals(false, result.isEmpty()); + assertTrue(Bytes.equals(VALUE, result.getValue(FAMILY, QUALIFIER))); + } + } + + @Test + public void testHTableExistsAllBeforeGet() throws IOException, InterruptedException { + try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + + final byte[] ROW2 = Bytes.add(ROW, Bytes.toBytes("2")); + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, VALUE); + table.put(put); + put = new Put(ROW2); + put.addColumn(FAMILY, QUALIFIER, VALUE); + table.put(put); + + Get get = new Get(ROW); + Get get2 = new Get(ROW2); + ArrayList getList = new ArrayList<>(2); + getList.add(get); + getList.add(get2); + + boolean[] exists = table.exists(getList); + assertEquals(true, exists[0]); + assertEquals(true, exists[1]); + + Result[] result = table.get(getList); + assertEquals(false, result[0].isEmpty()); + assertTrue(Bytes.equals(VALUE, result[0].getValue(FAMILY, QUALIFIER))); + assertEquals(false, result[1].isEmpty()); + assertTrue(Bytes.equals(VALUE, result[1].getValue(FAMILY, QUALIFIER))); + } + } + + @Test + public void testHTableExistsMethodMultipleRegionsSingleGet() throws Exception { + try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 1, + new byte[] { 0x00 }, new byte[] { (byte) 0xff }, 255)) { + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, VALUE); + + Get get = new Get(ROW); + + boolean exist = table.exists(get); + 
assertFalse(exist); + + table.put(put); + + exist = table.exists(get); + assertTrue(exist); + } + } + + @Test + public void testHTableExistsMethodMultipleRegionsMultipleGets() throws Exception { + try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 1, + new byte[] { 0x00 }, new byte[] { (byte) 0xff }, 255)) { + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, VALUE); + table.put(put); + + List gets = new ArrayList<>(); + gets.add(new Get(ANOTHERROW)); + gets.add(new Get(Bytes.add(ROW, new byte[] { 0x00 }))); + gets.add(new Get(ROW)); + gets.add(new Get(Bytes.add(ANOTHERROW, new byte[] { 0x00 }))); + + LOG.info("Calling exists"); + boolean[] results = table.exists(gets); + assertFalse(results[0]); + assertFalse(results[1]); + assertTrue(results[2]); + assertFalse(results[3]); + + // Test with the first region. + put = new Put(new byte[] { 0x00 }); + put.addColumn(FAMILY, QUALIFIER, VALUE); + table.put(put); + + gets = new ArrayList<>(); + gets.add(new Get(new byte[] { 0x00 })); + gets.add(new Get(new byte[] { 0x00, 0x00 })); + results = table.exists(gets); + assertTrue(results[0]); + assertFalse(results[1]); + + // Test with the last region + put = new Put(new byte[] { (byte) 0xff, (byte) 0xff }); + put.addColumn(FAMILY, QUALIFIER, VALUE); + table.put(put); + + gets = new ArrayList<>(); + gets.add(new Get(new byte[] { (byte) 0xff })); + gets.add(new Get(new byte[] { (byte) 0xff, (byte) 0xff })); + gets.add(new Get(new byte[] { (byte) 0xff, (byte) 0xff, (byte) 0xff })); + results = table.exists(gets); + assertFalse(results[0]); + assertTrue(results[1]); + assertFalse(results[2]); + } + } + + @Test + public void testGetEmptyRow() throws Exception { + // Create a table and put in 1 row + try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + + Put put = new Put(ROW_BYTES); + put.addColumn(FAMILY, COL_QUAL, VAL_BYTES); + table.put(put); + + // Try getting the row with an empty row key + Result res = null; + try { + res = table.get(new Get(new byte[0])); + fail(); + } catch (IllegalArgumentException e) { + // Expected. 
+ } + assertTrue(res == null); + res = table.get(new Get(Bytes.toBytes("r1-not-exist"))); + assertTrue(res.isEmpty() == true); + res = table.get(new Get(ROW_BYTES)); + assertTrue(Arrays.equals(res.getValue(FAMILY, COL_QUAL), VAL_BYTES)); + } + } + + @Test + public void testConnectionDefaultUsesCodec() throws Exception { + try ( + RpcClient client = RpcClientFactory.createClient(TEST_UTIL.getConfiguration(), "cluster")) { + assertTrue(client.hasCellBlockSupport()); + } + } + + @Test + public void testPutWithPreBatchMutate() throws Exception { + testPreBatchMutate(tableName, () -> { + try (Table t = TEST_UTIL.getConnection().getTable(tableName)) { + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, VALUE); + t.put(put); + } catch (IOException ex) { + throw new RuntimeException(ex); + } + }); + } + + @Test + public void testRowMutationsWithPreBatchMutate() throws Exception { + testPreBatchMutate(tableName, () -> { + try (Table t = TEST_UTIL.getConnection().getTable(tableName)) { + RowMutations rm = new RowMutations(ROW, 1); + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, VALUE); + rm.add(put); + t.mutateRow(rm); + } catch (IOException ex) { + throw new RuntimeException(ex); + } + }); + } + + private void testPreBatchMutate(TableName tableName, Runnable rn) throws Exception { + TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) + .setCoprocessor(WaitingForScanObserver.class.getName()).build(); + TEST_UTIL.getAdmin().createTable(tableDescriptor); + // Don't use waitTableAvailable(), because the scanner will mess up the co-processor + + ExecutorService service = Executors.newFixedThreadPool(2); + service.execute(rn); + final List cells = new ArrayList<>(); + service.execute(() -> { + try { + // waiting for update. + TimeUnit.SECONDS.sleep(3); + try (Table t = TEST_UTIL.getConnection().getTable(tableName)) { + Scan scan = new Scan(); + try (ResultScanner scanner = t.getScanner(scan)) { + for (Result r : scanner) { + cells.addAll(Arrays.asList(r.rawCells())); + } + } + } + } catch (IOException | InterruptedException ex) { + throw new RuntimeException(ex); + } + }); + service.shutdown(); + service.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); + assertEquals(0, cells.size(), "The write is blocking by RegionObserver#postBatchMutate" + + ", so the data is invisible to reader"); + TEST_UTIL.deleteTable(tableName); + } + + @Test + public void testLockLeakWithDelta() throws Exception, Throwable { + TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) + .setCoprocessor(WaitingForMultiMutationsObserver.class.getName()) + .setValue("hbase.rowlock.wait.duration", String.valueOf(5000)).build(); + TEST_UTIL.getAdmin().createTable(tableDescriptor); + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + + // new a connection for lower retry number. + Configuration copy = new Configuration(TEST_UTIL.getConfiguration()); + copy.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); + try (Connection con = ConnectionFactory.createConnection(copy)) { + HRegion region = (HRegion) find(tableName); + region.setTimeoutForWriteLock(10); + ExecutorService putService = Executors.newSingleThreadExecutor(); + putService.execute(() -> { + try (Table table = con.getTable(tableName)) { + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, VALUE); + // the put will be blocked by WaitingForMultiMutationsObserver. 
+ table.put(put); + } catch (IOException ex) { + throw new RuntimeException(ex); + } + }); + ExecutorService appendService = Executors.newSingleThreadExecutor(); + appendService.execute(() -> { + Append append = new Append(ROW); + append.addColumn(FAMILY, QUALIFIER, VALUE); + try (Table table = con.getTable(tableName)) { + table.append(append); + fail("The APPEND should fail because the target lock is blocked by previous put"); + } catch (Exception ex) { + } + }); + appendService.shutdown(); + appendService.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); + WaitingForMultiMutationsObserver observer = + find(tableName, WaitingForMultiMutationsObserver.class); + observer.latch.countDown(); + putService.shutdown(); + putService.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); + try (Table table = con.getTable(tableName)) { + Result r = table.get(new Get(ROW)); + assertFalse(r.isEmpty()); + assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), VALUE)); + } + } + HRegion region = (HRegion) find(tableName); + int readLockCount = region.getReadLockCount(); + LOG.info("readLockCount:" + readLockCount); + assertEquals(0, readLockCount); + } + + @Test + public void testMultiRowMutations() throws Exception, Throwable { + TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) + .setCoprocessor(MultiRowMutationEndpoint.class.getName()) + .setCoprocessor(WaitingForMultiMutationsObserver.class.getName()) + .setValue("hbase.rowlock.wait.duration", String.valueOf(5000)).build(); + TEST_UTIL.getAdmin().createTable(tableDescriptor); + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + + // new a connection for lower retry number. + Configuration copy = new Configuration(TEST_UTIL.getConfiguration()); + copy.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); + try (Connection con = ConnectionFactory.createConnection(copy)) { + byte[] row = Bytes.toBytes("ROW-0"); + byte[] rowLocked = Bytes.toBytes("ROW-1"); + byte[] value0 = Bytes.toBytes("VALUE-0"); + byte[] value1 = Bytes.toBytes("VALUE-1"); + byte[] value2 = Bytes.toBytes("VALUE-2"); + assertNoLocks(tableName); + ExecutorService putService = Executors.newSingleThreadExecutor(); + putService.execute(() -> { + try (Table table = con.getTable(tableName)) { + Put put0 = new Put(rowLocked); + put0.addColumn(FAMILY, QUALIFIER, value0); + // the put will be blocked by WaitingForMultiMutationsObserver. 
+ table.put(put0); + } catch (IOException ex) { + throw new RuntimeException(ex); + } + }); + ExecutorService cpService = Executors.newSingleThreadExecutor(); + AtomicBoolean exceptionDuringMutateRows = new AtomicBoolean(); + cpService.execute(() -> { + Put put1 = new Put(row); + Put put2 = new Put(rowLocked); + put1.addColumn(FAMILY, QUALIFIER, value1); + put2.addColumn(FAMILY, QUALIFIER, value2); + try (Table table = con.getTable(tableName)) { + MultiRowMutationProtos.MutateRowsRequest request = + MultiRowMutationProtos.MutateRowsRequest.newBuilder() + .addMutationRequest( + ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.PUT, put1)) + .addMutationRequest( + ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.PUT, put2)) + .build(); + table.coprocessorService(MultiRowMutationProtos.MultiRowMutationService.class, ROW, ROW, + (MultiRowMutationProtos.MultiRowMutationService exe) -> { + ServerRpcController controller = new ServerRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback< + MultiRowMutationProtos.MutateRowsResponse> rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + exe.mutateRows(controller, request, rpcCallback); + if ( + controller.failedOnException() + && !(controller.getFailedOn() instanceof UnknownProtocolException) + ) { + exceptionDuringMutateRows.set(true); + } + return rpcCallback.get(); + }); + } catch (Throwable ex) { + LOG.error("encountered " + ex); + } + }); + cpService.shutdown(); + cpService.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); + WaitingForMultiMutationsObserver observer = + find(tableName, WaitingForMultiMutationsObserver.class); + observer.latch.countDown(); + putService.shutdown(); + putService.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); + try (Table table = con.getTable(tableName)) { + Get g0 = new Get(row); + Get g1 = new Get(rowLocked); + Result r0 = table.get(g0); + Result r1 = table.get(g1); + assertTrue(r0.isEmpty()); + assertFalse(r1.isEmpty()); + assertTrue(Bytes.equals(r1.getValue(FAMILY, QUALIFIER), value0)); + } + assertNoLocks(tableName); + if (!exceptionDuringMutateRows.get()) { + fail("This cp should fail because the target lock is blocked by previous put"); + } + } + } + + /** + * A test case for issue HBASE-17482. After combining seqid with the mvcc read point, seqid/mvcc + * was acquired and stamped onto cells in the append thread, and a countdown latch was used to + * ensure that this happened before cells could be put into the memstore. The MVCCPreAssign patch + * (HBASE-16698) moved the seqid/mvcc acquisition into the handler thread while the stamping + * remained in the append thread, with no countdown latch to guarantee that cells in the memstore + * are stamped with a seqid/mvcc. If cells without an mvcc (a.k.a. mvcc=0) are put into the + * memstore, a scanner with a smaller read point can see that data, which violates the + * multi-version concurrency control rules. This test case reproduces this + * scenario.
+ */ + @Test + public void testMVCCUsingMVCCPreAssign() throws IOException, InterruptedException { + try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + // put two row first to init the scanner + Put put = new Put(Bytes.toBytes("0")); + put.addColumn(FAMILY, Bytes.toBytes(""), Bytes.toBytes("0")); + table.put(put); + put = new Put(Bytes.toBytes("00")); + put.addColumn(FAMILY, Bytes.toBytes(""), Bytes.toBytes("0")); + table.put(put); + Scan scan = new Scan(); + scan.setTimeRange(0, Long.MAX_VALUE); + scan.setCaching(1); + ResultScanner scanner = table.getScanner(scan); + int rowNum = scanner.next() != null ? 1 : 0; + // the started scanner shouldn't see the rows put below + for (int i = 1; i < 1000; i++) { + put = new Put(Bytes.toBytes(String.valueOf(i))); + put.setDurability(Durability.ASYNC_WAL); + put.addColumn(FAMILY, Bytes.toBytes(""), Bytes.toBytes(i)); + table.put(put); + } + for (Result result : scanner) { + rowNum++; + } + // scanner should only see two rows + assertEquals(2, rowNum); + scanner = table.getScanner(scan); + rowNum = 0; + for (Result result : scanner) { + rowNum++; + } + // the new scanner should see all rows + assertEquals(1001, rowNum); + } + } + + @Test + public void testPutThenGetWithMultipleThreads() throws Exception { + final int THREAD_NUM = 20; + final int ROUND_NUM = 10; + for (int round = 0; round < ROUND_NUM; round++) { + ArrayList threads = new ArrayList<>(THREAD_NUM); + final AtomicInteger successCnt = new AtomicInteger(0); + try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + + for (int i = 0; i < THREAD_NUM; i++) { + final int index = i; + Thread t = new Thread(new Runnable() { + + @Override + public void run() { + final byte[] row = Bytes.toBytes("row-" + index); + final byte[] value = Bytes.toBytes("v" + index); + try { + Put put = new Put(row); + put.addColumn(FAMILY, QUALIFIER, value); + ht.put(put); + Get get = new Get(row); + Result result = ht.get(get); + byte[] returnedValue = result.getValue(FAMILY, QUALIFIER); + if (Bytes.equals(value, returnedValue)) { + successCnt.getAndIncrement(); + } else { + LOG.error("Should be equal but not, original value: " + Bytes.toString(value) + + ", returned value: " + + (returnedValue == null ? 
"null" : Bytes.toString(returnedValue))); + } + } catch (Throwable e) { + // do nothing + } + } + }); + threads.add(t); + } + for (Thread t : threads) { + t.start(); + } + for (Thread t : threads) { + t.join(); + } + assertEquals(THREAD_NUM, successCnt.get(), "Not equal in round " + round); + } + TEST_UTIL.deleteTable(tableName); + } + } + + private static void assertNoLocks(final TableName tableName) + throws IOException, InterruptedException { + HRegion region = (HRegion) find(tableName); + assertEquals(0, region.getLockedRows().size()); + } + + private static HRegion find(final TableName tableName) throws IOException, InterruptedException { + HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(tableName); + List regions = rs.getRegions(tableName); + assertEquals(1, regions.size()); + return regions.get(0); + } + + private static T find(final TableName tableName, Class clz) + throws IOException, InterruptedException { + HRegion region = find(tableName); + Coprocessor cp = region.getCoprocessorHost().findCoprocessor(clz.getName()); + assertTrue(clz.isInstance(cp), "The cp instance should be " + clz.getName() + + ", current instance is " + cp.getClass().getName()); + return clz.cast(cp); + } + + public static class WaitingForMultiMutationsObserver + implements RegionCoprocessor, RegionObserver { + final CountDownLatch latch = new CountDownLatch(1); + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + @Override + public void postBatchMutate(final ObserverContext c, + final MiniBatchOperationInProgress miniBatchOp) throws IOException { + try { + latch.await(); + } catch (InterruptedException ex) { + throw new IOException(ex); + } + } + } + + public static class WaitingForScanObserver implements RegionCoprocessor, RegionObserver { + private final CountDownLatch latch = new CountDownLatch(1); + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + @Override + public void postBatchMutate(final ObserverContext c, + final MiniBatchOperationInProgress miniBatchOp) throws IOException { + try { + // waiting for scanner + latch.await(); + } catch (InterruptedException ex) { + throw new IOException(ex); + } + } + + @Override + public RegionScanner postScannerOpen( + final ObserverContext e, final Scan scan, + final RegionScanner s) throws IOException { + latch.countDown(); + return s; + } + } + + static byte[] generateHugeValue(int size) { + Random rand = ThreadLocalRandom.current(); + byte[] value = new byte[size]; + for (int i = 0; i < value.length; i++) { + value[i] = (byte) rand.nextInt(256); + } + return value; + } + + @Test + public void testScanWithBatchSizeReturnIncompleteCells() + throws IOException, InterruptedException { + TableDescriptor hd = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setMaxVersions(3).build()) + .build(); + try (Table table = TEST_UTIL.createTable(hd, null)) { + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + + Put put = new Put(ROW); + put.addColumn(FAMILY, Bytes.toBytes(0), generateHugeValue(3 * 1024 * 1024)); + table.put(put); + + put = new Put(ROW); + put.addColumn(FAMILY, Bytes.toBytes(1), generateHugeValue(4 * 1024 * 1024)); + table.put(put); + + for (int i = 2; i < 5; i++) { + for (int version = 0; version < 2; version++) { + put = new Put(ROW); + put.addColumn(FAMILY, Bytes.toBytes(i), generateHugeValue(1024)); + table.put(put); + } + } + + Scan scan = new Scan(); + scan.withStartRow(ROW).withStopRow(ROW, 
true).addFamily(FAMILY).setBatch(3) + .setMaxResultSize(4 * 1024 * 1024); + Result result; + try (ResultScanner scanner = table.getScanner(scan)) { + List list = new ArrayList<>(); + /* + * The first scan rpc should return a result with 2 cells, because 3MB + 4MB > 4MB; The + * second scan rpc should return a result with 3 cells, because reach the batch limit = 3; + * The mayHaveMoreCellsInRow in last result should be false in the scan rpc. BTW, the + * moreResultsInRegion also would be false. Finally, the client should collect all the cells + * into two result: 2+3 -> 3+2; + */ + while ((result = scanner.next()) != null) { + list.add(result); + } + + assertEquals(5, list.stream().mapToInt(Result::size).sum()); + assertEquals(2, list.size()); + assertEquals(3, list.get(0).size()); + assertEquals(2, list.get(1).size()); + } + + scan = new Scan(); + scan.withStartRow(ROW).withStopRow(ROW, true).addFamily(FAMILY).setBatch(2) + .setMaxResultSize(4 * 1024 * 1024); + try (ResultScanner scanner = table.getScanner(scan)) { + List list = new ArrayList<>(); + while ((result = scanner.next()) != null) { + list.add(result); + } + assertEquals(5, list.stream().mapToInt(Result::size).sum()); + assertEquals(3, list.size()); + assertEquals(2, list.get(0).size()); + assertEquals(2, list.get(1).size()); + assertEquals(1, list.get(2).size()); + } + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java index 27a08a8e9b2e..daad7ce31886 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java @@ -17,1184 +17,17 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Optional; -import java.util.Random; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.ThreadLocalRandom; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.Coprocessor; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HBaseTestingUtil; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.RegionMetrics; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; -import org.apache.hadoop.hbase.coprocessor.ObserverContext; -import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; -import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; -import org.apache.hadoop.hbase.coprocessor.RegionObserver; -import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; -import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; -import 
org.apache.hadoop.hbase.ipc.RpcClient; -import org.apache.hadoop.hbase.ipc.RpcClientFactory; -import org.apache.hadoop.hbase.ipc.ServerRpcController; -import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; -import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos; - -@Category({ LargeTests.class, ClientTests.class }) -public class TestFromClientSide3 { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFromClientSide3.class); - - private static final Logger LOG = LoggerFactory.getLogger(TestFromClientSide3.class); - private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final int WAITTABLE_MILLIS = 10000; - private static byte[] FAMILY = Bytes.toBytes("testFamily"); - private static int SLAVES = 3; - private static final byte[] ROW = Bytes.toBytes("testRow"); - private static final byte[] ANOTHERROW = Bytes.toBytes("anotherrow"); - private static final byte[] QUALIFIER = Bytes.toBytes("testQualifier"); - private static final byte[] VALUE = Bytes.toBytes("testValue"); - private static final byte[] COL_QUAL = Bytes.toBytes("f1"); - private static final byte[] VAL_BYTES = Bytes.toBytes("v1"); - private static final byte[] ROW_BYTES = Bytes.toBytes("r1"); - - @Rule - public TestName name = new TestName(); - private TableName tableName; - - /** - * @throws java.lang.Exception - */ - @BeforeClass - public static void setUpBeforeClass() throws Exception { - TEST_UTIL.startMiniCluster(SLAVES); - } - - /** - * @throws java.lang.Exception - */ - @AfterClass - public static void tearDownAfterClass() throws Exception { - TEST_UTIL.shutdownMiniCluster(); - } - - @Before - public void setUp() throws Exception { - tableName = TableName.valueOf(name.getMethodName()); - } - - @After - public void tearDown() throws Exception { - for (TableDescriptor htd : TEST_UTIL.getAdmin().listTableDescriptors()) { - LOG.info("Tear down, remove table=" + htd.getTableName()); - TEST_UTIL.deleteTable(htd.getTableName()); - } - } - - private void randomCFPuts(Table table, byte[] row, byte[] family, int nPuts) throws Exception { - Put put = new Put(row); - Random rand = ThreadLocalRandom.current(); - for (int i = 0; i < nPuts; i++) { - byte[] qualifier = Bytes.toBytes(rand.nextInt()); - byte[] value = Bytes.toBytes(rand.nextInt()); - put.addColumn(family, qualifier, value); - } - table.put(put); - } - - private void performMultiplePutAndFlush(Admin admin, Table table, byte[] row, byte[] family, - int nFlushes, int nPuts) throws Exception { - for (int i = 0; i < nFlushes; i++) { - randomCFPuts(table, row, family, 
nPuts); - admin.flush(table.getName()); - } - } - - private static List toList(ResultScanner scanner) { - try { - List cells = new ArrayList<>(); - for (Result r : scanner) { - cells.addAll(r.listCells()); - } - return cells; - } finally { - scanner.close(); - } - } - - @Test - public void testScanAfterDeletingSpecifiedRow() throws IOException, InterruptedException { - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - byte[] row = Bytes.toBytes("SpecifiedRow"); - byte[] value0 = Bytes.toBytes("value_0"); - byte[] value1 = Bytes.toBytes("value_1"); - Put put = new Put(row); - put.addColumn(FAMILY, QUALIFIER, VALUE); - table.put(put); - Delete d = new Delete(row); - table.delete(d); - put = new Put(row); - put.addColumn(FAMILY, null, value0); - table.put(put); - put = new Put(row); - put.addColumn(FAMILY, null, value1); - table.put(put); - List cells = toList(table.getScanner(new Scan())); - assertEquals(1, cells.size()); - assertEquals("value_1", Bytes.toString(CellUtil.cloneValue(cells.get(0)))); - - cells = toList(table.getScanner(new Scan().addFamily(FAMILY))); - assertEquals(1, cells.size()); - assertEquals("value_1", Bytes.toString(CellUtil.cloneValue(cells.get(0)))); - - cells = toList(table.getScanner(new Scan().addColumn(FAMILY, QUALIFIER))); - assertEquals(0, cells.size()); - - TEST_UTIL.getAdmin().flush(tableName); - cells = toList(table.getScanner(new Scan())); - assertEquals(1, cells.size()); - assertEquals("value_1", Bytes.toString(CellUtil.cloneValue(cells.get(0)))); - - cells = toList(table.getScanner(new Scan().addFamily(FAMILY))); - assertEquals(1, cells.size()); - assertEquals("value_1", Bytes.toString(CellUtil.cloneValue(cells.get(0)))); - - cells = toList(table.getScanner(new Scan().addColumn(FAMILY, QUALIFIER))); - assertEquals(0, cells.size()); - } - } - - @Test - public void testScanAfterDeletingSpecifiedRowV2() throws IOException, InterruptedException { - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - byte[] row = Bytes.toBytes("SpecifiedRow"); - byte[] qual0 = Bytes.toBytes("qual0"); - byte[] qual1 = Bytes.toBytes("qual1"); - long now = EnvironmentEdgeManager.currentTime(); - Delete d = new Delete(row, now); - table.delete(d); - - Put put = new Put(row); - put.addColumn(FAMILY, null, now + 1, VALUE); - table.put(put); - - put = new Put(row); - put.addColumn(FAMILY, qual1, now + 2, qual1); - table.put(put); - - put = new Put(row); - put.addColumn(FAMILY, qual0, now + 3, qual0); - table.put(put); - - Result r = table.get(new Get(row)); - assertEquals(r.toString(), 3, r.size()); - assertEquals("testValue", Bytes.toString(CellUtil.cloneValue(r.rawCells()[0]))); - assertEquals("qual0", Bytes.toString(CellUtil.cloneValue(r.rawCells()[1]))); - assertEquals("qual1", Bytes.toString(CellUtil.cloneValue(r.rawCells()[2]))); - - TEST_UTIL.getAdmin().flush(tableName); - r = table.get(new Get(row)); - assertEquals(3, r.size()); - assertEquals("testValue", Bytes.toString(CellUtil.cloneValue(r.rawCells()[0]))); - assertEquals("qual0", Bytes.toString(CellUtil.cloneValue(r.rawCells()[1]))); - assertEquals("qual1", Bytes.toString(CellUtil.cloneValue(r.rawCells()[2]))); - } - } - - private int getStoreFileCount(Admin admin, ServerName serverName, RegionInfo region) - throws IOException { - for (RegionMetrics metrics : admin.getRegionMetrics(serverName, region.getTable())) { - if 
(Bytes.equals(region.getRegionName(), metrics.getRegionName())) { - return metrics.getStoreFileCount(); - } - } - return 0; - } - - // override the config settings at the CF level and ensure priority - @Test - public void testAdvancedConfigOverride() throws Exception { - /* - * Overall idea: (1) create 3 store files and issue a compaction. config's compaction.min == 3, - * so should work. (2) Increase the compaction.min toggle in the HTD to 5 and modify table. If - * we use the HTD value instead of the default config value, adding 3 files and issuing a - * compaction SHOULD NOT work (3) Decrease the compaction.min toggle in the HCD to 2 and modify - * table. The CF schema should override the Table schema and now cause a minor compaction. - */ - TEST_UTIL.getConfiguration().setInt("hbase.hstore.compaction.min", 3); - - final TableName tableName = TableName.valueOf(name.getMethodName()); - try (Table table = TEST_UTIL.createTable(tableName, FAMILY, 10)) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - Admin admin = TEST_UTIL.getAdmin(); - - // Create 3 store files. - byte[] row = Bytes.toBytes(ThreadLocalRandom.current().nextInt()); - performMultiplePutAndFlush(admin, table, row, FAMILY, 3, 100); - - try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) { - // Verify we have multiple store files. - HRegionLocation loc = locator.getRegionLocation(row, true); - assertTrue(getStoreFileCount(admin, loc.getServerName(), loc.getRegion()) > 1); - - // Issue a compaction request - admin.compact(tableName); - - // poll wait for the compactions to happen - for (int i = 0; i < 10 * 1000 / 40; ++i) { - // The number of store files after compaction should be lesser. - loc = locator.getRegionLocation(row, true); - if (!loc.getRegion().isOffline()) { - if (getStoreFileCount(admin, loc.getServerName(), loc.getRegion()) <= 1) { - break; - } - } - Thread.sleep(40); - } - // verify the compactions took place and that we didn't just time out - assertTrue(getStoreFileCount(admin, loc.getServerName(), loc.getRegion()) <= 1); - - // change the compaction.min config option for this table to 5 - LOG.info("hbase.hstore.compaction.min should now be 5"); - TableDescriptor htd = TableDescriptorBuilder.newBuilder(table.getDescriptor()) - .setValue("hbase.hstore.compaction.min", String.valueOf(5)).build(); - admin.modifyTable(htd); - LOG.info("alter status finished"); - - // Create 3 more store files. 
- performMultiplePutAndFlush(admin, table, row, FAMILY, 3, 10); - - // Issue a compaction request - admin.compact(tableName); - - // This time, the compaction request should not happen - Thread.sleep(10 * 1000); - loc = locator.getRegionLocation(row, true); - int sfCount = getStoreFileCount(admin, loc.getServerName(), loc.getRegion()); - assertTrue(sfCount > 1); - - // change an individual CF's config option to 2 & online schema update - LOG.info("hbase.hstore.compaction.min should now be 2"); - htd = TableDescriptorBuilder.newBuilder(htd) - .modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(htd.getColumnFamily(FAMILY)) - .setValue("hbase.hstore.compaction.min", String.valueOf(2)).build()) - .build(); - admin.modifyTable(htd); - LOG.info("alter status finished"); - - // Issue a compaction request - admin.compact(tableName); - - // poll wait for the compactions to happen - for (int i = 0; i < 10 * 1000 / 40; ++i) { - loc = locator.getRegionLocation(row, true); - try { - if (getStoreFileCount(admin, loc.getServerName(), loc.getRegion()) < sfCount) { - break; - } - } catch (Exception e) { - LOG.debug("Waiting for region to come online: " - + Bytes.toStringBinary(loc.getRegion().getRegionName())); - } - Thread.sleep(40); - } - - // verify the compaction took place and that we didn't just time out - assertTrue(getStoreFileCount(admin, loc.getServerName(), loc.getRegion()) < sfCount); - - // Finally, ensure that we can remove a custom config value after we made it - LOG.info("Removing CF config value"); - LOG.info("hbase.hstore.compaction.min should now be 5"); - htd = TableDescriptorBuilder.newBuilder(htd) - .modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(htd.getColumnFamily(FAMILY)) - .setValue("hbase.hstore.compaction.min", null).build()) - .build(); - admin.modifyTable(htd); - LOG.info("alter status finished"); - assertNull(table.getDescriptor().getColumnFamily(FAMILY) - .getValue(Bytes.toBytes("hbase.hstore.compaction.min"))); - } - } - } - - @Test - public void testHTableBatchWithEmptyPut() throws IOException, InterruptedException { - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - List actions = (List) new ArrayList(); - Object[] results = new Object[2]; - // create an empty Put - Put put1 = new Put(ROW); - actions.add(put1); - - Put put2 = new Put(ANOTHERROW); - put2.addColumn(FAMILY, QUALIFIER, VALUE); - actions.add(put2); - - table.batch(actions, results); - fail("Empty Put should have failed the batch call"); - } catch (IllegalArgumentException iae) { - } - } - - // Test Table.batch with large amount of mutations against the same key. - // It used to trigger read lock's "Maximum lock count exceeded" Error. 
- @Test - public void testHTableWithLargeBatch() throws IOException, InterruptedException { - int sixtyFourK = 64 * 1024; - List actions = new ArrayList(); - Object[] results = new Object[(sixtyFourK + 1) * 2]; - - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - - for (int i = 0; i < sixtyFourK + 1; i++) { - Put put1 = new Put(ROW); - put1.addColumn(FAMILY, QUALIFIER, VALUE); - actions.add(put1); - - Put put2 = new Put(ANOTHERROW); - put2.addColumn(FAMILY, QUALIFIER, VALUE); - actions.add(put2); - } - - table.batch(actions, results); - } - } - - @Test - public void testBatchWithRowMutation() throws Exception { - LOG.info("Starting testBatchWithRowMutation"); - byte[][] QUALIFIERS = new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b") }; - - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - - RowMutations arm = RowMutations - .of(Collections.singletonList(new Put(ROW).addColumn(FAMILY, QUALIFIERS[0], VALUE))); - Object[] batchResult = new Object[1]; - table.batch(Arrays.asList(arm), batchResult); - - Get g = new Get(ROW); - Result r = table.get(g); - assertEquals(0, Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[0]))); - - arm = RowMutations.of(Arrays.asList(new Put(ROW).addColumn(FAMILY, QUALIFIERS[1], VALUE), - new Delete(ROW).addColumns(FAMILY, QUALIFIERS[0]))); - table.batch(Arrays.asList(arm), batchResult); - r = table.get(g); - assertEquals(0, Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[1]))); - assertNull(r.getValue(FAMILY, QUALIFIERS[0])); - - // Test that we get the correct remote exception for RowMutations from batch() - try { - arm = RowMutations.of(Collections.singletonList( - new Put(ROW).addColumn(new byte[] { 'b', 'o', 'g', 'u', 's' }, QUALIFIERS[0], VALUE))); - table.batch(Arrays.asList(arm), batchResult); - fail("Expected RetriesExhaustedWithDetailsException with NoSuchColumnFamilyException"); - } catch (RetriesExhaustedException e) { - String msg = e.getMessage(); - assertTrue(msg.contains("NoSuchColumnFamilyException")); - } - } - } - - @Test - public void testBatchWithCheckAndMutate() throws Exception { - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { - byte[] row1 = Bytes.toBytes("row1"); - byte[] row2 = Bytes.toBytes("row2"); - byte[] row3 = Bytes.toBytes("row3"); - byte[] row4 = Bytes.toBytes("row4"); - byte[] row5 = Bytes.toBytes("row5"); - byte[] row6 = Bytes.toBytes("row6"); - byte[] row7 = Bytes.toBytes("row7"); - - table - .put(Arrays.asList(new Put(row1).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")), - new Put(row2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")), - new Put(row3).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")), - new Put(row4).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")), - new Put(row5).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")), - new Put(row6).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes(10L)), - new Put(row7).addColumn(FAMILY, Bytes.toBytes("G"), Bytes.toBytes("g")))); - - CheckAndMutate checkAndMutate1 = - CheckAndMutate.newBuilder(row1).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) - .build(new RowMutations(row1) - .add(new Put(row1).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("g"))) - .add(new Delete(row1).addColumns(FAMILY, Bytes.toBytes("A"))) - .add(new Increment(row1).addColumn(FAMILY, Bytes.toBytes("C"), 3L)) - 
.add(new Append(row1).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))); - Get get = new Get(row2).addColumn(FAMILY, Bytes.toBytes("B")); - RowMutations mutations = - new RowMutations(row3).add(new Delete(row3).addColumns(FAMILY, Bytes.toBytes("C"))) - .add(new Put(row3).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("f"))) - .add(new Increment(row3).addColumn(FAMILY, Bytes.toBytes("A"), 5L)) - .add(new Append(row3).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b"))); - CheckAndMutate checkAndMutate2 = - CheckAndMutate.newBuilder(row4).ifEquals(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("a")) - .build(new Put(row4).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("h"))); - Put put = new Put(row5).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("f")); - CheckAndMutate checkAndMutate3 = - CheckAndMutate.newBuilder(row6).ifEquals(FAMILY, Bytes.toBytes("F"), Bytes.toBytes(10L)) - .build(new Increment(row6).addColumn(FAMILY, Bytes.toBytes("F"), 1)); - CheckAndMutate checkAndMutate4 = - CheckAndMutate.newBuilder(row7).ifEquals(FAMILY, Bytes.toBytes("G"), Bytes.toBytes("g")) - .build(new Append(row7).addColumn(FAMILY, Bytes.toBytes("G"), Bytes.toBytes("g"))); - - List actions = Arrays.asList(checkAndMutate1, get, mutations, checkAndMutate2, put, - checkAndMutate3, checkAndMutate4); - Object[] results = new Object[actions.size()]; - table.batch(actions, results); - - CheckAndMutateResult checkAndMutateResult = (CheckAndMutateResult) results[0]; - assertTrue(checkAndMutateResult.isSuccess()); - assertEquals(3L, - Bytes.toLong(checkAndMutateResult.getResult().getValue(FAMILY, Bytes.toBytes("C")))); - assertEquals("d", - Bytes.toString(checkAndMutateResult.getResult().getValue(FAMILY, Bytes.toBytes("D")))); - - assertEquals("b", Bytes.toString(((Result) results[1]).getValue(FAMILY, Bytes.toBytes("B")))); - - Result result = (Result) results[2]; - assertTrue(result.getExists()); - assertEquals(5L, Bytes.toLong(result.getValue(FAMILY, Bytes.toBytes("A")))); - assertEquals("b", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B")))); - - checkAndMutateResult = (CheckAndMutateResult) results[3]; - assertFalse(checkAndMutateResult.isSuccess()); - assertNull(checkAndMutateResult.getResult()); - - assertTrue(((Result) results[4]).isEmpty()); - - checkAndMutateResult = (CheckAndMutateResult) results[5]; - assertTrue(checkAndMutateResult.isSuccess()); - assertEquals(11, - Bytes.toLong(checkAndMutateResult.getResult().getValue(FAMILY, Bytes.toBytes("F")))); - - checkAndMutateResult = (CheckAndMutateResult) results[6]; - assertTrue(checkAndMutateResult.isSuccess()); - assertEquals("gg", - Bytes.toString(checkAndMutateResult.getResult().getValue(FAMILY, Bytes.toBytes("G")))); - - result = table.get(new Get(row1)); - assertEquals("g", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B")))); - assertNull(result.getValue(FAMILY, Bytes.toBytes("A"))); - assertEquals(3L, Bytes.toLong(result.getValue(FAMILY, Bytes.toBytes("C")))); - assertEquals("d", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("D")))); - - result = table.get(new Get(row3)); - assertNull(result.getValue(FAMILY, Bytes.toBytes("C"))); - assertEquals("f", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("F")))); - assertNull(Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("C")))); - assertEquals(5L, Bytes.toLong(result.getValue(FAMILY, Bytes.toBytes("A")))); - assertEquals("b", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B")))); - - result = table.get(new Get(row4)); - assertEquals("d", 
Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("D")))); - - result = table.get(new Get(row5)); - assertEquals("f", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("E")))); - - result = table.get(new Get(row6)); - assertEquals(11, Bytes.toLong(result.getValue(FAMILY, Bytes.toBytes("F")))); - - result = table.get(new Get(row7)); - assertEquals("gg", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("G")))); - } - } - - @Test - public void testHTableExistsMethodSingleRegionSingleGet() - throws IOException, InterruptedException { - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - - // Test with a single region table. - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, VALUE); - - Get get = new Get(ROW); - - boolean exist = table.exists(get); - assertFalse(exist); - - table.put(put); - - exist = table.exists(get); - assertTrue(exist); - } - } - - @Test - public void testHTableExistsMethodSingleRegionMultipleGets() - throws IOException, InterruptedException { - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, VALUE); - table.put(put); - - List gets = new ArrayList<>(); - gets.add(new Get(ROW)); - gets.add(new Get(ANOTHERROW)); - - boolean[] results = table.exists(gets); - assertTrue(results[0]); - assertFalse(results[1]); - } - } - - @Test - public void testHTableExistsBeforeGet() throws IOException, InterruptedException { - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, VALUE); - table.put(put); - - Get get = new Get(ROW); - - boolean exist = table.exists(get); - assertEquals(true, exist); - - Result result = table.get(get); - assertEquals(false, result.isEmpty()); - assertTrue(Bytes.equals(VALUE, result.getValue(FAMILY, QUALIFIER))); - } - } - - @Test - public void testHTableExistsAllBeforeGet() throws IOException, InterruptedException { - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - - final byte[] ROW2 = Bytes.add(ROW, Bytes.toBytes("2")); - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, VALUE); - table.put(put); - put = new Put(ROW2); - put.addColumn(FAMILY, QUALIFIER, VALUE); - table.put(put); - - Get get = new Get(ROW); - Get get2 = new Get(ROW2); - ArrayList getList = new ArrayList(2); - getList.add(get); - getList.add(get2); - - boolean[] exists = table.exists(getList); - assertEquals(true, exists[0]); - assertEquals(true, exists[1]); - - Result[] result = table.get(getList); - assertEquals(false, result[0].isEmpty()); - assertTrue(Bytes.equals(VALUE, result[0].getValue(FAMILY, QUALIFIER))); - assertEquals(false, result[1].isEmpty()); - assertTrue(Bytes.equals(VALUE, result[1].getValue(FAMILY, QUALIFIER))); - } - } - - @Test - public void testHTableExistsMethodMultipleRegionsSingleGet() throws Exception { - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 1, - new byte[] { 0x00 }, new byte[] { (byte) 0xff }, 255)) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, VALUE); - - Get get = new Get(ROW); - - boolean exist = table.exists(get); - 
assertFalse(exist); - - table.put(put); - - exist = table.exists(get); - assertTrue(exist); - } - } - - @Test - public void testHTableExistsMethodMultipleRegionsMultipleGets() throws Exception { - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 1, - new byte[] { 0x00 }, new byte[] { (byte) 0xff }, 255)) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, VALUE); - table.put(put); - - List gets = new ArrayList<>(); - gets.add(new Get(ANOTHERROW)); - gets.add(new Get(Bytes.add(ROW, new byte[] { 0x00 }))); - gets.add(new Get(ROW)); - gets.add(new Get(Bytes.add(ANOTHERROW, new byte[] { 0x00 }))); - - LOG.info("Calling exists"); - boolean[] results = table.exists(gets); - assertFalse(results[0]); - assertFalse(results[1]); - assertTrue(results[2]); - assertFalse(results[3]); - - // Test with the first region. - put = new Put(new byte[] { 0x00 }); - put.addColumn(FAMILY, QUALIFIER, VALUE); - table.put(put); - - gets = new ArrayList<>(); - gets.add(new Get(new byte[] { 0x00 })); - gets.add(new Get(new byte[] { 0x00, 0x00 })); - results = table.exists(gets); - assertTrue(results[0]); - assertFalse(results[1]); - - // Test with the last region - put = new Put(new byte[] { (byte) 0xff, (byte) 0xff }); - put.addColumn(FAMILY, QUALIFIER, VALUE); - table.put(put); - - gets = new ArrayList<>(); - gets.add(new Get(new byte[] { (byte) 0xff })); - gets.add(new Get(new byte[] { (byte) 0xff, (byte) 0xff })); - gets.add(new Get(new byte[] { (byte) 0xff, (byte) 0xff, (byte) 0xff })); - results = table.exists(gets); - assertFalse(results[0]); - assertTrue(results[1]); - assertFalse(results[2]); - } - } - - @Test - public void testGetEmptyRow() throws Exception { - // Create a table and put in 1 row - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - - Put put = new Put(ROW_BYTES); - put.addColumn(FAMILY, COL_QUAL, VAL_BYTES); - table.put(put); - - // Try getting the row with an empty row key - Result res = null; - try { - res = table.get(new Get(new byte[0])); - fail(); - } catch (IllegalArgumentException e) { - // Expected. 
- } - assertTrue(res == null); - res = table.get(new Get(Bytes.toBytes("r1-not-exist"))); - assertTrue(res.isEmpty() == true); - res = table.get(new Get(ROW_BYTES)); - assertTrue(Arrays.equals(res.getValue(FAMILY, COL_QUAL), VAL_BYTES)); - } - } - - @Test - public void testConnectionDefaultUsesCodec() throws Exception { - try ( - RpcClient client = RpcClientFactory.createClient(TEST_UTIL.getConfiguration(), "cluster")) { - assertTrue(client.hasCellBlockSupport()); - } - } - - @Test - public void testPutWithPreBatchMutate() throws Exception { - testPreBatchMutate(tableName, () -> { - try (Table t = TEST_UTIL.getConnection().getTable(tableName)) { - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, VALUE); - t.put(put); - } catch (IOException ex) { - throw new RuntimeException(ex); - } - }); - } - - @Test - public void testRowMutationsWithPreBatchMutate() throws Exception { - testPreBatchMutate(tableName, () -> { - try (Table t = TEST_UTIL.getConnection().getTable(tableName)) { - RowMutations rm = new RowMutations(ROW, 1); - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, VALUE); - rm.add(put); - t.mutateRow(rm); - } catch (IOException ex) { - throw new RuntimeException(ex); - } - }); - } - - private void testPreBatchMutate(TableName tableName, Runnable rn) throws Exception { - TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) - .setCoprocessor(WaitingForScanObserver.class.getName()).build(); - TEST_UTIL.getAdmin().createTable(tableDescriptor); - // Don't use waitTableAvailable(), because the scanner will mess up the co-processor - - ExecutorService service = Executors.newFixedThreadPool(2); - service.execute(rn); - final List cells = new ArrayList<>(); - service.execute(() -> { - try { - // waiting for update. - TimeUnit.SECONDS.sleep(3); - try (Table t = TEST_UTIL.getConnection().getTable(tableName)) { - Scan scan = new Scan(); - try (ResultScanner scanner = t.getScanner(scan)) { - for (Result r : scanner) { - cells.addAll(Arrays.asList(r.rawCells())); - } - } - } - } catch (IOException | InterruptedException ex) { - throw new RuntimeException(ex); - } - }); - service.shutdown(); - service.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); - assertEquals("The write is blocking by RegionObserver#postBatchMutate" - + ", so the data is invisible to reader", 0, cells.size()); - TEST_UTIL.deleteTable(tableName); - } - - @Test - public void testLockLeakWithDelta() throws Exception, Throwable { - TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) - .setCoprocessor(WaitingForMultiMutationsObserver.class.getName()) - .setValue("hbase.rowlock.wait.duration", String.valueOf(5000)).build(); - TEST_UTIL.getAdmin().createTable(tableDescriptor); - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - - // new a connection for lower retry number. - Configuration copy = new Configuration(TEST_UTIL.getConfiguration()); - copy.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); - try (Connection con = ConnectionFactory.createConnection(copy)) { - HRegion region = (HRegion) find(tableName); - region.setTimeoutForWriteLock(10); - ExecutorService putService = Executors.newSingleThreadExecutor(); - putService.execute(() -> { - try (Table table = con.getTable(tableName)) { - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, VALUE); - // the put will be blocked by WaitingForMultiMutationsObserver. 
- table.put(put); - } catch (IOException ex) { - throw new RuntimeException(ex); - } - }); - ExecutorService appendService = Executors.newSingleThreadExecutor(); - appendService.execute(() -> { - Append append = new Append(ROW); - append.addColumn(FAMILY, QUALIFIER, VALUE); - try (Table table = con.getTable(tableName)) { - table.append(append); - fail("The APPEND should fail because the target lock is blocked by previous put"); - } catch (Exception ex) { - } - }); - appendService.shutdown(); - appendService.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); - WaitingForMultiMutationsObserver observer = - find(tableName, WaitingForMultiMutationsObserver.class); - observer.latch.countDown(); - putService.shutdown(); - putService.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); - try (Table table = con.getTable(tableName)) { - Result r = table.get(new Get(ROW)); - assertFalse(r.isEmpty()); - assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), VALUE)); - } - } - HRegion region = (HRegion) find(tableName); - int readLockCount = region.getReadLockCount(); - LOG.info("readLockCount:" + readLockCount); - assertEquals(0, readLockCount); - } - - @Test - public void testMultiRowMutations() throws Exception, Throwable { - TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) - .setCoprocessor(MultiRowMutationEndpoint.class.getName()) - .setCoprocessor(WaitingForMultiMutationsObserver.class.getName()) - .setValue("hbase.rowlock.wait.duration", String.valueOf(5000)).build(); - TEST_UTIL.getAdmin().createTable(tableDescriptor); - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - - // new a connection for lower retry number. - Configuration copy = new Configuration(TEST_UTIL.getConfiguration()); - copy.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); - try (Connection con = ConnectionFactory.createConnection(copy)) { - byte[] row = Bytes.toBytes("ROW-0"); - byte[] rowLocked = Bytes.toBytes("ROW-1"); - byte[] value0 = Bytes.toBytes("VALUE-0"); - byte[] value1 = Bytes.toBytes("VALUE-1"); - byte[] value2 = Bytes.toBytes("VALUE-2"); - assertNoLocks(tableName); - ExecutorService putService = Executors.newSingleThreadExecutor(); - putService.execute(() -> { - try (Table table = con.getTable(tableName)) { - Put put0 = new Put(rowLocked); - put0.addColumn(FAMILY, QUALIFIER, value0); - // the put will be blocked by WaitingForMultiMutationsObserver. 
- table.put(put0); - } catch (IOException ex) { - throw new RuntimeException(ex); - } - }); - ExecutorService cpService = Executors.newSingleThreadExecutor(); - AtomicBoolean exceptionDuringMutateRows = new AtomicBoolean(); - cpService.execute(() -> { - Put put1 = new Put(row); - Put put2 = new Put(rowLocked); - put1.addColumn(FAMILY, QUALIFIER, value1); - put2.addColumn(FAMILY, QUALIFIER, value2); - try (Table table = con.getTable(tableName)) { - MultiRowMutationProtos.MutateRowsRequest request = - MultiRowMutationProtos.MutateRowsRequest.newBuilder() - .addMutationRequest( - ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.PUT, put1)) - .addMutationRequest( - ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.PUT, put2)) - .build(); - table.coprocessorService(MultiRowMutationProtos.MultiRowMutationService.class, ROW, ROW, - (MultiRowMutationProtos.MultiRowMutationService exe) -> { - ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback< - MultiRowMutationProtos.MutateRowsResponse> rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - exe.mutateRows(controller, request, rpcCallback); - if ( - controller.failedOnException() - && !(controller.getFailedOn() instanceof UnknownProtocolException) - ) { - exceptionDuringMutateRows.set(true); - } - return rpcCallback.get(); - }); - } catch (Throwable ex) { - LOG.error("encountered " + ex); - } - }); - cpService.shutdown(); - cpService.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); - WaitingForMultiMutationsObserver observer = - find(tableName, WaitingForMultiMutationsObserver.class); - observer.latch.countDown(); - putService.shutdown(); - putService.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); - try (Table table = con.getTable(tableName)) { - Get g0 = new Get(row); - Get g1 = new Get(rowLocked); - Result r0 = table.get(g0); - Result r1 = table.get(g1); - assertTrue(r0.isEmpty()); - assertFalse(r1.isEmpty()); - assertTrue(Bytes.equals(r1.getValue(FAMILY, QUALIFIER), value0)); - } - assertNoLocks(tableName); - if (!exceptionDuringMutateRows.get()) { - fail("This cp should fail because the target lock is blocked by previous put"); - } - } - } - - /** - * A test case for issue HBASE-17482 After combile seqid with mvcc readpoint, seqid/mvcc is - * acquired and stamped onto cells in the append thread, a countdown latch is used to ensure that - * happened before cells can be put into memstore. But the MVCCPreAssign patch(HBASE-16698) make - * the seqid/mvcc acquirement in handler thread and stamping in append thread No countdown latch - * to assure cells in memstore are stamped with seqid/mvcc. If cells without mvcc(A.K.A mvcc=0) - * are put into memstore, then a scanner with a smaller readpoint can see these data, which - * disobey the multi version concurrency control rules. This test case is to reproduce this - * scenario. 
- */ - @Test - public void testMVCCUsingMVCCPreAssign() throws IOException, InterruptedException { - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - // put two row first to init the scanner - Put put = new Put(Bytes.toBytes("0")); - put.addColumn(FAMILY, Bytes.toBytes(""), Bytes.toBytes("0")); - table.put(put); - put = new Put(Bytes.toBytes("00")); - put.addColumn(FAMILY, Bytes.toBytes(""), Bytes.toBytes("0")); - table.put(put); - Scan scan = new Scan(); - scan.setTimeRange(0, Long.MAX_VALUE); - scan.setCaching(1); - ResultScanner scanner = table.getScanner(scan); - int rowNum = scanner.next() != null ? 1 : 0; - // the started scanner shouldn't see the rows put below - for (int i = 1; i < 1000; i++) { - put = new Put(Bytes.toBytes(String.valueOf(i))); - put.setDurability(Durability.ASYNC_WAL); - put.addColumn(FAMILY, Bytes.toBytes(""), Bytes.toBytes(i)); - table.put(put); - } - for (Result result : scanner) { - rowNum++; - } - // scanner should only see two rows - assertEquals(2, rowNum); - scanner = table.getScanner(scan); - rowNum = 0; - for (Result result : scanner) { - rowNum++; - } - // the new scanner should see all rows - assertEquals(1001, rowNum); - } - } - - @Test - public void testPutThenGetWithMultipleThreads() throws Exception { - final int THREAD_NUM = 20; - final int ROUND_NUM = 10; - for (int round = 0; round < ROUND_NUM; round++) { - ArrayList threads = new ArrayList<>(THREAD_NUM); - final AtomicInteger successCnt = new AtomicInteger(0); - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - - for (int i = 0; i < THREAD_NUM; i++) { - final int index = i; - Thread t = new Thread(new Runnable() { - - @Override - public void run() { - final byte[] row = Bytes.toBytes("row-" + index); - final byte[] value = Bytes.toBytes("v" + index); - try { - Put put = new Put(row); - put.addColumn(FAMILY, QUALIFIER, value); - ht.put(put); - Get get = new Get(row); - Result result = ht.get(get); - byte[] returnedValue = result.getValue(FAMILY, QUALIFIER); - if (Bytes.equals(value, returnedValue)) { - successCnt.getAndIncrement(); - } else { - LOG.error("Should be equal but not, original value: " + Bytes.toString(value) - + ", returned value: " - + (returnedValue == null ? 
"null" : Bytes.toString(returnedValue))); - } - } catch (Throwable e) { - // do nothing - } - } - }); - threads.add(t); - } - for (Thread t : threads) { - t.start(); - } - for (Thread t : threads) { - t.join(); - } - assertEquals("Not equal in round " + round, THREAD_NUM, successCnt.get()); - } - TEST_UTIL.deleteTable(tableName); - } - } - - private static void assertNoLocks(final TableName tableName) - throws IOException, InterruptedException { - HRegion region = (HRegion) find(tableName); - assertEquals(0, region.getLockedRows().size()); - } - - private static HRegion find(final TableName tableName) throws IOException, InterruptedException { - HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(tableName); - List regions = rs.getRegions(tableName); - assertEquals(1, regions.size()); - return regions.get(0); - } - - private static T find(final TableName tableName, Class clz) - throws IOException, InterruptedException { - HRegion region = find(tableName); - Coprocessor cp = region.getCoprocessorHost().findCoprocessor(clz.getName()); - assertTrue("The cp instance should be " + clz.getName() + ", current instance is " - + cp.getClass().getName(), clz.isInstance(cp)); - return clz.cast(cp); - } - - public static class WaitingForMultiMutationsObserver - implements RegionCoprocessor, RegionObserver { - final CountDownLatch latch = new CountDownLatch(1); - - @Override - public Optional getRegionObserver() { - return Optional.of(this); - } - - @Override - public void postBatchMutate(final ObserverContext c, - final MiniBatchOperationInProgress miniBatchOp) throws IOException { - try { - latch.await(); - } catch (InterruptedException ex) { - throw new IOException(ex); - } - } - } - - public static class WaitingForScanObserver implements RegionCoprocessor, RegionObserver { - private final CountDownLatch latch = new CountDownLatch(1); - - @Override - public Optional getRegionObserver() { - return Optional.of(this); - } - - @Override - public void postBatchMutate(final ObserverContext c, - final MiniBatchOperationInProgress miniBatchOp) throws IOException { - try { - // waiting for scanner - latch.await(); - } catch (InterruptedException ex) { - throw new IOException(ex); - } - } - - @Override - public RegionScanner postScannerOpen( - final ObserverContext e, final Scan scan, - final RegionScanner s) throws IOException { - latch.countDown(); - return s; - } - } - - static byte[] generateHugeValue(int size) { - Random rand = ThreadLocalRandom.current(); - byte[] value = new byte[size]; - for (int i = 0; i < value.length; i++) { - value[i] = (byte) rand.nextInt(256); - } - return value; - } - - @Test - public void testScanWithBatchSizeReturnIncompleteCells() - throws IOException, InterruptedException { - TableDescriptor hd = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setMaxVersions(3).build()) - .build(); - try (Table table = TEST_UTIL.createTable(hd, null)) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - - Put put = new Put(ROW); - put.addColumn(FAMILY, Bytes.toBytes(0), generateHugeValue(3 * 1024 * 1024)); - table.put(put); - - put = new Put(ROW); - put.addColumn(FAMILY, Bytes.toBytes(1), generateHugeValue(4 * 1024 * 1024)); - table.put(put); - - for (int i = 2; i < 5; i++) { - for (int version = 0; version < 2; version++) { - put = new Put(ROW); - put.addColumn(FAMILY, Bytes.toBytes(i), generateHugeValue(1024)); - table.put(put); - } - } - - Scan scan = new Scan(); - scan.withStartRow(ROW).withStopRow(ROW, 
true).addFamily(FAMILY).setBatch(3) - .setMaxResultSize(4 * 1024 * 1024); - Result result; - try (ResultScanner scanner = table.getScanner(scan)) { - List list = new ArrayList<>(); - /* - * The first scan rpc should return a result with 2 cells, because 3MB + 4MB > 4MB; The - * second scan rpc should return a result with 3 cells, because reach the batch limit = 3; - * The mayHaveMoreCellsInRow in last result should be false in the scan rpc. BTW, the - * moreResultsInRegion also would be false. Finally, the client should collect all the cells - * into two result: 2+3 -> 3+2; - */ - while ((result = scanner.next()) != null) { - list.add(result); - } +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; - Assert.assertEquals(5, list.stream().mapToInt(Result::size).sum()); - Assert.assertEquals(2, list.size()); - Assert.assertEquals(3, list.get(0).size()); - Assert.assertEquals(2, list.get(1).size()); - } +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) +public class TestFromClientSide3 extends FromClientSide3TestBase { - scan = new Scan(); - scan.withStartRow(ROW).withStopRow(ROW, true).addFamily(FAMILY).setBatch(2) - .setMaxResultSize(4 * 1024 * 1024); - try (ResultScanner scanner = table.getScanner(scan)) { - List list = new ArrayList<>(); - while ((result = scanner.next()) != null) { - list.add(result); - } - Assert.assertEquals(5, list.stream().mapToInt(Result::size).sum()); - Assert.assertEquals(3, list.size()); - Assert.assertEquals(2, list.get(0).size()); - Assert.assertEquals(2, list.get(1).size()); - Assert.assertEquals(1, list.get(2).size()); - } - } + @BeforeAll + public static void setUpBeforeAll() throws Exception { + startCluster(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java index 05e525ff19ca..98a72314dbc5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.HConstants.RPC_CODEC_CONF_KEY; -import static org.apache.hadoop.hbase.client.TestFromClientSide3.generateHugeValue; +import static org.apache.hadoop.hbase.client.FromClientSide3TestBase.generateHugeValue; import static org.apache.hadoop.hbase.ipc.RpcClient.DEFAULT_CODEC_CLASS; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.MatcherAssert.assertThat; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java index edb9a64a7a8e..31104b973e50 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java @@ -21,6 +21,9 @@ import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellScanner; @@ -31,7 +34,9 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.yetus.audience.InterfaceAudience; +import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; @@ -46,7 +51,7 @@ import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestRpcServiceProtos.TestProtobufRpcProto.Interface; @InterfaceAudience.Private -public class TestProtobufRpcServiceImpl implements BlockingInterface { +public class TestProtobufRpcServiceImpl implements BlockingInterface, Interface { public static final BlockingService SERVICE = TestProtobufRpcProto.newReflectiveBlockingService(new TestProtobufRpcServiceImpl()); @@ -119,4 +124,63 @@ public AddrResponseProto addr(RpcController controller, EmptyRequestProto reques return AddrResponseProto.newBuilder() .setAddr(RpcServer.getRemoteAddress().get().getHostAddress()).build(); } + + @Override + public void ping(RpcController controller, EmptyRequestProto request, + RpcCallback done) { + done.run(EmptyResponseProto.getDefaultInstance()); + } + + @Override + public void echo(RpcController controller, EchoRequestProto request, + RpcCallback done) { + if (controller instanceof HBaseRpcController) { + HBaseRpcController pcrc = (HBaseRpcController) controller; + // If cells, scan them to check we are able to iterate what we were given and since this is an + // echo, just put them back on the controller creating a new block. Tests our block building. + ExtendedCellScanner cellScanner = pcrc.cellScanner(); + List list = null; + if (cellScanner != null) { + list = new ArrayList<>(); + try { + while (cellScanner.advance()) { + list.add(cellScanner.current()); + } + } catch (IOException e) { + pcrc.setFailed(e); + return; + } + } + cellScanner = PrivateCellUtil.createExtendedCellScanner(list); + pcrc.setCellScanner(cellScanner); + } + done.run(EchoResponseProto.newBuilder().setMessage(request.getMessage()).build()); + } + + @Override + public void error(RpcController controller, EmptyRequestProto request, + RpcCallback done) { + if (controller instanceof HBaseRpcController) { + ((HBaseRpcController) controller).setFailed(new DoNotRetryIOException("server error!")); + } else { + controller.setFailed("server error!"); + } + } + + private final ScheduledExecutorService executor = + Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder().setDaemon(true).build()); + + @Override + public void pause(RpcController controller, PauseRequestProto request, + RpcCallback done) { + executor.schedule(() -> done.run(EmptyResponseProto.getDefaultInstance()), request.getMs(), + TimeUnit.MILLISECONDS); + } + + @Override + public void addr(RpcController controller, EmptyRequestProto request, + RpcCallback done) { + done.run(AddrResponseProto.newBuilder() + .setAddr(RpcServer.getRemoteAddress().get().getHostAddress()).build()); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.java index 67b22833d18d..dffeaa206a24 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.java @@ -22,7 +22,6 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static 
org.junit.Assert.fail; -import static org.mockito.Mockito.mock; import java.io.IOException; import java.security.PrivilegedExceptionAction; @@ -49,6 +48,7 @@ import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor; +import org.apache.hadoop.hbase.ipc.TestProtobufRpcServiceImpl; import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -113,7 +113,8 @@ public DummyCpService() { @Override public Iterable getServices() { - return Collections.singleton(mock(TestRpcServiceProtos.TestProtobufRpcProto.class)); + return Collections.singleton(TestRpcServiceProtos.TestProtobufRpcProto + .newReflectiveService(new TestProtobufRpcServiceImpl())); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFromClientSide3WoUnsafe.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFromClientSide3WoUnsafe.java index 71f3c3ee7924..ffec01edafd7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFromClientSide3WoUnsafe.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFromClientSide3WoUnsafe.java @@ -17,31 +17,29 @@ */ package org.apache.hadoop.hbase.util; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.client.TestFromClientSide3; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.mockito.Mockito.mockStatic; + +import org.apache.hadoop.hbase.client.FromClientSide3TestBase; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; - -@Category({ LargeTests.class, ClientTests.class }) -public class TestFromClientSide3WoUnsafe extends TestFromClientSide3 { +import org.apache.hadoop.hbase.unsafe.HBasePlatformDependent; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.mockito.MockedStatic; - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFromClientSide3WoUnsafe.class); - - @BeforeClass - public static void setUpBeforeClass() throws Exception { - TestByteBufferUtils.disableUnsafe(); - TestFromClientSide3.setUpBeforeClass(); - } +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) +public class TestFromClientSide3WoUnsafe extends FromClientSide3TestBase { - @AfterClass - public static void tearDownAfterClass() throws Exception { - TestFromClientSide3.tearDownAfterClass(); - TestByteBufferUtils.detectAvailabilityOfUnsafe(); + @BeforeAll + public static void setUpBeforeAll() throws Exception { + try (MockedStatic mocked = mockStatic(HBasePlatformDependent.class)) { + mocked.when(HBasePlatformDependent::isUnsafeAvailable).thenReturn(false); + mocked.when(HBasePlatformDependent::unaligned).thenReturn(false); + assertFalse(ByteBufferUtils.UNSAFE_AVAIL); + assertFalse(ByteBufferUtils.UNSAFE_UNALIGNED); + } + startCluster(); } } From c4f7e6612a1bb323903f95bee215d6a636c6ee40 Mon Sep 17 00:00:00 2001 From: vinayak hegde Date: Mon, 29 Sep 2025 22:57:08 +0530 Subject: [PATCH 32/92] HBASE-29504 [DOC] Document Namespace Auto-Creation During Restore (#7199) --- src/main/asciidoc/_chapters/backup_restore.adoc | 6 ++++++ 1 file changed, 6 insertions(+) diff 
--git a/src/main/asciidoc/_chapters/backup_restore.adoc b/src/main/asciidoc/_chapters/backup_restore.adoc index c2beca0b9aee..49e328695b01 100644 --- a/src/main/asciidoc/_chapters/backup_restore.adoc +++ b/src/main/asciidoc/_chapters/backup_restore.adoc @@ -341,6 +341,12 @@ This command restores two tables of an incremental backup image. In this example • `backupId_1467823988425` is the backup ID. • `mytable1` and `mytable2` are the names of tables in the backup image to be restored. +[NOTE] +==== +If the namespace of a table being restored does not exist in the target environment, it will be automatically created during the restore operation. +link:https://issues.apache.org/jira/browse/HBASE-25707[HBASE-25707] +==== + // hbase backup merge [[br.merge.backup]] From 2c3b89b61c3d03f598ba8ed7f917a93bf797566d Mon Sep 17 00:00:00 2001 From: Siddharth Khillon Date: Tue, 30 Sep 2025 04:13:38 -0700 Subject: [PATCH 33/92] HBASE-29629 Record the quota user name value on metrics for RpcThrottlingExceptions (#7345) Signed-off-by: Wellington Chevreuil --- .../main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java | 2 +- .../hadoop/hbase/quotas/RegionServerRpcQuotaManager.java | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java index 6b1585c58550..e912fe8ee153 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java @@ -229,7 +229,7 @@ protected boolean isExceedThrottleQuotaEnabled() { * username * @param ugi The request's UserGroupInformation */ - private String getQuotaUserName(final UserGroupInformation ugi) { + String getQuotaUserName(final UserGroupInformation ugi) { if (userOverrideRequestAttributeKey == null) { return ugi.getShortUserName(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java index 7a42d0f1aa31..34fc57cb0814 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java @@ -199,7 +199,7 @@ public OperationQuota checkScanQuota(final Region region, LOG.debug("Throttling exception for user=" + ugi.getUserName() + " table=" + table + " scan=" + scanRequest.getScannerId() + ": " + e.getMessage()); - rsServices.getMetrics().recordThrottleException(e.getType(), ugi.getShortUserName(), + rsServices.getMetrics().recordThrottleException(e.getType(), quotaCache.getQuotaUserName(ugi), table.getNameAsString()); throw e; @@ -276,7 +276,7 @@ public OperationQuota checkBatchQuota(final Region region, final int numWrites, LOG.debug("Throttling exception for user=" + ugi.getUserName() + " table=" + table + " numWrites=" + numWrites + " numReads=" + numReads + ": " + e.getMessage()); - rsServices.getMetrics().recordThrottleException(e.getType(), ugi.getShortUserName(), + rsServices.getMetrics().recordThrottleException(e.getType(), quotaCache.getQuotaUserName(ugi), table.getNameAsString()); throw e; From c663fc47f0b16538aa067bbf23cfcd31f7d29629 Mon Sep 17 00:00:00 2001 From: vinayak hegde Date: Wed, 1 Oct 2025 22:41:11 +0530 Subject: [PATCH 34/92] HBASE-29497 Mention HFiles for incremental backups (#7216) * HBASE-29497 Mention HFiles for 
incremental backups * enhance the documention change --- .../asciidoc/_chapters/backup_restore.adoc | 35 +++++++++++++------ 1 file changed, 24 insertions(+), 11 deletions(-) diff --git a/src/main/asciidoc/_chapters/backup_restore.adoc b/src/main/asciidoc/_chapters/backup_restore.adoc index 49e328695b01..fa10c772859c 100644 --- a/src/main/asciidoc/_chapters/backup_restore.adoc +++ b/src/main/asciidoc/_chapters/backup_restore.adoc @@ -810,16 +810,28 @@ providing a comparable level of security. This is a manual step which users *mus [[br.technical.details]] == Technical Details of Incremental Backup and Restore -HBase incremental backups enable more efficient capture of HBase table images than previous attempts at serial backup and restore -solutions, such as those that only used HBase Export and Import APIs. Incremental backups use Write Ahead Logs (WALs) to capture -the data changes since the previous backup was created. A WAL roll (create new WALs) is executed across all RegionServers to track -the WALs that need to be in the backup. - -After the incremental backup image is created, the source backup files usually are on same node as the data source. A process similar -to the DistCp (distributed copy) tool is used to move the source backup files to the target file systems. When a table restore operation -starts, a two-step process is initiated. First, the full backup is restored from the full backup image. Second, all WAL files from -incremental backups between the last full backup and the incremental backup being restored are converted to HFiles, which the HBase -Bulk Load utility automatically imports as restored data in the table. +HBase incremental backups enable more efficient capture of HBase table images than previous attempts +at serial backup and restore solutions, such as those that only used HBase Export and Import APIs. +Incremental backups use Write Ahead Logs (WALs) to capture the data changes since the +previous backup was created. A WAL roll (create new WALs) is executed across all RegionServers +to track the WALs that need to be in the backup. +In addition to WALs, incremental backups also track bulk-loaded HFiles for tables under backup. + +Incremental backup gathers all WAL files generated since the last backup from the source cluster, +converts them to HFiles in a `.tmp` directory under the `BACKUP_ROOT`, and then moves these +HFiles to their final location under the backup root directory to form the backup image. +It also reads bulk load records from the backup system table, forms the paths for the corresponding +bulk-loaded HFiles, and copies those files to the backup destination. +Bulk-loaded files are preserved (not deleted by cleaner chores) until they've been included in a +backup (for each backup root). +A process similar to the DistCp (distributed copy) tool is used to move the backup files to the +target file system. + +When a table restore operation starts, a two-step process is initiated. +First, the full backup is restored from the full backup image. +Second, all HFiles from incremental backups between the last full backup and the incremental backup +being restored (including bulk-loaded HFiles) are bulk loaded into the table using the +HBase Bulk Load utility. You can only restore on a live HBase cluster because the data must be redistributed to complete the restore operation successfully. 
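The bulk-load step described above can be pictured with a short, hypothetical sketch against the public `BulkLoadHFiles` tool. The table name and HFile directory below are placeholders (the real restore job derives both from the backup image and its manifest), so treat this as an illustration of the mechanism rather than what the restore client literally executes.

[source,java]
----
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

public class RestoreBulkLoadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Placeholder directory holding the HFiles produced from incremental-backup WALs,
    // plus any bulk-loaded HFiles captured with the backup image.
    Path hfileDir = new Path("hdfs:///restore-staging/default/mytable1");
    // Bulk load the HFiles into the live table; no WAL replay is needed because the
    // edits are already materialized as HFiles.
    BulkLoadHFiles.create(conf).bulkLoad(TableName.valueOf("mytable1"), hfileDir);
  }
}
----

This is also why restore requires a live cluster: the bulk load hands the files to the serving RegionServers, which adopt them into the target regions.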
@@ -878,8 +890,9 @@ data at the full 80MB/s and `-w` is used to limit the job from spawning 16 worke Like we did for full backups, we have to understand the incremental backup process to approximate its runtime and cost. -* Identify new write-ahead logs since last full or incremental backup: negligible. Apriori knowledge from the backup system table(s). +* Identify new write-ahead logs since the last full or incremental backup: negligible. Apriori knowledge from the backup system table(s). * Read, filter, and write "minimized" HFiles equivalent to the WALs: dominated by the speed of writing data. Relative to write speed of HDFS. +* Read bulk load records from the backup system table, form the paths for bulk-loaded HFiles, and copy them to the backup destination. * DistCp the HFiles to the destination: <>. For the second step, the dominating cost of this operation would be the re-writing the data (under the assumption that a majority of the From a2a70d645af2a1329d208baaa0a2866fa7f29f98 Mon Sep 17 00:00:00 2001 From: vinayak hegde Date: Wed, 1 Oct 2025 22:43:39 +0530 Subject: [PATCH 35/92] HBASE-29505 [DOC] Document Enhanced Options for Backup Delete Command (#7200) * HBASE-29505 [DOC] Document Enhanced Options for Backup Delete Command * update the doc with cautions --- .../asciidoc/_chapters/backup_restore.adoc | 51 ++++++++++++++----- 1 file changed, 39 insertions(+), 12 deletions(-) diff --git a/src/main/asciidoc/_chapters/backup_restore.adoc b/src/main/asciidoc/_chapters/backup_restore.adoc index fa10c772859c..52ba8655eff4 100644 --- a/src/main/asciidoc/_chapters/backup_restore.adoc +++ b/src/main/asciidoc/_chapters/backup_restore.adoc @@ -584,33 +584,60 @@ $ hbase backup describe backupId_1467823988425 // hbase backup delete [[br.delete.backup]] -=== Deleting a Backup Image +=== Deleting Backup Images -This command can be used to delete a backup image which is no longer needed. +The `hbase backup delete` command deletes backup images that are no longer needed. + +[[br.delete.backup.syntax]] +==== Syntax [source] ---- -$ hbase backup delete +$ hbase backup delete -l +$ hbase backup delete -k ---- -[[br.delete.backup.positional.cli.arguments]] -==== Positional Command-Line Arguments - -_backup_id_:: - The ID to the backup image which should be deleted. - [[br.delete.backup.named.cli.arguments]] ==== Named Command-Line Arguments -None. +_-l _:: +Comma-separated list of backup IDs to delete. + +_-k _:: +Deletes all backup images completed more than the specified number of days ago. + +NOTE: These options are **mutually exclusive**. Only one of `-l` or `-k` may be used at a time. [[br.delete.backup.example]] -==== Example usage +==== Example Usage + +Delete specific backup images by ID: [source] ---- -$ hbase backup delete backupId_1467823988425 +$ hbase backup delete -l backupId_1467823988425,backupId_1467824989999 +---- + +Delete all backup images older than 30 days: + +[source] ---- +$ hbase backup delete -k 30 +---- + +[CAUTION] +==== +* Deleting a backup may affect all following incremental backups (in the same backup root) up to + the next full backup. For example, if you take a full backup every 2 weeks and + daily incremental backups, running `hbase backup delete -k 7` when the full backup is older than + 7 days will effectively remove the data for all subsequent incremental backups. + The backup IDs may still be listed, but their data will be gone. + +* If the most recent backup is an incremental backup and you delete it, + you should run a **full backup** next. 
+ Running another incremental backup immediately after may result in missing data in the + backup image. (See link:https://issues.apache.org/jira/browse/HBASE-28084[HBASE-28084]) +==== // hbase backup repair From 82e36a2e82e558ae52d60a1c2b52d0c5d774c47a Mon Sep 17 00:00:00 2001 From: Siddharth Khillon Date: Thu, 2 Oct 2025 12:30:07 -0700 Subject: [PATCH 36/92] HBASE-29631 Fix race condition in IncrementalTableBackupClient when HFiles are archived during backup (#7346) Co-authored-by: Hernan Romer Co-authored-by: skhillon Signed-off-by: Ray Mattingly --- .../impl/IncrementalTableBackupClient.java | 23 ++++- .../TestIncrementalBackupWithBulkLoad.java | 96 +++++++++++++++++++ 2 files changed, 116 insertions(+), 3 deletions(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java index b68ed527833f..4fac0ca3c93c 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java @@ -200,6 +200,9 @@ private void mergeSplitAndCopyBulkloadedHFiles(List activeFiles, int numActiveFiles = activeFiles.size(); updateFileLists(activeFiles, archiveFiles); if (activeFiles.size() < numActiveFiles) { + // We've archived some files, delete bulkloads directory + // and re-try + deleteBulkLoadDirectory(); continue; } @@ -242,7 +245,7 @@ private void mergeSplitAndCopyBulkloadedHFiles(List files, TableName tn, incrementalCopyBulkloadHFiles(tgtFs, tn); } - private void updateFileLists(List activeFiles, List archiveFiles) + public void updateFileLists(List activeFiles, List archiveFiles) throws IOException { List newlyArchived = new ArrayList<>(); @@ -252,9 +255,23 @@ private void updateFileLists(List activeFiles, List archiveFiles } } - if (newlyArchived.size() > 0) { + if (!newlyArchived.isEmpty()) { + String rootDir = CommonFSUtils.getRootDir(conf).toString(); + activeFiles.removeAll(newlyArchived); - archiveFiles.addAll(newlyArchived); + for (String file : newlyArchived) { + String archivedFile = file.substring(rootDir.length() + 1); + Path archivedFilePath = new Path(HFileArchiveUtil.getArchivePath(conf), archivedFile); + archivedFile = archivedFilePath.toString(); + + if (!fs.exists(archivedFilePath)) { + throw new IOException(String.format( + "File %s no longer exists, and no archived file %s exists for it", file, archivedFile)); + } + + LOG.debug("Archived file {} has been updated", archivedFile); + archiveFiles.add(archivedFile); + } } LOG.debug(newlyArchived.size() + " files have been archived."); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java index 412fd5e32f7e..73c26dce7353 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java @@ -20,9 +20,11 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.ArrayList; import java.util.List; import java.util.Map; import org.apache.hadoop.fs.FileSystem; @@ -38,6 
+40,8 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.tool.BulkLoadHFiles; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.apache.hadoop.hbase.util.HFileTestUtil; import org.junit.ClassRule; import org.junit.Test; @@ -147,6 +151,98 @@ private boolean containsRowWithKey(Table table, String rowKey) throws IOExceptio return result.containsColumn(famName, qualName); } + @Test + public void testUpdateFileListsRaceCondition() throws Exception { + try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) { + // Test the race condition where files are archived during incremental backup + FileSystem fs = TEST_UTIL.getTestFileSystem(); + + String regionName = "region1"; + String columnFamily = "cf"; + String filename1 = "hfile1"; + String filename2 = "hfile2"; + + Path rootDir = CommonFSUtils.getRootDir(TEST_UTIL.getConfiguration()); + Path tableDir = CommonFSUtils.getTableDir(rootDir, table1); + Path activeFile1 = + new Path(tableDir, regionName + Path.SEPARATOR + columnFamily + Path.SEPARATOR + filename1); + Path activeFile2 = + new Path(tableDir, regionName + Path.SEPARATOR + columnFamily + Path.SEPARATOR + filename2); + + fs.mkdirs(activeFile1.getParent()); + fs.create(activeFile1).close(); + fs.create(activeFile2).close(); + + List activeFiles = new ArrayList<>(); + activeFiles.add(activeFile1.toString()); + activeFiles.add(activeFile2.toString()); + List archiveFiles = new ArrayList<>(); + + Path archiveDir = HFileArchiveUtil.getStoreArchivePath(TEST_UTIL.getConfiguration(), table1, + regionName, columnFamily); + Path archivedFile1 = new Path(archiveDir, filename1); + fs.mkdirs(archiveDir); + assertTrue("File should be moved to archive", fs.rename(activeFile1, archivedFile1)); + + TestBackupBase.IncrementalTableBackupClientForTest client = + new TestBackupBase.IncrementalTableBackupClientForTest(TEST_UTIL.getConnection(), + "test_backup_id", + createBackupRequest(BackupType.INCREMENTAL, List.of(table1), BACKUP_ROOT_DIR)); + + client.updateFileLists(activeFiles, archiveFiles); + + assertEquals("Only one file should remain in active files", 1, activeFiles.size()); + assertEquals("File2 should still be in active files", activeFile2.toString(), + activeFiles.get(0)); + assertEquals("One file should be added to archive files", 1, archiveFiles.size()); + assertEquals("Archived file should have correct path", archivedFile1.toString(), + archiveFiles.get(0)); + systemTable.finishBackupExclusiveOperation(); + } + + } + + @Test + public void testUpdateFileListsMissingArchivedFile() throws Exception { + try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) { + // Test that IOException is thrown when file doesn't exist in archive location + FileSystem fs = TEST_UTIL.getTestFileSystem(); + + String regionName = "region2"; + String columnFamily = "cf"; + String filename = "missing_file"; + + Path rootDir = CommonFSUtils.getRootDir(TEST_UTIL.getConfiguration()); + Path tableDir = CommonFSUtils.getTableDir(rootDir, table1); + Path activeFile = + new Path(tableDir, regionName + Path.SEPARATOR + columnFamily + Path.SEPARATOR + filename); + + fs.mkdirs(activeFile.getParent()); + fs.create(activeFile).close(); + + List activeFiles = new ArrayList<>(); + activeFiles.add(activeFile.toString()); + List archiveFiles = new ArrayList<>(); + + // Delete the file but don't create it in archive location 
+ fs.delete(activeFile, false); + + TestBackupBase.IncrementalTableBackupClientForTest client = + new TestBackupBase.IncrementalTableBackupClientForTest(TEST_UTIL.getConnection(), + "test_backup_id", + createBackupRequest(BackupType.INCREMENTAL, List.of(table1), BACKUP_ROOT_DIR)); + + // This should throw IOException since file doesn't exist in archive + try { + client.updateFileLists(activeFiles, archiveFiles); + fail("Expected IOException to be thrown"); + } catch (IOException e) { + // Expected + } + systemTable.finishBackupExclusiveOperation(); + } + } + private void performBulkLoad(String keyPrefix) throws IOException { FileSystem fs = TEST_UTIL.getTestFileSystem(); Path baseDirectory = TEST_UTIL.getDataTestDirOnTestFS(TEST_NAME); From 2d881209ec595a7e58d132786960a7359bafe22b Mon Sep 17 00:00:00 2001 From: sanjeet006py <36011005+sanjeet006py@users.noreply.github.com> Date: Sat, 4 Oct 2025 03:36:58 +0530 Subject: [PATCH 37/92] HBASE-29626: Refactor server side scan metrics for Coproc hooks (#7340) Signed-off-by: Viraj Jasani --- .../apache/hadoop/hbase/io/hfile/HFile.java | 4 +-- .../org/apache/hadoop/hbase/ipc/RpcCall.java | 4 --- .../apache/hadoop/hbase/ipc/RpcServer.java | 6 ++--- .../apache/hadoop/hbase/ipc/ServerCall.java | 11 -------- .../ThreadLocalServerSideScanMetrics.java | 23 +++++++++++++++- .../hbase/regionserver/RSRpcServices.java | 12 +++++---- .../hbase/regionserver/RegionScannerImpl.java | 26 ------------------- .../namequeues/TestNamedQueueRecorder.java | 10 ------- .../hbase/namequeues/TestRpcLogDetails.java | 10 ------- .../region/TestRegionProcedureStore.java | 10 ------- 10 files changed, 34 insertions(+), 82 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index a99eac4085e4..7080a865aa68 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.ReaderContext.ReaderType; -import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.monitoring.ThreadLocalServerSideScanMetrics; import org.apache.hadoop.hbase.regionserver.CellSink; import org.apache.hadoop.hbase.regionserver.ShipperListener; import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; @@ -190,7 +190,7 @@ public static final long getChecksumFailuresCount() { } public static final void updateReadLatency(long latencyMillis, boolean pread, boolean tooSlow) { - RpcServer.getCurrentCall().ifPresent(call -> call.updateFsReadTime(latencyMillis)); + ThreadLocalServerSideScanMetrics.addFsReadTime(latencyMillis); if (pread) { MetricsIO.getInstance().updateFsPreadTime(latencyMillis); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java index 804d7b32bb42..ff3bae19e296 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java @@ -133,8 +133,4 @@ void setResponse(Message param, ExtendedCellScanner cells, Throwable errorThrowa /** Returns A short string format of this call without possibly lengthy params */ String toShortString(); - - void updateFsReadTime(long latencyMillis); - - long getFsReadTime(); } 
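The refactor in this patch replaces the per-RpcCall fsReadTime counter with a per-thread accumulator, so read paths such as HFile.updateReadLatency can record latency without holding an RpcCall, and the same counters can be read from the coprocessor hooks named in the commit subject. A minimal standalone sketch of that accumulator pattern, with illustrative names rather than the actual ThreadLocalServerSideScanMetrics code, looks like this:

[source,java]
----
import java.util.concurrent.atomic.AtomicLong;

// Minimal sketch of a per-thread metric accumulator, illustrative only; the real class
// touched by this patch is ThreadLocalServerSideScanMetrics, which tracks several counters.
public final class ThreadLocalFsReadTimeSketch {
  private static final ThreadLocal<AtomicLong> FS_READ_TIME =
    ThreadLocal.withInitial(() -> new AtomicLong(0));

  private ThreadLocalFsReadTimeSketch() {
  }

  // Called from low-level read paths running on the handler thread.
  public static void addFsReadTime(long latencyMillis) {
    FS_READ_TIME.get().addAndGet(latencyMillis);
  }

  // Called once per request to report and clear the thread's accumulated value.
  public static long getFsReadTimeAndReset() {
    return FS_READ_TIME.get().getAndSet(0);
  }
}
----

Because the counter lives on the handler thread, the reporting side (see the RpcServer change that follows) reads the thread's current value instead of a per-call field on the RpcCall.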
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index 2db08fd7398b..6dfb5bfb4113 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; import org.apache.hadoop.hbase.monitoring.TaskMonitor; +import org.apache.hadoop.hbase.monitoring.ThreadLocalServerSideScanMetrics; import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; import org.apache.hadoop.hbase.namequeues.RpcLogDetails; import org.apache.hadoop.hbase.regionserver.RSRpcServices; @@ -461,19 +462,18 @@ public Pair call(RpcCall call, MonitoredRPCHandler int processingTime = (int) (endTime - startTime); int qTime = (int) (startTime - receiveTime); int totalTime = (int) (endTime - receiveTime); + long fsReadTime = ThreadLocalServerSideScanMetrics.getFsReadTimeCounter().get(); if (LOG.isTraceEnabled()) { LOG.trace( "{}, response: {}, receiveTime: {}, queueTime: {}, processingTime: {}, " + "totalTime: {}, fsReadTime: {}", CurCall.get().toString(), TextFormat.shortDebugString(result), - CurCall.get().getReceiveTime(), qTime, processingTime, totalTime, - CurCall.get().getFsReadTime()); + CurCall.get().getReceiveTime(), qTime, processingTime, totalTime, fsReadTime); } // Use the raw request call size for now. long requestSize = call.getSize(); long responseSize = result.getSerializedSize(); long responseBlockSize = call.getBlockBytesScanned(); - long fsReadTime = call.getFsReadTime(); if (call.isClientCellBlockSupported()) { // Include the payload size in HBaseRpcController responseSize += call.getResponseCellSize(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java index fc3b6fc0a6b1..ed7e67edfaf0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java @@ -101,7 +101,6 @@ public abstract class ServerCall implements RpcCa private long responseCellSize = 0; private long responseBlockSize = 0; - private long fsReadTimeMillis = 0; // cumulative size of serialized exceptions private long exceptionSize = 0; private final boolean retryImmediatelySupported; @@ -604,14 +603,4 @@ public int getRemotePort() { public synchronized BufferChain getResponse() { return response; } - - @Override - public void updateFsReadTime(long latencyMillis) { - fsReadTimeMillis += latencyMillis; - } - - @Override - public long getFsReadTime() { - return fsReadTimeMillis; - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/ThreadLocalServerSideScanMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/ThreadLocalServerSideScanMetrics.java index 8c9ec24e8662..e14761ab6e18 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/ThreadLocalServerSideScanMetrics.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/ThreadLocalServerSideScanMetrics.java @@ -18,10 +18,12 @@ package org.apache.hadoop.hbase.monitoring; import java.util.concurrent.atomic.AtomicLong; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics; import 
org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.ScannerContext; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; /** * Thread-local storage for server-side scan metrics that captures performance data separately for @@ -61,7 +63,8 @@ * @see RegionScanner * @see org.apache.hadoop.hbase.regionserver.handler.ParallelSeekHandler */ -@InterfaceAudience.Private +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.PHOENIX) +@InterfaceStability.Evolving public final class ThreadLocalServerSideScanMetrics { private ThreadLocalServerSideScanMetrics() { } @@ -81,6 +84,9 @@ private ThreadLocalServerSideScanMetrics() { private static final ThreadLocal BLOCK_READ_OPS_COUNT = ThreadLocal.withInitial(() -> new AtomicLong(0)); + private static final ThreadLocal FS_READ_TIME = + ThreadLocal.withInitial(() -> new AtomicLong(0)); + public static void setScanMetricsEnabled(boolean enable) { IS_SCAN_METRICS_ENABLED.set(enable); } @@ -101,6 +107,10 @@ public static long addBlockReadOpsCount(long count) { return BLOCK_READ_OPS_COUNT.get().addAndGet(count); } + public static long addFsReadTime(long time) { + return FS_READ_TIME.get().addAndGet(time); + } + public static boolean isScanMetricsEnabled() { return IS_SCAN_METRICS_ENABLED.get(); } @@ -121,6 +131,10 @@ public static AtomicLong getBlockReadOpsCountCounter() { return BLOCK_READ_OPS_COUNT.get(); } + public static AtomicLong getFsReadTimeCounter() { + return FS_READ_TIME.get(); + } + public static long getBytesReadFromFsAndReset() { return getBytesReadFromFsCounter().getAndSet(0); } @@ -137,11 +151,16 @@ public static long getBlockReadOpsCountAndReset() { return getBlockReadOpsCountCounter().getAndSet(0); } + public static long getFsReadTimeAndReset() { + return getFsReadTimeCounter().getAndSet(0); + } + public static void reset() { getBytesReadFromFsAndReset(); getBytesReadFromBlockCacheAndReset(); getBytesReadFromMemstoreAndReset(); getBlockReadOpsCountAndReset(); + getFsReadTimeAndReset(); } public static void populateServerSideScanMetrics(ServerSideScanMetrics metrics) { @@ -156,5 +175,7 @@ public static void populateServerSideScanMetrics(ServerSideScanMetrics metrics) getBytesReadFromMemstoreCounter().get()); metrics.addToCounter(ServerSideScanMetrics.BLOCK_READ_OPS_COUNT_METRIC_NAME, getBlockReadOpsCountCounter().get()); + metrics.addToCounter(ServerSideScanMetrics.FS_READ_TIME_METRIC_NAME, + getFsReadTimeCounter().get()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index d325c67a82af..11d5917dda65 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -103,6 +103,7 @@ import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.ipc.ServerRpcController; +import org.apache.hadoop.hbase.monitoring.ThreadLocalServerSideScanMetrics; import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.procedure2.RSProcedureCallable; import org.apache.hadoop.hbase.quotas.ActivePolicyEnforcement; @@ -3519,10 +3520,6 @@ private void scan(HBaseRpcController controller, ScanRequest request, RegionScan // from block size progress before writing into the response 
scanMetrics.setCounter(ServerSideScanMetrics.BLOCK_BYTES_SCANNED_KEY_METRIC_NAME, scannerContext.getBlockSizeProgress()); - if (rpcCall != null) { - scanMetrics.setCounter(ServerSideScanMetrics.FS_READ_TIME_METRIC_NAME, - rpcCall.getFsReadTime()); - } } } } finally { @@ -3589,6 +3586,11 @@ public ScanResponse scan(final RpcController controller, final ScanRequest reque } throw new ServiceException(e); } + boolean trackMetrics = request.hasTrackScanMetrics() && request.getTrackScanMetrics(); + ThreadLocalServerSideScanMetrics.setScanMetricsEnabled(trackMetrics); + if (trackMetrics) { + ThreadLocalServerSideScanMetrics.reset(); + } requestCount.increment(); rpcScanRequestCount.increment(); RegionScannerContext rsx; @@ -3659,7 +3661,6 @@ public ScanResponse scan(final RpcController controller, final ScanRequest reque boolean scannerClosed = false; try { List results = new ArrayList<>(Math.min(rows, 512)); - boolean trackMetrics = request.hasTrackScanMetrics() && request.getTrackScanMetrics(); ServerSideScanMetrics scanMetrics = trackMetrics ? new ServerSideScanMetrics() : null; if (rows > 0) { boolean done = false; @@ -3741,6 +3742,7 @@ public ScanResponse scan(final RpcController controller, final ScanRequest reque scanMetrics.addToCounter(ServerSideScanMetrics.RPC_SCAN_QUEUE_WAIT_TIME_METRIC_NAME, rpcQueueWaitTime); } + ThreadLocalServerSideScanMetrics.populateServerSideScanMetrics(scanMetrics); Map metrics = scanMetrics.getMetricsMap(); ScanMetrics.Builder metricBuilder = ScanMetrics.newBuilder(); NameInt64Pair.Builder pairBuilder = NameInt64Pair.newBuilder(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java index aa2809fece6e..c69dc6e2df6a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java @@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.ipc.RpcCall; import org.apache.hadoop.hbase.ipc.RpcCallback; import org.apache.hadoop.hbase.ipc.RpcServer; -import org.apache.hadoop.hbase.monitoring.ThreadLocalServerSideScanMetrics; import org.apache.hadoop.hbase.regionserver.Region.Operation; import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState; @@ -96,8 +95,6 @@ public class RegionScannerImpl implements RegionScanner, Shipper, RpcCallback { private RegionServerServices rsServices; - private ServerSideScanMetrics scannerInitMetrics = null; - @Override public RegionInfo getRegionInfo() { return region.getRegionInfo(); @@ -148,16 +145,7 @@ private static boolean hasNonce(HRegion region, long nonce) { } finally { region.smallestReadPointCalcLock.unlock(ReadPointCalculationLock.LockType.RECORDING_LOCK); } - boolean isScanMetricsEnabled = scan.isScanMetricsEnabled(); - ThreadLocalServerSideScanMetrics.setScanMetricsEnabled(isScanMetricsEnabled); - if (isScanMetricsEnabled) { - this.scannerInitMetrics = new ServerSideScanMetrics(); - ThreadLocalServerSideScanMetrics.reset(); - } initializeScanners(scan, additionalScanners); - if (isScanMetricsEnabled) { - ThreadLocalServerSideScanMetrics.populateServerSideScanMetrics(scannerInitMetrics); - } } public ScannerContext getContext() { @@ -291,16 +279,6 @@ public boolean nextRaw(List outResults, ScannerContext sca throw new UnknownScannerException("Scanner was closed"); } boolean moreValues = false; - 
boolean isScanMetricsEnabled = scannerContext.isTrackingMetrics(); - ThreadLocalServerSideScanMetrics.setScanMetricsEnabled(isScanMetricsEnabled); - if (isScanMetricsEnabled) { - ThreadLocalServerSideScanMetrics.reset(); - ServerSideScanMetrics scanMetrics = scannerContext.getMetrics(); - if (scannerInitMetrics != null) { - scannerInitMetrics.getMetricsMap().forEach(scanMetrics::addToCounter); - scannerInitMetrics = null; - } - } if (outResults.isEmpty()) { // Usually outResults is empty. This is true when next is called // to handle scan or get operation. @@ -310,10 +288,6 @@ public boolean nextRaw(List outResults, ScannerContext sca moreValues = nextInternal(tmpList, scannerContext); outResults.addAll(tmpList); } - if (isScanMetricsEnabled) { - ServerSideScanMetrics scanMetrics = scannerContext.getMetrics(); - ThreadLocalServerSideScanMetrics.populateServerSideScanMetrics(scanMetrics); - } region.addReadRequestsCount(1); if (region.getMetrics() != null) { region.getMetrics().updateReadRequestCount(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java index 1cafc5bffac0..b0a2dfc1bab1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java @@ -919,16 +919,6 @@ public long getResponseExceptionSize() { @Override public void incrementResponseExceptionSize(long exceptionSize) { } - - @Override - public void updateFsReadTime(long latencyMillis) { - - } - - @Override - public long getFsReadTime() { - return 0; - } }; return rpcCall; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestRpcLogDetails.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestRpcLogDetails.java index 4ec3e90aad86..39e942b04837 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestRpcLogDetails.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestRpcLogDetails.java @@ -264,16 +264,6 @@ public long getResponseExceptionSize() { @Override public void incrementResponseExceptionSize(long exceptionSize) { } - - @Override - public void updateFsReadTime(long latencyMillis) { - - } - - @Override - public long getFsReadTime() { - return 0; - } }; return rpcCall; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java index b440431f1fb0..3ba858c5e036 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java @@ -326,16 +326,6 @@ public long getResponseExceptionSize() { @Override public void incrementResponseExceptionSize(long exceptionSize) { } - - @Override - public void updateFsReadTime(long latencyMillis) { - - } - - @Override - public long getFsReadTime() { - return 0; - } }; } } From df34c6535510a4c206d633db691ea0eb33de3de9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1vid=20Paksy?= Date: Tue, 7 Oct 2025 11:16:26 +0200 Subject: [PATCH 38/92] HBASE-29152 Replace site skin with Reflow2 Maven skin (#7355) - Replaced the Maven Fluido skin with the newer [Reflow2 Maven 
skin](https://devacfr.github.io/reflow-maven-skin/doc/reflow-documentation.html#doc-get-started) (Apache Phoenix project uses this). This brings newer Bootstrap (before we used 2.3.2, after 4.x - still not ideal because 5.x is the latest major version but it is an improvement). - The new skin also brings new more modern look. - Made sure only local resources are used by the website and the book.html - so no CDN is used - as before. We cannot load remote content as it is banned by central ASF Content Security Policy. - Fixed our site text customization was not working in project-info-reports.properties file (fixed filename, fixed keys) Signed-off-by: Istvan Toth Signed-off-by: Nick Dimiduk --- pom.xml | 68 +++--- ...erties => project-info-reports.properties} | 194 +++++++++--------- src/site/resources/css/site.css | 103 ++-------- src/site/resources/js/searchform.js | 48 +++++ src/site/site.xml | 111 +++++----- src/site/xdoc/documentation-1.4.xml | 37 ++++ src/site/xdoc/documentation-2.3.xml | 39 ++++ src/site/xdoc/documentation-2.4.xml | 39 ++++ src/site/xdoc/documentation-2.5.xml | 37 ++++ src/site/xdoc/documentation-2.6.xml | 37 ++++ src/site/xdoc/index.xml | 4 +- 11 files changed, 438 insertions(+), 279 deletions(-) rename src/site/custom/{project-info-report.properties => project-info-reports.properties} (79%) create mode 100644 src/site/resources/js/searchform.js create mode 100644 src/site/xdoc/documentation-1.4.xml create mode 100644 src/site/xdoc/documentation-2.3.xml create mode 100644 src/site/xdoc/documentation-2.4.xml create mode 100644 src/site/xdoc/documentation-2.5.xml create mode 100644 src/site/xdoc/documentation-2.6.xml diff --git a/pom.xml b/pom.xml index 18c074a7c249..4037286e4b6c 100644 --- a/pom.xml +++ b/pom.xml @@ -959,6 +959,7 @@ 1.0.1 2.44.4 3.12.0 + 2.3.3 0.27 1.11.0 @@ -2783,30 +2784,14 @@ maven-dependency-plugin - unpack-website-resources + + unpack-RefGuide-resources unpack pre-site - - org.webjars - bootstrap - 2.3.2 - jar - true - **/css/bootstrap-responsive.min.css - - - org.webjars - highlightjs - 8.7 - jar - true - **/styles/github.min.css, - **/highlight.min.js, - org.webjars font-awesome @@ -2851,6 +2836,34 @@ ${project.build.directory} + + + unpack-site-resources + + unpack + + pre-site + + + + io.github.devacfr.maven.skins + reflow-default-webdeps + ${reflow-maven-skin.version} + jar + false + **/css/bootstrap.min.css, + **/css/themes/litera/bootstrap.min.css, + **/css/fontawesome/*, + **/css/fontawesome/webfonts/*, + **/js/*.js, + **/js/languages/*, + **/js/styles/github.min.css, + **/js/styles/default.min.css + + + ${project.build.directory}/site + + org.apache.maven.wagon @@ -3017,8 +3034,8 @@ - - copy-css-js-to-site + + copy-css-js-for-RefGuide run @@ -3026,20 +3043,11 @@ - - - - - - - - - diff --git a/src/site/custom/project-info-report.properties b/src/site/custom/project-info-reports.properties similarity index 79% rename from src/site/custom/project-info-report.properties rename to src/site/custom/project-info-reports.properties index 1d49aea40f17..07659963e3cf 100644 --- a/src/site/custom/project-info-report.properties +++ b/src/site/custom/project-info-reports.properties @@ -15,6 +15,9 @@ # specific language governing permissions and limitations # under the License. +# This file is used by the Apache Maven Project Info Reports Plugin. +# Having this allows us to change the texts in the generated reports. 
+ report.dependencies.column.artifactId = ArtifactId report.dependencies.column.classifier = Classifier report.dependencies.column.description = Description @@ -100,27 +103,28 @@ report.dependency-info.title = Dependency report.dependency-info.description = These are instructions for including Apache HBase™ as a dependency using various dependency management tools. report.index.nodescription = There is currently no description associated with Apache HBase™. report.index.title = About Apache HBase™ -report.issuetracking.bugzilla.intro = Apache HBase™ uses {Bugzilla, http://www.bugzilla.org/}. -report.issuetracking.custom.intro = Apache HBase™ uses %issueManagementSystem% to manage its issues. -report.issuetracking.description = Apache HBase™ uses the following issue management system(s). -report.issuetracking.general.intro = Apache HBase™ uses an Issue Management System to manage its issues. -report.issuetracking.intro = Issues, bugs, and feature requests should be submitted to the following issue tracking system for Apache HBase™. -report.issuetracking.jira.intro = Apache HBase™ uses {JIRA, http://www.atlassian.com/software/jira}. -report.issuetracking.name = Issue Tracking -report.issuetracking.noissueManagement = No issue management system is defined. Please check back at a later date. -report.issuetracking.overview.title = Overview -report.issuetracking.scarab.intro = Apache HBase™ uses {Scarab, http://scarab.tigris.org/}. -report.issuetracking.title = Issue Tracking -report.license.description = Apache HBase™ uses the following project license(s). -report.license.multiple = Apache HBase™ is provided under multiple licenses: -report.license.name = Apache HBase™ License -report.license.nolicense = No license is defined for Apache HBase™. -report.license.overview.intro = This is the license for the Apache HBase project itself, but not necessarily its dependencies. -report.license.overview.title = Overview -report.license.originalText = [Original text] -report.license.copy = Copy of the license follows: -report.license.title = Apache HBase™ License -report.license.unnamed = Unnamed +report.issue-management.bugzilla.intro = Apache HBase™ uses {Bugzilla, http://www.bugzilla.org/}. +report.issue-management.custom.intro = Apache HBase™ uses %issueManagementSystem% to manage its issues. +report.issue-management.description = Apache HBase™ uses the following issue management system(s). +report.issue-management.general.intro = Apache HBase™ uses an Issue Management System to manage its issues. +report.issue-management.github.intro = Apache HBase™ uses {GitHub, http://github.com/}. +report.issue-management.intro = Issues, bugs, and feature requests should be submitted to the following issue tracking system for Apache HBase™. +report.issue-management.jira.intro = Apache HBase™ uses {JIRA, http://www.atlassian.com/software/jira}. +report.issue-management.name = Issue Tracking +report.issue-management.noissueManagement = No issue management system is defined. Please check back at a later date. +report.issue-management.overview.title = Overview +report.issue-management.scarab.intro = Apache HBase™ uses {Scarab, http://scarab.tigris.org/}. +report.issue-management.title = Issue Tracking +report.licenses.description = Apache HBase™ uses the following project license(s). +report.licenses.multiple = Apache HBase™ is provided under multiple licenses: +report.licenses.name = Apache HBase™ License +report.licenses.nolicense = No license is defined for Apache HBase™. 
+report.licenses.overview.intro = This is the license for the Apache HBase project itself, but not necessarily its dependencies. +report.licenses.overview.title = Overview +report.licenses.originalText = [Original text] +report.licenses.copy = Copy of the license follows: +report.licenses.title = Apache HBase™ License +report.licenses.unnamed = Unnamed report.mailing-lists.column.archive = Archive report.mailing-lists.column.name = Name report.mailing-lists.column.otherArchives = Other Archives @@ -142,7 +146,7 @@ report.scm.accessthroughtproxy.svn.intro3 = Example: Ed report.scm.accessthroughtproxy.title = Access Through a Proxy report.scm.anonymousaccess.cvs.intro = Apache HBase™'s CVS repository can be checked out through anonymous CVS with the following instruction set. When prompted for a password for anonymous, simply press the Enter key. report.scm.anonymousaccess.general.intro = Refer to the documentation of the SCM used for more information about anonymously check out. The connection url is: -report.scm.anonymousaccess.git.intro = The source can be checked out anonymously from Git with this command (See {http://git-scm.com/docs/git-clone,http://git-scm.com/docs/git-clone}): +report.scm.anonymousaccess.git.intro = The source can be checked out anonymously from Git with this command (See {https://git-scm.com/docs/git-clone,https://git-scm.com/docs/git-clone}): report.scm.anonymousaccess.hg.intro = The source can be checked out anonymously from Mercurial with this command (See {http://www.selenic.com/mercurial/hg.1.html#clone,http://www.selenic.com/mercurial/hg.1.html#clone}): report.scm.anonymousaccess.svn.intro = The source can be checked out anonymously from Subversion with this command: report.scm.anonymousaccess.title = Anonymous Access @@ -152,7 +156,7 @@ report.scm.description = This docume report.scm.devaccess.clearcase.intro = Only project developers can access the ClearCase tree via this method. Substitute username with the proper value. report.scm.devaccess.cvs.intro = Only project developers can access the CVS tree via this method. Substitute username with the proper value. report.scm.devaccess.general.intro = Refer to the documentation of the SCM used for more information about developer check out. The connection url is: -report.scm.devaccess.git.intro = Only project developers can access the Git tree via this method (See {http://git-scm.com/docs/git-clone,http://git-scm.com/docs/git-clone}). +report.scm.devaccess.git.intro = Only project developers can access the Git tree via this method (See {https://git-scm.com/docs/git-clone,https://git-scm.com/docs/git-clone}). report.scm.devaccess.hg.intro = Only project developers can access the Mercurial tree via this method (See {http://www.selenic.com/mercurial/hg.1.html#clone,http://www.selenic.com/mercurial/hg.1.html#clone}). report.scm.devaccess.perforce.intro = Only project developers can access the Perforce tree via this method. Substitute username and password with the proper values. report.scm.devaccess.starteam.intro = Only project developers can access the Starteam tree via this method. Substitute username with the proper value. @@ -166,13 +170,13 @@ report.scm.general.intro = Apache HBas report.scm.name = Source Repository report.scm.noscm = No source configuration management system is defined. Please check back at a later date. report.scm.overview.title = Overview -report.scm.git.intro = Apache HBase™ uses {Git, http://git-scm.com/} to manage its source code. 
Instructions on Git use can be found at {http://git-scm.com/documentation,http://git-scm.com/documentation}. -report.scm.hg.intro = Apache HBase™ uses {Mercurial, http://mercurial.selenic.com/wiki/} to manage its source code. Instructions on Mercurial use can be found at {http://hgbook.red-bean.com/read/, http://hgbook.red-bean.com/read/}. +report.scm.git.intro = Apache HBase™ uses {Git, https://git-scm.com/} to manage its source code. Instructions on Git use can be found at {https://git-scm.com/documentation,https://git-scm.com/documentation}. +report.scm.hg.intro = Apache HBase™ uses {Mercurial, https://www.mercurial-scm.org/} to manage its source code. Instructions on Mercurial use can be found at {http://hgbook.red-bean.com/read/, http://hgbook.red-bean.com/read/}. report.scm.perforce.intro = Apache HBase™ uses {Perforce, http://www.perforce.com/} to manage its source code. Instructions on Perforce use can be found at {http://www.perforce.com/perforce/doc.051/manuals/cmdref/index.html, http://www.perforce.com/perforce/doc.051/manuals/cmdref/index.html}. report.scm.starteam.intro = Apache HBase™ uses {Starteam, http://www.borland.com/us/products/starteam/} to manage its source code. report.scm.svn.intro = Apache HBase™ uses {Subversion, http://subversion.apache.org/} to manage its source code. Instructions on Subversion use can be found at {http://svnbook.red-bean.com/, http://svnbook.red-bean.com/}. report.scm.title = Source Repository -report.scm.webaccess.nourl = There is no browsable version of the source repository listed for Apache HBase™. Please check back again later. +report.scm.webaccess.nourl = There is no browsable version of the source code repository listed for Apache HBase™. Please check back again later. report.scm.webaccess.title = Web Browser Access report.scm.webaccess.url = The following is a link to a browsable version of the source repository: report.summary.build.artifactid = ArtifactId @@ -195,58 +199,58 @@ report.summary.noorganization = Apache HBas report.summary.title = Project Summary report.summary.value = Value report.summary.download = Download -report.team-list.contributors.actualtime = Actual Time (GMT) -report.team-list.contributors.email = Email -report.team-list.contributors.intro = The following additional people have contributed to Apache HBase™ through the way of suggestions, patches or documentation. -report.team-list.contributors.image = Image -report.team-list.contributors.name = Name -report.team-list.contributors.organization = Organization -report.team-list.contributors.organizationurl = Organization URL -report.team-list.contributors.properties = Properties -report.team-list.contributors.roles = Roles -report.team-list.contributors.timezone = Time Zone -report.team-list.contributors.title = Contributors -report.team-list.contributors.url = URL -report.team-list.description = These are the members of the Apache HBase™ project. These are the individuals who have contributed to the project in one form or another. -report.team-list.developers.actualtime = Actual Time (GMT) -report.team-list.developers.email = Email -report.team-list.developers.image = Image -report.team-list.developers.id = Id -report.team-list.developers.intro = These are the developers with commit privileges that have directly contributed to the project in one way or another. 
-report.team-list.developers.name = Name -report.team-list.developers.organization = Organization -report.team-list.developers.organizationurl = Organization URL -report.team-list.developers.properties = Properties -report.team-list.developers.roles = Roles -report.team-list.developers.timezone = Time Zone -report.team-list.developers.title = Members -report.team-list.developers.url = URL -report.team-list.intro.description1 = A successful project requires many people to play many roles. Some members write code or documentation, while others are valuable as testers, submitting patches and suggestions. -report.team-list.intro.description2 = The team is comprised of Members and Contributors. Members have direct access to the source of a project and actively evolve the code-base. Contributors improve the project through submission of patches and suggestions to the Members. The number of Contributors to the project is unbounded. Get involved today. All contributions to the project are greatly appreciated. -report.team-list.intro.title = The Team -report.team-list.name = Project Team -report.team-list.nocontributor = Apache HBase™ does not maintain a list of contributors. -report.team-list.nodeveloper = Apache HBase™ does not maintain a list of developers. -report.team-list.title = Project Team -report.dependencyManagement.name = Dependency Management -report.dependencyManagement.description = This document lists the dependencies that are defined through dependencyManagement. -report.dependencyManagement.title = Project Dependency Management -report.dependencyManagement.nolist = There are no dependencies in the DependencyManagement of Apache HBase™. -report.dependencyManagement.column.groupId = GroupId -report.dependencyManagement.column.artifactId = ArtifactId -report.dependencyManagement.column.version = Version -report.dependencyManagement.column.classifier = Classifier -report.dependencyManagement.column.type = Type -report.dependencyManagement.column.license = License -report.dependencyManagement.intro.compile = The following is a list of compile dependencies in the DependencyManagement of Apache HBase™. These dependencies can be included in the submodules to compile and run the submodule: -report.dependencyManagement.intro.provided = The following is a list of provided dependencies in the DependencyManagement of Apache HBase™. These dependencies can be included in the submodules to compile the submodule, but should be provided by default when using the library: -report.dependencyManagement.intro.runtime = The following is a list of runtime dependencies in the DependencyManagement of Apache HBase™. These dependencies can be included in the submodules to run the submodule: -report.dependencyManagement.intro.system = The following is a list of system dependencies in the DependencyManagement of Apache HBase™. These dependencies can be included in the submodules to compile the submodule: -report.dependencyManagement.intro.test = The following is a list of test dependencies in the DependencyManagement of Apache HBase™. These dependencies can be included in the submodules to compile and run unit tests for the submodule: -report.pluginManagement.nolist = There are no plugins defined in the PluginManagement part of Apache HBase™. -report.pluginManagement.name = Plugin Management -report.pluginManagement.description = This document lists the plugins that are defined through pluginManagement. 
-report.pluginManagement.title = Project Plugin Management +report.team.contributors.actualtime = Actual Time (GMT) +report.team.contributors.email = Email +report.team.contributors.intro = The following additional people have contributed to Apache HBase™ through the way of suggestions, patches or documentation. +report.team.contributors.image = Image +report.team.contributors.name = Name +report.team.contributors.organization = Organization +report.team.contributors.organizationurl = Organization URL +report.team.contributors.properties = Properties +report.team.contributors.roles = Roles +report.team.contributors.timezone = Time Zone +report.team.contributors.title = Contributors +report.team.contributors.url = URL +report.team.description = These are the members of the Apache HBase™ project. These are the individuals who have contributed to the project in one form or another. +report.team.developers.actualtime = Actual Time (GMT) +report.team.developers.email = Email +report.team.developers.image = Image +report.team.developers.id = Id +report.team.developers.intro = These are the developers with commit privileges that have directly contributed to the project in one way or another. +report.team.developers.name = Name +report.team.developers.organization = Organization +report.team.developers.organizationurl = Organization URL +report.team.developers.properties = Properties +report.team.developers.roles = Roles +report.team.developers.timezone = Time Zone +report.team.developers.title = Members +report.team.developers.url = URL +report.team.intro.description1 = A successful project requires many people to play many roles. Some members write code or documentation, while others are valuable as testers, submitting patches and suggestions. +report.team.intro.description2 = The team is comprised of Members and Contributors. Members have direct access to the source of a project and actively evolve the code-base. Contributors improve the project through submission of patches and suggestions to the Members. The number of Contributors to the project is unbounded. Get involved today. All contributions to the project are greatly appreciated. +report.team.intro.title = The Team +report.team.name = Project Team +report.team.nocontributor = Apache HBase™ does not maintain a list of contributors. +report.team.nodeveloper = Apache HBase™ does not maintain a list of developers. +report.team.title = Project Team +report.dependency-management.name = Dependency Management +report.dependency-management.description = This document lists the dependencies that are defined through dependencyManagement. +report.dependency-management.title = Project Dependency Management +report.dependency-management.nolist = There are no dependencies in the DependencyManagement of Apache HBase™. +report.dependency-management.column.groupId = GroupId +report.dependency-management.column.artifactId = ArtifactId +report.dependency-management.column.version = Version +report.dependency-management.column.classifier = Classifier +report.dependency-management.column.type = Type +report.dependency-management.column.license = License +report.dependency-management.intro.compile = The following is a list of compile dependencies in the DependencyManagement of Apache HBase™. These dependencies can be included in the submodules to compile and run the submodule: +report.dependency-management.intro.provided = The following is a list of provided dependencies in the DependencyManagement of Apache HBase™. 
These dependencies can be included in the submodules to compile the submodule, but should be provided by default when using the library: +report.dependency-management.intro.runtime = The following is a list of runtime dependencies in the DependencyManagement of Apache HBase™. These dependencies can be included in the submodules to run the submodule: +report.dependency-management.intro.system = The following is a list of system dependencies in the DependencyManagement of Apache HBase™. These dependencies can be included in the submodules to compile the submodule: +report.dependency-management.intro.test = The following is a list of test dependencies in the DependencyManagement of Apache HBase™. These dependencies can be included in the submodules to compile and run unit tests for the submodule: +report.plugin-management.nolist = There are no plugins defined in the PluginManagement part of Apache HBase™. +report.plugin-management.name = Plugin Management +report.plugin-management.description = This document lists the plugins that are defined through pluginManagement. +report.plugin-management.title = Project Plugin Management report.plugins.name = Project Plugins report.plugins.description = This document lists the build plugins and the report plugins used by Apache HBase™. report.plugins.title = Project Build Plugins @@ -260,20 +264,20 @@ report.modules.title = Project Mod report.modules.intro = Apache HBase™ has declared the following modules: report.modules.header.name = Name report.modules.header.description = Description -report.distributionManagement.name = Distribution Management -report.distributionManagement.description = This document provides informations on the distribution management of Apache HBase™. -report.distributionManagement.title = Project Distribution Management -report.distributionManagement.nodistributionmanagement = No distribution management is defined for Apache HBase™. -report.distributionManagement.overview.title = Overview -report.distributionManagement.overview.intro = The following is the distribution management information used by Apache HBase™. -report.distributionManagement.downloadURL = Download URL -report.distributionManagement.repository = Repository -report.distributionManagement.snapshotRepository = Snapshot Repository -report.distributionManagement.site = Site -report.distributionManagement.relocation = Relocation -report.distributionManagement.field = Field -report.distributionManagement.value = Value -report.distributionManagement.relocation.groupid = GroupId -report.distributionManagement.relocation.artifactid = ArtifactId -report.distributionManagement.relocation.version = Version -report.distributionManagement.relocation.message = Message +report.distribution-management.name = Distribution Management +report.distribution-management.description = This document provides informations on the distribution management of Apache HBase™. +report.distribution-management.title = Project Distribution Management +report.distribution-management.nodistributionmanagement = No distribution management is defined for Apache HBase™. +report.distribution-management.overview.title = Overview +report.distribution-management.overview.intro = The following is the distribution management information used by Apache HBase™. 
+report.distribution-management.downloadURL = Download URL +report.distribution-management.repository = Repository +report.distribution-management.snapshotRepository = Snapshot Repository +report.distribution-management.site = Site +report.distribution-management.relocation = Relocation +report.distribution-management.field = Field +report.distribution-management.value = Value +report.distribution-management.relocation.groupid = GroupId +report.distribution-management.relocation.artifactid = ArtifactId +report.distribution-management.relocation.version = Version +report.distribution-management.relocation.message = Message diff --git a/src/site/resources/css/site.css b/src/site/resources/css/site.css index 9426cd67a4f3..17faa5b035b6 100644 --- a/src/site/resources/css/site.css +++ b/src/site/resources/css/site.css @@ -16,99 +16,24 @@ * specific language governing permissions and limitations * under the License. */ - -/*@import(https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/2.3.2/css/bootstrap.min.css); -@import(https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/2.3.2/css/bootstrap-responsive.css);*/ -html { - background-color: #fff; -} -body { - font-size: 16px; -} -li { - line-height: 120%; +.xtoplogo { + height: 26px; + width: 134px; + background-image: url(../images/hbase_logo.png); + background-size: 134px 26px; } -header#topbar, -div#banner, -main#bodyColumn, -footer { - width: initial; - padding-left: 20px; - padding-right: 20px; - clear: both; -} -footer { - background-color: #e5e5e5; -} -footer .row, footer p, footer .pull-right { - margin: 5px; -} -div#search-form.navbar-search.pull-right { - width: 290px; - margin-right: 0; - margin-top: -5px; - margin-left: 0; - position: initial; +/** Fix horizontal scrollbar (skin issue). */ +.main-body { + margin: 0 15px; } -.container, -.navbar-static-top .container, -.navbar-fixed-top .container, -.navbar-fixed-bottom .container, -.navbar-inner { - width: initial; -} -/* Change the color and effect when clicking in menus */ -.dropdown-menu>li>a:hover, -.dropdown-menu>li>a:focus, -.dropdown-submenu:hover>a, -.dropdown-submenu:focus>a { - background-color: #e5e5e5; - background-image: none; - color: #000; - font-weight: bolder; -} - -.dropdown-backdrop { - position: static; +/** Restore earlier code block styles. 
*/
+pre {
+  border-radius: 6px;
+  border: 1px solid rgba(0, 0, 0, 0.15);
 }
-@media only screen and (max-width: 979px) {
-  body {
-    padding-left: 0;
-    padding-right: 0;
-    width: initial;
-    margin: 0;
-  }
-  /* Without this rule, drop-down divs are a fixed height
-   * the first time they are expanded */
-  .collapse.in {
-    height: auto !important;
-  }
-  div#search-form.navbar-search.pull-right {
-    padding: 0;
-    margin-left: ;
-    width: initial;
-    clear: both;
-  }
-}
-
-/* Fix Google Custom Search results on very narrow screens */
-@media(max-width: 480px) {
-  .gsc-overflow-hidden .nav-collapse {
-    -webkit-transform: none;
-  }
+footer {
+  margin-top: 70px;
 }
-
-/* Override weird body padding thing that causes scrolling */
-@media (max-width: 767px) {
-  body {
-    padding-right: 0;
-    padding-left: 0;
-  }
-  .navbar-fixed-top, .navbar-fixed-bottom, .navbar-static-top {
-    margin-left: 0;
-    margin-right: 0;
-  }
-}
\ No newline at end of file
diff --git a/src/site/resources/js/searchform.js b/src/site/resources/js/searchform.js
new file mode 100644
index 000000000000..11441c39b45b
--- /dev/null
+++ b/src/site/resources/js/searchform.js
@@ -0,0 +1,48 @@
+/*
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This script appends our Google Site search form to the navbar.
+// Unfortunately our current Maven site skin does not support this.
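+//
+// For reference, the DOM calls below assemble a form that is roughly equivalent
+// to this static markup (shown only as an illustration of the end result):
+//
+//   <form id="search-form" class="form-inline ml-lg-3"
+//         action="https://www.google.com/search" method="get">
+//     <input type="hidden" name="sitesearch" value="hbase.apache.org">
+//     <input type="text" name="q" id="query" class="form-control"
+//            placeholder="Search with Google...">
+//   </form>
+//
+// 'top-navbar-collapse-1' is presumed to be the id of the collapsible navbar
+// container rendered by the skin; the form is appended to it once the page
+// has finished loading.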
+ +window.onload = function() { + const div = document.getElementById('top-navbar-collapse-1'); + + const form = document.createElement("form"); + form.setAttribute('id', 'search-form'); + form.setAttribute('action', 'https://www.google.com/search'); + form.setAttribute('method', 'get'); + form.setAttribute('class', 'form-inline ml-lg-3'); + + const siteSearchInput = document.createElement('input'); + siteSearchInput.setAttribute('value', 'hbase.apache.org'); + siteSearchInput.setAttribute('name', 'sitesearch'); + siteSearchInput.setAttribute('type', 'hidden'); + form.appendChild(siteSearchInput); + + const queryInput = document.createElement('input'); + queryInput.setAttribute('name', 'q'); + queryInput.setAttribute('id', 'query'); + queryInput.setAttribute('type', 'text'); + queryInput.setAttribute('placeholder', 'Search with Google...'); + queryInput.setAttribute('class', 'form-control'); + form.appendChild(queryInput); + + div.appendChild(form); +}; diff --git a/src/site/site.xml b/src/site/site.xml index 79a758ba73cc..58a887632d0f 100644 --- a/src/site/site.xml +++ b/src/site/site.xml @@ -21,66 +21,49 @@ + xsi:schemaLocation="http://maven.apache.org/DECORATION/1.0.0 http://maven.apache.org/xsd/decoration-1.0.0.xsd" + name="Apache HBase"> + + + - - org.apache.maven.skins - maven-fluido-skin - 1.12.0 + io.github.devacfr.maven.skins + reflow-maven-skin + ${reflow-maven-skin.version} + - - true - false - - - 000385458301414556862:sq1bb0xugjg - - false - true - » - - apache/hbase - right - red - - + + true + true + + Apache HBase Project + Project Information + Documentation and API + ASF + + bootswatch-litera + true + + ]]> + index.html + + + false + true + + true + + + + + - - - - - - - - Apache HBase - images/hbase_logo_with_orca_large.png - http://hbase.apache.org/ - - - - - - - - ]]> + + ]]>

@@ -123,13 +106,13 @@ - + - + @@ -137,7 +120,7 @@ - + @@ -145,13 +128,13 @@ - + - + @@ -166,12 +149,12 @@