diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 4bc418017c0a..c550272cc3f8 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -59,8 +59,8 @@ pipeline { ASF_NIGHTLIES_BASE_ORI = "${ASF_NIGHTLIES}/hbase/${JOB_NAME}/${BUILD_NUMBER}" ASF_NIGHTLIES_BASE = "${ASF_NIGHTLIES_BASE_ORI.replaceAll(' ', '%20')}" // These are dependent on the branch - HADOOP3_VERSIONS = "3.3.5,3.3.6,3.4.0,3.4.1" - HADOOP3_DEFAULT_VERSION = "3.4.1" + HADOOP3_VERSIONS = "3.3.5,3.3.6,3.4.0,3.4.1,3.4.2" + HADOOP3_DEFAULT_VERSION = "3.4.2" } parameters { booleanParam(name: 'USE_YETUS_PRERELEASE', defaultValue: false, description: '''Check to use the current HEAD of apache/yetus rather than our configured release. diff --git a/dev-support/create-release/release-util.sh b/dev-support/create-release/release-util.sh index fd11ae853efb..02020d93de01 100755 --- a/dev-support/create-release/release-util.sh +++ b/dev-support/create-release/release-util.sh @@ -969,7 +969,7 @@ function get_hadoop3_version() { # case spotless:check failure, so we should run spotless:apply before committing function maven_spotless_apply() { # our spotless plugin version requires at least java 11 to run, so we use java 17 here - JAVA_HOME="/usr/lib/jvm/java-17-openjdk-amd64" "${MVN[@]}" spotless:apply + JAVA_HOME="${JAVA17_HOME}" "${MVN[@]}" spotless:apply } function git_add_poms() { diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh index 46f08276c651..9a5d34cc2138 100755 --- a/dev-support/hbase-personality.sh +++ b/dev-support/hbase-personality.sh @@ -612,17 +612,17 @@ function hadoopcheck_rebuild # TODO remove this on non 2.5 branches ? yetus_info "Setting Hadoop 3 versions to test based on branch-2.5 rules" if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then - hbase_hadoop3_versions="3.2.4 3.3.6 3.4.0" + hbase_hadoop3_versions="3.2.4 3.3.6 3.4.1" else - hbase_hadoop3_versions="3.2.3 3.2.4 3.3.2 3.3.3 3.3.4 3.3.5 3.3.6 3.4.0" + hbase_hadoop3_versions="3.2.3 3.2.4 3.3.2 3.3.3 3.3.4 3.3.5 3.3.6 3.4.0 3.4.1" fi else yetus_info "Setting Hadoop 3 versions to test based on branch-2.6+/master/feature branch rules" # Isn't runnung these tests with the default Hadoop version redundant ? 
if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then - hbase_hadoop3_versions="3.3.6 3.4.0" + hbase_hadoop3_versions="3.3.6 3.4.1" else - hbase_hadoop3_versions="3.3.5 3.3.6 3.4.0" + hbase_hadoop3_versions="3.3.5 3.3.6 3.4.0 3.4.1" fi fi diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java index d9bae8490637..b0e259e1f9e2 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java @@ -36,4 +36,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface ClientTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.ClientTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java index a168adec08af..2dc143e944a0 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java @@ -35,4 +35,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface CoprocessorTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.CoprocessorTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java index 84f346baaea2..1b45b583c182 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java @@ -35,4 +35,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface FilterTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.FilterTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java index c23bfa298b36..0cb861979e08 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java @@ -35,4 +35,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface FlakeyTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.FlakeyTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java index 8eee0e6ae4b9..be55b3829e52 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java @@ -36,4 +36,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface IOTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.IOTests"; } diff --git 
a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java index 4e555b73fedb..0003cd1db511 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java @@ -34,4 +34,5 @@ * @see LargeTests */ public interface IntegrationTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.IntegrationTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java index b47e5bab9a46..3a24764e706a 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java @@ -33,4 +33,5 @@ * @see IntegrationTests */ public interface LargeTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.LargeTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java index 0e68ab3c0340..ac5b05e30704 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java @@ -35,4 +35,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface MapReduceTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.MapReduceTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java index 5dcf51b27e59..0ad843493ec1 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java @@ -35,4 +35,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface MasterTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.MasterTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java index d1f836ec0049..548f655c774e 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java @@ -32,4 +32,5 @@ * @see IntegrationTests */ public interface MediumTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.MediumTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java index 27beaacf963e..c6985d6b95cc 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java +++ 
b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java @@ -21,4 +21,5 @@ * Tag a test that covers our metrics handling. */ public interface MetricsTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.MetricsTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java index 695042e801bf..b7b7ad4c3f66 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java @@ -35,4 +35,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface MiscTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.MiscTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java index 929bd6487edf..71a24d5d5dd6 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java @@ -35,4 +35,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface RPCTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.RPCTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RSGroupTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RSGroupTests.java index 050a70762928..4d1ab88a9cf6 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RSGroupTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RSGroupTests.java @@ -21,4 +21,5 @@ * Tag the tests related to rs group feature. 
*/ public interface RSGroupTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.RSGroupTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java index 3439afa76eba..d79691d6fac6 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java @@ -35,4 +35,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface RegionServerTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.RegionServerTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java index df606c960c25..74c65a57982d 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java @@ -35,4 +35,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface ReplicationTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.ReplicationTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java index a648b4c39e03..9a73fde57e2c 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java @@ -35,4 +35,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface RestTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.RestTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java index a4e55ad3aba0..939c25c05ff4 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java @@ -35,4 +35,5 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface SecurityTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.SecurityTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java index 64d2bce381b6..54e16d7ad1ae 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java @@ -30,4 +30,5 @@ * @see IntegrationTests */ public interface SmallTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.SmallTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java 
b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java index d1f433b9719d..dac933ec78e4 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java @@ -36,4 +36,6 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface VerySlowMapReduceTests { + public static final String TAG = + "org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java index f556979e5b6a..1583de103e38 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java @@ -36,4 +36,6 @@ * @see org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests */ public interface VerySlowRegionServerTests { + public static final String TAG = + "org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests"; } diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java index 9fa0579ed47e..a318b388ef72 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java @@ -22,4 +22,5 @@ * {@code RecoverableZooKeeper}, not for tests which depend on ZooKeeper. */ public interface ZKTests { + public static final String TAG = "org.apache.hadoop.hbase.testclassification.ZKTests"; } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java index c9a76bef2891..bbbae2d631fe 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java @@ -52,10 +52,13 @@ public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abor private boolean stopped = false; private boolean aborted = false; private Connection connection; - // timestamp of most recent read from backup system table - private long prevReadFromBackupTbl = 0; - // timestamp of 2nd most recent read from backup system table - private long secondPrevReadFromBackupTbl = 0; + // timestamp of most recent completed cleaning run + private volatile long previousCleaningCompletionTimestamp = 0; + + @Override + public void postClean() { + previousCleaningCompletionTimestamp = EnvironmentEdgeManager.currentTime(); + } @Override public Iterable getDeletableFiles(Iterable files) { @@ -79,12 +82,12 @@ public Iterable getDeletableFiles(Iterable files) { return Collections.emptyList(); } - secondPrevReadFromBackupTbl = prevReadFromBackupTbl; - prevReadFromBackupTbl = EnvironmentEdgeManager.currentTime(); + // Pin the threshold, we don't want the result to change depending on evaluation time. 
+ final long recentFileThreshold = previousCleaningCompletionTimestamp; return Iterables.filter(files, file -> { // If the file is recent, be conservative and wait for one more scan of the bulk loads - if (file.getModificationTime() > secondPrevReadFromBackupTbl) { + if (file.getModificationTime() > recentFileThreshold) { LOG.debug("Preventing deletion due to timestamp: {}", file.getPath().toString()); return false; } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java index a1d057b4502d..2f9c3171346a 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java @@ -1668,9 +1668,7 @@ public static void restoreFromSnapshot(Connection conn) throws IOException { try (Admin admin = conn.getAdmin()) { String snapshotName = BackupSystemTable.getSnapshotName(conf); if (snapshotExists(admin, snapshotName)) { - admin.disableTable(BackupSystemTable.getTableName(conf)); - admin.restoreSnapshot(snapshotName); - admin.enableTable(BackupSystemTable.getTableName(conf)); + admin.restoreBackupSystemTable(snapshotName); LOG.debug("Done restoring backup system table"); } else { // Snapshot does not exists, i.e completeBackup failed after diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java index 2b26aba03fe6..63e26fdc245c 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java @@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.backup.BackupRequest; import org.apache.hadoop.hbase.backup.BackupRestoreFactory; import org.apache.hadoop.hbase.backup.BackupType; -import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; import org.apache.hadoop.hbase.backup.util.BackupUtils; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; @@ -204,7 +203,7 @@ private void handleContinuousBackup(Admin admin) throws IOException { private void handleNonContinuousBackup(Admin admin) throws IOException { initializeBackupStartCode(backupManager); - performLogRoll(admin); + performLogRoll(); performBackupSnapshots(admin); backupManager.addIncrementalBackupTableSet(backupInfo.getTables()); @@ -228,18 +227,14 @@ private void initializeBackupStartCode(BackupManager backupManager) throws IOExc } } - private void performLogRoll(Admin admin) throws IOException { + private void performLogRoll() throws IOException { // We roll log here before we do the snapshot. It is possible there is duplicate data // in the log that is already in the snapshot. But if we do it after the snapshot, we // could have data loss. // A better approach is to do the roll log on each RS in the same global procedure as // the snapshot. 
LOG.info("Execute roll log procedure for full backup ..."); - Map props = new HashMap<>(); - props.put("backupRoot", backupInfo.getBackupRootDir()); - admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, - LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props); - + BackupUtils.logRoll(conn, backupInfo.getBackupRootDir(), conf); newTimestamps = backupManager.readRegionServerLastLogRollResult(); } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java index c92c0747e83c..20884edf836e 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.hadoop.conf.Configuration; @@ -29,9 +28,7 @@ import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; import org.apache.hadoop.hbase.backup.util.BackupUtils; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore; import org.apache.hadoop.hbase.util.CommonFSUtils; @@ -84,13 +81,8 @@ public Map getIncrBackupLogFileMap() throws IOException { } LOG.info("Execute roll log procedure for incremental backup ..."); - HashMap props = new HashMap<>(); - props.put("backupRoot", backupInfo.getBackupRootDir()); + BackupUtils.logRoll(conn, backupInfo.getBackupRootDir(), conf); - try (Admin admin = conn.getAdmin()) { - admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, - LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props); - } newTimestamps = readRegionServerLastLogRollResult(); logList = getLogFilesForNewBackup(previousTimestampMins, newTimestamps, conf, savedStartCode); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java index 1bd3621b2945..28184656a890 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java @@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2; import org.apache.hadoop.hbase.mapreduce.WALInputFormat; import org.apache.hadoop.hbase.mapreduce.WALPlayer; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; @@ -102,7 +103,7 @@ protected List filterMissingFiles(List incrBackupFileList) throw } /** - * Check if a given path is belongs to active WAL directory + * Check if a given path belongs to active WAL directory * @param p path * @return true, if yes */ @@ -125,9 +126,9 @@ protected static int getIndex(TableName tbl, List sTableList) { /** * Reads bulk load records from backup table, iterates through the records and forms the paths for - * bulk loaded hfiles. 
Copies the bulk loaded hfiles to backup destination. This method does NOT - * clean up the entries in the bulk load system table. Those entries should not be cleaned until - * the backup is marked as complete. + * bulk loaded hfiles. Copies the bulk loaded hfiles to the backup destination. This method does + * NOT clean up the entries in the bulk load system table. Those entries should not be cleaned + * until the backup is marked as complete. * @param tablesToBackup list of tables to be backed up */ protected List handleBulkLoad(List tablesToBackup, @@ -231,6 +232,9 @@ private void mergeSplitAndCopyBulkloadedHFiles(List activeFiles, int numActiveFiles = activeFiles.size(); updateFileLists(activeFiles, archiveFiles); if (activeFiles.size() < numActiveFiles) { + // We've archived some files, delete bulkloads directory + // and re-try + deleteBulkLoadDirectory(); continue; } @@ -273,7 +277,7 @@ private void mergeSplitAndCopyBulkloadedHFiles(List files, TableName tn, incrementalCopyBulkloadHFiles(tgtFs, tn); } - private void updateFileLists(List activeFiles, List archiveFiles) + public void updateFileLists(List activeFiles, List archiveFiles) throws IOException { List newlyArchived = new ArrayList<>(); @@ -283,9 +287,23 @@ private void updateFileLists(List activeFiles, List archiveFiles } } - if (newlyArchived.size() > 0) { + if (!newlyArchived.isEmpty()) { + String rootDir = CommonFSUtils.getRootDir(conf).toString(); + activeFiles.removeAll(newlyArchived); - archiveFiles.addAll(newlyArchived); + for (String file : newlyArchived) { + String archivedFile = file.substring(rootDir.length() + 1); + Path archivedFilePath = new Path(HFileArchiveUtil.getArchivePath(conf), archivedFile); + archivedFile = archivedFilePath.toString(); + + if (!fs.exists(archivedFilePath)) { + throw new IOException(String.format( + "File %s no longer exists, and no archived file %s exists for it", file, archivedFile)); + } + + LOG.debug("Archived file {} has been updated", archivedFile); + archiveFiles.add(archivedFile); + } } LOG.debug(newlyArchived.size() + " files have been archived."); @@ -385,12 +403,17 @@ public void execute() throws IOException, ColumnFamilyMismatchException { failBackup(conn, backupInfo, backupManager, e, "Unexpected Exception : ", BackupType.INCREMENTAL, conf); throw new IOException(e); + } finally { + if (backupInfo.isContinuousBackupEnabled()) { + deleteBulkLoadDirectory(); + } } } protected void incrementalCopyHFiles(String[] files, String backupDest) throws IOException { + boolean diskBasedSortingOriginalValue = HFileOutputFormat2.diskBasedSortingEnabled(conf); try { - LOG.debug("Incremental copy HFiles is starting. dest=" + backupDest); + LOG.debug("Incremental copy HFiles is starting. 
dest={}", backupDest); // set overall backup phase: incremental_copy backupInfo.setPhase(BackupPhase.INCREMENTAL_COPY); // get incremental backup file list and prepare parms for DistCp @@ -403,6 +426,7 @@ protected void incrementalCopyHFiles(String[] files, String backupDest) throws I LOG.debug("Setting incremental copy HFiles job name to : " + jobname); } conf.set(JOB_NAME_CONF_KEY, jobname); + conf.setBoolean(HFileOutputFormat2.DISK_BASED_SORTING_ENABLED_KEY, true); BackupCopyJob copyService = BackupRestoreFactory.getBackupCopyJob(conf); int res = copyService.copy(backupInfo, backupManager, conf, BackupType.INCREMENTAL, strArr); @@ -415,6 +439,8 @@ protected void incrementalCopyHFiles(String[] files, String backupDest) throws I + " finished."); } finally { deleteBulkLoadDirectory(); + conf.setBoolean(HFileOutputFormat2.DISK_BASED_SORTING_ENABLED_KEY, + diskBasedSortingOriginalValue); } } @@ -505,6 +531,8 @@ protected void walToHFiles(List dirPaths, List tableList, long p conf.set(WALPlayer.INPUT_FILES_SEPARATOR_KEY, ";"); conf.setBoolean(WALPlayer.MULTI_TABLES_SUPPORT, true); conf.set(JOB_NAME_CONF_KEY, jobname); + boolean diskBasedSortingEnabledOriginalValue = HFileOutputFormat2.diskBasedSortingEnabled(conf); + conf.setBoolean(HFileOutputFormat2.DISK_BASED_SORTING_ENABLED_KEY, true); if (backupInfo.isContinuousBackupEnabled()) { conf.set(WALInputFormat.START_TIME_KEY, Long.toString(previousBackupTs)); conf.set(WALInputFormat.END_TIME_KEY, Long.toString(backupInfo.getIncrCommittedWalTs())); @@ -517,13 +545,16 @@ protected void walToHFiles(List dirPaths, List tableList, long p if (result != 0) { throw new IOException("WAL Player failed"); } - conf.unset(WALPlayer.INPUT_FILES_SEPARATOR_KEY); - conf.unset(JOB_NAME_CONF_KEY); } catch (IOException e) { throw e; } catch (Exception ee) { throw new IOException("Can not convert from directory " + dirs + " (check Hadoop, HBase and WALPlayer M/R job logs) ", ee); + } finally { + conf.setBoolean(HFileOutputFormat2.DISK_BASED_SORTING_ENABLED_KEY, + diskBasedSortingEnabledOriginalValue); + conf.unset(WALPlayer.INPUT_FILES_SEPARATOR_KEY); + conf.unset(JOB_NAME_CONF_KEY); } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java index 7d9430914cb3..85df58e0946e 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java @@ -23,6 +23,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; @@ -34,11 +35,14 @@ import org.apache.hadoop.hbase.mapreduce.CellSortReducer; import org.apache.hadoop.hbase.mapreduce.HFileInputFormat; import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2; +import org.apache.hadoop.hbase.mapreduce.KeyOnlyCellComparable; +import org.apache.hadoop.hbase.mapreduce.PreSortedCellsReducer; import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil; import org.apache.hadoop.hbase.snapshot.SnapshotRegionLocator; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.MapReduceExtendedCell; import org.apache.hadoop.io.NullWritable; +import 
org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; @@ -72,18 +76,28 @@ protected MapReduceHFileSplitterJob(final Configuration c) { /** * A mapper that just writes out cells. This one can be used together with {@link CellSortReducer} */ - static class HFileCellMapper extends Mapper { + static class HFileCellMapper extends Mapper, Cell> { + + private boolean diskBasedSortingEnabled = false; @Override public void map(NullWritable key, Cell value, Context context) throws IOException, InterruptedException { - context.write(new ImmutableBytesWritable(CellUtil.cloneRow(value)), - new MapReduceExtendedCell(PrivateCellUtil.ensureExtendedCell(value))); + ExtendedCell extendedCell = PrivateCellUtil.ensureExtendedCell(value); + context.write(wrap(extendedCell), new MapReduceExtendedCell(extendedCell)); } @Override public void setup(Context context) throws IOException { - // do nothing + diskBasedSortingEnabled = + HFileOutputFormat2.diskBasedSortingEnabled(context.getConfiguration()); + } + + private WritableComparable wrap(ExtendedCell cell) { + if (diskBasedSortingEnabled) { + return new KeyOnlyCellComparable(cell); + } + return new ImmutableBytesWritable(CellUtil.cloneRow(cell)); } } @@ -107,13 +121,23 @@ public Job createSubmittableJob(String[] args) throws IOException { true); job.setJarByClass(MapReduceHFileSplitterJob.class); job.setInputFormatClass(HFileInputFormat.class); - job.setMapOutputKeyClass(ImmutableBytesWritable.class); String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY); + boolean diskBasedSortingEnabled = HFileOutputFormat2.diskBasedSortingEnabled(conf); + if (diskBasedSortingEnabled) { + job.setMapOutputKeyClass(KeyOnlyCellComparable.class); + job.setSortComparatorClass(KeyOnlyCellComparable.KeyOnlyCellComparator.class); + } else { + job.setMapOutputKeyClass(ImmutableBytesWritable.class); + } if (hfileOutPath != null) { LOG.debug("add incremental job :" + hfileOutPath + " from " + inputDirs); TableName tableName = TableName.valueOf(tabName); job.setMapperClass(HFileCellMapper.class); - job.setReducerClass(CellSortReducer.class); + if (diskBasedSortingEnabled) { + job.setReducerClass(PreSortedCellsReducer.class); + } else { + job.setReducerClass(CellSortReducer.class); + } Path outputDir = new Path(hfileOutPath); FileOutputFormat.setOutputPath(job, outputDir); job.setMapOutputValueClass(MapReduceExtendedCell.class); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java index 28bbfcf254ae..a1ec67345b5e 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java @@ -29,6 +29,7 @@ import java.net.URLDecoder; import java.text.ParseException; import java.text.SimpleDateFormat; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; @@ -62,6 +63,7 @@ import org.apache.hadoop.hbase.backup.impl.BackupManifest; import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.RegionInfo; 
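For context on the MapReduceHFileSplitterJob and IncrementalTableBackupClient changes above: they all follow the same pattern around the disk-based sorting flag, i.e. remember the caller's setting, force the flag on for the duration of the MapReduce job, and restore it afterwards. The sketch below is only an illustration of that pattern; the helper class and method names are hypothetical, while DISK_BASED_SORTING_ENABLED_KEY and diskBasedSortingEnabled() are the HFileOutputFormat2 members this patch references.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;

public final class DiskSortingFlagExample {
  // Save the caller's setting, force disk-based sorting for the job, then restore the
  // original value in a finally block, mirroring incrementalCopyHFiles()/walToHFiles().
  public static void runWithDiskBasedSorting(Configuration conf, Runnable job) {
    boolean original = HFileOutputFormat2.diskBasedSortingEnabled(conf);
    conf.setBoolean(HFileOutputFormat2.DISK_BASED_SORTING_ENABLED_KEY, true);
    try {
      job.run(); // e.g. submit the WALPlayer or HFile copy job here
    } finally {
      // Restore the caller's value so later phases that reuse this Configuration are unaffected.
      conf.setBoolean(HFileOutputFormat2.DISK_BASED_SORTING_ENABLED_KEY, original);
    }
  }
}

Restoring the original value matters because the same Configuration instance is presumably reused by subsequent backup phases, as the finally blocks in the patch suggest.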
@@ -84,6 +86,7 @@ import org.apache.hbase.thirdparty.com.google.common.base.Splitter; import org.apache.hbase.thirdparty.com.google.common.base.Strings; +import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; import org.apache.hbase.thirdparty.com.google.common.collect.Iterators; @@ -790,6 +793,55 @@ public static String findMostRecentBackupId(String[] backupIds) { return BackupRestoreConstants.BACKUPID_PREFIX + recentTimestamp; } + /** + * Roll the WAL writer on all region servers and record the newest log roll result for each server. + */ + public static void logRoll(Connection conn, String backupRootDir, Configuration conf) + throws IOException { + boolean legacy = conf.getBoolean("hbase.backup.logroll.legacy.used", false); + if (legacy) { + logRollV1(conn, backupRootDir); + } else { + logRollV2(conn, backupRootDir); + } + } + + private static void logRollV1(Connection conn, String backupRootDir) throws IOException { + try (Admin admin = conn.getAdmin()) { + admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, + LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, + ImmutableMap.of("backupRoot", backupRootDir)); + } + } + + private static void logRollV2(Connection conn, String backupRootDir) throws IOException { + BackupSystemTable backupSystemTable = new BackupSystemTable(conn); + HashMap<String, Long> lastLogRollResult = + backupSystemTable.readRegionServerLastLogRollResult(backupRootDir); + try (Admin admin = conn.getAdmin()) { + Map<ServerName, Long> newLogRollResult = admin.rollAllWALWriters(); + + for (Map.Entry<ServerName, Long> entry : newLogRollResult.entrySet()) { + ServerName serverName = entry.getKey(); + long newHighestWALFilenum = entry.getValue(); + + String address = serverName.getAddress().toString(); + Long lastHighestWALFilenum = lastLogRollResult.get(address); + if (lastHighestWALFilenum != null && lastHighestWALFilenum > newHighestWALFilenum) { + LOG.warn("Won't update last roll log result for server {}: current = {}, new = {}", + serverName, lastHighestWALFilenum, newHighestWALFilenum); + } else { + backupSystemTable.writeRegionServerLastLogRollResult(address, newHighestWALFilenum, + backupRootDir); + if (LOG.isDebugEnabled()) { + LOG.debug("updated last roll log result for {} from {} to {}", serverName, + lastHighestWALFilenum, newHighestWALFilenum); + } + } + } + } + } + + /** + * Calculates the replication checkpoint timestamp used for continuous backup. *

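For reference, here is a sketch of how a caller drives the new BackupUtils.logRoll(...) helper introduced above; the class name and paths are placeholders. By default the helper takes the logRollV2 path built on Admin#rollAllWALWriters(), and setting hbase.backup.logroll.legacy.used to true falls back to the procedure-based roll (logRollV1).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class LogRollExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Defaults to false, i.e. the new Admin#rollAllWALWriters() path (logRollV2).
    // Set to true to fall back to the legacy LogRollMasterProcedureManager procedure (logRollV1).
    conf.setBoolean("hbase.backup.logroll.legacy.used", false);
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // The backup root below is a placeholder path.
      BackupUtils.logRoll(conn, "hdfs://nn:8020/user/hbase/backup_root", conf);
    }
  }
}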
@@ -996,6 +1048,7 @@ public static List getValidWalDirs(Configuration conf, Path walBackupDir List validDirs = new ArrayList<>(); SimpleDateFormat dateFormat = new SimpleDateFormat(DATE_FORMAT); + dateFormat.setTimeZone(TimeZone.getTimeZone(ZoneOffset.UTC)); for (FileStatus dayDir : dayDirs) { if (!dayDir.isDirectory()) { diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java index e32e1b8f920a..aeabc8698cd8 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java @@ -54,7 +54,6 @@ import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient; import org.apache.hadoop.hbase.backup.impl.IncrementalBackupManager; import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupClient; -import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; import org.apache.hadoop.hbase.backup.util.BackupUtils; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; @@ -248,10 +247,7 @@ public void execute() throws IOException { // the snapshot. LOG.info("Execute roll log procedure for full backup ..."); - Map props = new HashMap<>(); - props.put("backupRoot", backupInfo.getBackupRootDir()); - admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, - LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props); + BackupUtils.logRoll(conn, backupInfo.getBackupRootDir(), conf); failStageIf(Stage.stage_2); newTimestamps = backupManager.readRegionServerLastLogRollResult(); @@ -331,6 +327,8 @@ public static void setUpHelper() throws Exception { // Set MultiWAL (with 2 default WAL files per RS) conf1.set(WALFactory.WAL_PROVIDER, provider); TEST_UTIL.startMiniCluster(); + conf1 = TEST_UTIL.getConfiguration(); + TEST_UTIL.startMiniMapReduceCluster(); if (useSecondCluster) { conf2 = HBaseConfiguration.create(conf1); @@ -343,9 +341,7 @@ public static void setUpHelper() throws Exception { CommonFSUtils.setWALRootDir(TEST_UTIL2.getConfiguration(), p); TEST_UTIL2.startMiniCluster(); } - conf1 = TEST_UTIL.getConfiguration(); - TEST_UTIL.startMiniMapReduceCluster(); BACKUP_ROOT_DIR = new Path(new Path(TEST_UTIL.getConfiguration().get("fs.defaultFS")), BACKUP_ROOT_DIR) .toString(); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java index cfceada51a02..ef72b994c773 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java @@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; import org.apache.hadoop.hbase.testclassification.MasterTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -46,7 +46,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets; -@Category({ MasterTests.class, SmallTests.class }) +@Category({ MasterTests.class, MediumTests.class }) public class TestBackupHFileCleaner { @ClassRule @@ -108,11 +108,11 @@ protected Set fetchFullyBackedUpTables(BackupSystemTable tbl) { 
Iterable deletable; // The first call will not allow any deletions because of the timestamp mechanism. - deletable = cleaner.getDeletableFiles(List.of(file1, file1Archived, file2, file3)); + deletable = callCleaner(cleaner, List.of(file1, file1Archived, file2, file3)); assertEquals(Set.of(), Sets.newHashSet(deletable)); // No bulk loads registered, so all files can be deleted. - deletable = cleaner.getDeletableFiles(List.of(file1, file1Archived, file2, file3)); + deletable = callCleaner(cleaner, List.of(file1, file1Archived, file2, file3)); assertEquals(Set.of(file1, file1Archived, file2, file3), Sets.newHashSet(deletable)); // Register some bulk loads. @@ -125,10 +125,17 @@ protected Set fetchFullyBackedUpTables(BackupSystemTable tbl) { } // File 1 can no longer be deleted, because it is registered as a bulk load. - deletable = cleaner.getDeletableFiles(List.of(file1, file1Archived, file2, file3)); + deletable = callCleaner(cleaner, List.of(file1, file1Archived, file2, file3)); assertEquals(Set.of(file2, file3), Sets.newHashSet(deletable)); } + private Iterable callCleaner(BackupHFileCleaner cleaner, Iterable files) { + cleaner.preClean(); + Iterable deletable = cleaner.getDeletableFiles(files); + cleaner.postClean(); + return deletable; + } + private FileStatus createFile(String fileName) throws IOException { Path file = new Path(root, fileName); fs.createNewFile(file); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java index 38204f68e31a..b91976325447 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java @@ -23,6 +23,7 @@ import java.io.File; import java.util.List; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; import org.apache.hadoop.hbase.backup.util.BackupUtils; @@ -70,17 +71,17 @@ public void TestIncBackupMergeRestore() throws Exception { // #2 - insert some data to table1 Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS); - LOG.debug("writing " + ADD_ROWS + " rows to " + table1); + LOG.debug("writing {} rows to {}", ADD_ROWS, table1); - Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS); + Assert.assertEquals(HBaseTestingUtil.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS); t1.close(); - LOG.debug("written " + ADD_ROWS + " rows to " + table1); + LOG.debug("written {} rows to {}", ADD_ROWS, table1); Table t2 = insertIntoTable(conn, table2, famName, 1, ADD_ROWS); - Assert.assertEquals(TEST_UTIL.countRows(t2), NB_ROWS_IN_BATCH + ADD_ROWS); + Assert.assertEquals(HBaseTestingUtil.countRows(t2), NB_ROWS_IN_BATCH + ADD_ROWS); t2.close(); - LOG.debug("written " + ADD_ROWS + " rows to " + table2); + LOG.debug("written {} rows to {}", ADD_ROWS, table2); // #3 - incremental backup for multiple tables tables = Lists.newArrayList(table1, table2); @@ -112,15 +113,15 @@ public void TestIncBackupMergeRestore() throws Exception { tablesRestoreIncMultiple, tablesMapIncMultiple, true)); Table hTable = conn.getTable(table1_restore); - LOG.debug("After incremental restore: " + hTable.getDescriptor()); - int countRows = TEST_UTIL.countRows(hTable, famName); - LOG.debug("f1 has " + countRows + " rows"); + LOG.debug("After incremental restore: {}", hTable.getDescriptor()); + int 
countRows = HBaseTestingUtil.countRows(hTable, famName); + LOG.debug("f1 has {} rows", countRows); Assert.assertEquals(NB_ROWS_IN_BATCH + 2 * ADD_ROWS, countRows); hTable.close(); hTable = conn.getTable(table2_restore); - Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH + 2 * ADD_ROWS); + Assert.assertEquals(HBaseTestingUtil.countRows(hTable), NB_ROWS_IN_BATCH + 2 * ADD_ROWS); hTable.close(); admin.close(); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSmallTests.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSmallTests.java index 83cc19578ade..5add9412014f 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSmallTests.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSmallTests.java @@ -22,14 +22,14 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.security.UserGroupInformation; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category(SmallTests.class) +@Category(MediumTests.class) public class TestBackupSmallTests extends TestBackupBase { @ClassRule diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java index 3fc2c31a9d51..bd5cb8358d3d 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java @@ -17,8 +17,17 @@ */ package org.apache.hadoop.hbase.backup; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.security.PrivilegedAction; +import java.time.Instant; +import java.time.ZoneId; +import java.util.ArrayList; +import java.util.List; +import java.util.TimeZone; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -34,6 +43,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.security.UserGroupInformation; import org.junit.Assert; +import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -50,6 +60,15 @@ public class TestBackupUtils { protected static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); protected static Configuration conf = TEST_UTIL.getConfiguration(); + private static FileSystem dummyFs; + private static Path backupRootDir; + + @BeforeClass + public static void setUp() throws IOException { + dummyFs = TEST_UTIL.getTestFileSystem(); + backupRootDir = TEST_UTIL.getDataTestDirOnTestFS("backupUT"); + } + @Test public void testGetBulkOutputDir() { // Create a user who is not the current user @@ -82,7 +101,7 @@ public Path run() { } }); // Make sure the directory is in foo1234's home directory - Assert.assertTrue(bulkOutputDir.toString().startsWith(fooHomeDirectory.toString())); + assertTrue(bulkOutputDir.toString().startsWith(fooHomeDirectory.toString())); } @Test @@ -99,7 +118,7 @@ public void testFilesystemWalHostNameParsing() throws IOException { Path testOldWalPath 
= new Path(oldLogDir, serverName + BackupUtils.LOGNAME_SEPARATOR + EnvironmentEdgeManager.currentTime()); - Assert.assertEquals(host + Addressing.HOSTNAME_PORT_SEPARATOR + port, + assertEquals(host + Addressing.HOSTNAME_PORT_SEPARATOR + port, BackupUtils.parseHostFromOldLog(testOldWalPath)); Path testMasterWalPath = @@ -110,9 +129,99 @@ public void testFilesystemWalHostNameParsing() throws IOException { Path testOldWalWithRegionGroupingPath = new Path(oldLogDir, serverName + BackupUtils.LOGNAME_SEPARATOR + serverName + BackupUtils.LOGNAME_SEPARATOR + "regiongroup-0" + BackupUtils.LOGNAME_SEPARATOR + EnvironmentEdgeManager.currentTime()); - Assert.assertEquals(host + Addressing.HOSTNAME_PORT_SEPARATOR + port, + assertEquals(host + Addressing.HOSTNAME_PORT_SEPARATOR + port, BackupUtils.parseHostFromOldLog(testOldWalWithRegionGroupingPath)); } + } + + // Ensure getValidWalDirs() uses UTC timestamps regardless of what time zone the test is run in. + @Test + public void testGetValidWalDirForAllTimeZonesSingleDay() throws IOException { + // This UTC test time is a time when it is still "yesterday" in other time zones (such as PST) + List walDateDirs = List.of("2026-01-23"); + Path walDir = new Path(backupRootDir, "WALs"); + + // 10-minute window in UTC between start and end time + long startTime = Instant.parse("2026-01-23T01:00:00Z").toEpochMilli(); + long endTime = startTime + (10 * 60 * 1000); + + testGetValidWalDirs(startTime, endTime, walDir, walDateDirs, 1, walDateDirs); + } + + // Ensure getValidWalDirs() works as expected for time ranges across multiple days for all time + // zones + @Test + public void testGetValidWalDirsForAllTimeZonesMultiDay() throws IOException { + List walDateDirs = List.of("2025-12-30", "2025-12-31", "2026-01-01", "2026-01-02"); + List expectedValidWalDirs = List.of("2025-12-31", "2026-01-01"); + Path walDir = new Path(backupRootDir, "WALs"); + + // 10-minute window in UTC between start and end time that spans over two days + long startTime = Instant.parse("2025-12-31T23:55:00Z").toEpochMilli(); + long endTime = Instant.parse("2026-01-01T00:05:00Z").toEpochMilli(); + + testGetValidWalDirs(startTime, endTime, walDir, walDateDirs, 2, expectedValidWalDirs); + } + + @Test + public void testGetValidWalDirExactlyMidnightUTC() throws IOException { + List walDateDirs = List.of("2026-01-23"); + Path walDir = new Path(backupRootDir, "WALs"); + // This instant is UTC + long startAndEndTime = Instant.parse("2026-01-23T00:00:00.000Z").toEpochMilli(); + testGetValidWalDirs(startAndEndTime, startAndEndTime, walDir, walDateDirs, 1, walDateDirs); + } + + @Test + public void testGetValidWalDirOneMsBeforeMidnightUTC() throws IOException { + List walDateDirs = List.of("2026-01-23"); + Path walDir = new Path(backupRootDir, "WALs"); + // This instant is UTC + long startAndEndTime = Instant.parse("2026-01-23T23:59:59.999Z").toEpochMilli(); + + testGetValidWalDirs(startAndEndTime, startAndEndTime, walDir, walDateDirs, 1, walDateDirs); + } + + protected void testGetValidWalDirs(long startTime, long endTime, Path walDir, + List availableWalDateDirs, int numExpectedValidWalDirs, + List expectedValidWalDirs) throws IOException { + TimeZone defaultTimeZone = TimeZone.getDefault(); + try { + // This UTC test time is a time when it is still "yesterday" in other time zones (such as PST) + for (String dirName : availableWalDateDirs) { + dummyFs.mkdirs(new Path(walDir, dirName)); + } + + // Ensure we can get valid WAL dirs regardless of the test environment's time zone + for (String timeZone : 
ZoneId.getAvailableZoneIds()) { + // Force test environment to use specified time zone + TimeZone.setDefault(TimeZone.getTimeZone(timeZone)); + + List validWalDirs = BackupUtils.getValidWalDirs(TEST_UTIL.getConfiguration(), + backupRootDir, startTime, endTime); + + // Verify the correct number of valid WAL dirs was found + assertEquals("The number of valid WAL dirs should be " + numExpectedValidWalDirs + + " for time zone " + timeZone, numExpectedValidWalDirs, validWalDirs.size()); + + // Verify the list of valid WAL dirs is as expected + for (String dirName : expectedValidWalDirs) { + assertTrue("Expected " + dirName + " to be a valid WAL dir", + validWalDirs.stream().anyMatch(path -> path.endsWith("/" + dirName))); + } + + // Verify the list of valid WAL dirs does not contain anything expected to be invalid + List expectedInvalidWalDirs = new ArrayList<>(availableWalDateDirs); + expectedInvalidWalDirs.removeAll(expectedValidWalDirs); + for (String dirName : expectedInvalidWalDirs) { + assertFalse("Expected " + dirName + " to NOT be a valid WAL dir", + validWalDirs.contains(dirName)); + } + } + } finally { + TimeZone.setDefault(defaultTimeZone); + dummyFs.delete(walDir, true); + } } } diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java index 412fd5e32f7e..73c26dce7353 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java @@ -20,9 +20,11 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.ArrayList; import java.util.List; import java.util.Map; import org.apache.hadoop.fs.FileSystem; @@ -38,6 +40,8 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.tool.BulkLoadHFiles; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.apache.hadoop.hbase.util.HFileTestUtil; import org.junit.ClassRule; import org.junit.Test; @@ -147,6 +151,98 @@ private boolean containsRowWithKey(Table table, String rowKey) throws IOExceptio return result.containsColumn(famName, qualName); } + @Test + public void testUpdateFileListsRaceCondition() throws Exception { + try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) { + // Test the race condition where files are archived during incremental backup + FileSystem fs = TEST_UTIL.getTestFileSystem(); + + String regionName = "region1"; + String columnFamily = "cf"; + String filename1 = "hfile1"; + String filename2 = "hfile2"; + + Path rootDir = CommonFSUtils.getRootDir(TEST_UTIL.getConfiguration()); + Path tableDir = CommonFSUtils.getTableDir(rootDir, table1); + Path activeFile1 = + new Path(tableDir, regionName + Path.SEPARATOR + columnFamily + Path.SEPARATOR + filename1); + Path activeFile2 = + new Path(tableDir, regionName + Path.SEPARATOR + columnFamily + Path.SEPARATOR + filename2); + + fs.mkdirs(activeFile1.getParent()); + fs.create(activeFile1).close(); + fs.create(activeFile2).close(); + + List activeFiles = new ArrayList<>(); + activeFiles.add(activeFile1.toString()); + 
activeFiles.add(activeFile2.toString()); + List archiveFiles = new ArrayList<>(); + + Path archiveDir = HFileArchiveUtil.getStoreArchivePath(TEST_UTIL.getConfiguration(), table1, + regionName, columnFamily); + Path archivedFile1 = new Path(archiveDir, filename1); + fs.mkdirs(archiveDir); + assertTrue("File should be moved to archive", fs.rename(activeFile1, archivedFile1)); + + TestBackupBase.IncrementalTableBackupClientForTest client = + new TestBackupBase.IncrementalTableBackupClientForTest(TEST_UTIL.getConnection(), + "test_backup_id", + createBackupRequest(BackupType.INCREMENTAL, List.of(table1), BACKUP_ROOT_DIR)); + + client.updateFileLists(activeFiles, archiveFiles); + + assertEquals("Only one file should remain in active files", 1, activeFiles.size()); + assertEquals("File2 should still be in active files", activeFile2.toString(), + activeFiles.get(0)); + assertEquals("One file should be added to archive files", 1, archiveFiles.size()); + assertEquals("Archived file should have correct path", archivedFile1.toString(), + archiveFiles.get(0)); + systemTable.finishBackupExclusiveOperation(); + } + + } + + @Test + public void testUpdateFileListsMissingArchivedFile() throws Exception { + try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) { + // Test that IOException is thrown when file doesn't exist in archive location + FileSystem fs = TEST_UTIL.getTestFileSystem(); + + String regionName = "region2"; + String columnFamily = "cf"; + String filename = "missing_file"; + + Path rootDir = CommonFSUtils.getRootDir(TEST_UTIL.getConfiguration()); + Path tableDir = CommonFSUtils.getTableDir(rootDir, table1); + Path activeFile = + new Path(tableDir, regionName + Path.SEPARATOR + columnFamily + Path.SEPARATOR + filename); + + fs.mkdirs(activeFile.getParent()); + fs.create(activeFile).close(); + + List activeFiles = new ArrayList<>(); + activeFiles.add(activeFile.toString()); + List archiveFiles = new ArrayList<>(); + + // Delete the file but don't create it in archive location + fs.delete(activeFile, false); + + TestBackupBase.IncrementalTableBackupClientForTest client = + new TestBackupBase.IncrementalTableBackupClientForTest(TEST_UTIL.getConnection(), + "test_backup_id", + createBackupRequest(BackupType.INCREMENTAL, List.of(table1), BACKUP_ROOT_DIR)); + + // This should throw IOException since file doesn't exist in archive + try { + client.updateFileLists(activeFiles, archiveFiles); + fail("Expected IOException to be thrown"); + } catch (IOException e) { + // Expected + } + systemTable.finishBackupExclusiveOperation(); + } + } + private void performBulkLoad(String keyPrefix) throws IOException { FileSystem fs = TEST_UTIL.getTestFileSystem(); Path baseDirectory = TEST_UTIL.getDataTestDirOnTestFS(TEST_NAME); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithContinuous.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithContinuous.java index 72867da95f17..c911f5dbce07 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithContinuous.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithContinuous.java @@ -70,19 +70,21 @@ public class TestIncrementalBackupWithContinuous extends TestBackupBase { private static final int ROWS_IN_BULK_LOAD = 100; private static final String backupWalDirName = "TestContinuousBackupWalDir"; + private FileSystem fs; + @Before public void beforeTest() throws IOException { 
Path root = TEST_UTIL.getDataTestDirOnTestFS(); Path backupWalDir = new Path(root, backupWalDirName); conf1.set(CONF_CONTINUOUS_BACKUP_WAL_DIR, backupWalDir.toString()); conf1.setBoolean(REPLICATION_MARKER_ENABLED_KEY, true); + fs = FileSystem.get(conf1); } @After public void afterTest() throws IOException { Path root = TEST_UTIL.getDataTestDirOnTestFS(); Path backupWalDir = new Path(root, backupWalDirName); - FileSystem fs = FileSystem.get(conf1); if (fs.exists(backupWalDir)) { fs.delete(backupWalDir, true); } @@ -126,6 +128,12 @@ public void testContinuousBackupWithIncrementalBackupSuccess() throws Exception assertTrue(checkSucceeded(backup2)); LOG.info("Incremental backup completed"); + // Verify the temporary backup directory was deleted + Path backupTmpDir = new Path(BACKUP_ROOT_DIR, ".tmp"); + Path bulkLoadOutputDir = new Path(backupTmpDir, backup2); + assertFalse("Bulk load output directory " + bulkLoadOutputDir + " should have been deleted", + fs.exists(bulkLoadOutputDir)); + // Verify backup history increased and all the backups are succeeded backups = table.getBackupHistory(); assertEquals("Backup history should increase", before + 1, backups.size()); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestRestoreBackupSystemTable.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestRestoreBackupSystemTable.java new file mode 100644 index 000000000000..31ded67b4774 --- /dev/null +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestRestoreBackupSystemTable.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.backup.master; + +import static org.junit.Assert.assertEquals; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ MasterTests.class, MediumTests.class }) +public class TestRestoreBackupSystemTable { + private static final String BACKUP_ROOT = "root"; + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + + @BeforeClass + public static void setUp() throws Exception { + UTIL.startMiniCluster(); + } + + @Test + public void itRestoresFromSnapshot() throws Exception { + BackupSystemTable table = new BackupSystemTable(UTIL.getConnection()); + Set tables = new HashSet<>(); + + tables.add(TableName.valueOf("test1")); + tables.add(TableName.valueOf("test2")); + tables.add(TableName.valueOf("test3")); + + Map rsTimestampMap = new HashMap<>(); + rsTimestampMap.put("rs1:100", 100L); + rsTimestampMap.put("rs2:100", 101L); + rsTimestampMap.put("rs3:100", 103L); + + table.writeRegionServerLogTimestamp(tables, rsTimestampMap, BACKUP_ROOT); + BackupSystemTable.snapshot(UTIL.getConnection()); + + Admin admin = UTIL.getAdmin(); + TableName backupSystemTn = BackupSystemTable.getTableName(UTIL.getConfiguration()); + admin.disableTable(backupSystemTn); + admin.truncateTable(backupSystemTn, true); + + BackupSystemTable.restoreFromSnapshot(UTIL.getConnection()); + Map> results = table.readLogTimestampMap(BACKUP_ROOT); + + assertEquals(results.size(), tables.size()); + + for (TableName tableName : tables) { + Map resultMap = results.get(tableName); + assertEquals(resultMap, rsTimestampMap); + } + } + + @AfterClass + public static void tearDown() throws Exception { + UTIL.shutdownMiniCluster(); + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 75dd2ef07b38..1c08ec3b26fd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -1404,6 +1404,16 @@ Future abortProcedureAsync(long procId, boolean mayInterruptIfRunning) */ void rollWALWriter(ServerName serverName) throws IOException, FailedLogCloseException; + /** + * Roll log writer for all RegionServers. Note that unlike + * {@link Admin#rollWALWriter(ServerName)}, this method is synchronous, which means it will block + * until all RegionServers have completed the log roll, or a RegionServer fails due to an + * exception that retry will not work. + * @return server and the highest wal filenum of server before performing log roll + * @throws IOException if a remote or network exception occurs + */ + Map rollAllWALWriters() throws IOException; + /** * Helper that delegates to getClusterMetrics().getMasterCoprocessorNames(). 
* @return an array of master coprocessors @@ -2651,4 +2661,7 @@ List getLogEntries(Set serverNames, String logType, Server * Get the list of cached files */ List getCachedFilesList(ServerName serverName) throws IOException; + + @InterfaceAudience.Private + void restoreBackupSystemTable(String snapshotName) throws IOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java index c13dfc33e3d2..e6bf6c3d28e0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java @@ -635,6 +635,11 @@ public void rollWALWriter(ServerName serverName) throws IOException, FailedLogCl get(admin.rollWALWriter(serverName)); } + @Override + public Map rollAllWALWriters() throws IOException { + return get(admin.rollAllWALWriters()); + } + @Override public CompactionState getCompactionState(TableName tableName) throws IOException { return get(admin.getCompactionState(tableName)); @@ -1136,4 +1141,9 @@ public void flushMasterStore() throws IOException { public List getCachedFilesList(ServerName serverName) throws IOException { return get(admin.getCachedFilesList(serverName)); } + + @Override + public void restoreBackupSystemTable(String snapshotName) throws IOException { + get(admin.restoreBackupSystemTable(snapshotName)); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index 331aa4a254af..ec0556f20ac1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -1270,6 +1270,15 @@ default CompletableFuture getMasterInfoPort() { */ CompletableFuture rollWALWriter(ServerName serverName); + /** + * Roll log writer for all RegionServers. Note that unlike + * {@link Admin#rollWALWriter(ServerName)}, this method is synchronous, which means it will block + * until all RegionServers have completed the log roll, or a RegionServer fails due to an + * exception that retry will not work. + * @return server and the highest wal filenum of server before performing log roll + */ + CompletableFuture> rollAllWALWriters(); + /** * Clear compacting queues on a region server. * @param serverName The servername of the region server. 
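The rollAllWALWriters() API introduced above is easiest to understand from the caller's side. The following is an illustrative sketch only; the class name, main method, and configuration setup are assumptions and not part of this patch:

import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollAllWalWritersExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
      Admin admin = conn.getAdmin()) {
      // Blocks until every RegionServer has rolled its WAL, or fails if a server hits an
      // exception that retrying cannot fix. The returned map records, per server, the highest
      // WAL filenum before the roll, so a caller such as a backup can treat older WALs as closed.
      Map<ServerName, Long> filenumsBeforeRoll = admin.rollAllWALWriters();
      filenumsBeforeRoll.forEach((server, filenum) ->
        System.out.println(server + " highest WAL filenum before roll: " + filenum));
    }
  }
}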
@@ -1862,4 +1871,7 @@ CompletableFuture> getLogEntries(Set serverNames, Str * Get the list of cached files */ CompletableFuture> getCachedFilesList(ServerName serverName); + + @InterfaceAudience.Private + CompletableFuture restoreBackupSystemTable(String snapshotName); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index 69f353600036..b1fb2be13547 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -691,6 +691,11 @@ public CompletableFuture rollWALWriter(ServerName serverName) { return wrap(rawAdmin.rollWALWriter(serverName)); } + @Override + public CompletableFuture> rollAllWALWriters() { + return wrap(rawAdmin.rollAllWALWriters()); + } + @Override public CompletableFuture clearCompactionQueues(ServerName serverName, Set queues) { return wrap(rawAdmin.clearCompactionQueues(serverName, queues)); @@ -1005,4 +1010,9 @@ public CompletableFuture flushMasterStore() { public CompletableFuture> getCachedFilesList(ServerName serverName) { return wrap(rawAdmin.getCachedFilesList(serverName)); } + + @Override + public CompletableFuture restoreBackupSystemTable(String snapshotName) { + return wrap(rawAdmin.restoreBackupSystemTable(snapshotName)); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 7cb0e4689510..710c8c430386 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -105,6 +105,7 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hbase.thirdparty.com.google.protobuf.Message; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; import org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel; @@ -149,6 +150,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.LastHighestWalFilenum; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; @@ -263,6 +265,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RollAllWALWritersRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RollAllWALWritersResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest; import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest; @@ -497,28 +501,70 @@ public void run(PRESP resp) { return future; } + /** + * short-circuit call for + * {@link RawAsyncHBaseAdmin#procedureCall(Object, MasterRpcCall, Converter, Converter, ProcedureBiConsumer)} + * by ignoring procedure result + */ private CompletableFuture procedureCall(PREQ preq, MasterRpcCall rpcCall, Converter respConverter, - ProcedureBiConsumer consumer) { + ProcedureBiConsumer consumer) { + return procedureCall(preq, rpcCall, respConverter, result -> null, consumer); + } + + /** + * short-circuit call for procedureCall(Consumer, Object, MasterRpcCall, Converter, Converter, + * ProcedureBiConsumer) by skip setting priority for request + */ + private CompletableFuture procedureCall(PREQ preq, + MasterRpcCall rpcCall, Converter respConverter, + Converter resultConverter, ProcedureBiConsumer consumer) { return procedureCall(b -> { - }, preq, rpcCall, respConverter, consumer); + }, preq, rpcCall, respConverter, resultConverter, consumer); } + /** + * short-circuit call for procedureCall(TableName, Object, MasterRpcCall, Converter, Converter, + * ProcedureBiConsumer) by ignoring procedure result + */ private CompletableFuture procedureCall(TableName tableName, PREQ preq, MasterRpcCall rpcCall, Converter respConverter, - ProcedureBiConsumer consumer) { - return procedureCall(b -> b.priority(tableName), preq, rpcCall, respConverter, consumer); + ProcedureBiConsumer consumer) { + return procedureCall(tableName, preq, rpcCall, respConverter, result -> null, consumer); } - private CompletableFuture procedureCall( + /** + * short-circuit call for procedureCall(Consumer, Object, MasterRpcCall, Converter, Converter, + * ProcedureBiConsumer) by skip setting priority for request + */ + private CompletableFuture procedureCall(TableName tableName, PREQ preq, + MasterRpcCall rpcCall, Converter respConverter, + Converter resultConverter, ProcedureBiConsumer consumer) { + return procedureCall(b -> b.priority(tableName), preq, rpcCall, respConverter, resultConverter, + consumer); + } + + /** + * @param type of request + * @param type of response + * @param type of procedure call result + * @param prioritySetter prioritySetter set priority by table for request + * @param preq procedure call request + * @param rpcCall procedure rpc call + * @param respConverter extract proc id from procedure call response + * @param resultConverter extract result from procedure call result + * @param consumer action performs on result + * @return procedure call result, null if procedure is void + */ + private CompletableFuture procedureCall( Consumer> prioritySetter, PREQ preq, MasterRpcCall rpcCall, Converter respConverter, - ProcedureBiConsumer consumer) { - MasterRequestCallerBuilder builder = this. newMasterCaller().action((controller, - stub) -> this. call(controller, stub, preq, rpcCall, respConverter)); + Converter resultConverter, ProcedureBiConsumer consumer) { + MasterRequestCallerBuilder builder = this. 
newMasterCaller() + .action((controller, stub) -> this.call(controller, stub, preq, rpcCall, respConverter)); prioritySetter.accept(builder); CompletableFuture procFuture = builder.call(); - CompletableFuture future = waitProcedureResult(procFuture); + CompletableFuture future = waitProcedureResult(procFuture, resultConverter); addListener(future, consumer); return future; } @@ -1935,7 +1981,7 @@ public CompletableFuture appendReplicationPeerTableCFs(String id, return failedFuture(new ReplicationException("tableCfs is null")); } - CompletableFuture future = new CompletableFuture(); + CompletableFuture future = new CompletableFuture<>(); addListener(getReplicationPeerConfig(id), (peerConfig, error) -> { if (!completeExceptionally(future, error)) { ReplicationPeerConfig newPeerConfig = @@ -1957,7 +2003,7 @@ public CompletableFuture removeReplicationPeerTableCFs(String id, return failedFuture(new ReplicationException("tableCfs is null")); } - CompletableFuture future = new CompletableFuture(); + CompletableFuture future = new CompletableFuture<>(); addListener(getReplicationPeerConfig(id), (peerConfig, error) -> { if (!completeExceptionally(future, error)) { ReplicationPeerConfig newPeerConfig = null; @@ -2056,7 +2102,7 @@ public CompletableFuture snapshot(SnapshotDescription snapshotDesc) { private void waitSnapshotFinish(SnapshotDescription snapshot, CompletableFuture future, SnapshotResponse resp) { if (resp.hasProcId()) { - getProcedureResult(resp.getProcId(), future, 0); + getProcedureResult(resp.getProcId(), src -> null, future, 0); addListener(future, new SnapshotProcedureBiConsumer(snapshot.getTableName())); } else { long expectedTimeout = resp.getExpectedTimeout(); @@ -2272,7 +2318,7 @@ private CompletableFuture internalRestoreSnapshot(String snapshotName, Tab .action((controller, stub) -> this. 
call(controller, stub, builder.build(), (s, c, req, done) -> s.restoreSnapshot(c, req, done), (resp) -> resp.getProcId())) - .call()); + .call(), result -> null); } @Override @@ -2684,14 +2730,14 @@ private void verifySplitKeys(byte[][] splitKeys) { } } - private static abstract class ProcedureBiConsumer implements BiConsumer { + private static abstract class ProcedureBiConsumer implements BiConsumer { abstract void onFinished(); abstract void onError(Throwable error); @Override - public void accept(Void v, Throwable error) { + public void accept(T value, Throwable error) { if (error != null) { onError(error); return; @@ -2700,7 +2746,7 @@ public void accept(Void v, Throwable error) { } } - private static abstract class TableProcedureBiConsumer extends ProcedureBiConsumer { + private static abstract class TableProcedureBiConsumer extends ProcedureBiConsumer { protected final TableName tableName; TableProcedureBiConsumer(TableName tableName) { @@ -2725,7 +2771,7 @@ void onError(Throwable error) { } } - private static abstract class NamespaceProcedureBiConsumer extends ProcedureBiConsumer { + private static abstract class NamespaceProcedureBiConsumer extends ProcedureBiConsumer { protected final String namespaceName; NamespaceProcedureBiConsumer(String namespaceName) { @@ -2740,12 +2786,25 @@ String getDescription() { @Override void onFinished() { - LOG.info(getDescription() + " completed"); + LOG.info("{} completed", getDescription()); } @Override void onError(Throwable error) { - LOG.info(getDescription() + " failed with " + error.getMessage()); + LOG.info("{} failed with {}", getDescription(), error.getMessage()); + } + } + + private static class RestoreBackupSystemTableProcedureBiConsumer extends ProcedureBiConsumer { + + @Override + void onFinished() { + LOG.info("RestoreBackupSystemTableProcedure completed"); + } + + @Override + void onError(Throwable error) { + LOG.info("RestoreBackupSystemTableProcedure failed with {}", error.getMessage()); } } @@ -2984,7 +3043,7 @@ String getOperationType() { } } - private static class ReplicationProcedureBiConsumer extends ProcedureBiConsumer { + private static class ReplicationProcedureBiConsumer extends ProcedureBiConsumer { private final String peerId; private final Supplier getOperation; @@ -2999,28 +3058,44 @@ String getDescription() { @Override void onFinished() { - LOG.info(getDescription() + " completed"); + LOG.info("{} completed", getDescription()); } @Override void onError(Throwable error) { - LOG.info(getDescription() + " failed with " + error.getMessage()); + LOG.info("{} failed with {}", getDescription(), error.getMessage()); } } - private CompletableFuture waitProcedureResult(CompletableFuture procFuture) { - CompletableFuture future = new CompletableFuture<>(); + private static final class RollAllWALWritersBiConsumer + extends ProcedureBiConsumer> { + + @Override + void onFinished() { + LOG.info("Rolling all WAL writers completed"); + } + + @Override + void onError(Throwable error) { + LOG.warn("Rolling all WAL writers failed with {}", error.getMessage()); + } + } + + private CompletableFuture waitProcedureResult(CompletableFuture procFuture, + Converter converter) { + CompletableFuture future = new CompletableFuture<>(); addListener(procFuture, (procId, error) -> { if (error != null) { future.completeExceptionally(error); return; } - getProcedureResult(procId, future, 0); + getProcedureResult(procId, converter, future, 0); }); return future; } - private void getProcedureResult(long procId, CompletableFuture future, int retries) { + 
private void getProcedureResult(long procId, Converter converter, + CompletableFuture future, int retries) { addListener( this. newMasterCaller() .action((controller, stub) -> this. call(controller, stub, if (error != null) { LOG.warn("failed to get the procedure result procId={}", procId, ConnectionUtils.translateException(error)); - retryTimer.newTimeout(t -> getProcedureResult(procId, future, retries + 1), + retryTimer.newTimeout(t -> getProcedureResult(procId, converter, future, retries + 1), ConnectionUtils.getPauseTime(pauseNs, retries), TimeUnit.NANOSECONDS); return; } if (response.getState() == GetProcedureResultResponse.State.RUNNING) { - retryTimer.newTimeout(t -> getProcedureResult(procId, future, retries + 1), + retryTimer.newTimeout(t -> getProcedureResult(procId, converter, future, retries + 1), ConnectionUtils.getPauseTime(pauseNs, retries), TimeUnit.NANOSECONDS); return; } @@ -3045,7 +3120,11 @@ GetProcedureResultResponse> call(controller, stub, IOException ioe = ForeignExceptionUtil.toIOException(response.getException()); future.completeExceptionally(ioe); } else { - future.complete(null); + try { + future.complete(converter.convert(response.getResult())); + } catch (IOException e) { + future.completeExceptionally(e); + } } }); } @@ -3188,6 +3267,20 @@ Void> adminCall(controller, stub, RequestConverter.buildRollWALWriterRequest(), .serverName(serverName).call(); } + @Override + public CompletableFuture> rollAllWALWriters() { + return this + .> procedureCall( + RequestConverter.buildRollAllWALWritersRequest(ng.getNonceGroup(), ng.newNonce()), + (s, c, req, done) -> s.rollAllWALWriters(c, req, done), resp -> resp.getProcId(), + result -> LastHighestWalFilenum.parseFrom(result.toByteArray()).getFileNumMap() + .entrySet().stream().collect(Collectors + .toUnmodifiableMap(e -> ServerName.valueOf(e.getKey()), Map.Entry::getValue)), + new RollAllWALWritersBiConsumer()); + } + @Override public CompletableFuture clearCompactionQueues(ServerName serverName, Set queues) { return this. newAdminCaller() @@ -4557,4 +4650,16 @@ List> adminCall(controller, stub, request.build(), resp -> resp.getCachedFilesList())) .serverName(serverName).call(); } + + @Override + public CompletableFuture restoreBackupSystemTable(String snapshotName) { + MasterProtos.RestoreBackupSystemTableRequest request = + MasterProtos.RestoreBackupSystemTableRequest.newBuilder().setSnapshotName(snapshotName) + .build(); + return this. 
procedureCall(request, + MasterService.Interface::restoreBackupSystemTable, + MasterProtos.RestoreBackupSystemTableResponse::getProcId, + new RestoreBackupSystemTableProcedureBiConsumer()); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java index 1afb15c0ac61..4bdf5e5af049 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java @@ -206,37 +206,6 @@ private static Quotas getQuotas(final Connection connection, final byte[] rowKey return quotasFromData(result.getValue(QUOTA_FAMILY_INFO, qualifier)); } - public static Get makeGetForTableQuotas(final TableName table) { - Get get = new Get(getTableRowKey(table)); - get.addFamily(QUOTA_FAMILY_INFO); - return get; - } - - public static Get makeGetForNamespaceQuotas(final String namespace) { - Get get = new Get(getNamespaceRowKey(namespace)); - get.addFamily(QUOTA_FAMILY_INFO); - return get; - } - - public static Get makeGetForRegionServerQuotas(final String regionServer) { - Get get = new Get(getRegionServerRowKey(regionServer)); - get.addFamily(QUOTA_FAMILY_INFO); - return get; - } - - public static Get makeGetForUserQuotas(final String user, final Iterable tables, - final Iterable namespaces) { - Get get = new Get(getUserRowKey(user)); - get.addColumn(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); - for (final TableName table : tables) { - get.addColumn(QUOTA_FAMILY_INFO, getSettingsQualifierForUserTable(table)); - } - for (final String ns : namespaces) { - get.addColumn(QUOTA_FAMILY_INFO, getSettingsQualifierForUserNamespace(ns)); - } - return get; - } - public static Scan makeScan(final QuotaFilter filter) { Scan scan = new Scan(); scan.addFamily(QUOTA_FAMILY_INFO); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/RpcThrottlingException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/RpcThrottlingException.java index d4ab38f5bf73..b08179a27a58 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/RpcThrottlingException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/RpcThrottlingException.java @@ -205,4 +205,15 @@ protected static long timeFromString(String timeDiff) { } return -1; } + + /** + * There is little value in an RpcThrottlingException having a stack trace, since its cause is + * well understood without one. When a RegionServer is under heavy load and needs to serve many + * RpcThrottlingExceptions, skipping fillInStackTrace() will save CPU time and allocations, both + * here and later when the exception must be serialized over the wire. 
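The technique this comment describes is a common pattern for cheap, high-volume exceptions. A standalone sketch of the pattern, with an illustrative class name that is not part of this patch:

import java.io.IOException;

// Illustrative only: a frequently thrown "flow control" style exception that skips
// the stack-trace capture normally performed when a Throwable is constructed.
public class StacklessThrottleException extends IOException {
  public StacklessThrottleException(String msg) {
    super(msg);
  }

  // Returning 'this' without delegating to super.fillInStackTrace() avoids walking the
  // call stack on construction, which saves CPU time and allocations when many such
  // exceptions are created and serialized under load.
  @Override
  public synchronized Throwable fillInStackTrace() {
    return this;
  }
}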
+ */ + @Override + public synchronized Throwable fillInStackTrace() { + return this; + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index 3bbfac500ce5..37fdb1ba6fe7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -139,6 +139,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RegionSpecifierAndState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RollAllWALWritersRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest; @@ -860,6 +861,11 @@ public static RollWALWriterRequest buildRollWALWriterRequest() { return RollWALWriterRequest.getDefaultInstance(); } + public static RollAllWALWritersRequest buildRollAllWALWritersRequest(long nonceGroup, + long nonce) { + return RollAllWALWritersRequest.newBuilder().setNonceGroup(nonceGroup).setNonce(nonce).build(); + } + /** * Create a new GetServerInfoRequest * @return a GetServerInfoRequest diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java index 5b1fb86a351a..260c0064f840 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java @@ -28,8 +28,8 @@ import java.util.concurrent.Callable; import java.util.concurrent.CompletableFuture; import java.util.function.Supplier; -import org.apache.hadoop.hbase.Version; import org.apache.hadoop.hbase.util.FutureUtils; +import org.apache.hadoop.hbase.util.VersionInfo; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -39,7 +39,7 @@ private TraceUtil() { } public static Tracer getGlobalTracer() { - return GlobalOpenTelemetry.getTracer("org.apache.hbase", Version.version); + return GlobalOpenTelemetry.getTracer("org.apache.hbase", VersionInfo.getVersion()); } /** diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java index fe6f3bc238a9..f004686a4b32 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java @@ -187,11 +187,39 @@ public static int getDefaultBufferSize(final FileSystem fs) { */ public static FSDataOutputStream create(FileSystem fs, Path path, FsPermission perm, boolean overwrite) throws IOException { + return create(fs, path, perm, overwrite, true); + } + + /** + * Create the specified file on the filesystem. By default, this will: + *

+ * <ol>
+ * <li>apply the umask in the configuration (if it is enabled)</li>
+ * <li>use the fs configured buffer size (or 4096 if not set)</li>
+ * <li>use the default replication</li>
+ * <li>use the default block size</li>
+ * <li>not track progress</li>
+ * </ol>
+ * @param fs {@link FileSystem} on which to write the file + * @param path {@link Path} to the file to write + * @param perm intial permissions + * @param overwrite Whether or not the created file should be overwritten. + * @param isRecursiveCreate recursively create parent directories + * @return output stream to the created file + * @throws IOException if the file cannot be created + */ + public static FSDataOutputStream create(FileSystem fs, Path path, FsPermission perm, + boolean overwrite, boolean isRecursiveCreate) throws IOException { if (LOG.isTraceEnabled()) { - LOG.trace("Creating file={} with permission={}, overwrite={}", path, perm, overwrite); + LOG.trace("Creating file={} with permission={}, overwrite={}, recursive={}", path, perm, + overwrite, isRecursiveCreate); + } + if (isRecursiveCreate) { + return fs.create(path, perm, overwrite, getDefaultBufferSize(fs), + getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null); + } else { + return fs.createNonRecursive(path, perm, overwrite, getDefaultBufferSize(fs), + getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null); } - return fs.create(path, perm, overwrite, getDefaultBufferSize(fs), - getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null); } /** diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java index 4f8a7320fb40..37292d5feefc 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java @@ -65,7 +65,7 @@ public static void addListener(CompletableFuture future, try { // See this post on stack overflow(shorten since the url is too long), // https://s.apache.org/completionexception - // For a chain of CompleableFuture, only the first child CompletableFuture can get the + // For a chain of CompletableFuture, only the first child CompletableFuture can get the // original exception, others will get a CompletionException, which wraps the original // exception. So here we unwrap it before passing it to the callback action. action.accept(resp, unwrapCompletionException(error)); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java index ba60edb06a08..dfb74e66a9ec 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java @@ -40,12 +40,15 @@ public class VersionInfo { // higher than any numbers in the version. private static final int VERY_LARGE_NUMBER = 100000; + // Copying into a non-final member so that it can be changed by reflection for testing + private static String version = Version.version; + /** * Get the hbase version. * @return the hbase version string, eg. 
"0.6.3-dev" */ public static String getVersion() { - return Version.version; + return version; } /** diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassTestFinder.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassTestFinder.java index 1bc648aeb0b5..dc51187e3cf8 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassTestFinder.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassTestFinder.java @@ -19,9 +19,11 @@ import java.lang.reflect.Method; import java.lang.reflect.Modifier; +import java.util.ArrayList; +import java.util.List; import java.util.regex.Pattern; -import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; import org.junit.runners.Suite; /** @@ -46,6 +48,16 @@ public static Class[] getCategoryAnnotations(Class c) { return new Class[0]; } + public static String[] getTagAnnotations(Class c) { + // TODO handle optional Tags annotation + Tag[] tags = c.getAnnotationsByType(Tag.class); + List values = new ArrayList<>(); + for (Tag tag : tags) { + values.add(tag.value()); + } + return values.toArray(new String[values.size()]); + } + /** Filters both test classes and anything in the hadoop-compat modules */ public static class TestFileNameFilter implements FileNameFilter, ResourcePathFilter { private static final Pattern hadoopCompactRe = Pattern.compile("hbase-hadoop\\d?-compat"); @@ -92,7 +104,10 @@ private boolean isTestClass(Class c) { } for (Method met : c.getMethods()) { - if (met.getAnnotation(Test.class) != null) { + if ( + met.getAnnotation(org.junit.Test.class) != null + || met.getAnnotation(org.junit.jupiter.api.Test.class) != null + ) { return true; } } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseJupiterExtension.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseJupiterExtension.java new file mode 100644 index 000000000000..9d4ea87e0ec1 --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseJupiterExtension.java @@ -0,0 +1,246 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase; + +import static org.junit.jupiter.api.Assertions.fail; + +import java.lang.reflect.Constructor; +import java.lang.reflect.Method; +import java.time.Duration; +import java.time.Instant; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.apache.hadoop.hbase.testclassification.IntegrationTests; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.yetus.audience.InterfaceAudience; +import org.junit.jupiter.api.extension.AfterAllCallback; +import org.junit.jupiter.api.extension.AfterEachCallback; +import org.junit.jupiter.api.extension.BeforeAllCallback; +import org.junit.jupiter.api.extension.BeforeEachCallback; +import org.junit.jupiter.api.extension.ExtensionContext; +import org.junit.jupiter.api.extension.ExtensionContext.Store; +import org.junit.jupiter.api.extension.InvocationInterceptor; +import org.junit.jupiter.api.extension.ReflectiveInvocationContext; +import org.junit.platform.commons.JUnitException; +import org.junit.platform.commons.util.ExceptionUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; +import org.apache.hbase.thirdparty.com.google.common.collect.Sets; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + +/** + * Class test rule implementation for JUnit5. + *

+ * It ensures that all JUnit5 tests have at least one of the {@link SmallTests}, + * {@link MediumTests}, {@link LargeTests}, or {@link IntegrationTests} tags, and sets the timeout based on + * the tag. + *

+ * It also controls the timeout for running the whole test class, while the timeout annotation in + * JUnit5 can only enforce a timeout for each test method. When a test times out, a thread dump + * will be printed to the log output. + *

+ * It also implements a resource check for each test method, using the {@link ResourceChecker} class. + *

+ * Finally, it also forbids System.exit calls in tests.
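A hypothetical JUnit 5 test class that follows these rules could look like the sketch below; wiring the extension up via @ExtendWith is an assumption made for illustration, and the patch may register it differently:

import static org.junit.jupiter.api.Assertions.assertTrue;

import org.apache.hadoop.hbase.HBaseJupiterExtension;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;

@ExtendWith(HBaseJupiterExtension.class)
@Tag(MiscTests.TAG)
@Tag(SmallTests.TAG) // exactly one scale tag; SmallTests selects the shortest class timeout
public class TestExampleWithJupiterExtension {
  @Test
  public void works() {
    // The extension enforces the class-level timeout and resource checks around this method.
    assertTrue(true);
  }
}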
+ * TODO: need to find a new way as SecurityManager was deprecated in Java 17 and permanently + * disabled since Java 24. + */ +@InterfaceAudience.Private +public class HBaseJupiterExtension implements InvocationInterceptor, BeforeAllCallback, + AfterAllCallback, BeforeEachCallback, AfterEachCallback { + + private static final Logger LOG = LoggerFactory.getLogger(HBaseJupiterExtension.class); + + private static final SecurityManager securityManager = new TestSecurityManager(); + + private static final ExtensionContext.Namespace NAMESPACE = + ExtensionContext.Namespace.create(HBaseJupiterExtension.class); + + private static final Map TAG_TO_TIMEOUT = + ImmutableMap.of(SmallTests.TAG, Duration.ofMinutes(3), MediumTests.TAG, Duration.ofMinutes(6), + LargeTests.TAG, Duration.ofMinutes(13), IntegrationTests.TAG, Duration.ZERO); + + private static final String EXECUTOR = "executor"; + + private static final String DEADLINE = "deadline"; + + private static final String RESOURCE_CHECK = "rc"; + + private Duration pickTimeout(ExtensionContext ctx) { + Set timeoutTags = TAG_TO_TIMEOUT.keySet(); + Set timeoutTag = Sets.intersection(timeoutTags, ctx.getTags()); + if (timeoutTag.isEmpty()) { + fail("Test class " + ctx.getDisplayName() + " does not have any of the following scale tags " + + timeoutTags); + } + if (timeoutTag.size() > 1) { + fail("Test class " + ctx.getDisplayName() + " has multiple scale tags " + timeoutTag); + } + return TAG_TO_TIMEOUT.get(Iterables.getOnlyElement(timeoutTag)); + } + + @Override + public void beforeAll(ExtensionContext ctx) throws Exception { + // TODO: remove this usage + System.setSecurityManager(securityManager); + Duration timeout = pickTimeout(ctx); + if (timeout.isZero() || timeout.isNegative()) { + LOG.info("No timeout for {}", ctx.getDisplayName()); + // zero means no timeout + return; + } + Instant deadline = Instant.now().plus(timeout); + LOG.info("Timeout for {} is {}, it should be finished before {}", ctx.getDisplayName(), timeout, + deadline); + ExecutorService executor = + Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat("HBase-Test-" + ctx.getDisplayName() + "-Main-Thread").build()); + Store store = ctx.getStore(NAMESPACE); + store.put(EXECUTOR, executor); + store.put(DEADLINE, deadline); + } + + @Override + public void afterAll(ExtensionContext ctx) throws Exception { + Store store = ctx.getStore(NAMESPACE); + ExecutorService executor = store.remove(EXECUTOR, ExecutorService.class); + if (executor != null) { + executor.shutdownNow(); + } + store.remove(DEADLINE); + // reset secutiry manager + System.setSecurityManager(null); + } + + private T runWithTimeout(Invocation invocation, ExtensionContext ctx, String name) + throws Throwable { + Store store = ctx.getStore(NAMESPACE); + ExecutorService executor = store.get(EXECUTOR, ExecutorService.class); + if (executor == null) { + return invocation.proceed(); + } + Instant deadline = store.get(DEADLINE, Instant.class); + Instant now = Instant.now(); + if (!now.isBefore(deadline)) { + fail("Test " + name + " timed out, deadline is " + deadline); + return null; + } + + Duration remaining = Duration.between(now, deadline); + LOG.info("remaining timeout for {} is {}", name, remaining); + Future future = executor.submit(() -> { + try { + return invocation.proceed(); + } catch (Throwable t) { + // follow the same pattern with junit5 + throw ExceptionUtils.throwAsUncheckedException(t); + } + }); + try { + return future.get(remaining.toNanos(), TimeUnit.NANOSECONDS); + } 
catch (InterruptedException e) { + Thread.currentThread().interrupt(); + fail("Test " + name + " interrupted"); + return null; + } catch (ExecutionException e) { + throw ExceptionUtils.throwAsUncheckedException(e.getCause()); + } catch (TimeoutException e) { + printThreadDump(); + throw new JUnitException("Test " + name + " timed out, deadline is " + deadline, e); + } + } + + private void printThreadDump() { + LOG.info("====> TEST TIMED OUT. PRINTING THREAD DUMP. <===="); + LOG.info(TimedOutTestsListener.buildThreadDiagnosticString()); + } + + @Override + public void interceptBeforeAllMethod(Invocation invocation, + ReflectiveInvocationContext invocationContext, ExtensionContext extensionContext) + throws Throwable { + runWithTimeout(invocation, extensionContext, extensionContext.getDisplayName() + ".beforeAll"); + } + + @Override + public void interceptBeforeEachMethod(Invocation invocation, + ReflectiveInvocationContext invocationContext, ExtensionContext extensionContext) + throws Throwable { + runWithTimeout(invocation, extensionContext, extensionContext.getDisplayName() + ".beforeEach"); + } + + @Override + public void interceptTestMethod(Invocation invocation, + ReflectiveInvocationContext invocationContext, ExtensionContext extensionContext) + throws Throwable { + runWithTimeout(invocation, extensionContext, extensionContext.getDisplayName()); + } + + @Override + public void interceptAfterEachMethod(Invocation invocation, + ReflectiveInvocationContext invocationContext, ExtensionContext extensionContext) + throws Throwable { + runWithTimeout(invocation, extensionContext, extensionContext.getDisplayName() + ".afterEach"); + } + + @Override + public void interceptAfterAllMethod(Invocation invocation, + ReflectiveInvocationContext invocationContext, ExtensionContext extensionContext) + throws Throwable { + runWithTimeout(invocation, extensionContext, extensionContext.getDisplayName() + ".afterAll"); + } + + @Override + public T interceptTestClassConstructor(Invocation invocation, + ReflectiveInvocationContext> invocationContext, + ExtensionContext extensionContext) throws Throwable { + return runWithTimeout(invocation, extensionContext, + extensionContext.getDisplayName() + ".constructor"); + } + + // below are for implementing resource checker around test method + + @Override + public void beforeEach(ExtensionContext ctx) throws Exception { + ResourceChecker rc = new ResourceChecker(ctx.getDisplayName()); + JUnitResourceCheckers.addResourceAnalyzer(rc); + Store store = ctx.getStore(NAMESPACE); + store.put(RESOURCE_CHECK, rc); + rc.start(); + } + + @Override + public void afterEach(ExtensionContext ctx) throws Exception { + Store store = ctx.getStore(NAMESPACE); + ResourceChecker rc = store.remove(RESOURCE_CHECK, ResourceChecker.class); + if (rc != null) { + rc.end(); + } + } +} diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/JUnitResourceCheckers.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/JUnitResourceCheckers.java new file mode 100644 index 000000000000..aee49dc2c60f --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/JUnitResourceCheckers.java @@ -0,0 +1,141 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import org.apache.hadoop.hbase.ResourceChecker.Phase; +import org.apache.hadoop.hbase.util.JVM; + +/** + * ResourceCheckers when running JUnit tests. + */ +public final class JUnitResourceCheckers { + + private JUnitResourceCheckers() { + } + + private static class ThreadResourceAnalyzer extends ResourceChecker.ResourceAnalyzer { + private Set initialThreadNames = new HashSet<>(); + private List stringsToLog = null; + + @Override + public int getVal(Phase phase) { + Map stackTraces = Thread.getAllStackTraces(); + if (phase == Phase.INITIAL) { + stringsToLog = null; + for (Thread t : stackTraces.keySet()) { + initialThreadNames.add(t.getName()); + } + } else if (phase == Phase.END) { + if (stackTraces.size() > initialThreadNames.size()) { + stringsToLog = new ArrayList<>(); + for (Thread t : stackTraces.keySet()) { + if (!initialThreadNames.contains(t.getName())) { + stringsToLog.add("\nPotentially hanging thread: " + t.getName() + "\n"); + StackTraceElement[] stackElements = stackTraces.get(t); + for (StackTraceElement ele : stackElements) { + stringsToLog.add("\t" + ele + "\n"); + } + } + } + } + } + return stackTraces.size(); + } + + @Override + public int getMax() { + return 500; + } + + @Override + public List getStringsToLog() { + return stringsToLog; + } + } + + private static class OpenFileDescriptorResourceAnalyzer extends ResourceChecker.ResourceAnalyzer { + @Override + public int getVal(Phase phase) { + if (!JVM.isUnix()) { + return 0; + } + JVM jvm = new JVM(); + return (int) jvm.getOpenFileDescriptorCount(); + } + + @Override + public int getMax() { + return 1024; + } + } + + private static class MaxFileDescriptorResourceAnalyzer extends ResourceChecker.ResourceAnalyzer { + @Override + public int getVal(Phase phase) { + if (!JVM.isUnix()) { + return 0; + } + JVM jvm = new JVM(); + return (int) jvm.getMaxFileDescriptorCount(); + } + } + + private static class SystemLoadAverageResourceAnalyzer extends ResourceChecker.ResourceAnalyzer { + @Override + public int getVal(Phase phase) { + if (!JVM.isUnix()) { + return 0; + } + return (int) (new JVM().getSystemLoadAverage() * 100); + } + } + + private static class ProcessCountResourceAnalyzer extends ResourceChecker.ResourceAnalyzer { + @Override + public int getVal(Phase phase) { + if (!JVM.isUnix()) { + return 0; + } + return new JVM().getNumberOfRunningProcess(); + } + } + + private static class AvailableMemoryMBResourceAnalyzer extends ResourceChecker.ResourceAnalyzer { + @Override + public int getVal(Phase phase) { + if (!JVM.isUnix()) { + return 0; + } + return (int) (new JVM().getFreeMemory() / (1024L * 1024L)); + } + } + + public static void addResourceAnalyzer(ResourceChecker rc) { + rc.addResourceAnalyzer(new ThreadResourceAnalyzer()); + rc.addResourceAnalyzer(new 
OpenFileDescriptorResourceAnalyzer()); + rc.addResourceAnalyzer(new MaxFileDescriptorResourceAnalyzer()); + rc.addResourceAnalyzer(new SystemLoadAverageResourceAnalyzer()); + rc.addResourceAnalyzer(new ProcessCountResourceAnalyzer()); + rc.addResourceAnalyzer(new AvailableMemoryMBResourceAnalyzer()); + } +} diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.java index 4dfce7f536b5..2a796cc40774 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.java @@ -17,14 +17,8 @@ */ package org.apache.hadoop.hbase; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; import java.util.Map; -import java.util.Set; import java.util.concurrent.ConcurrentHashMap; -import org.apache.hadoop.hbase.ResourceChecker.Phase; -import org.apache.hadoop.hbase.util.JVM; import org.junit.runner.notification.RunListener; /** @@ -38,104 +32,8 @@ * When surefire forkMode=once/always/perthread, this code is executed on the forked process. */ public class ResourceCheckerJUnitListener extends RunListener { - private Map rcs = new ConcurrentHashMap<>(); - static class ThreadResourceAnalyzer extends ResourceChecker.ResourceAnalyzer { - private static Set initialThreadNames = new HashSet<>(); - private static List stringsToLog = null; - - @Override - public int getVal(Phase phase) { - Map stackTraces = Thread.getAllStackTraces(); - if (phase == Phase.INITIAL) { - stringsToLog = null; - for (Thread t : stackTraces.keySet()) { - initialThreadNames.add(t.getName()); - } - } else if (phase == Phase.END) { - if (stackTraces.size() > initialThreadNames.size()) { - stringsToLog = new ArrayList<>(); - for (Thread t : stackTraces.keySet()) { - if (!initialThreadNames.contains(t.getName())) { - stringsToLog.add("\nPotentially hanging thread: " + t.getName() + "\n"); - StackTraceElement[] stackElements = stackTraces.get(t); - for (StackTraceElement ele : stackElements) { - stringsToLog.add("\t" + ele + "\n"); - } - } - } - } - } - return stackTraces.size(); - } - - @Override - public int getMax() { - return 500; - } - - @Override - public List getStringsToLog() { - return stringsToLog; - } - } - - static class OpenFileDescriptorResourceAnalyzer extends ResourceChecker.ResourceAnalyzer { - @Override - public int getVal(Phase phase) { - if (!JVM.isUnix()) { - return 0; - } - JVM jvm = new JVM(); - return (int) jvm.getOpenFileDescriptorCount(); - } - - @Override - public int getMax() { - return 1024; - } - } - - static class MaxFileDescriptorResourceAnalyzer extends ResourceChecker.ResourceAnalyzer { - @Override - public int getVal(Phase phase) { - if (!JVM.isUnix()) { - return 0; - } - JVM jvm = new JVM(); - return (int) jvm.getMaxFileDescriptorCount(); - } - } - - static class SystemLoadAverageResourceAnalyzer extends ResourceChecker.ResourceAnalyzer { - @Override - public int getVal(Phase phase) { - if (!JVM.isUnix()) { - return 0; - } - return (int) (new JVM().getSystemLoadAverage() * 100); - } - } - - static class ProcessCountResourceAnalyzer extends ResourceChecker.ResourceAnalyzer { - @Override - public int getVal(Phase phase) { - if (!JVM.isUnix()) { - return 0; - } - return new JVM().getNumberOfRunningProcess(); - } - } - - static class AvailableMemoryMBResourceAnalyzer extends ResourceChecker.ResourceAnalyzer { - @Override - public int getVal(Phase 
phase) { - if (!JVM.isUnix()) { - return 0; - } - return (int) (new JVM().getFreeMemory() / (1024L * 1024L)); - } - } + private final Map rcs = new ConcurrentHashMap<>(); /** * To be implemented by sub classes if they want to add specific ResourceAnalyzer. @@ -145,17 +43,9 @@ protected void addResourceAnalyzer(ResourceChecker rc) { private void start(String testName) { ResourceChecker rc = new ResourceChecker(testName); - rc.addResourceAnalyzer(new ThreadResourceAnalyzer()); - rc.addResourceAnalyzer(new OpenFileDescriptorResourceAnalyzer()); - rc.addResourceAnalyzer(new MaxFileDescriptorResourceAnalyzer()); - rc.addResourceAnalyzer(new SystemLoadAverageResourceAnalyzer()); - rc.addResourceAnalyzer(new ProcessCountResourceAnalyzer()); - rc.addResourceAnalyzer(new AvailableMemoryMBResourceAnalyzer()); - + JUnitResourceCheckers.addResourceAnalyzer(rc); addResourceAnalyzer(rc); - rcs.put(testName, rc); - rc.start(); } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestBuildThreadDiagnosticString.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestBuildThreadDiagnosticString.java new file mode 100644 index 000000000000..4071f18e2dd6 --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestBuildThreadDiagnosticString.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsString; + +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +@Tag(MiscTests.TAG) +@Tag(SmallTests.TAG) +public class TestBuildThreadDiagnosticString { + + @Test + public void test() { + String threadDump = TimedOutTestsListener.buildThreadDiagnosticString(); + System.out.println(threadDump); + assertThat(threadDump, + containsString(getClass().getName() + ".test(" + getClass().getSimpleName() + ".java:")); + } +} diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestJUnit5TagConstants.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestJUnit5TagConstants.java new file mode 100644 index 000000000000..3e30b388ab2e --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestJUnit5TagConstants.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import java.lang.reflect.Field; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +/** + * Verify that the values are all correct. + */ +@Tag(MiscTests.TAG) +@Tag(SmallTests.TAG) +public class TestJUnit5TagConstants { + + @Test + public void testVerify() throws Exception { + ClassFinder finder = new ClassFinder(getClass().getClassLoader()); + for (Class annoClazz : finder.findClasses(ClientTests.class.getPackage().getName(), false)) { + Field field = annoClazz.getField("TAG"); + assertEquals(annoClazz.getName(), field.get(null)); + } + } +} diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TimedOutTestsListener.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TimedOutTestsListener.java index 00860d0dde58..253d17359778 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TimedOutTestsListener.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TimedOutTestsListener.java @@ -26,9 +26,9 @@ import java.lang.management.ThreadInfo; import java.lang.management.ThreadMXBean; import java.nio.charset.StandardCharsets; -import java.text.DateFormat; -import java.text.SimpleDateFormat; -import java.util.Date; +import java.time.Instant; +import java.time.ZoneId; +import java.time.format.DateTimeFormatter; import java.util.Locale; import java.util.Map; import org.junit.runner.notification.Failure; @@ -40,7 +40,10 @@ */ public class TimedOutTestsListener extends RunListener { - static final String TEST_TIMED_OUT_PREFIX = "test timed out after"; + private static final String TEST_TIMED_OUT_PREFIX = "test timed out after"; + + private static final DateTimeFormatter TIMESTAMP_FORMATTER = + DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss,SSS Z").withZone(ZoneId.systemDefault()); private static String INDENT = " "; @@ -67,13 +70,11 @@ public void testFailure(Failure failure) throws Exception { output.flush(); } - @SuppressWarnings("JavaUtilDate") public static String buildThreadDiagnosticString() { StringWriter sw = new StringWriter(); PrintWriter output = new PrintWriter(sw); - DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd hh:mm:ss,SSS"); - output.println(String.format("Timestamp: %s", dateFormat.format(new Date()))); + output.println(String.format("Timestamp: %s", TIMESTAMP_FORMATTER.format(Instant.now()))); output.println(); output.println(buildThreadDump()); @@ -87,7 +88,7 @@ public static String buildThreadDiagnosticString() { return sw.toString(); } - static String buildThreadDump() { + private static String buildThreadDump() { StringBuilder dump = new StringBuilder(); Map stackTraces = Thread.getAllStackTraces(); for (Map.Entry e : stackTraces.entrySet()) { @@ -109,7 +110,7 @@ static String buildThreadDump() { return dump.toString(); } - static String buildDeadlockInfo() { + private static String buildDeadlockInfo() { 
ThreadMXBean threadBean = ManagementFactory.getThreadMXBean(); long[] threadIds = threadBean.findMonitorDeadlockedThreads(); if (threadIds != null && threadIds.length > 0) { diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/ByteBufferUtilsTestBase.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/ByteBufferUtilsTestBase.java new file mode 100644 index 000000000000..194915475775 --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/ByteBufferUtilsTestBase.java @@ -0,0 +1,600 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.nio.ByteBuff; +import org.apache.hadoop.io.WritableUtils; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class ByteBufferUtilsTestBase { + + private static final Logger LOG = LoggerFactory.getLogger(ByteBufferUtilsTestBase.class); + + private static int MAX_VLONG_LENGTH = 9; + private static Collection testNumbers; + + private byte[] array; + + @BeforeAll + public static void setUpBeforeAll() { + SortedSet a = new TreeSet<>(); + for (int i = 0; i <= 63; ++i) { + long v = -1L << i; + assertTrue(v < 0); + addNumber(a, v); + v = (1L << i) - 1; + assertTrue(v >= 0); + addNumber(a, v); + } + + testNumbers = Collections.unmodifiableSet(a); + LOG.info("Testing variable-length long serialization using: {} (count: {})", testNumbers, + testNumbers.size()); + assertEquals(1753, testNumbers.size()); + assertEquals(Long.MIN_VALUE, a.first().longValue()); + assertEquals(Long.MAX_VALUE, a.last().longValue()); + } + + /** + * Create an array with sample data. 
+ */ + @BeforeEach + public void setUp() { + array = new byte[8]; + for (int i = 0; i < array.length; ++i) { + array[i] = (byte) ('a' + i); + } + } + + private static void addNumber(Set a, long l) { + if (l != Long.MIN_VALUE) { + a.add(l - 1); + } + a.add(l); + if (l != Long.MAX_VALUE) { + a.add(l + 1); + } + for (long divisor = 3; divisor <= 10; ++divisor) { + for (long delta = -1; delta <= 1; ++delta) { + a.add(l / divisor + delta); + } + } + } + + @Test + public void testReadWriteVLong() { + for (long l : testNumbers) { + ByteBuffer b = ByteBuffer.allocate(MAX_VLONG_LENGTH); + ByteBufferUtils.writeVLong(b, l); + b.flip(); + assertEquals(l, ByteBufferUtils.readVLong(b)); + b.flip(); + assertEquals(l, ByteBufferUtils.readVLong(ByteBuff.wrap(b))); + } + } + + @Test + public void testReadWriteConsecutiveVLong() { + for (long l : testNumbers) { + ByteBuffer b = ByteBuffer.allocate(2 * MAX_VLONG_LENGTH); + ByteBufferUtils.writeVLong(b, l); + ByteBufferUtils.writeVLong(b, l - 4); + b.flip(); + assertEquals(l, ByteBufferUtils.readVLong(b)); + assertEquals(l - 4, ByteBufferUtils.readVLong(b)); + b.flip(); + assertEquals(l, ByteBufferUtils.readVLong(ByteBuff.wrap(b))); + assertEquals(l - 4, ByteBufferUtils.readVLong(ByteBuff.wrap(b))); + } + } + + @Test + public void testConsistencyWithHadoopVLong() throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DataOutputStream dos = new DataOutputStream(baos); + for (long l : testNumbers) { + baos.reset(); + ByteBuffer b = ByteBuffer.allocate(MAX_VLONG_LENGTH); + ByteBufferUtils.writeVLong(b, l); + String bufStr = Bytes.toStringBinary(b.array(), b.arrayOffset(), b.position()); + WritableUtils.writeVLong(dos, l); + String baosStr = Bytes.toStringBinary(baos.toByteArray()); + assertEquals(baosStr, bufStr); + } + } + + /** + * Test copying to stream from buffer. + */ + @Test + public void testMoveBufferToStream() throws IOException { + final int arrayOffset = 7; + final int initialPosition = 10; + final int endPadding = 5; + byte[] arrayWrapper = new byte[arrayOffset + initialPosition + array.length + endPadding]; + System.arraycopy(array, 0, arrayWrapper, arrayOffset + initialPosition, array.length); + ByteBuffer buffer = + ByteBuffer.wrap(arrayWrapper, arrayOffset, initialPosition + array.length).slice(); + assertEquals(initialPosition + array.length, buffer.limit()); + assertEquals(0, buffer.position()); + buffer.position(initialPosition); + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + ByteBufferUtils.moveBufferToStream(bos, buffer, array.length); + assertArrayEquals(array, bos.toByteArray()); + assertEquals(initialPosition + array.length, buffer.position()); + } + + /** + * Test copying to stream from buffer with offset. + * @throws IOException On test failure. + */ + @Test + public void testCopyToStreamWithOffset() throws IOException { + ByteBuffer buffer = ByteBuffer.wrap(array); + + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + + ByteBufferUtils.copyBufferToStream(bos, buffer, array.length / 2, array.length / 2); + + byte[] returnedArray = bos.toByteArray(); + for (int i = 0; i < array.length / 2; ++i) { + int pos = array.length / 2 + i; + assertEquals(returnedArray[i], array[pos]); + } + } + + /** + * Test copying data from stream. + * @throws IOException On test failure. 
+ */ + @Test + public void testCopyFromStream() throws IOException { + ByteBuffer buffer = ByteBuffer.allocate(array.length); + ByteArrayInputStream bis = new ByteArrayInputStream(array); + DataInputStream dis = new DataInputStream(bis); + + ByteBufferUtils.copyFromStreamToBuffer(buffer, dis, array.length / 2); + ByteBufferUtils.copyFromStreamToBuffer(buffer, dis, array.length - array.length / 2); + for (int i = 0; i < array.length; ++i) { + assertEquals(array[i], buffer.get(i)); + } + } + + /** + * Test copying from buffer. + */ + @Test + public void testCopyFromBuffer() { + ByteBuffer srcBuffer = ByteBuffer.allocate(array.length); + ByteBuffer dstBuffer = ByteBuffer.allocate(array.length); + srcBuffer.put(array); + + ByteBufferUtils.copyFromBufferToBuffer(srcBuffer, dstBuffer, array.length / 2, + array.length / 4); + for (int i = 0; i < array.length / 4; ++i) { + assertEquals(srcBuffer.get(i + array.length / 2), dstBuffer.get(i)); + } + } + + /** + * Test 7-bit encoding of integers. + * @throws IOException On test failure. + */ + @Test + public void testCompressedInt() throws IOException { + testCompressedInt(0); + testCompressedInt(Integer.MAX_VALUE); + testCompressedInt(Integer.MIN_VALUE); + + for (int i = 0; i < 3; i++) { + testCompressedInt((128 << i) - 1); + } + + for (int i = 0; i < 3; i++) { + testCompressedInt((128 << i)); + } + } + + /** + * Test how much bytes we need to store integer. + */ + @Test + public void testIntFitsIn() { + assertEquals(1, ByteBufferUtils.intFitsIn(0)); + assertEquals(1, ByteBufferUtils.intFitsIn(1)); + assertEquals(2, ByteBufferUtils.intFitsIn(1 << 8)); + assertEquals(3, ByteBufferUtils.intFitsIn(1 << 16)); + assertEquals(4, ByteBufferUtils.intFitsIn(-1)); + assertEquals(4, ByteBufferUtils.intFitsIn(Integer.MAX_VALUE)); + assertEquals(4, ByteBufferUtils.intFitsIn(Integer.MIN_VALUE)); + } + + /** + * Test how much bytes we need to store long. + */ + @Test + public void testLongFitsIn() { + assertEquals(1, ByteBufferUtils.longFitsIn(0)); + assertEquals(1, ByteBufferUtils.longFitsIn(1)); + assertEquals(3, ByteBufferUtils.longFitsIn(1L << 16)); + assertEquals(5, ByteBufferUtils.longFitsIn(1L << 32)); + assertEquals(8, ByteBufferUtils.longFitsIn(-1)); + assertEquals(8, ByteBufferUtils.longFitsIn(Long.MIN_VALUE)); + assertEquals(8, ByteBufferUtils.longFitsIn(Long.MAX_VALUE)); + } + + /** + * Test if we are comparing equal bytes. 
+ */ + @Test + public void testArePartEqual() { + byte[] array = new byte[] { 1, 2, 3, 4, 5, 1, 2, 3, 4 }; + ByteBuffer buffer = ByteBuffer.wrap(array); + assertTrue(ByteBufferUtils.arePartsEqual(buffer, 0, 4, 5, 4)); + assertTrue(ByteBufferUtils.arePartsEqual(buffer, 1, 2, 6, 2)); + assertFalse(ByteBufferUtils.arePartsEqual(buffer, 1, 2, 6, 3)); + assertFalse(ByteBufferUtils.arePartsEqual(buffer, 1, 3, 6, 2)); + assertFalse(ByteBufferUtils.arePartsEqual(buffer, 0, 3, 6, 3)); + } + + /** + * Test serializing int to bytes + */ + @Test + public void testPutInt() { + testPutInt(0); + testPutInt(Integer.MAX_VALUE); + + for (int i = 0; i < 3; i++) { + testPutInt((128 << i) - 1); + } + + for (int i = 0; i < 3; i++) { + testPutInt((128 << i)); + } + } + + @Test + public void testToBytes() { + ByteBuffer buffer = ByteBuffer.allocate(5); + buffer.put(new byte[] { 0, 1, 2, 3, 4 }); + assertEquals(5, buffer.position()); + assertEquals(5, buffer.limit()); + byte[] copy = ByteBufferUtils.toBytes(buffer, 2); + assertArrayEquals(new byte[] { 2, 3, 4 }, copy); + assertEquals(5, buffer.position()); + assertEquals(5, buffer.limit()); + } + + @Test + public void testToPrimitiveTypes() { + ByteBuffer buffer = ByteBuffer.allocate(15); + long l = 988L; + int i = 135; + short s = 7; + buffer.putLong(l); + buffer.putShort(s); + buffer.putInt(i); + assertEquals(l, ByteBufferUtils.toLong(buffer, 0)); + assertEquals(s, ByteBufferUtils.toShort(buffer, 8)); + assertEquals(i, ByteBufferUtils.toInt(buffer, 10)); + } + + @Test + public void testCopyFromArrayToBuffer() { + byte[] b = new byte[15]; + b[0] = -1; + long l = 988L; + int i = 135; + short s = 7; + Bytes.putLong(b, 1, l); + Bytes.putShort(b, 9, s); + Bytes.putInt(b, 11, i); + ByteBuffer buffer = ByteBuffer.allocate(14); + ByteBufferUtils.copyFromArrayToBuffer(buffer, b, 1, 14); + buffer.rewind(); + assertEquals(l, buffer.getLong()); + assertEquals(s, buffer.getShort()); + assertEquals(i, buffer.getInt()); + } + + private void testCopyFromSrcToDestWithThreads(Object input, Object output, List<Integer> lengthes, + List<Integer> offsets) throws InterruptedException { + assertTrue((input instanceof ByteBuffer) || (input instanceof byte[])); + assertTrue((output instanceof ByteBuffer) || (output instanceof byte[])); + assertEquals(lengthes.size(), offsets.size()); + + final int threads = lengthes.size(); + CountDownLatch latch = new CountDownLatch(1); + List<Runnable> exes = new ArrayList<>(threads); + int oldInputPos = (input instanceof ByteBuffer) ? ((ByteBuffer) input).position() : 0; + int oldOutputPos = (output instanceof ByteBuffer) ?
((ByteBuffer) output).position() : 0; + for (int i = 0; i != threads; ++i) { + int offset = offsets.get(i); + int length = lengthes.get(i); + exes.add(() -> { + try { + latch.await(); + if (input instanceof ByteBuffer && output instanceof byte[]) { + ByteBufferUtils.copyFromBufferToArray((byte[]) output, (ByteBuffer) input, offset, + offset, length); + } + if (input instanceof byte[] && output instanceof ByteBuffer) { + ByteBufferUtils.copyFromArrayToBuffer((ByteBuffer) output, offset, (byte[]) input, + offset, length); + } + if (input instanceof ByteBuffer && output instanceof ByteBuffer) { + ByteBufferUtils.copyFromBufferToBuffer((ByteBuffer) input, (ByteBuffer) output, offset, + offset, length); + } + } catch (InterruptedException ex) { + throw new RuntimeException(ex); + } + }); + } + ExecutorService service = Executors.newFixedThreadPool(threads); + exes.forEach(service::execute); + latch.countDown(); + service.shutdown(); + assertTrue(service.awaitTermination(5, TimeUnit.SECONDS)); + if (input instanceof ByteBuffer) { + assertEquals(oldInputPos, ((ByteBuffer) input).position()); + } + if (output instanceof ByteBuffer) { + assertEquals(oldOutputPos, ((ByteBuffer) output).position()); + } + String inputString = (input instanceof ByteBuffer) + ? Bytes.toString(Bytes.toBytes((ByteBuffer) input)) + : Bytes.toString((byte[]) input); + String outputString = (output instanceof ByteBuffer) + ? Bytes.toString(Bytes.toBytes((ByteBuffer) output)) + : Bytes.toString((byte[]) output); + assertEquals(inputString, outputString); + } + + @Test + public void testCopyFromSrcToDestWithThreads() throws InterruptedException { + List<byte[]> words = + Arrays.asList(Bytes.toBytes("with"), Bytes.toBytes("great"), Bytes.toBytes("power"), + Bytes.toBytes("comes"), Bytes.toBytes("great"), Bytes.toBytes("responsibility")); + List<Integer> lengthes = words.stream().map(v -> v.length).collect(Collectors.toList()); + List<Integer> offsets = new ArrayList<>(words.size()); + for (int i = 0; i != words.size(); ++i) { + offsets.add(words.subList(0, i).stream().mapToInt(v -> v.length).sum()); + } + + int totalSize = words.stream().mapToInt(v -> v.length).sum(); + byte[] fullContent = new byte[totalSize]; + int offset = 0; + for (byte[] w : words) { + offset = Bytes.putBytes(fullContent, offset, w, 0, w.length); + } + + // test copyFromBufferToArray + for (ByteBuffer input : Arrays.asList(ByteBuffer.allocateDirect(totalSize), + ByteBuffer.allocate(totalSize))) { + words.forEach(input::put); + byte[] output = new byte[totalSize]; + testCopyFromSrcToDestWithThreads(input, output, lengthes, offsets); + } + + // test copyFromArrayToBuffer + for (ByteBuffer output : Arrays.asList(ByteBuffer.allocateDirect(totalSize), + ByteBuffer.allocate(totalSize))) { + byte[] input = fullContent; + testCopyFromSrcToDestWithThreads(input, output, lengthes, offsets); + } + + // test copyFromBufferToBuffer + for (ByteBuffer input : Arrays.asList(ByteBuffer.allocateDirect(totalSize), + ByteBuffer.allocate(totalSize))) { + words.forEach(input::put); + for (ByteBuffer output : Arrays.asList(ByteBuffer.allocateDirect(totalSize), + ByteBuffer.allocate(totalSize))) { + testCopyFromSrcToDestWithThreads(input, output, lengthes, offsets); + } + } + } + + @Test + public void testCopyFromBufferToArray() { + ByteBuffer buffer = ByteBuffer.allocate(15); + buffer.put((byte) -1); + long l = 988L; + int i = 135; + short s = 7; + buffer.putShort(s); + buffer.putInt(i); + buffer.putLong(l); + byte[] b = new byte[15]; + ByteBufferUtils.copyFromBufferToArray(b, buffer, 1, 1, 14); +
assertEquals(s, Bytes.toShort(b, 1)); + assertEquals(i, Bytes.toInt(b, 3)); + assertEquals(l, Bytes.toLong(b, 7)); + } + + @Test + public void testRelativeCopyFromBuffertoBuffer() { + ByteBuffer bb1 = ByteBuffer.allocate(135); + ByteBuffer bb2 = ByteBuffer.allocate(135); + fillBB(bb1, (byte) 5); + ByteBufferUtils.copyFromBufferToBuffer(bb1, bb2); + assertTrue(bb1.position() == bb2.position()); + assertTrue(bb1.limit() == bb2.limit()); + bb1 = ByteBuffer.allocateDirect(135); + bb2 = ByteBuffer.allocateDirect(135); + fillBB(bb1, (byte) 5); + ByteBufferUtils.copyFromBufferToBuffer(bb1, bb2); + assertTrue(bb1.position() == bb2.position()); + assertTrue(bb1.limit() == bb2.limit()); + } + + @Test + public void testCompareTo() { + ByteBuffer bb1 = ByteBuffer.allocate(135); + ByteBuffer bb2 = ByteBuffer.allocate(135); + byte[] b = new byte[71]; + fillBB(bb1, (byte) 5); + fillBB(bb2, (byte) 5); + fillArray(b, (byte) 5); + assertEquals(0, ByteBufferUtils.compareTo(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining())); + assertTrue(ByteBufferUtils.compareTo(bb1, 0, bb1.remaining(), b, 0, b.length) > 0); + bb2.put(134, (byte) 6); + assertTrue(ByteBufferUtils.compareTo(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining()) < 0); + bb2.put(6, (byte) 4); + assertTrue(ByteBufferUtils.compareTo(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining()) > 0); + // Assert reverse comparing BB and bytearray works. + ByteBuffer bb3 = ByteBuffer.allocate(135); + fillBB(bb3, (byte) 0); + byte[] b3 = new byte[135]; + fillArray(b3, (byte) 1); + int result = ByteBufferUtils.compareTo(b3, 0, b3.length, bb3, 0, bb3.remaining()); + assertTrue(result > 0); + result = ByteBufferUtils.compareTo(bb3, 0, bb3.remaining(), b3, 0, b3.length); + assertTrue(result < 0); + byte[] b4 = Bytes.toBytes("123"); + ByteBuffer bb4 = ByteBuffer.allocate(10 + b4.length); + for (int i = 10; i < bb4.capacity(); ++i) { + bb4.put(i, b4[i - 10]); + } + result = ByteBufferUtils.compareTo(b4, 0, b4.length, bb4, 10, b4.length); + assertEquals(0, result); + } + + @Test + public void testEquals() { + byte[] a = Bytes.toBytes("http://A"); + ByteBuffer bb = ByteBuffer.wrap(a); + + assertTrue(ByteBufferUtils.equals(HConstants.EMPTY_BYTE_BUFFER, 0, 0, + HConstants.EMPTY_BYTE_BUFFER, 0, 0)); + + assertFalse(ByteBufferUtils.equals(HConstants.EMPTY_BYTE_BUFFER, 0, 0, bb, 0, a.length)); + + assertFalse(ByteBufferUtils.equals(bb, 0, 0, HConstants.EMPTY_BYTE_BUFFER, 0, a.length)); + + assertTrue(ByteBufferUtils.equals(bb, 0, a.length, bb, 0, a.length)); + + assertTrue(ByteBufferUtils.equals(HConstants.EMPTY_BYTE_BUFFER, 0, 0, + HConstants.EMPTY_BYTE_ARRAY, 0, 0)); + + assertFalse(ByteBufferUtils.equals(HConstants.EMPTY_BYTE_BUFFER, 0, 0, a, 0, a.length)); + + assertFalse(ByteBufferUtils.equals(bb, 0, a.length, HConstants.EMPTY_BYTE_ARRAY, 0, 0)); + + assertTrue(ByteBufferUtils.equals(bb, 0, a.length, a, 0, a.length)); + } + + @Test + public void testFindCommonPrefix() { + ByteBuffer bb1 = ByteBuffer.allocate(135); + ByteBuffer bb2 = ByteBuffer.allocate(135); + ByteBuffer bb3 = ByteBuffer.allocateDirect(135); + byte[] b = new byte[71]; + + fillBB(bb1, (byte) 5); + fillBB(bb2, (byte) 5); + fillBB(bb3, (byte) 5); + fillArray(b, (byte) 5); + + assertEquals(135, + ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining())); + assertEquals(71, ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), b, 0, b.length)); + assertEquals(135, + ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), bb3, 0, bb3.remaining())); + assertEquals(71, 
ByteBufferUtils.findCommonPrefix(bb3, 0, bb3.remaining(), b, 0, b.length)); + + b[13] = 9; + assertEquals(13, ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), b, 0, b.length)); + + bb2.put(134, (byte) 6); + assertEquals(134, + ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining())); + + bb2.put(6, (byte) 4); + assertEquals(6, + ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining())); + } + + // Below are utility methods invoked from test methods + private static void testCompressedInt(int value) throws IOException { + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + ByteBufferUtils.putCompressedInt(bos, value); + ByteArrayInputStream bis = new ByteArrayInputStream(bos.toByteArray()); + int parsedValue = ByteBufferUtils.readCompressedInt(bis); + assertEquals(value, parsedValue); + } + + private static void testPutInt(int value) { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try { + ByteBufferUtils.putInt(baos, value); + } catch (IOException e) { + throw new RuntimeException("Bug in putIn()", e); + } + + ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray()); + DataInputStream dis = new DataInputStream(bais); + try { + assertEquals(dis.readInt(), value); + } catch (IOException e) { + throw new RuntimeException("Bug in test!", e); + } + } + + private static void fillBB(ByteBuffer bb, byte b) { + for (int i = bb.position(); i < bb.limit(); i++) { + bb.put(i, b); + } + } + + private static void fillArray(byte[] bb, byte b) { + for (int i = 0; i < bb.length; i++) { + bb[i] = b; + } + } +} diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/BytesTestBase.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/BytesTestBase.java new file mode 100644 index 000000000000..96df8bc39396 --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/BytesTestBase.java @@ -0,0 +1,578 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.util; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNotSame; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.math.BigDecimal; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Random; +import java.util.concurrent.ThreadLocalRandom; +import org.apache.hadoop.io.WritableUtils; +import org.junit.jupiter.api.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class BytesTestBase { + + private static final Logger LOG = LoggerFactory.getLogger(BytesTestBase.class); + + @Test + public void testShort() throws Exception { + for (short n : Arrays.asList(Short.MIN_VALUE, (short) -100, (short) -1, (short) 0, (short) 1, + (short) 300, Short.MAX_VALUE)) { + byte[] bytes = Bytes.toBytes(n); + assertEquals(Bytes.toShort(bytes, 0, bytes.length), n); + } + } + + @Test + public void testNullHashCode() { + byte[] b = null; + Exception ee = null; + try { + Bytes.hashCode(b); + } catch (Exception e) { + ee = e; + } + assertNotNull(ee); + } + + @Test + public void testAdd() { + byte[] a = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + byte[] b = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; + byte[] c = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 }; + byte[] result1 = Bytes.add(a, b, c); + byte[] result2 = Bytes.add(new byte[][] { a, b, c }); + assertEquals(0, Bytes.compareTo(result1, result2)); + } + + @Test + public void testSplit() { + byte[] lowest = Bytes.toBytes("AAA"); + byte[] middle = Bytes.toBytes("CCC"); + byte[] highest = Bytes.toBytes("EEE"); + byte[][] parts = Bytes.split(lowest, highest, 1); + for (byte[] bytes : parts) { + LOG.info(Bytes.toString(bytes)); + } + assertEquals(3, parts.length); + assertTrue(Bytes.equals(parts[1], middle)); + // Now divide into three parts. Change highest so split is even. + highest = Bytes.toBytes("DDD"); + parts = Bytes.split(lowest, highest, 2); + for (byte[] part : parts) { + LOG.info(Bytes.toString(part)); + } + assertEquals(4, parts.length); + // Assert that 3rd part is 'CCC'. + assertTrue(Bytes.equals(parts[2], middle)); + } + + @Test + public void testSplit2() { + // More split tests. 
+ byte[] lowest = Bytes.toBytes("http://A"); + byte[] highest = Bytes.toBytes("http://z"); + byte[] middle = Bytes.toBytes("http://]"); + byte[][] parts = Bytes.split(lowest, highest, 1); + for (byte[] part : parts) { + LOG.info(Bytes.toString(part)); + } + assertEquals(3, parts.length); + assertTrue(Bytes.equals(parts[1], middle)); + } + + @Test + public void testSplit3() { + // Test invalid split cases + byte[] low = { 1, 1, 1 }; + byte[] high = { 1, 1, 3 }; + + // If swapped, should throw IAE + try { + Bytes.split(high, low, 1); + fail("Should not be able to split if low > high"); + } catch (IllegalArgumentException iae) { + // Correct + } + + // Single split should work + byte[][] parts = Bytes.split(low, high, 1); + for (int i = 0; i < parts.length; i++) { + LOG.info("" + i + " -> " + Bytes.toStringBinary(parts[i])); + } + assertEquals(3, parts.length, "Returned split should have 3 parts but has " + parts.length); + + // If split more than once, use additional byte to split + parts = Bytes.split(low, high, 2); + assertNotNull(parts, "Split with an additional byte"); + assertEquals(parts.length, low.length + 1); + + // Split 0 times should throw IAE + try { + Bytes.split(low, high, 0); + fail("Should not be able to split 0 times"); + } catch (IllegalArgumentException iae) { + // Correct + } + } + + @Test + public void testToInt() { + int[] ints = { -1, 123, Integer.MIN_VALUE, Integer.MAX_VALUE }; + for (int anInt : ints) { + byte[] b = Bytes.toBytes(anInt); + assertEquals(anInt, Bytes.toInt(b)); + byte[] b2 = bytesWithOffset(b); + assertEquals(anInt, Bytes.toInt(b2, 1)); + assertEquals(anInt, Bytes.toInt(b2, 1, Bytes.SIZEOF_INT)); + } + } + + @Test + public void testToLong() { + long[] longs = { -1L, 123L, Long.MIN_VALUE, Long.MAX_VALUE }; + for (long aLong : longs) { + byte[] b = Bytes.toBytes(aLong); + assertEquals(aLong, Bytes.toLong(b)); + byte[] b2 = bytesWithOffset(b); + assertEquals(aLong, Bytes.toLong(b2, 1)); + assertEquals(aLong, Bytes.toLong(b2, 1, Bytes.SIZEOF_LONG)); + } + } + + @Test + public void testToFloat() { + float[] floats = { -1f, 123.123f, Float.MAX_VALUE }; + for (float aFloat : floats) { + byte[] b = Bytes.toBytes(aFloat); + assertEquals(aFloat, Bytes.toFloat(b), 0.0f); + byte[] b2 = bytesWithOffset(b); + assertEquals(aFloat, Bytes.toFloat(b2, 1), 0.0f); + } + } + + @Test + public void testToDouble() { + double[] doubles = { Double.MIN_VALUE, Double.MAX_VALUE }; + for (double aDouble : doubles) { + byte[] b = Bytes.toBytes(aDouble); + assertEquals(aDouble, Bytes.toDouble(b), 0.0); + byte[] b2 = bytesWithOffset(b); + assertEquals(aDouble, Bytes.toDouble(b2, 1), 0.0); + } + } + + @Test + public void testToBigDecimal() { + BigDecimal[] decimals = + { new BigDecimal("-1"), new BigDecimal("123.123"), new BigDecimal("123123123123") }; + for (BigDecimal decimal : decimals) { + byte[] b = Bytes.toBytes(decimal); + assertEquals(decimal, Bytes.toBigDecimal(b)); + byte[] b2 = bytesWithOffset(b); + assertEquals(decimal, Bytes.toBigDecimal(b2, 1, b.length)); + } + } + + private byte[] bytesWithOffset(byte[] src) { + // add one byte in front to test offset + byte[] result = new byte[src.length + 1]; + result[0] = (byte) 0xAA; + System.arraycopy(src, 0, result, 1, src.length); + return result; + } + + @Test + public void testToBytesForByteBuffer() { + byte[] array = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; + ByteBuffer target = ByteBuffer.wrap(array); + target.position(2); + target.limit(7); + + byte[] actual = Bytes.toBytes(target); + byte[] expected = { 0, 1, 2, 3, 4, 5, 6 }; + 
assertArrayEquals(expected, actual); + assertEquals(2, target.position()); + assertEquals(7, target.limit()); + + ByteBuffer target2 = target.slice(); + assertEquals(0, target2.position()); + assertEquals(5, target2.limit()); + + byte[] actual2 = Bytes.toBytes(target2); + byte[] expected2 = { 2, 3, 4, 5, 6 }; + assertArrayEquals(expected2, actual2); + assertEquals(0, target2.position()); + assertEquals(5, target2.limit()); + } + + @Test + public void testGetBytesForByteBuffer() { + byte[] array = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; + ByteBuffer target = ByteBuffer.wrap(array); + target.position(2); + target.limit(7); + + byte[] actual = Bytes.getBytes(target); + byte[] expected = { 2, 3, 4, 5, 6 }; + assertArrayEquals(expected, actual); + assertEquals(2, target.position()); + assertEquals(7, target.limit()); + } + + @Test + public void testReadAsVLong() throws Exception { + long[] longs = { -1L, 123L, Long.MIN_VALUE, Long.MAX_VALUE }; + for (long aLong : longs) { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DataOutputStream output = new DataOutputStream(baos); + WritableUtils.writeVLong(output, aLong); + byte[] long_bytes_no_offset = baos.toByteArray(); + assertEquals(aLong, Bytes.readAsVLong(long_bytes_no_offset, 0)); + byte[] long_bytes_with_offset = bytesWithOffset(long_bytes_no_offset); + assertEquals(aLong, Bytes.readAsVLong(long_bytes_with_offset, 1)); + } + } + + @Test + public void testToStringBinaryForBytes() { + byte[] array = { '0', '9', 'a', 'z', 'A', 'Z', '@', 1 }; + String actual = Bytes.toStringBinary(array); + String expected = "09azAZ@\\x01"; + assertEquals(expected, actual); + + String actual2 = Bytes.toStringBinary(array, 2, 3); + String expected2 = "azA"; + assertEquals(expected2, actual2); + } + + @Test + public void testToStringBinaryForArrayBasedByteBuffer() { + byte[] array = { '0', '9', 'a', 'z', 'A', 'Z', '@', 1 }; + ByteBuffer target = ByteBuffer.wrap(array); + String actual = Bytes.toStringBinary(target); + String expected = "09azAZ@\\x01"; + assertEquals(expected, actual); + } + + @Test + public void testToStringBinaryForReadOnlyByteBuffer() { + byte[] array = { '0', '9', 'a', 'z', 'A', 'Z', '@', 1 }; + ByteBuffer target = ByteBuffer.wrap(array).asReadOnlyBuffer(); + String actual = Bytes.toStringBinary(target); + String expected = "09azAZ@\\x01"; + assertEquals(expected, actual); + } + + @Test + public void testBinarySearch() { + byte[][] arr = { { 1 }, { 3 }, { 5 }, { 7 }, { 9 }, { 11 }, { 13 }, { 15 }, }; + byte[] key1 = { 3, 1 }; + byte[] key2 = { 4, 9 }; + byte[] key2_2 = { 4 }; + byte[] key3 = { 5, 11 }; + byte[] key4 = { 0 }; + byte[] key5 = { 2 }; + + assertEquals(1, Bytes.binarySearch(arr, key1, 0, 1)); + assertEquals(0, Bytes.binarySearch(arr, key1, 1, 1)); + assertEquals(-(2 + 1), Arrays.binarySearch(arr, key2_2, Bytes.BYTES_COMPARATOR)); + assertEquals(-(2 + 1), Bytes.binarySearch(arr, key2, 0, 1)); + assertEquals(4, Bytes.binarySearch(arr, key2, 1, 1)); + assertEquals(2, Bytes.binarySearch(arr, key3, 0, 1)); + assertEquals(5, Bytes.binarySearch(arr, key3, 1, 1)); + assertEquals(-1, Bytes.binarySearch(arr, key4, 0, 1)); + assertEquals(-2, Bytes.binarySearch(arr, key5, 0, 1)); + + // Search for values to the left and to the right of each item in the array. 
+ for (int i = 0; i < arr.length; ++i) { + assertEquals(-(i + 1), Bytes.binarySearch(arr, new byte[] { (byte) (arr[i][0] - 1) }, 0, 1)); + assertEquals(-(i + 2), Bytes.binarySearch(arr, new byte[] { (byte) (arr[i][0] + 1) }, 0, 1)); + } + } + + @Test + public void testToStringBytesBinaryReversible() { + byte[] randomBytes = new byte[1000]; + for (int i = 0; i < 1000; i++) { + Bytes.random(randomBytes); + verifyReversibleForBytes(randomBytes); + } + // some specific cases + verifyReversibleForBytes(new byte[] {}); + verifyReversibleForBytes(new byte[] { '\\', 'x', 'A', 'D' }); + verifyReversibleForBytes(new byte[] { '\\', 'x', 'A', 'D', '\\' }); + } + + private void verifyReversibleForBytes(byte[] originalBytes) { + String convertedString = Bytes.toStringBinary(originalBytes); + byte[] convertedBytes = Bytes.toBytesBinary(convertedString); + if (Bytes.compareTo(originalBytes, convertedBytes) != 0) { + fail("Not reversible for\nbyte[]: " + Arrays.toString(originalBytes) + ",\nStringBinary: " + + convertedString); + } + } + + @Test + public void testStartsWith() { + assertTrue(Bytes.startsWith(Bytes.toBytes("hello"), Bytes.toBytes("h"))); + assertTrue(Bytes.startsWith(Bytes.toBytes("hello"), Bytes.toBytes(""))); + assertTrue(Bytes.startsWith(Bytes.toBytes("hello"), Bytes.toBytes("hello"))); + assertFalse(Bytes.startsWith(Bytes.toBytes("hello"), Bytes.toBytes("helloworld"))); + assertFalse(Bytes.startsWith(Bytes.toBytes(""), Bytes.toBytes("hello"))); + } + + @Test + public void testIncrementBytes() { + assertTrue(checkTestIncrementBytes(10, 1)); + assertTrue(checkTestIncrementBytes(12, 123435445)); + assertTrue(checkTestIncrementBytes(124634654, 1)); + assertTrue(checkTestIncrementBytes(10005460, 5005645)); + assertTrue(checkTestIncrementBytes(1, -1)); + assertTrue(checkTestIncrementBytes(10, -1)); + assertTrue(checkTestIncrementBytes(10, -5)); + assertTrue(checkTestIncrementBytes(1005435000, -5)); + assertTrue(checkTestIncrementBytes(10, -43657655)); + assertTrue(checkTestIncrementBytes(-1, 1)); + assertTrue(checkTestIncrementBytes(-26, 5034520)); + assertTrue(checkTestIncrementBytes(-10657200, 5)); + assertTrue(checkTestIncrementBytes(-12343250, 45376475)); + assertTrue(checkTestIncrementBytes(-10, -5)); + assertTrue(checkTestIncrementBytes(-12343250, -5)); + assertTrue(checkTestIncrementBytes(-12, -34565445)); + assertTrue(checkTestIncrementBytes(-1546543452, -34565445)); + } + + private static boolean checkTestIncrementBytes(long val, long amount) { + byte[] value = Bytes.toBytes(val); + byte[] testValue = { -1, -1, -1, -1, -1, -1, -1, -1 }; + if (value[0] > 0) { + testValue = new byte[Bytes.SIZEOF_LONG]; + } + System.arraycopy(value, 0, testValue, testValue.length - value.length, value.length); + + long incrementResult = Bytes.toLong(Bytes.incrementBytes(value, amount)); + + return (Bytes.toLong(testValue) + amount) == incrementResult; + } + + @Test + public void testFixedSizeString() throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DataOutputStream dos = new DataOutputStream(baos); + Bytes.writeStringFixedSize(dos, "Hello", 5); + Bytes.writeStringFixedSize(dos, "World", 18); + Bytes.writeStringFixedSize(dos, "", 9); + + try { + // Use a long dash which is three bytes in UTF-8. If encoding happens + // using ISO-8859-1, this will fail. 
+ Bytes.writeStringFixedSize(dos, "Too\u2013Long", 9); + fail("Exception expected"); + } catch (IOException ex) { + assertEquals( + "Trying to write 10 bytes (Too\\xE2\\x80\\x93Long) into a field of " + "length 9", + ex.getMessage()); + } + + ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray()); + DataInputStream dis = new DataInputStream(bais); + assertEquals("Hello", Bytes.readStringFixedSize(dis, 5)); + assertEquals("World", Bytes.readStringFixedSize(dis, 18)); + assertEquals("", Bytes.readStringFixedSize(dis, 9)); + } + + @Test + public void testCopy() { + byte[] bytes = Bytes.toBytes("ABCDEFGHIJKLMNOPQRSTUVWXYZ"); + byte[] copy = Bytes.copy(bytes); + assertNotSame(bytes, copy); + assertTrue(Bytes.equals(bytes, copy)); + } + + @Test + public void testToBytesBinaryTrailingBackslashes() { + try { + Bytes.toBytesBinary("abc\\x00\\x01\\"); + } catch (StringIndexOutOfBoundsException ex) { + fail("Illegal string access: " + ex.getMessage()); + } + } + + @Test + public void testToStringBinary_toBytesBinary_Reversable() { + String bytes = Bytes.toStringBinary(Bytes.toBytes(2.17)); + assertEquals(2.17, Bytes.toDouble(Bytes.toBytesBinary(bytes)), 0); + } + + @Test + public void testUnsignedBinarySearch() { + byte[] bytes = new byte[] { 0, 5, 123, 127, -128, -100, -1 }; + assertEquals(1, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) 5)); + assertEquals(3, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) 127)); + assertEquals(4, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) -128)); + assertEquals(5, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) -100)); + assertEquals(6, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) -1)); + assertEquals(-1 - 1, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) 2)); + assertEquals(-6 - 1, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) -5)); + } + + @Test + public void testUnsignedIncrement() { + byte[] a = Bytes.toBytes(0); + int a2 = Bytes.toInt(Bytes.unsignedCopyAndIncrement(a), 0); + assertEquals(1, a2); + + byte[] b = Bytes.toBytes(-1); + byte[] actual = Bytes.unsignedCopyAndIncrement(b); + assertNotSame(b, actual); + byte[] expected = new byte[] { 1, 0, 0, 0, 0 }; + assertArrayEquals(expected, actual); + + byte[] c = Bytes.toBytes(255);// should wrap to the next significant byte + int c2 = Bytes.toInt(Bytes.unsignedCopyAndIncrement(c), 0); + assertEquals(256, c2); + } + + @Test + public void testIndexOf() { + byte[] array = Bytes.toBytes("hello"); + assertEquals(1, Bytes.indexOf(array, (byte) 'e')); + assertEquals(4, Bytes.indexOf(array, (byte) 'o')); + assertEquals(-1, Bytes.indexOf(array, (byte) 'a')); + assertEquals(0, Bytes.indexOf(array, Bytes.toBytes("hel"))); + assertEquals(2, Bytes.indexOf(array, Bytes.toBytes("ll"))); + assertEquals(-1, Bytes.indexOf(array, Bytes.toBytes("hll"))); + } + + @Test + public void testContains() { + byte[] array = Bytes.toBytes("hello world"); + assertTrue(Bytes.contains(array, (byte) 'e')); + assertTrue(Bytes.contains(array, (byte) 'd')); + assertFalse(Bytes.contains(array, (byte) 'a')); + assertTrue(Bytes.contains(array, Bytes.toBytes("world"))); + assertTrue(Bytes.contains(array, Bytes.toBytes("ello"))); + assertFalse(Bytes.contains(array, Bytes.toBytes("owo"))); + } + + @Test + public void testZero() { + byte[] array = Bytes.toBytes("hello"); + Bytes.zero(array); + for (byte b : array) { + assertEquals(0, b); + } + array = Bytes.toBytes("hello world"); + Bytes.zero(array, 2, 7); + assertFalse(array[0] == 0); + 
assertFalse(array[1] == 0); + for (int i = 2; i < 9; i++) { + assertEquals(0, array[i]); + } + for (int i = 9; i < array.length; i++) { + assertFalse(array[i] == 0); + } + } + + @Test + public void testPutBuffer() { + byte[] b = new byte[100]; + for (byte i = 0; i < 100; i++) { + Bytes.putByteBuffer(b, i, ByteBuffer.wrap(new byte[] { i })); + } + for (byte i = 0; i < 100; i++) { + assertEquals(i, b[i]); + } + } + + @Test + public void testToFromHex() { + List testStrings = new ArrayList<>(8); + testStrings.addAll(Arrays.asList("", "00", "A0", "ff", "FFffFFFFFFFFFF", "12", + "0123456789abcdef", "283462839463924623984692834692346ABCDFEDDCA0")); + for (String testString : testStrings) { + byte[] byteData = Bytes.fromHex(testString); + assertEquals(testString.length() / 2, byteData.length); + String result = Bytes.toHex(byteData); + assertTrue(testString.equalsIgnoreCase(result)); + } + + List testByteData = new ArrayList<>(5); + testByteData.addAll(Arrays.asList(new byte[0], new byte[1], new byte[10], + new byte[] { 1, 2, 3, 4, 5 }, new byte[] { (byte) 0xFF })); + Random rand = ThreadLocalRandom.current(); + for (int i = 0; i < 20; i++) { + byte[] bytes = new byte[rand.nextInt(100)]; + Bytes.random(bytes); + testByteData.add(bytes); + } + + for (byte[] testData : testByteData) { + String hexString = Bytes.toHex(testData); + assertEquals(testData.length * 2, hexString.length()); + byte[] result = Bytes.fromHex(hexString); + assertArrayEquals(testData, result); + } + } + + @Test + public void testFindCommonPrefix() throws Exception { + // tests for common prefixes less than 8 bytes in length (i.e. using non-vectorized path) + byte[] hello = Bytes.toBytes("hello"); + byte[] helloWorld = Bytes.toBytes("helloworld"); + + assertEquals(5, + Bytes.findCommonPrefix(hello, helloWorld, hello.length, helloWorld.length, 0, 0)); + assertEquals(5, Bytes.findCommonPrefix(hello, hello, hello.length, hello.length, 0, 0)); + assertEquals(3, Bytes.findCommonPrefix(hello, hello, hello.length - 2, hello.length - 2, 2, 2)); + assertEquals(0, Bytes.findCommonPrefix(hello, hello, 0, 0, 0, 0)); + + // tests for common prefixes greater than 8 bytes in length which may use the vectorized path + byte[] hellohello = Bytes.toBytes("hellohello"); + byte[] hellohellohi = Bytes.toBytes("hellohellohi"); + + assertEquals(10, Bytes.findCommonPrefix(hellohello, hellohellohi, hellohello.length, + hellohellohi.length, 0, 0)); + assertEquals(10, Bytes.findCommonPrefix(hellohellohi, hellohello, hellohellohi.length, + hellohello.length, 0, 0)); + assertEquals(10, + Bytes.findCommonPrefix(hellohello, hellohello, hellohello.length, hellohello.length, 0, 0)); + + hellohello[2] = 0; + assertEquals(2, Bytes.findCommonPrefix(hellohello, hellohellohi, hellohello.length, + hellohellohi.length, 0, 0)); + } +} diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtils.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtils.java index e07e75bffdb2..b451d71ed879 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtils.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtils.java @@ -17,654 +17,12 @@ */ package org.apache.hadoop.hbase.util; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import 
java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.lang.reflect.Field; -import java.lang.reflect.Modifier; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Set; -import java.util.SortedSet; -import java.util.TreeSet; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HBaseCommonTestingUtil; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.nio.ByteBuff; -import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; -import org.apache.hadoop.hbase.unsafe.HBasePlatformDependent; -import org.apache.hadoop.io.WritableUtils; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@Category({ MiscTests.class, MediumTests.class }) -@RunWith(Parameterized.class) -public class TestByteBufferUtils { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestByteBufferUtils.class); - - private static final String UNSAFE_AVAIL_NAME = "UNSAFE_AVAIL"; - private static final String UNSAFE_UNALIGNED_NAME = "UNSAFE_UNALIGNED"; - private byte[] array; - - @AfterClass - public static void afterClass() throws Exception { - detectAvailabilityOfUnsafe(); - } - - @Parameterized.Parameters - public static Collection parameters() { - return HBaseCommonTestingUtil.BOOLEAN_PARAMETERIZED; - } - - private static void setUnsafe(String fieldName, boolean value) throws Exception { - Field field = ByteBufferUtils.class.getDeclaredField(fieldName); - field.setAccessible(true); - Field modifiersField = ReflectionUtils.getModifiersField(); - modifiersField.setAccessible(true); - int oldModifiers = field.getModifiers(); - modifiersField.setInt(field, oldModifiers & ~Modifier.FINAL); - try { - field.set(null, value); - } finally { - modifiersField.setInt(field, oldModifiers); - } - } - - static void disableUnsafe() throws Exception { - if (ByteBufferUtils.UNSAFE_AVAIL) { - setUnsafe(UNSAFE_AVAIL_NAME, false); - } - if (ByteBufferUtils.UNSAFE_UNALIGNED) { - setUnsafe(UNSAFE_UNALIGNED_NAME, false); - } - assertFalse(ByteBufferUtils.UNSAFE_AVAIL); - assertFalse(ByteBufferUtils.UNSAFE_UNALIGNED); - } - - static void detectAvailabilityOfUnsafe() throws Exception { - if (ByteBufferUtils.UNSAFE_AVAIL != HBasePlatformDependent.isUnsafeAvailable()) { - setUnsafe(UNSAFE_AVAIL_NAME, HBasePlatformDependent.isUnsafeAvailable()); - } - if (ByteBufferUtils.UNSAFE_UNALIGNED != HBasePlatformDependent.unaligned()) { - setUnsafe(UNSAFE_UNALIGNED_NAME, HBasePlatformDependent.unaligned()); - } - assertEquals(ByteBufferUtils.UNSAFE_AVAIL, HBasePlatformDependent.isUnsafeAvailable()); - assertEquals(ByteBufferUtils.UNSAFE_UNALIGNED, HBasePlatformDependent.unaligned()); - } - - public TestByteBufferUtils(boolean useUnsafeIfPossible) throws Exception { - if (useUnsafeIfPossible) { - detectAvailabilityOfUnsafe(); - } else { - disableUnsafe(); - } - } - - /** - * Create an array with sample data. 
- */ - @Before - public void setUp() { - array = new byte[8]; - for (int i = 0; i < array.length; ++i) { - array[i] = (byte) ('a' + i); - } - } - - private static final int MAX_VLONG_LENGTH = 9; - private static final Collection testNumbers; - - private static void addNumber(Set a, long l) { - if (l != Long.MIN_VALUE) { - a.add(l - 1); - } - a.add(l); - if (l != Long.MAX_VALUE) { - a.add(l + 1); - } - for (long divisor = 3; divisor <= 10; ++divisor) { - for (long delta = -1; delta <= 1; ++delta) { - a.add(l / divisor + delta); - } - } - } - - static { - SortedSet a = new TreeSet<>(); - for (int i = 0; i <= 63; ++i) { - long v = -1L << i; - assertTrue(v < 0); - addNumber(a, v); - v = (1L << i) - 1; - assertTrue(v >= 0); - addNumber(a, v); - } - - testNumbers = Collections.unmodifiableSet(a); - System.err.println("Testing variable-length long serialization using: " + testNumbers - + " (count: " + testNumbers.size() + ")"); - assertEquals(1753, testNumbers.size()); - assertEquals(Long.MIN_VALUE, a.first().longValue()); - assertEquals(Long.MAX_VALUE, a.last().longValue()); - } - - @Test - public void testReadWriteVLong() { - for (long l : testNumbers) { - ByteBuffer b = ByteBuffer.allocate(MAX_VLONG_LENGTH); - ByteBufferUtils.writeVLong(b, l); - b.flip(); - assertEquals(l, ByteBufferUtils.readVLong(b)); - b.flip(); - assertEquals(l, ByteBufferUtils.readVLong(ByteBuff.wrap(b))); - } - } - - @Test - public void testReadWriteConsecutiveVLong() { - for (long l : testNumbers) { - ByteBuffer b = ByteBuffer.allocate(2 * MAX_VLONG_LENGTH); - ByteBufferUtils.writeVLong(b, l); - ByteBufferUtils.writeVLong(b, l - 4); - b.flip(); - assertEquals(l, ByteBufferUtils.readVLong(b)); - assertEquals(l - 4, ByteBufferUtils.readVLong(b)); - b.flip(); - assertEquals(l, ByteBufferUtils.readVLong(ByteBuff.wrap(b))); - assertEquals(l - 4, ByteBufferUtils.readVLong(ByteBuff.wrap(b))); - } - } - - @Test - public void testConsistencyWithHadoopVLong() throws IOException { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - DataOutputStream dos = new DataOutputStream(baos); - for (long l : testNumbers) { - baos.reset(); - ByteBuffer b = ByteBuffer.allocate(MAX_VLONG_LENGTH); - ByteBufferUtils.writeVLong(b, l); - String bufStr = Bytes.toStringBinary(b.array(), b.arrayOffset(), b.position()); - WritableUtils.writeVLong(dos, l); - String baosStr = Bytes.toStringBinary(baos.toByteArray()); - assertEquals(baosStr, bufStr); - } - } - - /** - * Test copying to stream from buffer. - */ - @Test - public void testMoveBufferToStream() throws IOException { - final int arrayOffset = 7; - final int initialPosition = 10; - final int endPadding = 5; - byte[] arrayWrapper = new byte[arrayOffset + initialPosition + array.length + endPadding]; - System.arraycopy(array, 0, arrayWrapper, arrayOffset + initialPosition, array.length); - ByteBuffer buffer = - ByteBuffer.wrap(arrayWrapper, arrayOffset, initialPosition + array.length).slice(); - assertEquals(initialPosition + array.length, buffer.limit()); - assertEquals(0, buffer.position()); - buffer.position(initialPosition); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - ByteBufferUtils.moveBufferToStream(bos, buffer, array.length); - assertArrayEquals(array, bos.toByteArray()); - assertEquals(initialPosition + array.length, buffer.position()); - } - - /** - * Test copying to stream from buffer with offset. - * @throws IOException On test failure. 
- */ - @Test - public void testCopyToStreamWithOffset() throws IOException { - ByteBuffer buffer = ByteBuffer.wrap(array); - - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - - ByteBufferUtils.copyBufferToStream(bos, buffer, array.length / 2, array.length / 2); - - byte[] returnedArray = bos.toByteArray(); - for (int i = 0; i < array.length / 2; ++i) { - int pos = array.length / 2 + i; - assertEquals(returnedArray[i], array[pos]); - } - } - - /** - * Test copying data from stream. - * @throws IOException On test failure. - */ - @Test - public void testCopyFromStream() throws IOException { - ByteBuffer buffer = ByteBuffer.allocate(array.length); - ByteArrayInputStream bis = new ByteArrayInputStream(array); - DataInputStream dis = new DataInputStream(bis); - - ByteBufferUtils.copyFromStreamToBuffer(buffer, dis, array.length / 2); - ByteBufferUtils.copyFromStreamToBuffer(buffer, dis, array.length - array.length / 2); - for (int i = 0; i < array.length; ++i) { - assertEquals(array[i], buffer.get(i)); - } - } - - /** - * Test copying from buffer. - */ - @Test - public void testCopyFromBuffer() { - ByteBuffer srcBuffer = ByteBuffer.allocate(array.length); - ByteBuffer dstBuffer = ByteBuffer.allocate(array.length); - srcBuffer.put(array); - - ByteBufferUtils.copyFromBufferToBuffer(srcBuffer, dstBuffer, array.length / 2, - array.length / 4); - for (int i = 0; i < array.length / 4; ++i) { - assertEquals(srcBuffer.get(i + array.length / 2), dstBuffer.get(i)); - } - } - - /** - * Test 7-bit encoding of integers. - * @throws IOException On test failure. - */ - @Test - public void testCompressedInt() throws IOException { - testCompressedInt(0); - testCompressedInt(Integer.MAX_VALUE); - testCompressedInt(Integer.MIN_VALUE); - - for (int i = 0; i < 3; i++) { - testCompressedInt((128 << i) - 1); - } - - for (int i = 0; i < 3; i++) { - testCompressedInt((128 << i)); - } - } - - /** - * Test how much bytes we need to store integer. - */ - @Test - public void testIntFitsIn() { - assertEquals(1, ByteBufferUtils.intFitsIn(0)); - assertEquals(1, ByteBufferUtils.intFitsIn(1)); - assertEquals(2, ByteBufferUtils.intFitsIn(1 << 8)); - assertEquals(3, ByteBufferUtils.intFitsIn(1 << 16)); - assertEquals(4, ByteBufferUtils.intFitsIn(-1)); - assertEquals(4, ByteBufferUtils.intFitsIn(Integer.MAX_VALUE)); - assertEquals(4, ByteBufferUtils.intFitsIn(Integer.MIN_VALUE)); - } - - /** - * Test how much bytes we need to store long. - */ - @Test - public void testLongFitsIn() { - assertEquals(1, ByteBufferUtils.longFitsIn(0)); - assertEquals(1, ByteBufferUtils.longFitsIn(1)); - assertEquals(3, ByteBufferUtils.longFitsIn(1L << 16)); - assertEquals(5, ByteBufferUtils.longFitsIn(1L << 32)); - assertEquals(8, ByteBufferUtils.longFitsIn(-1)); - assertEquals(8, ByteBufferUtils.longFitsIn(Long.MIN_VALUE)); - assertEquals(8, ByteBufferUtils.longFitsIn(Long.MAX_VALUE)); - } - - /** - * Test if we are comparing equal bytes. 
- */ - @Test - public void testArePartEqual() { - byte[] array = new byte[] { 1, 2, 3, 4, 5, 1, 2, 3, 4 }; - ByteBuffer buffer = ByteBuffer.wrap(array); - assertTrue(ByteBufferUtils.arePartsEqual(buffer, 0, 4, 5, 4)); - assertTrue(ByteBufferUtils.arePartsEqual(buffer, 1, 2, 6, 2)); - assertFalse(ByteBufferUtils.arePartsEqual(buffer, 1, 2, 6, 3)); - assertFalse(ByteBufferUtils.arePartsEqual(buffer, 1, 3, 6, 2)); - assertFalse(ByteBufferUtils.arePartsEqual(buffer, 0, 3, 6, 3)); - } - - /** - * Test serializing int to bytes - */ - @Test - public void testPutInt() { - testPutInt(0); - testPutInt(Integer.MAX_VALUE); - - for (int i = 0; i < 3; i++) { - testPutInt((128 << i) - 1); - } - - for (int i = 0; i < 3; i++) { - testPutInt((128 << i)); - } - } - - // Utility methods invoked from test methods - - private void testCompressedInt(int value) throws IOException { - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - ByteBufferUtils.putCompressedInt(bos, value); - ByteArrayInputStream bis = new ByteArrayInputStream(bos.toByteArray()); - int parsedValue = ByteBufferUtils.readCompressedInt(bis); - assertEquals(value, parsedValue); - } - - private void testPutInt(int value) { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - try { - ByteBufferUtils.putInt(baos, value); - } catch (IOException e) { - throw new RuntimeException("Bug in putIn()", e); - } - - ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray()); - DataInputStream dis = new DataInputStream(bais); - try { - assertEquals(dis.readInt(), value); - } catch (IOException e) { - throw new RuntimeException("Bug in test!", e); - } - } - - @Test - public void testToBytes() { - ByteBuffer buffer = ByteBuffer.allocate(5); - buffer.put(new byte[] { 0, 1, 2, 3, 4 }); - assertEquals(5, buffer.position()); - assertEquals(5, buffer.limit()); - byte[] copy = ByteBufferUtils.toBytes(buffer, 2); - assertArrayEquals(new byte[] { 2, 3, 4 }, copy); - assertEquals(5, buffer.position()); - assertEquals(5, buffer.limit()); - } - - @Test - public void testToPrimitiveTypes() { - ByteBuffer buffer = ByteBuffer.allocate(15); - long l = 988L; - int i = 135; - short s = 7; - buffer.putLong(l); - buffer.putShort(s); - buffer.putInt(i); - assertEquals(l, ByteBufferUtils.toLong(buffer, 0)); - assertEquals(s, ByteBufferUtils.toShort(buffer, 8)); - assertEquals(i, ByteBufferUtils.toInt(buffer, 10)); - } - - @Test - public void testCopyFromArrayToBuffer() { - byte[] b = new byte[15]; - b[0] = -1; - long l = 988L; - int i = 135; - short s = 7; - Bytes.putLong(b, 1, l); - Bytes.putShort(b, 9, s); - Bytes.putInt(b, 11, i); - ByteBuffer buffer = ByteBuffer.allocate(14); - ByteBufferUtils.copyFromArrayToBuffer(buffer, b, 1, 14); - buffer.rewind(); - assertEquals(l, buffer.getLong()); - assertEquals(s, buffer.getShort()); - assertEquals(i, buffer.getInt()); - } - - private void testCopyFromSrcToDestWithThreads(Object input, Object output, List lengthes, - List offsets) throws InterruptedException { - assertTrue((input instanceof ByteBuffer) || (input instanceof byte[])); - assertTrue((output instanceof ByteBuffer) || (output instanceof byte[])); - assertEquals(lengthes.size(), offsets.size()); - - final int threads = lengthes.size(); - CountDownLatch latch = new CountDownLatch(1); - List exes = new ArrayList<>(threads); - int oldInputPos = (input instanceof ByteBuffer) ? ((ByteBuffer) input).position() : 0; - int oldOutputPos = (output instanceof ByteBuffer) ? 
((ByteBuffer) output).position() : 0; - for (int i = 0; i != threads; ++i) { - int offset = offsets.get(i); - int length = lengthes.get(i); - exes.add(() -> { - try { - latch.await(); - if (input instanceof ByteBuffer && output instanceof byte[]) { - ByteBufferUtils.copyFromBufferToArray((byte[]) output, (ByteBuffer) input, offset, - offset, length); - } - if (input instanceof byte[] && output instanceof ByteBuffer) { - ByteBufferUtils.copyFromArrayToBuffer((ByteBuffer) output, offset, (byte[]) input, - offset, length); - } - if (input instanceof ByteBuffer && output instanceof ByteBuffer) { - ByteBufferUtils.copyFromBufferToBuffer((ByteBuffer) input, (ByteBuffer) output, offset, - offset, length); - } - } catch (InterruptedException ex) { - throw new RuntimeException(ex); - } - }); - } - ExecutorService service = Executors.newFixedThreadPool(threads); - exes.forEach(service::execute); - latch.countDown(); - service.shutdown(); - assertTrue(service.awaitTermination(5, TimeUnit.SECONDS)); - if (input instanceof ByteBuffer) { - assertEquals(oldInputPos, ((ByteBuffer) input).position()); - } - if (output instanceof ByteBuffer) { - assertEquals(oldOutputPos, ((ByteBuffer) output).position()); - } - String inputString = (input instanceof ByteBuffer) - ? Bytes.toString(Bytes.toBytes((ByteBuffer) input)) - : Bytes.toString((byte[]) input); - String outputString = (output instanceof ByteBuffer) - ? Bytes.toString(Bytes.toBytes((ByteBuffer) output)) - : Bytes.toString((byte[]) output); - assertEquals(inputString, outputString); - } - - @Test - public void testCopyFromSrcToDestWithThreads() throws InterruptedException { - List words = - Arrays.asList(Bytes.toBytes("with"), Bytes.toBytes("great"), Bytes.toBytes("power"), - Bytes.toBytes("comes"), Bytes.toBytes("great"), Bytes.toBytes("responsibility")); - List lengthes = words.stream().map(v -> v.length).collect(Collectors.toList()); - List offsets = new ArrayList<>(words.size()); - for (int i = 0; i != words.size(); ++i) { - offsets.add(words.subList(0, i).stream().mapToInt(v -> v.length).sum()); - } - - int totalSize = words.stream().mapToInt(v -> v.length).sum(); - byte[] fullContent = new byte[totalSize]; - int offset = 0; - for (byte[] w : words) { - offset = Bytes.putBytes(fullContent, offset, w, 0, w.length); - } - - // test copyFromBufferToArray - for (ByteBuffer input : Arrays.asList(ByteBuffer.allocateDirect(totalSize), - ByteBuffer.allocate(totalSize))) { - words.forEach(input::put); - byte[] output = new byte[totalSize]; - testCopyFromSrcToDestWithThreads(input, output, lengthes, offsets); - } - - // test copyFromArrayToBuffer - for (ByteBuffer output : Arrays.asList(ByteBuffer.allocateDirect(totalSize), - ByteBuffer.allocate(totalSize))) { - byte[] input = fullContent; - testCopyFromSrcToDestWithThreads(input, output, lengthes, offsets); - } - - // test copyFromBufferToBuffer - for (ByteBuffer input : Arrays.asList(ByteBuffer.allocateDirect(totalSize), - ByteBuffer.allocate(totalSize))) { - words.forEach(input::put); - for (ByteBuffer output : Arrays.asList(ByteBuffer.allocateDirect(totalSize), - ByteBuffer.allocate(totalSize))) { - testCopyFromSrcToDestWithThreads(input, output, lengthes, offsets); - } - } - } - - @Test - public void testCopyFromBufferToArray() { - ByteBuffer buffer = ByteBuffer.allocate(15); - buffer.put((byte) -1); - long l = 988L; - int i = 135; - short s = 7; - buffer.putShort(s); - buffer.putInt(i); - buffer.putLong(l); - byte[] b = new byte[15]; - ByteBufferUtils.copyFromBufferToArray(b, buffer, 1, 1, 14); - 
assertEquals(s, Bytes.toShort(b, 1)); - assertEquals(i, Bytes.toInt(b, 3)); - assertEquals(l, Bytes.toLong(b, 7)); - } - - @Test - public void testRelativeCopyFromBuffertoBuffer() { - ByteBuffer bb1 = ByteBuffer.allocate(135); - ByteBuffer bb2 = ByteBuffer.allocate(135); - fillBB(bb1, (byte) 5); - ByteBufferUtils.copyFromBufferToBuffer(bb1, bb2); - assertTrue(bb1.position() == bb2.position()); - assertTrue(bb1.limit() == bb2.limit()); - bb1 = ByteBuffer.allocateDirect(135); - bb2 = ByteBuffer.allocateDirect(135); - fillBB(bb1, (byte) 5); - ByteBufferUtils.copyFromBufferToBuffer(bb1, bb2); - assertTrue(bb1.position() == bb2.position()); - assertTrue(bb1.limit() == bb2.limit()); - } - - @Test - public void testCompareTo() { - ByteBuffer bb1 = ByteBuffer.allocate(135); - ByteBuffer bb2 = ByteBuffer.allocate(135); - byte[] b = new byte[71]; - fillBB(bb1, (byte) 5); - fillBB(bb2, (byte) 5); - fillArray(b, (byte) 5); - assertEquals(0, ByteBufferUtils.compareTo(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining())); - assertTrue(ByteBufferUtils.compareTo(bb1, 0, bb1.remaining(), b, 0, b.length) > 0); - bb2.put(134, (byte) 6); - assertTrue(ByteBufferUtils.compareTo(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining()) < 0); - bb2.put(6, (byte) 4); - assertTrue(ByteBufferUtils.compareTo(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining()) > 0); - // Assert reverse comparing BB and bytearray works. - ByteBuffer bb3 = ByteBuffer.allocate(135); - fillBB(bb3, (byte) 0); - byte[] b3 = new byte[135]; - fillArray(b3, (byte) 1); - int result = ByteBufferUtils.compareTo(b3, 0, b3.length, bb3, 0, bb3.remaining()); - assertTrue(result > 0); - result = ByteBufferUtils.compareTo(bb3, 0, bb3.remaining(), b3, 0, b3.length); - assertTrue(result < 0); - byte[] b4 = Bytes.toBytes("123"); - ByteBuffer bb4 = ByteBuffer.allocate(10 + b4.length); - for (int i = 10; i < bb4.capacity(); ++i) { - bb4.put(i, b4[i - 10]); - } - result = ByteBufferUtils.compareTo(b4, 0, b4.length, bb4, 10, b4.length); - assertEquals(0, result); - } - - @Test - public void testEquals() { - byte[] a = Bytes.toBytes("http://A"); - ByteBuffer bb = ByteBuffer.wrap(a); - - assertTrue(ByteBufferUtils.equals(HConstants.EMPTY_BYTE_BUFFER, 0, 0, - HConstants.EMPTY_BYTE_BUFFER, 0, 0)); - - assertFalse(ByteBufferUtils.equals(HConstants.EMPTY_BYTE_BUFFER, 0, 0, bb, 0, a.length)); - - assertFalse(ByteBufferUtils.equals(bb, 0, 0, HConstants.EMPTY_BYTE_BUFFER, 0, a.length)); - - assertTrue(ByteBufferUtils.equals(bb, 0, a.length, bb, 0, a.length)); - - assertTrue(ByteBufferUtils.equals(HConstants.EMPTY_BYTE_BUFFER, 0, 0, - HConstants.EMPTY_BYTE_ARRAY, 0, 0)); - - assertFalse(ByteBufferUtils.equals(HConstants.EMPTY_BYTE_BUFFER, 0, 0, a, 0, a.length)); - - assertFalse(ByteBufferUtils.equals(bb, 0, a.length, HConstants.EMPTY_BYTE_ARRAY, 0, 0)); - - assertTrue(ByteBufferUtils.equals(bb, 0, a.length, a, 0, a.length)); - } - - @Test - public void testFindCommonPrefix() { - ByteBuffer bb1 = ByteBuffer.allocate(135); - ByteBuffer bb2 = ByteBuffer.allocate(135); - ByteBuffer bb3 = ByteBuffer.allocateDirect(135); - byte[] b = new byte[71]; - - fillBB(bb1, (byte) 5); - fillBB(bb2, (byte) 5); - fillBB(bb3, (byte) 5); - fillArray(b, (byte) 5); - - assertEquals(135, - ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining())); - assertEquals(71, ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), b, 0, b.length)); - assertEquals(135, - ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), bb3, 0, bb3.remaining())); - assertEquals(71, 
ByteBufferUtils.findCommonPrefix(bb3, 0, bb3.remaining(), b, 0, b.length)); - - b[13] = 9; - assertEquals(13, ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), b, 0, b.length)); - - bb2.put(134, (byte) 6); - assertEquals(134, - ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining())); - - bb2.put(6, (byte) 4); - assertEquals(6, - ByteBufferUtils.findCommonPrefix(bb1, 0, bb1.remaining(), bb2, 0, bb2.remaining())); - } - - private static void fillBB(ByteBuffer bb, byte b) { - for (int i = bb.position(); i < bb.limit(); i++) { - bb.put(i, b); - } - } +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.jupiter.api.Tag; - private static void fillArray(byte[] bb, byte b) { - for (int i = 0; i < bb.length; i++) { - bb[i] = b; - } - } +@Tag(MiscTests.TAG) +@Tag(SmallTests.TAG) +public class TestByteBufferUtils extends ByteBufferUtilsTestBase { } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtilsWoUnsafe.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtilsWoUnsafe.java new file mode 100644 index 000000000000..c02db2142c1c --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtilsWoUnsafe.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.util; + +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.mockito.Mockito.mockStatic; + +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.unsafe.HBasePlatformDependent; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.mockito.MockedStatic; + +@Tag(MiscTests.TAG) +@Tag(SmallTests.TAG) +public class TestByteBufferUtilsWoUnsafe extends ByteBufferUtilsTestBase { + + @BeforeAll + public static void disableUnsafe() { + try (MockedStatic mocked = mockStatic(HBasePlatformDependent.class)) { + mocked.when(HBasePlatformDependent::isUnsafeAvailable).thenReturn(false); + mocked.when(HBasePlatformDependent::unaligned).thenReturn(false); + assertFalse(ByteBufferUtils.UNSAFE_AVAIL); + assertFalse(ByteBufferUtils.UNSAFE_UNALIGNED); + } + } +} diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java index b74348959982..0122e91d7ea9 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java @@ -17,615 +17,12 @@ */ package org.apache.hadoop.hbase.util; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNotSame; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.lang.reflect.Field; -import java.lang.reflect.Modifier; -import java.math.BigDecimal; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Random; -import java.util.concurrent.ThreadLocalRandom; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; -import org.apache.hadoop.hbase.unsafe.HBasePlatformDependent; -import org.apache.hadoop.io.WritableUtils; -import org.junit.Assert; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category({ MiscTests.class, MediumTests.class }) -public class TestBytes { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestBytes.class); - - private static void setUnsafe(boolean value) throws Exception { - Field field = Bytes.class.getDeclaredField("UNSAFE_UNALIGNED"); - field.setAccessible(true); - - Field modifiersField = ReflectionUtils.getModifiersField(); - modifiersField.setAccessible(true); - int oldModifiers = field.getModifiers(); - modifiersField.setInt(field, oldModifiers & ~Modifier.FINAL); - try { - field.set(null, value); - } finally { - modifiersField.setInt(field, oldModifiers); - } - assertEquals(Bytes.UNSAFE_UNALIGNED, value); - } - - @Test - public void testShort() throws Exception { - testShort(false); - } - - @Test - public void testShortUnsafe() throws Exception { - testShort(true); - } - - private static void testShort(boolean unsafe) throws Exception { - setUnsafe(unsafe); - try { - for (short n : 
Arrays.asList(Short.MIN_VALUE, (short) -100, (short) -1, (short) 0, (short) 1, - (short) 300, Short.MAX_VALUE)) { - byte[] bytes = Bytes.toBytes(n); - assertEquals(Bytes.toShort(bytes, 0, bytes.length), n); - } - } finally { - setUnsafe(HBasePlatformDependent.unaligned()); - } - } - - @Test - public void testNullHashCode() { - byte[] b = null; - Exception ee = null; - try { - Bytes.hashCode(b); - } catch (Exception e) { - ee = e; - } - assertNotNull(ee); - } - - @Test - public void testAdd() { - byte[] a = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - byte[] b = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; - byte[] c = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 }; - byte[] result1 = Bytes.add(a, b, c); - byte[] result2 = Bytes.add(new byte[][] { a, b, c }); - assertEquals(0, Bytes.compareTo(result1, result2)); - } - - @Test - public void testSplit() { - byte[] lowest = Bytes.toBytes("AAA"); - byte[] middle = Bytes.toBytes("CCC"); - byte[] highest = Bytes.toBytes("EEE"); - byte[][] parts = Bytes.split(lowest, highest, 1); - for (byte[] bytes : parts) { - System.out.println(Bytes.toString(bytes)); - } - assertEquals(3, parts.length); - assertTrue(Bytes.equals(parts[1], middle)); - // Now divide into three parts. Change highest so split is even. - highest = Bytes.toBytes("DDD"); - parts = Bytes.split(lowest, highest, 2); - for (byte[] part : parts) { - System.out.println(Bytes.toString(part)); - } - assertEquals(4, parts.length); - // Assert that 3rd part is 'CCC'. - assertTrue(Bytes.equals(parts[2], middle)); - } - - @Test - public void testSplit2() { - // More split tests. - byte[] lowest = Bytes.toBytes("http://A"); - byte[] highest = Bytes.toBytes("http://z"); - byte[] middle = Bytes.toBytes("http://]"); - byte[][] parts = Bytes.split(lowest, highest, 1); - for (byte[] part : parts) { - System.out.println(Bytes.toString(part)); - } - assertEquals(3, parts.length); - assertTrue(Bytes.equals(parts[1], middle)); - } - - @Test - public void testSplit3() { - // Test invalid split cases - byte[] low = { 1, 1, 1 }; - byte[] high = { 1, 1, 3 }; - - // If swapped, should throw IAE - try { - Bytes.split(high, low, 1); - fail("Should not be able to split if low > high"); - } catch (IllegalArgumentException iae) { - // Correct - } - - // Single split should work - byte[][] parts = Bytes.split(low, high, 1); - for (int i = 0; i < parts.length; i++) { - System.out.println("" + i + " -> " + Bytes.toStringBinary(parts[i])); - } - assertEquals("Returned split should have 3 parts but has " + parts.length, 3, parts.length); - - // If split more than once, use additional byte to split - parts = Bytes.split(low, high, 2); - assertNotNull("Split with an additional byte", parts); - assertEquals(parts.length, low.length + 1); - - // Split 0 times should throw IAE - try { - Bytes.split(low, high, 0); - fail("Should not be able to split 0 times"); - } catch (IllegalArgumentException iae) { - // Correct - } - } - - @Test - public void testToInt() { - int[] ints = { -1, 123, Integer.MIN_VALUE, Integer.MAX_VALUE }; - for (int anInt : ints) { - byte[] b = Bytes.toBytes(anInt); - assertEquals(anInt, Bytes.toInt(b)); - byte[] b2 = bytesWithOffset(b); - assertEquals(anInt, Bytes.toInt(b2, 1)); - assertEquals(anInt, Bytes.toInt(b2, 1, Bytes.SIZEOF_INT)); - } - } - - @Test - public void testToLong() { - long[] longs = { -1L, 123L, Long.MIN_VALUE, Long.MAX_VALUE }; - for (long aLong : longs) { - byte[] b = Bytes.toBytes(aLong); - assertEquals(aLong, Bytes.toLong(b)); - byte[] b2 = bytesWithOffset(b); - assertEquals(aLong, Bytes.toLong(b2, 1)); - 
assertEquals(aLong, Bytes.toLong(b2, 1, Bytes.SIZEOF_LONG)); - } - } - - @Test - public void testToFloat() { - float[] floats = { -1f, 123.123f, Float.MAX_VALUE }; - for (float aFloat : floats) { - byte[] b = Bytes.toBytes(aFloat); - assertEquals(aFloat, Bytes.toFloat(b), 0.0f); - byte[] b2 = bytesWithOffset(b); - assertEquals(aFloat, Bytes.toFloat(b2, 1), 0.0f); - } - } - - @Test - public void testToDouble() { - double[] doubles = { Double.MIN_VALUE, Double.MAX_VALUE }; - for (double aDouble : doubles) { - byte[] b = Bytes.toBytes(aDouble); - assertEquals(aDouble, Bytes.toDouble(b), 0.0); - byte[] b2 = bytesWithOffset(b); - assertEquals(aDouble, Bytes.toDouble(b2, 1), 0.0); - } - } - - @Test - public void testToBigDecimal() { - BigDecimal[] decimals = - { new BigDecimal("-1"), new BigDecimal("123.123"), new BigDecimal("123123123123") }; - for (BigDecimal decimal : decimals) { - byte[] b = Bytes.toBytes(decimal); - assertEquals(decimal, Bytes.toBigDecimal(b)); - byte[] b2 = bytesWithOffset(b); - assertEquals(decimal, Bytes.toBigDecimal(b2, 1, b.length)); - } - } - - private byte[] bytesWithOffset(byte[] src) { - // add one byte in front to test offset - byte[] result = new byte[src.length + 1]; - result[0] = (byte) 0xAA; - System.arraycopy(src, 0, result, 1, src.length); - return result; - } - - @Test - public void testToBytesForByteBuffer() { - byte[] array = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; - ByteBuffer target = ByteBuffer.wrap(array); - target.position(2); - target.limit(7); - - byte[] actual = Bytes.toBytes(target); - byte[] expected = { 0, 1, 2, 3, 4, 5, 6 }; - assertArrayEquals(expected, actual); - assertEquals(2, target.position()); - assertEquals(7, target.limit()); - - ByteBuffer target2 = target.slice(); - assertEquals(0, target2.position()); - assertEquals(5, target2.limit()); - - byte[] actual2 = Bytes.toBytes(target2); - byte[] expected2 = { 2, 3, 4, 5, 6 }; - assertArrayEquals(expected2, actual2); - assertEquals(0, target2.position()); - assertEquals(5, target2.limit()); - } - - @Test - public void testGetBytesForByteBuffer() { - byte[] array = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; - ByteBuffer target = ByteBuffer.wrap(array); - target.position(2); - target.limit(7); - - byte[] actual = Bytes.getBytes(target); - byte[] expected = { 2, 3, 4, 5, 6 }; - assertArrayEquals(expected, actual); - assertEquals(2, target.position()); - assertEquals(7, target.limit()); - } - - @Test - public void testReadAsVLong() throws Exception { - long[] longs = { -1L, 123L, Long.MIN_VALUE, Long.MAX_VALUE }; - for (long aLong : longs) { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - DataOutputStream output = new DataOutputStream(baos); - WritableUtils.writeVLong(output, aLong); - byte[] long_bytes_no_offset = baos.toByteArray(); - assertEquals(aLong, Bytes.readAsVLong(long_bytes_no_offset, 0)); - byte[] long_bytes_with_offset = bytesWithOffset(long_bytes_no_offset); - assertEquals(aLong, Bytes.readAsVLong(long_bytes_with_offset, 1)); - } - } - - @Test - public void testToStringBinaryForBytes() { - byte[] array = { '0', '9', 'a', 'z', 'A', 'Z', '@', 1 }; - String actual = Bytes.toStringBinary(array); - String expected = "09azAZ@\\x01"; - assertEquals(expected, actual); - - String actual2 = Bytes.toStringBinary(array, 2, 3); - String expected2 = "azA"; - assertEquals(expected2, actual2); - } - - @Test - public void testToStringBinaryForArrayBasedByteBuffer() { - byte[] array = { '0', '9', 'a', 'z', 'A', 'Z', '@', 1 }; - ByteBuffer target = ByteBuffer.wrap(array); - String actual = 
Bytes.toStringBinary(target); - String expected = "09azAZ@\\x01"; - assertEquals(expected, actual); - } - - @Test - public void testToStringBinaryForReadOnlyByteBuffer() { - byte[] array = { '0', '9', 'a', 'z', 'A', 'Z', '@', 1 }; - ByteBuffer target = ByteBuffer.wrap(array).asReadOnlyBuffer(); - String actual = Bytes.toStringBinary(target); - String expected = "09azAZ@\\x01"; - assertEquals(expected, actual); - } - - @Test - public void testBinarySearch() { - byte[][] arr = { { 1 }, { 3 }, { 5 }, { 7 }, { 9 }, { 11 }, { 13 }, { 15 }, }; - byte[] key1 = { 3, 1 }; - byte[] key2 = { 4, 9 }; - byte[] key2_2 = { 4 }; - byte[] key3 = { 5, 11 }; - byte[] key4 = { 0 }; - byte[] key5 = { 2 }; - - assertEquals(1, Bytes.binarySearch(arr, key1, 0, 1)); - assertEquals(0, Bytes.binarySearch(arr, key1, 1, 1)); - assertEquals(-(2 + 1), Arrays.binarySearch(arr, key2_2, Bytes.BYTES_COMPARATOR)); - assertEquals(-(2 + 1), Bytes.binarySearch(arr, key2, 0, 1)); - assertEquals(4, Bytes.binarySearch(arr, key2, 1, 1)); - assertEquals(2, Bytes.binarySearch(arr, key3, 0, 1)); - assertEquals(5, Bytes.binarySearch(arr, key3, 1, 1)); - assertEquals(-1, Bytes.binarySearch(arr, key4, 0, 1)); - assertEquals(-2, Bytes.binarySearch(arr, key5, 0, 1)); - - // Search for values to the left and to the right of each item in the array. - for (int i = 0; i < arr.length; ++i) { - assertEquals(-(i + 1), Bytes.binarySearch(arr, new byte[] { (byte) (arr[i][0] - 1) }, 0, 1)); - assertEquals(-(i + 2), Bytes.binarySearch(arr, new byte[] { (byte) (arr[i][0] + 1) }, 0, 1)); - } - } - - @Test - public void testToStringBytesBinaryReversible() { - byte[] randomBytes = new byte[1000]; - for (int i = 0; i < 1000; i++) { - Bytes.random(randomBytes); - verifyReversibleForBytes(randomBytes); - } - // some specific cases - verifyReversibleForBytes(new byte[] {}); - verifyReversibleForBytes(new byte[] { '\\', 'x', 'A', 'D' }); - verifyReversibleForBytes(new byte[] { '\\', 'x', 'A', 'D', '\\' }); - } - - private void verifyReversibleForBytes(byte[] originalBytes) { - String convertedString = Bytes.toStringBinary(originalBytes); - byte[] convertedBytes = Bytes.toBytesBinary(convertedString); - if (Bytes.compareTo(originalBytes, convertedBytes) != 0) { - fail("Not reversible for\nbyte[]: " + Arrays.toString(originalBytes) + ",\nStringBinary: " - + convertedString); - } - } - - @Test - public void testStartsWith() { - assertTrue(Bytes.startsWith(Bytes.toBytes("hello"), Bytes.toBytes("h"))); - assertTrue(Bytes.startsWith(Bytes.toBytes("hello"), Bytes.toBytes(""))); - assertTrue(Bytes.startsWith(Bytes.toBytes("hello"), Bytes.toBytes("hello"))); - assertFalse(Bytes.startsWith(Bytes.toBytes("hello"), Bytes.toBytes("helloworld"))); - assertFalse(Bytes.startsWith(Bytes.toBytes(""), Bytes.toBytes("hello"))); - } - - @Test - public void testIncrementBytes() { - assertTrue(checkTestIncrementBytes(10, 1)); - assertTrue(checkTestIncrementBytes(12, 123435445)); - assertTrue(checkTestIncrementBytes(124634654, 1)); - assertTrue(checkTestIncrementBytes(10005460, 5005645)); - assertTrue(checkTestIncrementBytes(1, -1)); - assertTrue(checkTestIncrementBytes(10, -1)); - assertTrue(checkTestIncrementBytes(10, -5)); - assertTrue(checkTestIncrementBytes(1005435000, -5)); - assertTrue(checkTestIncrementBytes(10, -43657655)); - assertTrue(checkTestIncrementBytes(-1, 1)); - assertTrue(checkTestIncrementBytes(-26, 5034520)); - assertTrue(checkTestIncrementBytes(-10657200, 5)); - assertTrue(checkTestIncrementBytes(-12343250, 45376475)); - assertTrue(checkTestIncrementBytes(-10, 
-5)); - assertTrue(checkTestIncrementBytes(-12343250, -5)); - assertTrue(checkTestIncrementBytes(-12, -34565445)); - assertTrue(checkTestIncrementBytes(-1546543452, -34565445)); - } - - private static boolean checkTestIncrementBytes(long val, long amount) { - byte[] value = Bytes.toBytes(val); - byte[] testValue = { -1, -1, -1, -1, -1, -1, -1, -1 }; - if (value[0] > 0) { - testValue = new byte[Bytes.SIZEOF_LONG]; - } - System.arraycopy(value, 0, testValue, testValue.length - value.length, value.length); - - long incrementResult = Bytes.toLong(Bytes.incrementBytes(value, amount)); - - return (Bytes.toLong(testValue) + amount) == incrementResult; - } - - @Test - public void testFixedSizeString() throws IOException { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - DataOutputStream dos = new DataOutputStream(baos); - Bytes.writeStringFixedSize(dos, "Hello", 5); - Bytes.writeStringFixedSize(dos, "World", 18); - Bytes.writeStringFixedSize(dos, "", 9); - - try { - // Use a long dash which is three bytes in UTF-8. If encoding happens - // using ISO-8859-1, this will fail. - Bytes.writeStringFixedSize(dos, "Too\u2013Long", 9); - fail("Exception expected"); - } catch (IOException ex) { - assertEquals( - "Trying to write 10 bytes (Too\\xE2\\x80\\x93Long) into a field of " + "length 9", - ex.getMessage()); - } - - ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray()); - DataInputStream dis = new DataInputStream(bais); - assertEquals("Hello", Bytes.readStringFixedSize(dis, 5)); - assertEquals("World", Bytes.readStringFixedSize(dis, 18)); - assertEquals("", Bytes.readStringFixedSize(dis, 9)); - } - - @Test - public void testCopy() { - byte[] bytes = Bytes.toBytes("ABCDEFGHIJKLMNOPQRSTUVWXYZ"); - byte[] copy = Bytes.copy(bytes); - assertNotSame(bytes, copy); - assertTrue(Bytes.equals(bytes, copy)); - } - - @Test - public void testToBytesBinaryTrailingBackslashes() { - try { - Bytes.toBytesBinary("abc\\x00\\x01\\"); - } catch (StringIndexOutOfBoundsException ex) { - fail("Illegal string access: " + ex.getMessage()); - } - } - - @Test - public void testToStringBinary_toBytesBinary_Reversable() { - String bytes = Bytes.toStringBinary(Bytes.toBytes(2.17)); - assertEquals(2.17, Bytes.toDouble(Bytes.toBytesBinary(bytes)), 0); - } - - @Test - public void testUnsignedBinarySearch() { - byte[] bytes = new byte[] { 0, 5, 123, 127, -128, -100, -1 }; - Assert.assertEquals(1, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) 5)); - Assert.assertEquals(3, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) 127)); - Assert.assertEquals(4, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) -128)); - Assert.assertEquals(5, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) -100)); - Assert.assertEquals(6, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) -1)); - Assert.assertEquals(-1 - 1, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) 2)); - Assert.assertEquals(-6 - 1, Bytes.unsignedBinarySearch(bytes, 0, bytes.length, (byte) -5)); - } - - @Test - public void testUnsignedIncrement() { - byte[] a = Bytes.toBytes(0); - int a2 = Bytes.toInt(Bytes.unsignedCopyAndIncrement(a), 0); - Assert.assertEquals(1, a2); - - byte[] b = Bytes.toBytes(-1); - byte[] actual = Bytes.unsignedCopyAndIncrement(b); - Assert.assertNotSame(b, actual); - byte[] expected = new byte[] { 1, 0, 0, 0, 0 }; - assertArrayEquals(expected, actual); - - byte[] c = Bytes.toBytes(255);// should wrap to the next significant byte - int c2 = 
Bytes.toInt(Bytes.unsignedCopyAndIncrement(c), 0); - Assert.assertEquals(256, c2); - } - - @Test - public void testIndexOf() { - byte[] array = Bytes.toBytes("hello"); - assertEquals(1, Bytes.indexOf(array, (byte) 'e')); - assertEquals(4, Bytes.indexOf(array, (byte) 'o')); - assertEquals(-1, Bytes.indexOf(array, (byte) 'a')); - assertEquals(0, Bytes.indexOf(array, Bytes.toBytes("hel"))); - assertEquals(2, Bytes.indexOf(array, Bytes.toBytes("ll"))); - assertEquals(-1, Bytes.indexOf(array, Bytes.toBytes("hll"))); - } - - @Test - public void testContains() { - byte[] array = Bytes.toBytes("hello world"); - assertTrue(Bytes.contains(array, (byte) 'e')); - assertTrue(Bytes.contains(array, (byte) 'd')); - assertFalse(Bytes.contains(array, (byte) 'a')); - assertTrue(Bytes.contains(array, Bytes.toBytes("world"))); - assertTrue(Bytes.contains(array, Bytes.toBytes("ello"))); - assertFalse(Bytes.contains(array, Bytes.toBytes("owo"))); - } - - @Test - public void testZero() { - byte[] array = Bytes.toBytes("hello"); - Bytes.zero(array); - for (byte b : array) { - assertEquals(0, b); - } - array = Bytes.toBytes("hello world"); - Bytes.zero(array, 2, 7); - assertFalse(array[0] == 0); - assertFalse(array[1] == 0); - for (int i = 2; i < 9; i++) { - assertEquals(0, array[i]); - } - for (int i = 9; i < array.length; i++) { - assertFalse(array[i] == 0); - } - } - - @Test - public void testPutBuffer() { - byte[] b = new byte[100]; - for (byte i = 0; i < 100; i++) { - Bytes.putByteBuffer(b, i, ByteBuffer.wrap(new byte[] { i })); - } - for (byte i = 0; i < 100; i++) { - Assert.assertEquals(i, b[i]); - } - } - - @Test - public void testToFromHex() { - List testStrings = new ArrayList<>(8); - testStrings.addAll(Arrays.asList("", "00", "A0", "ff", "FFffFFFFFFFFFF", "12", - "0123456789abcdef", "283462839463924623984692834692346ABCDFEDDCA0")); - for (String testString : testStrings) { - byte[] byteData = Bytes.fromHex(testString); - Assert.assertEquals(testString.length() / 2, byteData.length); - String result = Bytes.toHex(byteData); - Assert.assertTrue(testString.equalsIgnoreCase(result)); - } - - List testByteData = new ArrayList<>(5); - testByteData.addAll(Arrays.asList(new byte[0], new byte[1], new byte[10], - new byte[] { 1, 2, 3, 4, 5 }, new byte[] { (byte) 0xFF })); - Random rand = ThreadLocalRandom.current(); - for (int i = 0; i < 20; i++) { - byte[] bytes = new byte[rand.nextInt(100)]; - Bytes.random(bytes); - testByteData.add(bytes); - } - - for (byte[] testData : testByteData) { - String hexString = Bytes.toHex(testData); - Assert.assertEquals(testData.length * 2, hexString.length()); - byte[] result = Bytes.fromHex(hexString); - assertArrayEquals(testData, result); - } - } - - @Test - public void testFindCommonPrefix() throws Exception { - testFindCommonPrefix(false); - } - - @Test - public void testFindCommonPrefixUnsafe() throws Exception { - testFindCommonPrefix(true); - } - - private static void testFindCommonPrefix(boolean unsafe) throws Exception { - setUnsafe(unsafe); - try { - // tests for common prefixes less than 8 bytes in length (i.e. 
using non-vectorized path) - byte[] hello = Bytes.toBytes("hello"); - byte[] helloWorld = Bytes.toBytes("helloworld"); - - assertEquals(5, - Bytes.findCommonPrefix(hello, helloWorld, hello.length, helloWorld.length, 0, 0)); - assertEquals(5, Bytes.findCommonPrefix(hello, hello, hello.length, hello.length, 0, 0)); - assertEquals(3, - Bytes.findCommonPrefix(hello, hello, hello.length - 2, hello.length - 2, 2, 2)); - assertEquals(0, Bytes.findCommonPrefix(hello, hello, 0, 0, 0, 0)); - - // tests for common prefixes greater than 8 bytes in length which may use the vectorized path - byte[] hellohello = Bytes.toBytes("hellohello"); - byte[] hellohellohi = Bytes.toBytes("hellohellohi"); +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.jupiter.api.Tag; - assertEquals(10, Bytes.findCommonPrefix(hellohello, hellohellohi, hellohello.length, - hellohellohi.length, 0, 0)); - assertEquals(10, Bytes.findCommonPrefix(hellohellohi, hellohello, hellohellohi.length, - hellohello.length, 0, 0)); - assertEquals(10, - Bytes.findCommonPrefix(hellohello, hellohello, hellohello.length, hellohello.length, 0, 0)); +@Tag(MiscTests.TAG) +@Tag(SmallTests.TAG) +public class TestBytes extends BytesTestBase { - hellohello[2] = 0; - assertEquals(2, Bytes.findCommonPrefix(hellohello, hellohellohi, hellohello.length, - hellohellohi.length, 0, 0)); - } finally { - setUnsafe(HBasePlatformDependent.unaligned()); - } - } } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytesWoUnsafe.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytesWoUnsafe.java new file mode 100644 index 000000000000..8aacab4b8514 --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytesWoUnsafe.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.util; + +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.mockito.Mockito.mockStatic; + +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.unsafe.HBasePlatformDependent; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.mockito.MockedStatic; + +@Tag(MiscTests.TAG) +@Tag(SmallTests.TAG) +public class TestBytesWoUnsafe extends BytesTestBase { + + @BeforeAll + public static void disableUnsafe() { + try (MockedStatic mocked = mockStatic(HBasePlatformDependent.class)) { + mocked.when(HBasePlatformDependent::unaligned).thenReturn(false); + assertFalse(Bytes.UNSAFE_UNALIGNED); + } + } +} diff --git a/hbase-common/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension b/hbase-common/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension new file mode 100644 index 000000000000..0cb8a35a1ee8 --- /dev/null +++ b/hbase-common/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
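+# Registers HBaseJupiterExtension for JUnit Jupiter extension auto-detection
+# (service-loader registrations like this are only picked up when
+# junit.jupiter.extensions.autodetection.enabled is set to true).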
+org.apache.hadoop.hbase.HBaseJupiterExtension diff --git a/hbase-http/pom.xml b/hbase-http/pom.xml index c4063428b942..d64e6cd7fa84 100644 --- a/hbase-http/pom.xml +++ b/hbase-http/pom.xml @@ -184,6 +184,10 @@ org.bouncycastle bcprov-jdk15on + + org.bouncycastle + bcpkix-jdk15on + diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/LdapServerTestBase.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/LdapServerTestBase.java index bbf35b8585f6..8856aaa0e205 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/LdapServerTestBase.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/LdapServerTestBase.java @@ -21,34 +21,73 @@ import java.net.HttpURLConnection; import java.net.URL; import org.apache.commons.codec.binary.Base64; -import org.apache.directory.server.core.integ.CreateLdapServerRule; +import org.apache.directory.ldap.client.template.LdapConnectionTemplate; +import org.apache.directory.server.core.api.DirectoryService; +import org.apache.directory.server.core.integ.ApacheDSTestExtension; +import org.apache.directory.server.ldap.LdapServer; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.http.resource.JerseyResource; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.extension.ExtendWith; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Base class for setting up and testing an HTTP server with LDAP authentication. */ +@ExtendWith(ApacheDSTestExtension.class) public class LdapServerTestBase extends HttpServerFunctionalTest { private static final Logger LOG = LoggerFactory.getLogger(LdapServerTestBase.class); - @ClassRule - public static CreateLdapServerRule ldapRule = new CreateLdapServerRule(); - protected static HttpServer server; protected static URL baseUrl; + /** + * The following fields are set by ApacheDSTestExtension. These are normally inherited from + * AbstractLdapTestUnit, but this class already has a parent. We only use ldapServer, but + * declaring that one alone does not work. + */ + + /** The class DirectoryService instance */ + public static DirectoryService classDirectoryService; + + /** The test DirectoryService instance */ + public static DirectoryService methodDirectoryService; + + /** The current DirectoryService instance */ + public static DirectoryService directoryService; + + /** The class LdapServer instance */ + public static LdapServer classLdapServer; + + /** The test LdapServer instance */ + public static LdapServer methodLdapServer; + + /** The current LdapServer instance */ + public static LdapServer ldapServer; + + /** The Ldap connection template */ + public static LdapConnectionTemplate ldapConnectionTemplate; + + /** The current revision */ + public static long revision = 0L; + + /** + * End of fields required by ApacheDSTestExtension + */ + private static final String AUTH_TYPE = "Basic "; + protected static LdapServer getLdapServer() { + return classLdapServer; + } + /** * Sets up the HTTP server with LDAP authentication before any tests are run. * @throws Exception if an error occurs during server setup */ - @BeforeClass + @BeforeAll public static void setupServer() throws Exception { Configuration conf = new Configuration(); setLdapConfigurations(conf); @@ -66,7 +105,7 @@ public static void setupServer() throws Exception { * Stops the HTTP server after all tests are completed. 
* @throws Exception if an error occurs during server shutdown */ - @AfterClass + @AfterAll public static void stopServer() throws Exception { try { if (null != server) { @@ -90,8 +129,8 @@ protected static void setLdapConfigurations(Configuration conf) { conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, "org.apache.hadoop.hbase.http.lib.AuthenticationFilterInitializer"); conf.set("hadoop.http.authentication.type", "ldap"); - conf.set("hadoop.http.authentication.ldap.providerurl", String.format("ldap://%s:%s", - LdapConstants.LDAP_SERVER_ADDR, ldapRule.getLdapServer().getPort())); + conf.set("hadoop.http.authentication.ldap.providerurl", + String.format("ldap://%s:%s", LdapConstants.LDAP_SERVER_ADDR, getLdapServer().getPort())); conf.set("hadoop.http.authentication.ldap.enablestarttls", "false"); conf.set("hadoop.http.authentication.ldap.basedn", LdapConstants.LDAP_BASE_DN); } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapAdminACL.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapAdminACL.java index 459865509630..91a3321bdfce 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapAdminACL.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapAdminACL.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.http; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; import java.net.HttpURLConnection; @@ -29,21 +29,20 @@ import org.apache.directory.server.core.annotations.CreatePartition; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.http.resource.JerseyResource; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Test class for admin ACLs with LDAP authentication on the HttpServer. 
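+ * The embedded LDAP server is created from the class-level {@code CreateLdapServer} and
+ * {@code CreateDS} annotations below, which are processed by the {@code ApacheDSTestExtension}
+ * registered on {@link LdapServerTestBase} and exposed through {@code getLdapServer()}.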
*/ -@Category({ MiscTests.class, SmallTests.class }) +@Tag(MiscTests.TAG) +@Tag(SmallTests.TAG) @CreateLdapServer( transports = { @CreateTransport(protocol = "LDAP", address = LdapConstants.LDAP_SERVER_ADDR), }) @CreateDS(name = "TestLdapAdminACL", allowAnonAccess = true, @@ -57,16 +56,13 @@ "objectClass: inetOrgPerson", "uid: jdoe", "userPassword: secure123" }) public class TestLdapAdminACL extends LdapServerTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLdapAdminACL.class); private static final Logger LOG = LoggerFactory.getLogger(TestLdapAdminACL.class); private static final String ADMIN_CREDENTIALS = "bjones:p@ssw0rd"; private static final String NON_ADMIN_CREDENTIALS = "jdoe:secure123"; private static final String WRONG_CREDENTIALS = "bjones:password"; - @BeforeClass + @BeforeAll public static void setupServer() throws Exception { Configuration conf = new Configuration(); setLdapConfigurationWithACLs(conf); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapHttpServer.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapHttpServer.java index bff4dc9d9591..c4936513fb36 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapHttpServer.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestLdapHttpServer.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.http; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; import java.net.HttpURLConnection; @@ -27,17 +27,16 @@ import org.apache.directory.server.core.annotations.ContextEntry; import org.apache.directory.server.core.annotations.CreateDS; import org.apache.directory.server.core.annotations.CreatePartition; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; /** * Test class for LDAP authentication on the HttpServer. 
*/ -@Category({ MiscTests.class, SmallTests.class }) +@Tag(MiscTests.TAG) +@Tag(SmallTests.TAG) @CreateLdapServer( transports = { @CreateTransport(protocol = "LDAP", address = LdapConstants.LDAP_SERVER_ADDR), }) @CreateDS(name = "TestLdapHttpServer", allowAnonAccess = true, @@ -48,10 +47,6 @@ "objectClass: inetOrgPerson", "uid: bjones", "userPassword: p@ssw0rd" }) public class TestLdapHttpServer extends LdapServerTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLdapHttpServer.class); - private static final String BJONES_CREDENTIALS = "bjones:p@ssw0rd"; private static final String WRONG_CREDENTIALS = "bjones:password"; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index be68d4575963..0e81c95677c3 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -23,6 +23,7 @@ import static org.apache.hadoop.hbase.regionserver.HStoreFile.MAJOR_COMPACTION_KEY; import java.io.IOException; +import java.io.UncheckedIOException; import java.io.UnsupportedEncodingException; import java.net.InetSocketAddress; import java.net.URLDecoder; @@ -50,6 +51,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; @@ -82,6 +84,7 @@ import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.SequenceFile; import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.OutputCommitter; import org.apache.hadoop.mapreduce.OutputFormat; @@ -170,6 +173,11 @@ protected static byte[] combineTableNameSuffix(byte[] tableName, byte[] suffix) "hbase.mapreduce.hfileoutputformat.extendedcell.enabled"; static final boolean EXTENDED_CELL_SERIALIZATION_ENABLED_DEFULT = false; + @InterfaceAudience.Private + public static final String DISK_BASED_SORTING_ENABLED_KEY = + "hbase.mapreduce.hfileoutputformat.disk.based.sorting.enabled"; + private static final boolean DISK_BASED_SORTING_ENABLED_DEFAULT = false; + public static final String REMOTE_CLUSTER_CONF_PREFIX = "hbase.hfileoutputformat.remote.cluster."; public static final String REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY = REMOTE_CLUSTER_CONF_PREFIX + "zookeeper.quorum"; @@ -547,12 +555,19 @@ private static void writePartitions(Configuration conf, Path partitionsPath, // Write the actual file FileSystem fs = partitionsPath.getFileSystem(conf); - SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, partitionsPath, - ImmutableBytesWritable.class, NullWritable.class); + boolean diskBasedSortingEnabled = diskBasedSortingEnabled(conf); + Class keyClass = + diskBasedSortingEnabled ? KeyOnlyCellComparable.class : ImmutableBytesWritable.class; + SequenceFile.Writer writer = + SequenceFile.createWriter(fs, conf, partitionsPath, keyClass, NullWritable.class); try { for (ImmutableBytesWritable startKey : sorted) { - writer.append(startKey, NullWritable.get()); + Writable writable = diskBasedSortingEnabled + ? 
new KeyOnlyCellComparable(KeyValueUtil.createFirstOnRow(startKey.get())) + : startKey; + + writer.append(writable, NullWritable.get()); } } finally { writer.close(); @@ -576,7 +591,7 @@ private static void writePartitions(Configuration conf, Path partitionsPath, public static void configureIncrementalLoad(Job job, Table table, RegionLocator regionLocator) throws IOException { configureIncrementalLoad(job, table.getDescriptor(), regionLocator); - configureRemoteCluster(job, table.getConfiguration()); + configureForRemoteCluster(job, table.getConfiguration()); } /** @@ -599,6 +614,10 @@ public static void configureIncrementalLoad(Job job, TableDescriptor tableDescri configureIncrementalLoad(job, singleTableInfo, HFileOutputFormat2.class); } + public static boolean diskBasedSortingEnabled(Configuration conf) { + return conf.getBoolean(DISK_BASED_SORTING_ENABLED_KEY, DISK_BASED_SORTING_ENABLED_DEFAULT); + } + static void configureIncrementalLoad(Job job, List multiTableInfo, Class> cls) throws IOException { Configuration conf = job.getConfiguration(); @@ -617,7 +636,13 @@ static void configureIncrementalLoad(Job job, List multiTableInfo, // Based on the configured map output class, set the correct reducer to properly // sort the incoming values. // TODO it would be nice to pick one or the other of these formats. - if ( + boolean diskBasedSorting = diskBasedSortingEnabled(conf); + + if (diskBasedSorting) { + job.setMapOutputKeyClass(KeyOnlyCellComparable.class); + job.setSortComparatorClass(KeyOnlyCellComparable.KeyOnlyCellComparator.class); + job.setReducerClass(PreSortedCellsReducer.class); + } else if ( KeyValue.class.equals(job.getMapOutputValueClass()) || MapReduceExtendedCell.class.equals(job.getMapOutputValueClass()) ) { @@ -752,8 +777,34 @@ public static void configureIncrementalLoadMap(Job job, TableDescriptor tableDes * @see #REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY * @see #REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY * @see #REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY + * @deprecated As of release 2.6.4, this will be removed in HBase 4.0.0. Use + * {@link #configureForRemoteCluster(Job, Configuration)} instead. + */ + @Deprecated + public static void configureRemoteCluster(Job job, Configuration clusterConf) { + try { + configureForRemoteCluster(job, clusterConf); + } catch (IOException e) { + LOG.error("Configure remote cluster error.", e); + throw new UncheckedIOException("Configure remote cluster error.", e); + } + } + + /** + * Configure the HBase cluster key for a remote cluster so region locations can be loaded when + * the locality-sensitive feature is enabled. It is not necessary to call this method explicitly + * when the cluster key for the HBase cluster used to load region locations is already configured + * in the job configuration. Call this method when another HBase cluster key is configured in the + * job configuration. For example, you should call it when you load data from HBase cluster A using + * {@link TableInputFormat} and generate hfiles for HBase cluster B. Otherwise, HFileOutputFormat2 + * fetches region locations from cluster A and the locality-sensitive feature won't work correctly. + * If authentication is enabled, it obtains the token for the specific cluster.
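For reference, a minimal driver sketch of how a bulk-load job could opt in to the disk-based sorting path introduced above and target a remote cluster; the table name and the remote-cluster Configuration are placeholders, and mapper/input/output setup is omitted.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.mapreduce.Job;

public class DiskSortedBulkLoadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Opt in to the disk-based sorting path (defaults to false).
    conf.setBoolean(HFileOutputFormat2.DISK_BASED_SORTING_ENABLED_KEY, true);
    Job job = Job.getInstance(conf, "hfile-bulk-load-sketch");
    try (Connection connection = ConnectionFactory.createConnection(conf);
      Table table = connection.getTable(TableName.valueOf("example_table"));
      RegionLocator locator = connection.getRegionLocator(table.getName())) {
      // With the flag set, configureIncrementalLoad wires KeyOnlyCellComparable,
      // its KeyOnlyCellComparator and PreSortedCellsReducer into the job.
      HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), locator);
      // New entry point replacing the now-deprecated configureRemoteCluster(...).
      Configuration remoteClusterConf = HBaseConfiguration.create(); // assumed to point at cluster B
      HFileOutputFormat2.configureForRemoteCluster(job, remoteClusterConf);
    }
    // Mapper, input format and output path configuration are omitted from this sketch.
  }
}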
+ * @param job which has configuration to be updated + * @param clusterConf which contains cluster key of the HBase cluster to be locality-sensitive + * @throws IOException Exception while initializing cluster credentials */ - public static void configureRemoteCluster(Job job, Configuration clusterConf) throws IOException { + public static void configureForRemoteCluster(Job job, Configuration clusterConf) + throws IOException { Configuration conf = job.getConfiguration(); if (!conf.getBoolean(LOCALITY_SENSITIVE_CONF_KEY, DEFAULT_LOCALITY_SENSITIVE)) { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyOnlyCellComparable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyOnlyCellComparable.java new file mode 100644 index 000000000000..d9b28f8a6895 --- /dev/null +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyOnlyCellComparable.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.mapreduce; + +import java.io.ByteArrayInputStream; +import java.io.DataInput; +import java.io.DataInputStream; +import java.io.DataOutput; +import java.io.IOException; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.io.WritableComparator; +import org.apache.yetus.audience.InterfaceAudience; + +@InterfaceAudience.Private +public class KeyOnlyCellComparable implements WritableComparable { + + static { + WritableComparator.define(KeyOnlyCellComparable.class, new KeyOnlyCellComparator()); + } + + private ExtendedCell cell = null; + + public KeyOnlyCellComparable() { + } + + public KeyOnlyCellComparable(ExtendedCell cell) { + this.cell = cell; + } + + public ExtendedCell getCell() { + return cell; + } + + @Override + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "EQ_COMPARETO_USE_OBJECT_EQUALS", + justification = "This is wrong, yes, but we should be purging Writables, not fixing them") + public int compareTo(KeyOnlyCellComparable o) { + return CellComparator.getInstance().compare(cell, o.cell); + } + + @Override + public void write(DataOutput out) throws IOException { + int keyLen = PrivateCellUtil.estimatedSerializedSizeOfKey(cell); + int valueLen = 0; // We avoid writing value here. So just serialize as if an empty value. 
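+    // The serialized form mirrors a value-less KeyValue: total length, key length, a zero
+    // value length and the flat key bytes, followed by the cell's sequence id so that
+    // readFields() can rebuild the cell via KeyValue.create(in) and restore the sequence id.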
+ out.writeInt(keyLen + valueLen + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE); + out.writeInt(keyLen); + out.writeInt(valueLen); + PrivateCellUtil.writeFlatKey(cell, out); + out.writeLong(cell.getSequenceId()); + } + + @Override + public void readFields(DataInput in) throws IOException { + cell = KeyValue.create(in); + long seqId = in.readLong(); + cell.setSequenceId(seqId); + } + + public static class KeyOnlyCellComparator extends WritableComparator { + + @Override + public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { + try (DataInputStream d1 = new DataInputStream(new ByteArrayInputStream(b1, s1, l1)); + DataInputStream d2 = new DataInputStream(new ByteArrayInputStream(b2, s2, l2))) { + KeyOnlyCellComparable kv1 = new KeyOnlyCellComparable(); + kv1.readFields(d1); + KeyOnlyCellComparable kv2 = new KeyOnlyCellComparable(); + kv2.readFields(d2); + return compare(kv1, kv2); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } +} diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PreSortedCellsReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PreSortedCellsReducer.java new file mode 100644 index 000000000000..8f4b2953ec0d --- /dev/null +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PreSortedCellsReducer.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.mapreduce; + +import java.io.IOException; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.hadoop.hbase.util.MapReduceExtendedCell; +import org.apache.hadoop.mapreduce.Reducer; +import org.apache.yetus.audience.InterfaceAudience; + +@InterfaceAudience.Private +public class PreSortedCellsReducer + extends Reducer { + + @Override + protected void reduce(KeyOnlyCellComparable keyComparable, Iterable values, Context context) + throws IOException, InterruptedException { + + int index = 0; + ImmutableBytesWritable key = + new ImmutableBytesWritable(CellUtil.cloneRow(keyComparable.getCell())); + for (Cell cell : values) { + context.write(key, new MapReduceExtendedCell(PrivateCellUtil.ensureExtendedCell(cell))); + if (++index % 100 == 0) { + context.setStatus("Wrote " + index + " cells"); + } + } + } +} diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java index cea6da97649d..4c0b12ef7333 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; @@ -53,6 +54,7 @@ import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALEditInternalHelper; import org.apache.hadoop.hbase.wal.WALKey; +import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; @@ -114,9 +116,10 @@ protected WALPlayer(final Configuration c) { * A mapper that just writes out KeyValues. This one can be used together with * {@link CellSortReducer} */ - static class WALKeyValueMapper extends Mapper { + static class WALKeyValueMapper extends Mapper, Cell> { private Set tableSet = new HashSet(); private boolean multiTableSupport = false; + private boolean diskBasedSortingEnabled = false; @Override public void map(WALKey key, WALEdit value, Context context) throws IOException { @@ -138,8 +141,8 @@ public void map(WALKey key, WALEdit value, Context context) throws IOException { byte[] outKey = multiTableSupport ? 
Bytes.add(table.getName(), Bytes.toBytes(tableSeparator), CellUtil.cloneRow(cell)) : CellUtil.cloneRow(cell); - context.write(new ImmutableBytesWritable(outKey), - new MapReduceExtendedCell(PrivateCellUtil.ensureExtendedCell(cell))); + ExtendedCell extendedCell = PrivateCellUtil.ensureExtendedCell(cell); + context.write(wrapKey(outKey, extendedCell), new MapReduceExtendedCell(extendedCell)); } } } catch (InterruptedException e) { @@ -153,8 +156,23 @@ public void setup(Context context) throws IOException { Configuration conf = context.getConfiguration(); String[] tables = conf.getStrings(TABLES_KEY); this.multiTableSupport = conf.getBoolean(MULTI_TABLES_SUPPORT, false); + this.diskBasedSortingEnabled = HFileOutputFormat2.diskBasedSortingEnabled(conf); Collections.addAll(tableSet, tables); } + + private WritableComparable wrapKey(byte[] key, ExtendedCell cell) { + if (this.diskBasedSortingEnabled) { + // Important to build a new cell with the updated key to maintain multi-table support + KeyValue kv = new KeyValue(key, 0, key.length, cell.getFamilyArray(), + cell.getFamilyOffset(), cell.getFamilyLength(), cell.getQualifierArray(), + cell.getQualifierOffset(), cell.getQualifierLength(), cell.getTimestamp(), + KeyValue.Type.codeToType(PrivateCellUtil.getTypeByte(cell)), null, 0, 0); + kv.setSequenceId(cell.getSequenceId()); + return new KeyOnlyCellComparable(kv); + } else { + return new ImmutableBytesWritable(key); + } + } } /** @@ -331,7 +349,13 @@ public Job createSubmittableJob(String[] args) throws IOException { job.setJarByClass(WALPlayer.class); job.setInputFormatClass(WALInputFormat.class); - job.setMapOutputKeyClass(ImmutableBytesWritable.class); + boolean diskBasedSortingEnabled = HFileOutputFormat2.diskBasedSortingEnabled(conf); + if (diskBasedSortingEnabled) { + job.setMapOutputKeyClass(KeyOnlyCellComparable.class); + job.setSortComparatorClass(KeyOnlyCellComparable.KeyOnlyCellComparator.class); + } else { + job.setMapOutputKeyClass(ImmutableBytesWritable.class); + } String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY); if (hfileOutPath != null) { @@ -346,7 +370,11 @@ public Job createSubmittableJob(String[] args) throws IOException { List tableNames = getTableNameList(tables); job.setMapperClass(WALKeyValueMapper.class); - job.setReducerClass(CellSortReducer.class); + if (diskBasedSortingEnabled) { + job.setReducerClass(PreSortedCellsReducer.class); + } else { + job.setReducerClass(CellSortReducer.class); + } Path outputDir = new Path(hfileOutPath); FileOutputFormat.setOutputPath(job, outputDir); job.setMapOutputValueClass(MapReduceExtendedCell.class); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java index edd2da4129ac..c24f8e62c816 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.client.TestTableSnapshotScanner; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat.TableSnapshotRegionSplit; +import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.testclassification.LargeTests; import 
org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests; @@ -582,4 +583,104 @@ public void testCleanRestoreDir() throws Exception { TableSnapshotInputFormat.cleanRestoreDir(job, snapshotName); Assert.assertFalse(fs.exists(restorePath)); } + + /** + * Test that explicitly restores a snapshot to a temp directory and reads the restored regions via + * ClientSideRegionScanner through a MapReduce job. + *

+ * This test verifies the full workflow: 1. Create and load a table with data 2. Create a snapshot + * and restore the snapshot to a temporary directory 3. Configure a job to read the restored + * regions via ClientSideRegionScanner using TableSnapshotInputFormat and verify that it succeeds + * 4. Delete restored temporary directory 5. Configure a new job and verify that it fails + */ + @Test + public void testReadFromRestoredSnapshotViaMR() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); + final String snapshotName = tableName + "_snapshot"; + try { + if (UTIL.getAdmin().tableExists(tableName)) { + UTIL.deleteTable(tableName); + } + UTIL.createTable(tableName, FAMILIES, new byte[][] { bbb, yyy }); + + Admin admin = UTIL.getAdmin(); + int regionNum = admin.getRegions(tableName).size(); + LOG.info("Created table with {} regions", regionNum); + + Table table = UTIL.getConnection().getTable(tableName); + UTIL.loadTable(table, FAMILIES); + table.close(); + + Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()); + FileSystem fs = rootDir.getFileSystem(UTIL.getConfiguration()); + SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName, Arrays.asList(FAMILIES), + null, snapshotName, rootDir, fs, true); + Path tempRestoreDir = UTIL.getDataTestDirOnTestFS("restore_" + snapshotName); + RestoreSnapshotHelper.copySnapshotForScanner(UTIL.getConfiguration(), fs, rootDir, + tempRestoreDir, snapshotName); + Assert.assertTrue("Restore directory should exist", fs.exists(tempRestoreDir)); + + Job job = Job.getInstance(UTIL.getConfiguration()); + job.setJarByClass(TestTableSnapshotInputFormat.class); + TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), + TestTableSnapshotInputFormat.class); + Scan scan = new Scan().withStartRow(getStartRow()).withStopRow(getEndRow()); + Configuration conf = job.getConfiguration(); + conf.set("hbase.TableSnapshotInputFormat.snapshot.name", snapshotName); + conf.set("hbase.TableSnapshotInputFormat.restore.dir", tempRestoreDir.toString()); + conf.setInt("hbase.mapreduce.splits.per.region", 1); + job.setReducerClass(TestTableSnapshotReducer.class); + job.setNumReduceTasks(1); + job.setOutputFormatClass(NullOutputFormat.class); + TableMapReduceUtil.initTableMapperJob(snapshotName, // table name (snapshot name in this case) + scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + false, false, TableSnapshotInputFormat.class); + TableMapReduceUtil.resetCacheConfig(conf); + Assert.assertTrue(job.waitForCompletion(true)); + Assert.assertTrue(job.isSuccessful()); + + // Now verify that job fails when restore directory is deleted + Assert.assertTrue(fs.delete(tempRestoreDir, true)); + Assert.assertFalse("Restore directory should not exist after deletion", + fs.exists(tempRestoreDir)); + Job failureJob = Job.getInstance(UTIL.getConfiguration()); + failureJob.setJarByClass(TestTableSnapshotInputFormat.class); + TableMapReduceUtil.addDependencyJarsForClasses(failureJob.getConfiguration(), + TestTableSnapshotInputFormat.class); + Configuration failureConf = failureJob.getConfiguration(); + // Configure job to use the deleted restore directory + failureConf.set("hbase.TableSnapshotInputFormat.snapshot.name", snapshotName); + failureConf.set("hbase.TableSnapshotInputFormat.restore.dir", tempRestoreDir.toString()); + failureConf.setInt("hbase.mapreduce.splits.per.region", 1); + failureJob.setReducerClass(TestTableSnapshotReducer.class); + failureJob.setNumReduceTasks(1); + 
failureJob.setOutputFormatClass(NullOutputFormat.class); + + TableMapReduceUtil.initTableMapperJob(snapshotName, scan, TestTableSnapshotMapper.class, + ImmutableBytesWritable.class, NullWritable.class, failureJob, false, false, + TableSnapshotInputFormat.class); + TableMapReduceUtil.resetCacheConfig(failureConf); + + Assert.assertFalse("Restore directory should not exist before job execution", + fs.exists(tempRestoreDir)); + failureJob.waitForCompletion(true); + + Assert.assertFalse("Job should fail since the restored snapshot directory is deleted", + failureJob.isSuccessful()); + + } finally { + try { + if (UTIL.getAdmin().tableExists(tableName)) { + UTIL.deleteTable(tableName); + } + } catch (Exception e) { + LOG.warn("Error deleting table", e); + } + try { + UTIL.getAdmin().deleteSnapshot(snapshotName); + } catch (Exception e) { + LOG.warn("Error deleting snapshot", e); + } + } + } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java index 7818f8d2f739..bbadabab69bf 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java @@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALKey; +import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.Mapper.Context; import org.apache.hadoop.util.ToolRunner; @@ -126,19 +127,22 @@ public void testPlayingRecoveredEdit() throws Exception { TEST_UTIL.createTable(tn, TestRecoveredEdits.RECOVEREDEDITS_COLUMNFAMILY); // Copy testing recovered.edits file that is over under hbase-server test resources // up into a dir in our little hdfs cluster here. - String hbaseServerTestResourcesEdits = - System.getProperty("test.build.classes") + "/../../../hbase-server/src/test/resources/" - + TestRecoveredEdits.RECOVEREDEDITS_PATH.getName(); - assertTrue(new File(hbaseServerTestResourcesEdits).exists()); - FileSystem dfs = TEST_UTIL.getDFSCluster().getFileSystem(); - // Target dir. - Path targetDir = new Path("edits").makeQualified(dfs.getUri(), dfs.getHomeDirectory()); - assertTrue(dfs.mkdirs(targetDir)); - dfs.copyFromLocalFile(new Path(hbaseServerTestResourcesEdits), targetDir); - assertEquals(0, - ToolRunner.run(new WALPlayer(this.conf), new String[] { targetDir.toString() })); - // I don't know how many edits are in this file for this table... so just check more than 1. - assertTrue(TEST_UTIL.countRows(tn) > 0); + runWithDiskBasedSortingDisabledAndEnabled(() -> { + String hbaseServerTestResourcesEdits = + System.getProperty("test.build.classes") + "/../../../hbase-server/src/test/resources/" + + TestRecoveredEdits.RECOVEREDEDITS_PATH.getName(); + assertTrue(new File(hbaseServerTestResourcesEdits).exists()); + FileSystem dfs = TEST_UTIL.getDFSCluster().getFileSystem(); + // Target dir. + Path targetDir = new Path("edits").makeQualified(dfs.getUri(), dfs.getHomeDirectory()); + assertTrue(dfs.mkdirs(targetDir)); + dfs.copyFromLocalFile(new Path(hbaseServerTestResourcesEdits), targetDir); + assertEquals(0, + ToolRunner.run(new WALPlayer(this.conf), new String[] { targetDir.toString() })); + // I don't know how many edits are in this file for this table... so just check more than 1. 
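To make the two code paths these WALPlayer tests now exercise easier to follow, here is a minimal sketch (an illustrative helper, not part of the patch) of the branch that WALPlayer.createSubmittableJob takes, using only names that appear in the hunks above (HFileOutputFormat2.diskBasedSortingEnabled, KeyOnlyCellComparable, PreSortedCellsReducer, CellSortReducer).

package org.apache.hadoop.hbase.mapreduce; // placed alongside WALPlayer so the sketch can reuse its helpers

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.mapreduce.Job;

final class DiskBasedSortingWiring { // hypothetical class name, for illustration only
  static void configureShuffle(Job job, Configuration conf) {
    if (HFileOutputFormat2.diskBasedSortingEnabled(conf)) {
      // Disk-based path: map output keys carry the whole cell key, the shuffle sorts them with
      // the dedicated comparator, and the reducer assumes its input is already sorted.
      job.setMapOutputKeyClass(KeyOnlyCellComparable.class);
      job.setSortComparatorClass(KeyOnlyCellComparable.KeyOnlyCellComparator.class);
      job.setReducerClass(PreSortedCellsReducer.class);
    } else {
      // Default path: row-key map output keys, sorted in memory by CellSortReducer.
      job.setMapOutputKeyClass(ImmutableBytesWritable.class);
      job.setReducerClass(CellSortReducer.class);
    }
  }
}

The tests below simply run each scenario twice, once with the flag unset and once with it enabled, via runWithDiskBasedSortingDisabledAndEnabled (defined further down in this file).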
+ assertTrue(TEST_UTIL.countRows(tn) > 0); + dfs.delete(targetDir, true); + }); } /** @@ -153,7 +157,7 @@ public void testWALPlayerBulkLoadWithOverriddenTimestamps() throws Exception { final byte[] column1 = Bytes.toBytes("c1"); final byte[] column2 = Bytes.toBytes("c2"); final byte[] row = Bytes.toBytes("row"); - Table table = TEST_UTIL.createTable(tableName, family); + final Table table = TEST_UTIL.createTable(tableName, family); long now = EnvironmentEdgeManager.currentTime(); // put a row into the first table @@ -191,28 +195,37 @@ public void testWALPlayerBulkLoadWithOverriddenTimestamps() throws Exception { configuration.setBoolean(WALPlayer.MULTI_TABLES_SUPPORT, true); WALPlayer player = new WALPlayer(configuration); - assertEquals(0, ToolRunner.run(configuration, player, - new String[] { walInputDir, tableName.getNameAsString() })); + final byte[] finalLastVal = lastVal; + + runWithDiskBasedSortingDisabledAndEnabled(() -> { + assertEquals(0, ToolRunner.run(configuration, player, + new String[] { walInputDir, tableName.getNameAsString() })); - Get g = new Get(row); - Result result = table.get(g); - byte[] value = CellUtil.cloneValue(result.getColumnLatestCell(family, column1)); - assertThat(Bytes.toStringBinary(value), equalTo(Bytes.toStringBinary(lastVal))); + Get g = new Get(row); + Result result = table.get(g); + byte[] value = CellUtil.cloneValue(result.getColumnLatestCell(family, column1)); + assertThat(Bytes.toStringBinary(value), equalTo(Bytes.toStringBinary(finalLastVal))); - table = TEST_UTIL.truncateTable(tableName); - g = new Get(row); - result = table.get(g); - assertThat(result.listCells(), nullValue()); + TEST_UTIL.truncateTable(tableName); + g = new Get(row); + result = table.get(g); + assertThat(result.listCells(), nullValue()); - BulkLoadHFiles.create(configuration).bulkLoad(tableName, - new Path(outPath, tableName.getNamespaceAsString() + "/" + tableName.getNameAsString())); + BulkLoadHFiles.create(configuration).bulkLoad(tableName, + new Path(outPath, tableName.getNamespaceAsString() + "/" + tableName.getNameAsString())); - g = new Get(row); - result = table.get(g); - value = CellUtil.cloneValue(result.getColumnLatestCell(family, column1)); + g = new Get(row); + result = table.get(g); + value = CellUtil.cloneValue(result.getColumnLatestCell(family, column1)); - assertThat(result.listCells(), notNullValue()); - assertThat(Bytes.toStringBinary(value), equalTo(Bytes.toStringBinary(lastVal))); + assertThat(result.listCells(), notNullValue()); + assertThat(Bytes.toStringBinary(value), equalTo(Bytes.toStringBinary(finalLastVal))); + + // cleanup + Path out = new Path(outPath); + FileSystem fs = out.getFileSystem(configuration); + assertTrue(fs.delete(out, true)); + }); } /** @@ -247,18 +260,21 @@ public void testWALPlayer() throws Exception { Configuration configuration = TEST_UTIL.getConfiguration(); WALPlayer player = new WALPlayer(configuration); - String optionName = "_test_.name"; - configuration.set(optionName, "1000"); - player.setupTime(configuration, optionName); - assertEquals(1000, configuration.getLong(optionName, 0)); - assertEquals(0, ToolRunner.run(configuration, player, - new String[] { walInputDir, tableName1.getNameAsString(), tableName2.getNameAsString() })); - - // verify the WAL was player into table 2 - Get g = new Get(ROW); - Result r = t2.get(g); - assertEquals(1, r.size()); - assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN2)); + + runWithDiskBasedSortingDisabledAndEnabled(() -> { + String optionName = "_test_.name"; + 
configuration.set(optionName, "1000"); + player.setupTime(configuration, optionName); + assertEquals(1000, configuration.getLong(optionName, 0)); + assertEquals(0, ToolRunner.run(configuration, player, + new String[] { walInputDir, tableName1.getNameAsString(), tableName2.getNameAsString() })); + + // verify the WAL was player into table 2 + Get g = new Get(ROW); + Result r = t2.get(g); + assertEquals(1, r.size()); + assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN2)); + }); } /** @@ -281,7 +297,7 @@ private void testWALKeyValueMapper(final String tableConfigKey) throws Exception WALKey key = mock(WALKey.class); when(key.getTableName()).thenReturn(TableName.valueOf("table")); @SuppressWarnings("unchecked") - Mapper.Context context = mock(Context.class); + Mapper, Cell>.Context context = mock(Context.class); when(context.getConfiguration()).thenReturn(configuration); WALEdit value = mock(WALEdit.class); @@ -338,7 +354,29 @@ public void testMainMethod() throws Exception { System.setErr(oldPrintStream); System.setSecurityManager(SECURITY_MANAGER); } + } + + private static void runWithDiskBasedSortingDisabledAndEnabled(TestMethod method) + throws Exception { + TEST_UTIL.getConfiguration().setBoolean(HFileOutputFormat2.DISK_BASED_SORTING_ENABLED_KEY, + false); + try { + method.run(); + } finally { + TEST_UTIL.getConfiguration().unset(HFileOutputFormat2.DISK_BASED_SORTING_ENABLED_KEY); + } + + TEST_UTIL.getConfiguration().setBoolean(HFileOutputFormat2.DISK_BASED_SORTING_ENABLED_KEY, + true); + try { + method.run(); + } finally { + TEST_UTIL.getConfiguration().unset(HFileOutputFormat2.DISK_BASED_SORTING_ENABLED_KEY); + } + } + private interface TestMethod { + void run() throws Exception; } @Test diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java index e6a9d8fb2bdf..6e68ce5f1900 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java @@ -262,7 +262,7 @@ public interface RemoteProcedure { * Called when RS tells the remote procedure is succeeded through the * {@code reportProcedureDone} method. */ - void remoteOperationCompleted(TEnv env); + void remoteOperationCompleted(TEnv env, byte[] remoteResultData); /** * Called when RS tells the remote procedure is failed through the {@code reportProcedureDone} diff --git a/hbase-protocol-shaded/pom.xml b/hbase-protocol-shaded/pom.xml index 091140210d1b..8cf6efa51e8f 100644 --- a/hbase-protocol-shaded/pom.xml +++ b/hbase-protocol-shaded/pom.xml @@ -108,61 +108,11 @@ of com.google.protobuf so instead its o.a.h.h.com.google.protobuf. Plugin is old and in google code archive. Here is usage done by anohther: https://github.com/beiliubei/maven-replacer-plugin/wiki/Usage-Guide - The mess with the regex in the below is to prevent replacement every time - we run mvn install. There is probably a better way of avoiding the - double interpolation but this is it for now. 
--> com.google.code.maven-replacer-plugin replacer - - org.apache.maven.plugins - maven-shade-plugin - 3.4.1 - - - - shade - - package - - true - true - - - - com.google.protobuf - org.apache.hadoop.hbase.shaded.com.google.protobuf - - - - - - javax.annotation:javax.annotation-api - - org.apache.hbase.thirdparty:* - com.google.protobuf:protobuf-java - com.google.code.findbugs:* - com.google.j2objc:j2objc-annotations - org.codehaus.mojo:animal-sniffer-annotations - junit:junit - log4j:log4j - commons-logging:commons-logging - org.slf4j:slf4j-api - org.apache.yetus:audience-annotations - com.github.stephenc.fingbugs:* - com.github.spotbugs:* - - - - - - org.apache.maven.plugins maven-checkstyle-plugin diff --git a/hbase-protocol-shaded/src/main/protobuf/HBase.proto b/hbase-protocol-shaded/src/main/protobuf/HBase.proto index 0fd3d667d4d0..c66ee7eb9791 100644 --- a/hbase-protocol-shaded/src/main/protobuf/HBase.proto +++ b/hbase-protocol-shaded/src/main/protobuf/HBase.proto @@ -289,3 +289,7 @@ message RotateFileData { required int64 timestamp = 1; required bytes data = 2; } + +message LastHighestWalFilenum { + map file_num = 1; +} diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto index a8adaa27453f..6dd6ee723b02 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto @@ -799,6 +799,15 @@ message ModifyColumnStoreFileTrackerResponse { message FlushMasterStoreRequest {} message FlushMasterStoreResponse {} +message RollAllWALWritersRequest { + optional uint64 nonce_group = 1 [default = 0]; + optional uint64 nonce = 2 [default = 0]; +} + +message RollAllWALWritersResponse { + optional uint64 proc_id = 1; +} + service MasterService { /** Used by the client to get the number of regions that have received the updated schema */ rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest) @@ -1270,6 +1279,12 @@ service MasterService { rpc FlushTable(FlushTableRequest) returns(FlushTableResponse); + + rpc RestoreBackupSystemTable(RestoreBackupSystemTableRequest) + returns(RestoreBackupSystemTableResponse); + + rpc rollAllWALWriters(RollAllWALWritersRequest) + returns(RollAllWALWritersResponse); } // HBCK Service definitions. 
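As a quick illustration of the message shapes added to Master.proto above, a hedged sketch of how the generated classes would typically be used; it assumes the usual shaded codegen into MasterProtos (package org.apache.hadoop.hbase.shaded.protobuf.generated), and the class below is illustrative only, not part of the patch.

import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RollAllWALWritersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RollAllWALWritersResponse;

final class RollAllWALWritersSketch { // illustrative only
  // Build the request accepted by the new rollAllWALWriters RPC; both nonce fields default to 0
  // and guard against duplicate submissions, as with other nonce-carrying master requests.
  static RollAllWALWritersRequest newRequest(long nonceGroup, long nonce) {
    return RollAllWALWritersRequest.newBuilder()
      .setNonceGroup(nonceGroup)
      .setNonce(nonce)
      .build();
  }

  // The response optionally carries the id of the procedure the master scheduled for the roll.
  static long procId(RollAllWALWritersResponse response) {
    return response.hasProcId() ? response.getProcId() : -1L;
  }
}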
@@ -1357,6 +1372,13 @@ message FixMetaRequest {} message FixMetaResponse {} +message RestoreBackupSystemTableRequest { + required string snapshot_name = 1; +} +message RestoreBackupSystemTableResponse { + optional uint64 proc_id = 1; +} + service HbckService { /** Update state of the table in meta only*/ rpc SetTableStateInMeta(SetTableStateInMetaRequest) diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto index e3b43afd66aa..7e6c6c8e2fc7 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto @@ -839,3 +839,28 @@ message ReloadQuotasProcedureStateData { required ServerName target_server = 1; optional ForeignExceptionMessage error = 2; } + +enum RestoreBackupSystemTableState { + RESTORE_BACKUP_SYSTEM_TABLE_PREPARE = 1; + RESTORE_BACKUP_SYSTEM_TABLE_DISABLE = 2; + RESTORE_BACKUP_SYSTEM_TABLE_RESTORE = 3; + RESTORE_BACKUP_SYSTEM_TABLE_ENABLE = 4; +} + +enum LogRollProcedureState { + LOG_ROLL_ROLL_LOG_ON_RS = 1; + LOG_ROLL_COLLECT_RS_HIGHEST_WAL_FILENUM = 2; + LOG_ROLL_UNREGISTER_SERVER_LISTENER = 3; +} + +message LogRollRemoteProcedureStateData { + required ServerName target_server = 1; +} + +message RSLogRollParameter { +} + +message LogRollRemoteProcedureResult { + optional ServerName server_name = 1; + optional uint64 last_highest_wal_filenum = 2; +} diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/RegionServerStatus.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/RegionServerStatus.proto index e68ba8e72869..3d2d8c6ff5fd 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/RegionServerStatus.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/RegionServerStatus.proto @@ -160,6 +160,7 @@ message RemoteProcedureResult { optional ForeignExceptionMessage error = 3; // Master active time as fencing token optional int64 initiating_master_active_time = 4; + optional bytes proc_result_data = 5; } message ReportProcedureDoneRequest { repeated RemoteProcedureResult result = 1; diff --git a/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto b/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto index 230795f27479..30eb328fd3cd 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto @@ -420,5 +420,4 @@ service AdminService { rpc GetCachedFilesList(GetCachedFilesListRequest) returns(GetCachedFilesListResponse); - } diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/TableReplicationQueueStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/TableReplicationQueueStorage.java index 8fff3d461f74..415c312ddcd7 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/TableReplicationQueueStorage.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/TableReplicationQueueStorage.java @@ -376,7 +376,7 @@ public void removeAllQueues(String peerId) throws ReplicationException { table.delete(new Delete(result.getRow())); } } catch (IOException e) { - throw new ReplicationException("failed to listAllQueueIds, peerId=" + peerId, e); + throw new ReplicationException("failed to removeAllQueues, peerId=" + peerId, e); } } diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index 
1abee9db3ba0..e27ab2278877 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -305,6 +305,11 @@
       <artifactId>mockito-core</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-inline</artifactId>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>org.slf4j</groupId>
       <artifactId>jcl-over-slf4j</artifactId>
@@ -559,12 +564,26 @@
 [build-section hunk; only the diff markers survive in this rendering: + + + + + + + + + + + + + - + - +]
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon
deleted file mode 100644
index 8798e930c707..000000000000
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon
+++ /dev/null
@@ -1,140 +0,0 @@
-[140-line Jamon template removed: took TaskMonitor/filter/format/parent args and rendered the task monitor view, with tabs for all/general/handler/rpc/operation tasks, per-tab "View as JSON" links, and the running-tasks table (Start Time, Description, State, Status, Completion Time) plus a stateCss def for alert styling]
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
deleted file mode 100644
index ee899a7340dc..000000000000
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
+++ /dev/null
@@ -1,128 +0,0 @@
-[128-line Jamon template removed: rendered the paginated "Regions in Transition" section, including counts of regions over the RIT threshold and a table of Region, State, RIT time (ms), and Retries per region in transition]
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/BackupMasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/BackupMasterStatusTmpl.jamon
deleted file mode 100644
index 21af264bbe34..000000000000
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/BackupMasterStatusTmpl.jamon
+++ /dev/null
@@ -1,70 +0,0 @@
-[70-line Jamon template removed: on a backup master it rendered a link to the current active master; on the active master it rendered the Backup Masters table (ServerName, Port, Start Time) and a total count]
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
deleted file mode 100644
index c80707d8c83b..000000000000
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ /dev/null
@@ -1,800 +0,0 @@
-[800-line Jamon template removed: the main master status page, covering the warning banners (bad JVM version, catalog janitor disabled, maintenance mode, balancer off, split/merge disabled), the Regions in Transition and RSGroup includes, the Region Servers and Dead Region Servers sections, backup master status, the user-table and catalog-table listings (hbase:meta, hbase:acl, hbase:labels, hbase:quota, hbase:rsgroup, hbase:replication descriptions), the region visualizer, the replication peer configuration table, the task monitor include, and the Software Attributes table (JVM/HBase/Hadoop/ZooKeeper versions and build info, ZK quorum and base path, cluster key, HBase root directory, master start/active times, cluster ID, load average, fragmentation, coprocessors, load balancer)]
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.jamon
deleted file mode 100644
index 277c90e53b13..000000000000
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.jamon
+++ /dev/null
@@ -1,393 +0,0 @@
-[393-line Jamon template removed: rendered the per-RSGroup tabs (base stats, memory, requests, storefiles, compactions), aggregating ServerMetrics and RegionMetrics across each group's servers, with a rsGroupLink def that rendered each group name as a link]
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
deleted file mode 100644
index c748f4162179..000000000000
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
+++ /dev/null
@@ -1,538 +0,0 @@
-[538-line Jamon template removed: rendered the region server list tabs (base stats, memory, requests, storefiles, compactions, replication); the remainder of the removed template body continues below]
- -
-
- <& baseStats; serverNames = serverNames; &> -
-
- <& memoryStats; serverNames = serverNames; &> -
-
- <& requestStats; serverNames = serverNames; &> -
-
- <& storeStats; serverNames = serverNames; &> -
-
- <& compactionStats; serverNames = serverNames; &> -
-
- <& replicationStats; serverNames = serverNames; &> -
-
-
- - - -<%def baseStats> -<%args> - ServerName [] serverNames; - - - - - - - - - - - - <%if !master.isInMaintenanceMode() && master.getMasterCoprocessorHost() != null %> - <%if RSGroupUtil.isRSGroupEnabled(master.getConfiguration()) %> - - - - - - -<%java> - int totalRegions = 0; - int totalRequestsPerSecond = 0; - int inconsistentNodeNum = 0; - String state = "Normal"; - String masterVersion = VersionInfo.getVersion(); - Set decommissionedServers = new HashSet<>(master.listDecommissionedRegionServers()); - String rsGroupName = "default"; - List groups; - Map server2GroupMap = new HashMap<>(); - if (!master.isInMaintenanceMode() && master.getMasterCoprocessorHost() != null - && RSGroupUtil.isRSGroupEnabled(master.getConfiguration())) { - groups = master.getRSGroupInfoManager().listRSGroups(); - groups.forEach(group -> { - group.getServers().forEach(address -> server2GroupMap.put(address, group)); - }); - } - for (ServerName serverName: serverNames) { - if (decommissionedServers.contains(serverName)) { - state = "Decommissioned"; - } - ServerMetrics sl = master.getServerManager().getLoad(serverName); - String version = master.getRegionServerVersion(serverName); - if (!masterVersion.equals(version)) { - inconsistentNodeNum ++; - } - - double requestsPerSecond = 0.0; - int numRegionsOnline = 0; - long lastContact = 0; - - if (sl != null) { - requestsPerSecond = sl.getRequestCountPerSecond(); - numRegionsOnline = sl.getRegionMetrics().size(); - totalRegions += sl.getRegionMetrics().size(); - totalRequestsPerSecond += sl.getRequestCountPerSecond(); - lastContact = (System.currentTimeMillis() - sl.getReportTimestamp())/1000; - } - long startcode = serverName.getStartcode(); - if (!master.isInMaintenanceMode() && master.getMasterCoprocessorHost() != null - && RSGroupUtil.isRSGroupEnabled(master.getConfiguration())) { - rsGroupName = server2GroupMap.get(serverName.getAddress()).getName(); - } - - - - - - - - - - <%if !master.isInMaintenanceMode() && master.getMasterCoprocessorHost() != null %> - <%if RSGroupUtil.isRSGroupEnabled(master.getConfiguration()) %> - - - - -<%java> -} - - - - - - -<%if inconsistentNodeNum > 0%> - -<%else> - - - - - -
<th>ServerName</th><th>State</th><th>Start time</th><th>Last contact</th><th>Version</th><th>Requests Per Second</th><th>Num. Regions</th><th>RSGroup</th>
<& serverNameLink; serverName=serverName; &><% state %><% new Date(startcode) %><% TraditionalBinaryPrefix.long2String(lastContact, "s", 1) %><% version %><% String.format("%,.0f", requestsPerSecond) %><% String.format("%,d", numRegionsOnline) %><% rsGroupName %>
Total:<% servers.size() %><% inconsistentNodeNum %> nodes with inconsistent version<% totalRequestsPerSecond %><% totalRegions %>
- - -<%def memoryStats> -<%args> - ServerName [] serverNames; - - - - - - - - - - - - -<%java> -final String ZEROMB = "0 MB"; -for (ServerName serverName: serverNames) { - String usedHeapStr = ZEROMB; - String maxHeapStr = ZEROMB; - String memStoreSizeMBStr = ZEROMB; - ServerMetrics sl = master.getServerManager().getLoad(serverName); - if (sl != null) { - long memStoreSizeMB = 0; - for (RegionMetrics rl : sl.getRegionMetrics().values()) { - memStoreSizeMB += rl.getMemStoreSize().get(Size.Unit.MEGABYTE); - } - if (memStoreSizeMB > 0) { - memStoreSizeMBStr = TraditionalBinaryPrefix.long2String(memStoreSizeMB - * TraditionalBinaryPrefix.MEGA.value, "B", 1); - } - - double usedHeapSizeMB = sl.getUsedHeapSize().get(Size.Unit.MEGABYTE); - if (usedHeapSizeMB > 0) { - usedHeapStr = TraditionalBinaryPrefix.long2String((long) usedHeapSizeMB - * TraditionalBinaryPrefix.MEGA.value, "B", 1); - } - double maxHeapSizeMB = sl.getMaxHeapSize().get(Size.Unit.MEGABYTE); - if (maxHeapSizeMB > 0) { - maxHeapStr = TraditionalBinaryPrefix.long2String((long) maxHeapSizeMB - * TraditionalBinaryPrefix.MEGA.value, "B", 1); - } - - - - - - - -<%java> - } else { - -<& emptyStat; serverName=serverName; &> -<%java> - } -} - - -
<th>ServerName</th><th>Used Heap</th><th>Max Heap</th><th>Memstore Size</th>
<& serverNameLink; serverName=serverName; &><% usedHeapStr %><% maxHeapStr %><% memStoreSizeMBStr %>
- - - -<%def requestStats> -<%args> - ServerName [] serverNames; - - - - - - - - - - - - -<%java> -for (ServerName serverName: serverNames) { - -ServerMetrics sl = master.getServerManager().getLoad(serverName); -if (sl != null) { - long readRequestCount = 0; - long writeRequestCount = 0; - long filteredReadRequestCount = 0; - for (RegionMetrics rl : sl.getRegionMetrics().values()) { - readRequestCount += rl.getReadRequestCount(); - writeRequestCount += rl.getWriteRequestCount(); - filteredReadRequestCount += rl.getFilteredReadRequestCount(); - } - - - - - - - - -<%java> - } else { - -<& emptyStat; serverName=serverName; &> -<%java> - } -} - - -
<th>ServerName</th><th>Request Per Second</th><th>Read Request Count</th><th>Filtered Read Request Count</th><th>Write Request Count</th>
<& serverNameLink; serverName=serverName; &><% String.format("%,d", sl.getRequestCountPerSecond()) %><% String.format("%,d", readRequestCount) %><% String.format("%,d", filteredReadRequestCount) %><% String.format("%,d", writeRequestCount) %>
- - - -<%def storeStats> -<%args> - ServerName [] serverNames; - - - - - - - - - - - - - - -<%java> -final String ZEROKB = "0 KB"; -final String ZEROMB = "0 MB"; -for (ServerName serverName: serverNames) { - - String storeUncompressedSizeMBStr = ZEROMB; - String storeFileSizeMBStr = ZEROMB; - String totalStaticIndexSizeKBStr = ZEROKB; - String totalStaticBloomSizeKBStr = ZEROKB; - ServerMetrics sl = master.getServerManager().getLoad(serverName); - if (sl != null) { - long storeCount = 0; - long storeFileCount = 0; - long storeUncompressedSizeMB = 0; - long storeFileSizeMB = 0; - long totalStaticIndexSizeKB = 0; - long totalStaticBloomSizeKB = 0; - for (RegionMetrics rl : sl.getRegionMetrics().values()) { - storeCount += rl.getStoreCount(); - storeFileCount += rl.getStoreFileCount(); - storeUncompressedSizeMB += rl.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE); - storeFileSizeMB += rl.getStoreFileSize().get(Size.Unit.MEGABYTE); - totalStaticIndexSizeKB += rl.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE); - totalStaticBloomSizeKB += rl.getBloomFilterSize().get(Size.Unit.KILOBYTE); - } - if (storeUncompressedSizeMB > 0) { - storeUncompressedSizeMBStr = TraditionalBinaryPrefix. - long2String(storeUncompressedSizeMB * TraditionalBinaryPrefix.MEGA.value, "B", 1); - } - if (storeFileSizeMB > 0) { - storeFileSizeMBStr = TraditionalBinaryPrefix. - long2String(storeFileSizeMB * TraditionalBinaryPrefix.MEGA.value, "B", 1); - } - if (totalStaticIndexSizeKB > 0) { - totalStaticIndexSizeKBStr = TraditionalBinaryPrefix. - long2String(totalStaticIndexSizeKB * TraditionalBinaryPrefix.KILO.value, "B", 1); - } - if (totalStaticBloomSizeKB > 0) { - totalStaticBloomSizeKBStr = TraditionalBinaryPrefix. - long2String(totalStaticBloomSizeKB * TraditionalBinaryPrefix.KILO.value, "B", 1); - } - - - - - - - - - - -<%java> - } else { - -<& emptyStat; serverName=serverName; &> -<%java> - } -} - - -
<th>ServerName</th><th>Num. Stores</th><th>Num. Storefiles</th><th>Storefile Size Uncompressed</th><th>Storefile Size</th><th>Index Size</th><th>Bloom Size</th>
<& serverNameLink; serverName=serverName; &><% String.format("%,d", storeCount) %><% String.format("%,d", storeFileCount) %><% storeUncompressedSizeMBStr %><% storeFileSizeMBStr %><% totalStaticIndexSizeKBStr %><% totalStaticBloomSizeKBStr %>
- - -<%def compactionStats> -<%args> - ServerName [] serverNames; - - - - - - - - - - - - -<%java> -for (ServerName serverName: serverNames) { - -ServerMetrics sl = master.getServerManager().getLoad(serverName); -if (sl != null) { -long totalCompactingCells = 0; -long totalCompactedCells = 0; -for (RegionMetrics rl : sl.getRegionMetrics().values()) { - totalCompactingCells += rl.getCompactingCellCount(); - totalCompactedCells += rl.getCompactedCellCount(); -} -String percentDone = ""; -if (totalCompactingCells > 0) { - percentDone = String.format("%.2f", 100 * - ((float) totalCompactedCells / totalCompactingCells)) + "%"; -} - - - - - - - - -<%java> - } else { - -<& emptyStat; serverName=serverName; &> -<%java> - } -} - - -
<th>ServerName</th><th>Num. Compacting Cells</th><th>Num. Compacted Cells</th><th>Remaining Cells</th><th>Compaction Progress</th>
<& serverNameLink; serverName=serverName; &><% String.format("%,d", totalCompactingCells) %><% String.format("%,d", totalCompactedCells) %><% String.format("%,d", totalCompactingCells - totalCompactedCells) %><% percentDone %>
- - -<%def replicationStats> -<%args> - ServerName [] serverNames; - -<%java> - HashMap>> replicationLoadSourceMap - = master.getReplicationLoad(serverNames); - List peers = null; - if (replicationLoadSourceMap != null && replicationLoadSourceMap.size() > 0){ - peers = new ArrayList<>(replicationLoadSourceMap.keySet()); - Collections.sort(peers); - } - - -<%if (replicationLoadSourceMap != null && replicationLoadSourceMap.size() > 0) %> - -
- -
- <%java> - active = "active"; - for (String peer : peers){ - -
- - - - - - - - - <%for Pair pair: replicationLoadSourceMap.get(peer) %> - - - - - - - -
<th>Server</th><th>AgeOfLastShippedOp</th><th>SizeOfLogQueue</th><th>ReplicationLag</th>
<& serverNameLink; serverName=pair.getFirst(); &><% StringUtils.humanTimeDiff(pair.getSecond().getAgeOfLastShippedOp()) %><% pair.getSecond().getSizeOfLogQueue() %><% pair.getSecond().getReplicationLag() == Long.MAX_VALUE ? "UNKNOWN" : StringUtils.humanTimeDiff(pair.getSecond().getReplicationLag()) %>
-
- <%java> - active = ""; - } - -
-

If the replication delay is UNKNOWN, it means this walGroup has not started replicating yet and it may be disabled.
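A minimal restatement of that rule as a helper, for illustration only (the table above inlines the same check); it assumes the lag value is in milliseconds, as humanTimeDiff expects:

import org.apache.hadoop.hbase.procedure2.util.StringUtils;

final class ReplicationLagFormat {
  // A lag of Long.MAX_VALUE means this wal group has not shipped anything yet,
  // so the UI renders "UNKNOWN" instead of a time difference.
  static String format(long lagMillis) {
    return lagMillis == Long.MAX_VALUE ? "UNKNOWN" : StringUtils.humanTimeDiff(lagMillis);
  }
}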

-
-<%else> -

No Peers Metrics

- - - - - -<%def serverNameLink> - <%args> - ServerName serverName; - - <%java> - int infoPort = master.getRegionServerInfoPort(serverName); - String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status"; - - - <%if infoPort > 0%> - <% serverName.getServerName() %> - <%else> - <% serverName.getServerName() %> - - - -<%def emptyStat> - <%args> - ServerName serverName; - - - <& serverNameLink; serverName=serverName; &> - - - - - - - - - - - - - - - diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionVisualizerTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionVisualizerTmpl.jamon deleted file mode 100644 index 9a98cfefed7f..000000000000 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionVisualizerTmpl.jamon +++ /dev/null @@ -1,119 +0,0 @@ -<%doc> - -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - - - - - - -
- diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon deleted file mode 100644 index 82609aad7190..000000000000 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon +++ /dev/null @@ -1,564 +0,0 @@ -<%doc> -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -Template for rendering Block Cache tabs in RegionServer Status page. - -<%args> -CacheConfig cacheConfig; -Configuration config; -BlockCache bc; - -<%java> - String bcUrl = bc == null ? null : "http://hbase.apache.org/devapidocs/" + bc.getClass().getName().replaceAll("\\.", "/") + ".html"; - String bcName = bc == null ? null : bc.getClass().getSimpleName(); - BlockCache [] bcs = bc == null ? null : bc.getBlockCaches(); - boolean evictions = bcs != null && bcs.length > 1; - -<%import> -java.util.Map; -org.apache.hadoop.hbase.io.hfile.BlockCacheUtil; -org.apache.hadoop.hbase.io.hfile.BlockCacheUtil.CachedBlocksByFile; -org.apache.hadoop.hbase.io.hfile.AgeSnapshot; -org.apache.hadoop.hbase.io.hfile.CachedBlock; -org.apache.hadoop.conf.Configuration; -org.apache.hadoop.hbase.io.hfile.CacheConfig; -org.apache.hadoop.hbase.io.hfile.BlockCache; -org.apache.hadoop.hbase.io.hfile.LruBlockCache; -org.apache.hadoop.hbase.io.hfile.bucket.BucketCacheStats; -org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; -org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator; -org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.Bucket; -org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; - -
- -
-
- <& bc_baseInfo; bc = bc; bcUrl = bcUrl; bcName = bcName; &> -
-
- <& bc_config; cacheConfig = cacheConfig &> -
-
- <& bc_stats; bc = bc &> -
-
- <& bc_l; bc = bcs == null? bc: bcs[0]; name = "L1"; evictions = evictions; &> -
-
- <& bc_l; bc = bcs == null? null: bcs.length <= 1? null: bcs[1]; name = "L2"; evictions = evictions; &> -
-
-
- -<%def bc_baseInfo> -<%args> - BlockCache bc; - String bcUrl; - String bcName; - -<%java> - BlockCache [] bcs = bc == null? null: bc.getBlockCaches(); - String bcl1Url = null; - String bcl1Name = null; - String bcl2Url = null; - String bcl2Name = null; - if (bcs != null) { - BlockCache bcl1 = bcs[0]; - if (bcl1 != null) { - bcl1Url = "http://hbase.apache.org/devapidocs/" + bcl1.getClass().getName().replaceAll("\\.", "/") + ".html"; - bcl1Name = bcl1.getClass().getSimpleName(); - } - if (bcs.length == 2) { - BlockCache bcl2 = bcs[1]; - bcl2Url = "http://hbase.apache.org/devapidocs/" + bcl2.getClass().getName().replaceAll("\\.", "/") + ".html"; - bcl2Name = bcl2.getClass().getSimpleName(); - } - } - - - - - - - - - - - - - -
<th>Attribute</th><th>Value</th><th>Description</th>
Implementation<% bcName %>Block cache implementing class
-

See block cache in the HBase Reference Guide for help.

- - -<%def bc_config> -<%args> - CacheConfig cacheConfig; - -<%if cacheConfig == null %> -

CacheConfig is null

-<%else> - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
<th>Attribute</th><th>Value</th><th>Description</th>
Cache DATA on Read<% cacheConfig.shouldCacheDataOnRead() %>True if DATA blocks are cached on read - (INDEX & BLOOM blocks are always cached)
Cache DATA on Write<% cacheConfig.shouldCacheDataOnWrite() %>True if DATA blocks are cached on write.
Cache INDEX on Write<% cacheConfig.shouldCacheIndexesOnWrite() %>True if INDEX blocks are cached on write
Cache BLOOM on Write<% cacheConfig.shouldCacheBloomsOnWrite() %>True if BLOOM blocks are cached on write
Evict blocks on Close<% cacheConfig.shouldEvictOnClose() %>True if blocks are evicted from cache when an HFile - reader is closed
Cache DATA in compressed format<% cacheConfig.shouldCacheDataCompressed() %>True if DATA blocks are cached in their compressed form
Prefetch on Open<% cacheConfig.shouldPrefetchOnOpen() %>True if blocks are prefetched into cache on open
- - - -<%def evictions_tmpl> -<%args> - BlockCache bc; - -<%java> - AgeSnapshot ageAtEvictionSnapshot = bc.getStats().getAgeAtEvictionSnapshot(); - // Only show if non-zero mean and stddev as is the case in combinedblockcache - - - Evicted - <% String.format("%,d", bc.getStats().getEvictedCount()) %> - The total number of blocks evicted - - - Evictions - <% String.format("%,d", bc.getStats().getEvictionCount()) %> - The total number of times an eviction has occurred - - - Mean - <% String.format("%,d", (long)ageAtEvictionSnapshot.getMean()) %> - Mean age of Blocks at eviction time (seconds) - - - -<%def hits_tmpl> -<%args> - BlockCache bc; - -<%java> - int hitPeriods = 0; - for(int i=0; i -<%if hitPeriods > 0 %> - - - - Hits - <% String.format("%,d", bc.getStats().getHitCount()) %> - Number requests that were cache hits - - - Hits Caching - <% String.format("%,d", bc.getStats().getHitCachingCount()) %> - Cache hit block requests but only requests set to cache block if a miss - - - Misses - <% String.format("%,d", bc.getStats().getMissCount()) %> - Block requests that were cache misses but set to cache missed blocks - - - Misses Caching - <% String.format("%,d", bc.getStats().getMissCachingCount()) %> - Block requests that were cache misses but only requests set to use block cache - - - All Time Hit Ratio - <% String.format("%,.2f", bc.getStats().getHitRatio() * 100) %><% "%" %> - Hit Count divided by total requests count - - <%for int i=0; i - - Hit Ratio for period starting at <% bc.getStats().getWindowPeriods()[i] %> - <%if bc.getStats().getRequestCounts()[i] > 0 %> - <% String.format("%,.2f", ((double)bc.getStats().getHitCounts()[i] / (double)bc.getStats().getRequestCounts()[i]) * 100.0) %><% "%" %> - <%else> - No requests - - Hit Count divided by total requests count over the <% i %>th period of <% bc.getStats().getPeriodTimeInMinutes() %> minutes - - - <%if hitPeriods > 0 %> - - -
- - - - - - -
- - - - <%if bc.getStats().getPeriodTimeInMinutes() > 0 %> - - Last <% bc.getStats().getNumPeriodsInWindow()*bc.getStats().getPeriodTimeInMinutes() %> minutes Hit Ratio - <% String.format("%,.2f", bc.getStats().getHitRatioPastNPeriods() * 100.0) %><% "%" %> - Hit Count divided by total requests count for the last <% bc.getStats().getNumPeriodsInWindow()*bc.getStats().getPeriodTimeInMinutes() %> minutes - - - - - -<%def bc_stats> -<%args> - BlockCache bc; - -<%if bc == null %> -

BlockCache is null

-<%else> - - - - - - - - - - - - - - - - - - - - - - <& evictions_tmpl; bc = bc; &> - <& hits_tmpl; bc = bc; &> -
<th>Attribute</th><th>Value</th><th>Description</th>
Size<% TraditionalBinaryPrefix.long2String(bc.getCurrentSize(), - "B", 1) %>Current size of block cache in use
Free<% TraditionalBinaryPrefix.long2String(bc.getFreeSize(), - "B", 1) %>The total free memory currently available to store more cache entries
Count<% String.format("%,d", bc.getBlockCount()) %>Number of blocks in block cache
-

If block cache is made up of more than one cache -- i.e. a L1 and a L2 -- then the above -are combined counts. Request count is sum of hits and misses.

- - - -<%def bc_l> -<%args> - BlockCache bc; - String name; - boolean evictions; - -<%if bc == null %> -

No <% name %> deployed

-<%else> -<& block_cache; bc = bc; name = name; evictions = evictions; &> - - - -<%def block_cache> -<%args> - BlockCache bc; - String name; - boolean evictions; - -<%java> - String bcUrl = "http://hbase.apache.org/devapidocs/" + bc.getClass().getName().replaceAll("\\.", "/") + ".html"; - String bcName = bc.getClass().getSimpleName(); - int maxCachedBlocksByFile = BlockCacheUtil.getMaxCachedBlocksByFile(config); - - boolean lru = bc instanceof LruBlockCache; - - boolean bucketCache = bc.getClass().getSimpleName().equals("BucketCache"); - BucketCacheStats bucketCacheStats = null; - BucketAllocator bucketAllocator = null; - - if (bucketCache) { - bucketCacheStats = (BucketCacheStats)bc.getStats(); - bucketAllocator = ((BucketCache)bc).getAllocator(); - } - - - - - - - - - - - - -<%if bucketCache %> - - - - - - - - - - - - - - - - -<%if !bucketCache %> - - - - - - -<%if lru %> - - - - - - - - - - - - - - - - -<%if !bucketCache %> - - - - - - -<%if lru %> - - - - - - - - - - - - <& evictions_tmpl; bc = bc; &> -<& hits_tmpl; bc = bc; &> - -<%if bucketCache %> - - - - - - - - - - - -
<th>Attribute</th><th>Value</th><th>Description</th>
Implementation<% bc.getClass().getSimpleName() %>Class implementing this block cache Level
Implementation<% ((BucketCache)bc).getIoEngine() %>IOEngine
Cache Size Limit<% TraditionalBinaryPrefix.long2String(bc.getMaxSize(), "B", 1) %>Max size of cache
Block Count<% String.format("%,d", bc.getBlockCount()) %>Count of Blocks
Data Block Count<% String.format("%,d", bc.getDataBlockCount()) %>Count of DATA Blocks
Index Block Count<% String.format("%,d", ((LruBlockCache)bc).getIndexBlockCount()) %>Count of INDEX Blocks
Bloom Block Count<% String.format("%,d", ((LruBlockCache)bc).getBloomBlockCount()) %>Count of BLOOM Blocks
Size of Blocks<% TraditionalBinaryPrefix.long2String(bc.getCurrentSize(), "B", 1) %>Size of Blocks
Size of Data Blocks<% TraditionalBinaryPrefix.long2String(bc.getCurrentDataSize(), "B", 1) %>Size of DATA Blocks
Size of Index Blocks<% TraditionalBinaryPrefix.long2String(((LruBlockCache)bc).getCurrentIndexSize(), "B", 1) %>Size of INDEX Blocks
Size of Bloom Blocks<% TraditionalBinaryPrefix.long2String(((LruBlockCache)bc).getCurrentBloomSize(), "B", 1) %>Size of BLOOM Blocks
Hits per Second<% bucketCacheStats.getIOHitsPerSecond() %>Block gets against this cache per second
Time per Hit<% bucketCacheStats.getIOTimePerHit() %>Time per cache hit
- -<%doc>Call through to block cache Detail rendering template -

-View block cache as JSON | Block cache as JSON by file -<%if bc.getBlockCount() > maxCachedBlocksByFile %> -
-Note: JSON view of block cache will be incomplete, because block count <% bc.getBlockCount() %> is greater than hbase.ui.blockcache.by.file.max value of <% maxCachedBlocksByFile %>. Increase that value to get a complete picture.
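For context, the JSON views linked above can be fetched directly; a hedged sketch (not part of the patch) using the JDK HTTP client, where the host name and info port are placeholders and the query parameters (format, bcn, bcv) mirror the RSStatusTmpl/BlockCacheViewTmpl arguments that appear later in this diff:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class BlockCacheJsonFetch {
  public static void main(String[] args) throws Exception {
    HttpClient client = HttpClient.newHttpClient();
    // bcn selects the cache level (L1/L2); adding bcv=file groups the stats by HFile.
    URI byCache = URI.create("http://rs-host:16030/rs-status?format=json&bcn=L1");
    URI byFile = URI.create("http://rs-host:16030/rs-status?format=json&bcn=L1&bcv=file");
    for (URI uri : new URI[] { byCache, byFile }) {
      HttpResponse<String> rsp = client.send(HttpRequest.newBuilder(uri).GET().build(),
        HttpResponse.BodyHandlers.ofString());
      System.out.println(uri + " -> " + rsp.body());
    }
  }
}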

- - diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.jamon deleted file mode 100644 index ce55aaf12702..000000000000 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.jamon +++ /dev/null @@ -1,80 +0,0 @@ -<%doc> -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - - -This template is used to give views on an individual block cache as JSON. - -<%args> -CacheConfig cacheConfig; -Configuration conf; -String bcn; -String bcv; -BlockCache blockCache; - -<%import> -java.util.*; -org.apache.hadoop.conf.Configuration; -org.apache.hadoop.hbase.io.hfile.BlockCacheUtil.CachedBlocksByFile; -org.apache.hadoop.hbase.io.hfile.BlockCacheUtil; -org.apache.hadoop.hbase.io.hfile.CachedBlock; -org.apache.hadoop.hbase.io.hfile.CacheConfig; -org.apache.hadoop.hbase.io.hfile.BlockCache; -org.apache.hadoop.hbase.io.hfile.bucket.BucketCacheStats; -org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; -org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator; -org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.Bucket; -org.apache.hadoop.util.StringUtils; - -<%java> - BlockCache bc = blockCache; - BlockCache [] bcs = bc == null ? null : bc.getBlockCaches(); - if (bcn.equals("L1")) { - bc = bcs == null || bcs.length == 0? bc: bcs[0]; - } else { - if (bcs == null || bcs.length < 2) { - System.out.println("There is no L2 block cache"); - return; - } - bc = bcs[1]; - } - if (bc == null) { - System.out.println("There is no block cache"); - return; - } - CachedBlocksByFile cbsbf = BlockCacheUtil.getLoadedCachedBlocksByFile(conf, bc); - -<%if bcv.equals("file") %><& bc_by_file; cbsbf = cbsbf; &><%else>[ <% BlockCacheUtil.toJSON(bc) %>, <% BlockCacheUtil.toJSON(cbsbf) %> ] -<%java> -cbsbf = null; - - -<%def bc_by_file> -<%args> - CachedBlocksByFile cbsbf; - -<%java> - boolean firstEntry = true; - -[<%for Map.Entry> e: cbsbf.getCachedBlockStatsByFile().entrySet() %> - <%if !firstEntry %>,<% BlockCacheUtil.toJSON(e.getKey(), e.getValue()) %> - <%java> - if (firstEntry) { - firstEntry = false; - } - -] - diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon deleted file mode 100644 index 4b8047b9e446..000000000000 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon +++ /dev/null @@ -1,330 +0,0 @@ -<%doc> - -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. 
The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -<%args> -HRegionServer regionServer; -String filter = "general"; -String format = "html"; -String bcn = ""; -String bcv = ""; - -<%import> -java.util.*; -org.apache.hadoop.hbase.regionserver.HRegionServer; -org.apache.hadoop.hbase.client.RegionInfo; -org.apache.hadoop.hbase.ServerName; -org.apache.hadoop.hbase.HBaseConfiguration; -org.apache.hadoop.hbase.io.hfile.CacheConfig; -org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ServerInfo; -org.apache.hadoop.hbase.util.JvmVersion; -org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; - -<%doc>If json AND bcn is NOT an empty string presume it a block cache view request. -<%if format.equals("json") && bcn != null && bcn.length() > 0 %> - <& BlockCacheViewTmpl; conf = regionServer.getConfiguration(); cacheConfig = new CacheConfig(regionServer.getConfiguration()); bcn = bcn; bcv = bcv; blockCache = regionServer.getBlockCache().orElse(null) &> - <%java return; %> -<%elseif format.equals("json") %> - <& ../common/TaskMonitorTmpl; filter = filter; format = "json" &> - <%java return; %> - -<%java> - ServerInfo serverInfo = ProtobufUtil.getServerInfo(null, regionServer.getRSRpcServices()); - ServerName serverName = ProtobufUtil.toServerName(serverInfo.getServerName()); - List onlineRegions = ProtobufUtil.getOnlineRegions(regionServer.getRSRpcServices()); - MasterAddressTracker masterAddressTracker = regionServer.getMasterAddressTracker(); - ServerName masterServerName = masterAddressTracker == null ? null - : masterAddressTracker.getMasterAddress(); - int infoPort = masterAddressTracker == null ? 0 : masterAddressTracker.getMasterInfoPort(); - - -<%class> - public String formatZKString() { - StringBuilder quorums = new StringBuilder(); - String zkQuorum = regionServer.getZooKeeper().getQuorum(); - - if (null == zkQuorum) { - return quorums.toString(); - } - - String[] zks = zkQuorum.split(","); - - if (zks.length == 0) { - return quorums.toString(); - } - - for(int i = 0; i < zks.length; ++i) { - quorums.append(zks[i].trim()); - - if (i != (zks.length - 1)) { - quorums.append("
"); - } - } - - return quorums.toString(); - } - - - - - - - - HBase Region Server: <% serverName.getHostname() %> - - - - - - - - - - - - -
-
- -
-
- -
-

Server Metrics

- <& ServerMetricsTmpl; mWrap = regionServer.getMetrics().getRegionServerWrapper(); - mServerWrap = regionServer.getRpcServer().getMetrics().getHBaseServerWrapper(); - bbAllocator = regionServer.getRpcServer().getByteBuffAllocator(); &> -
- -
-

Block Cache

- <& BlockCacheTmpl; cacheConfig = new CacheConfig(regionServer.getConfiguration()); config = regionServer.getConfiguration(); bc = regionServer.getBlockCache().orElse(null) &> -
- -
- <& ../common/TaskMonitorTmpl; filter = filter; parent = "/rs-status" &> -
- -
-

Regions

- <& RegionListTmpl; regionServer = regionServer; onlineRegions = onlineRegions; &> -
- -
-

Replication Status

- <& ReplicationStatusTmpl; regionServer = regionServer; &> -
- -
-

Software Attributes

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - <%escape #n> - - - - - - - - - - - - - - - - - - -
<th>Attribute Name</th><th>Value</th><th>Description</th>
JVM Version<% JvmVersion.getVersion() %>JVM vendor and version
HBase Version<% org.apache.hadoop.hbase.util.VersionInfo.getVersion() %>, revision=<% org.apache.hadoop.hbase.util.VersionInfo.getRevision() %>HBase version and revision
HBase Compiled<% org.apache.hadoop.hbase.util.VersionInfo.getDate() %>, <% org.apache.hadoop.hbase.util.VersionInfo.getUser() %>When HBase version was compiled and by whom
HBase Source Checksum<% org.apache.hadoop.hbase.util.VersionInfo.getSrcChecksum() %>HBase source SHA512 checksum
Hadoop Version<% org.apache.hadoop.util.VersionInfo.getVersion() %>, revision=<% org.apache.hadoop.util.VersionInfo.getRevision() %>Hadoop version and revision
Hadoop Compiled<% org.apache.hadoop.util.VersionInfo.getDate() %>, <% org.apache.hadoop.util.VersionInfo.getUser() %>When Hadoop version was compiled and by whom
Hadoop Source Checksum<% org.apache.hadoop.util.VersionInfo.getSrcChecksum() %>Hadoop source MD5 checksum
ZooKeeper Client Version<% org.apache.zookeeper.Version.getVersion() %>, revision=<% org.apache.zookeeper.Version.getRevisionHash() %>ZooKeeper client version and revision hash
ZooKeeper Client Compiled<% org.apache.zookeeper.Version.getBuildDate() %>When ZooKeeper client version was compiled
ZooKeeper Quorum<% formatZKString() %>Addresses of all registered ZK servers
Coprocessors<% java.util.Arrays.toString(regionServer.getRegionServerCoprocessors()) %>Coprocessors currently loaded by this regionserver
RS Start Time<% new Date(regionServer.getStartcode()) %>Date stamp of when this region server was started
HBase Master - <%if masterServerName == null %> - No master found - <%else> - <%java> - String host = masterServerName.getHostname() + ":" + infoPort; - String url = "//" + host + "/master-status"; - - <% host %> - - Address of HBase Master
-
-
-
- - - - - - - diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon deleted file mode 100644 index c4b947308bad..000000000000 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon +++ /dev/null @@ -1,338 +0,0 @@ -<%doc> - - Licensed to the Apache Software Foundation (ASF) under one - or more contributor license agreements. See the NOTICE file - distributed with this work for additional information - regarding copyright ownership. The ASF licenses this file - to you under the Apache License, Version 2.0 (the - "License"); you may not use this file except in compliance - with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -<%args> - HRegionServer regionServer; - List onlineRegions; - -<%import> - java.util.*; - org.apache.commons.lang3.time.FastDateFormat; - org.apache.hadoop.hbase.regionserver.HRegionServer; - org.apache.hadoop.hbase.util.Bytes; - org.apache.hadoop.hbase.client.RegionInfo; - org.apache.hadoop.hbase.client.RegionInfoDisplay; - org.apache.hadoop.hbase.regionserver.Region; - org.apache.hadoop.hbase.regionserver.HStoreFile; - org.apache.hadoop.hbase.regionserver.Store; - org.apache.hadoop.hbase.regionserver.StoreFile; - org.apache.hadoop.hbase.ServerName; - org.apache.hadoop.hbase.HBaseConfiguration; - org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ServerInfo; - org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad; - org.apache.hadoop.hbase.client.RegionReplicaUtil; - org.apache.hadoop.hbase.regionserver.MetricsRegionWrapper; - org.apache.hadoop.util.StringUtils; - org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; - -<%if (onlineRegions != null && onlineRegions.size() > 0) %> - - <%java> - Collections.sort(onlineRegions, RegionInfo.COMPARATOR); - - -
- -
-
- <& baseInfo; onlineRegions = onlineRegions; &> -
-
- <& requestStats; onlineRegions = onlineRegions; &> -
-
- <& storeStats; onlineRegions = onlineRegions; &> -
-
- <& memstoreStats; onlineRegions = onlineRegions; &> -
-
- <& compactStats; onlineRegions = onlineRegions; &> -
-
-
-

Region names are made of the containing table's name, a comma, the start key, a comma, and a randomly generated region id. To illustrate, the region named domains,apache.org,5464829424211263407 is party to the table domains, has an id of 5464829424211263407 and the first key in the region is apache.org. The hbase:meta 'table' is an internal system table (or a 'catalog' table in db-speak). The hbase:meta table keeps a list of all regions in the system. The empty key is used to denote table start and table end. A region with an empty start key is the first region in a table. If a region has both an empty start key and an empty end key, it's the only region in the table. See HBase Home for further explication.
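To make the naming scheme described above concrete, a tiny illustrative snippet (plain Java, not part of the patch) that splits the sample region name into its parts; production code should go through RegionInfo/RegionInfoDisplay instead, since start keys can contain commas and arbitrary bytes:

public class RegionNameExample {
  public static void main(String[] args) {
    String regionName = "domains,apache.org,5464829424211263407";
    int first = regionName.indexOf(',');
    int last = regionName.lastIndexOf(',');
    String table = regionName.substring(0, first);            // "domains"
    String startKey = regionName.substring(first + 1, last);  // "apache.org"
    String regionId = regionName.substring(last + 1);         // "5464829424211263407"
    System.out.printf("table=%s startKey=%s id=%s%n", table, startKey, regionId);
  }
}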

-<%else> -

Not serving regions

- - -<%def baseInfo> -<%args> - List onlineRegions; - - - - - - - - - - - - - <%for RegionInfo r: onlineRegions %> - - <%java> - String displayName = RegionInfoDisplay.getRegionNameAsStringForDisplay(r, - regionServer.getConfiguration()); - - - - - - - - -
<th>Region Name</th><th>Start Key</th><th>End Key</th><th>ReplicaID</th>
<% displayName %><% Bytes.toStringBinary(RegionInfoDisplay.getStartKeyForDisplay(r, - regionServer.getConfiguration())) %><% Bytes.toStringBinary(RegionInfoDisplay.getEndKeyForDisplay(r, - regionServer.getConfiguration())) %><% r.getReplicaId() %>
- - -<%def requestStats> -<%args> - List onlineRegions; - - - - - - - - - - - - - <%for RegionInfo r: onlineRegions %> - - - <%java> - RegionLoad load = regionServer.createRegionLoad(r.getEncodedName()); - String displayName = RegionInfoDisplay.getRegionNameAsStringForDisplay(r, - regionServer.getConfiguration()); - - - <%if load != null %> - - - - - - - -
<th>Region Name</th><th>Read Request Count</th><th>Filtered Read Request Count</th><th>Write Request Count</th>
<% displayName %><% String.format("%,1d", load.getReadRequestsCount()) %><% String.format("%,1d", load.getFilteredReadRequestsCount()) %><% String.format("%,1d", load.getWriteRequestsCount()) %>
- - - -<%def storeStats> -<%args> - List onlineRegions; - - - - - - - - - - - - - - - - - - - <%for RegionInfo r: onlineRegions %> - - - <%java> - final String ZEROMB = "0 MB"; - final String ZEROKB = "0 KB"; - String uncompressedStorefileSizeStr = ZEROMB; - String storefileSizeStr = ZEROMB; - String indexSizeStr = ZEROKB; - String bloomSizeStr = ZEROKB; - RegionLoad load = regionServer.createRegionLoad(r.getEncodedName()); - String displayName = RegionInfoDisplay.getRegionNameAsStringForDisplay(r, - regionServer.getConfiguration()); - if (load != null) { - long uncompressedStorefileSize = load.getStoreUncompressedSizeMB(); - long storefileSize = load.getStorefileSizeMB(); - long indexSize = load.getTotalStaticIndexSizeKB(); - long bloomSize = load.getTotalStaticBloomSizeKB(); - if (uncompressedStorefileSize > 0) { - uncompressedStorefileSizeStr = TraditionalBinaryPrefix.long2String( - uncompressedStorefileSize * TraditionalBinaryPrefix.MEGA.value, "B", 1); - } - if (storefileSize > 0) { - storefileSizeStr = TraditionalBinaryPrefix.long2String(storefileSize - * TraditionalBinaryPrefix.MEGA.value, "B", 1); - } - if(indexSize > 0) { - indexSizeStr = TraditionalBinaryPrefix.long2String(indexSize - * TraditionalBinaryPrefix.KILO.value, "B", 1); - } - if (bloomSize > 0) { - bloomSizeStr = TraditionalBinaryPrefix.long2String(bloomSize - * TraditionalBinaryPrefix.KILO.value, "B", 1); - } - } - long lenOfBiggestCellInRegion = -1L; - Region region = regionServer.getRegion(r.getEncodedName()); - if (region != null) { - List stores = region.getStores(); - for (Store store : stores) { - Collection storeFiles = store.getStorefiles(); - for (StoreFile sf : storeFiles) { - long lenOfBiggestCell = ((HStoreFile)sf).getFileInfo().getHFileInfo().getLenOfBiggestCell(); - if (lenOfBiggestCellInRegion < lenOfBiggestCell) { - lenOfBiggestCellInRegion = lenOfBiggestCell; - } - } - } - } - - - - <%if load != null %> - - - - - - - - - - - - - -
<th>Region Name</th><th>Num. Stores</th><th>Num. Storefiles</th><th>Storefile Size Uncompressed</th><th>Storefile Size</th><th>Index Size</th><th>Bloom Size</th><th>Data Locality</th><th>Len Of Biggest Cell</th><th>% Cached</th>
<% displayName %><% String.format("%,1d", load.getStores()) %><% String.format("%,1d", load.getStorefiles()) %><% uncompressedStorefileSizeStr %><% storefileSizeStr %><% indexSizeStr %><% bloomSizeStr %><% load.getDataLocality() %><% String.format("%,1d", lenOfBiggestCellInRegion) %><% StringUtils.formatPercent(load.getCurrentRegionCachedRatio(), 2) %>
- - - -<%def compactStats> -<%args> - List onlineRegions; - - - - - - - - - - - - - - <%for RegionInfo r: onlineRegions %> - - - <%java> - RegionLoad load = regionServer.createRegionLoad(r.getEncodedName()); - String percentDone = ""; - String compactTime = ""; - if (load != null) { - if (load.getTotalCompactingKVs() > 0) { - percentDone = String.format("%.2f", 100 * - ((float) load.getCurrentCompactedKVs() / load.getTotalCompactingKVs())) + "%"; - } - if (load.getLastMajorCompactionTs() > 0) { - FastDateFormat fdf = FastDateFormat.getInstance("yyyy-MM-dd HH:mm (ZZ)"); - compactTime = fdf.format(load.getLastMajorCompactionTs()); - } - } - String displayName = RegionInfoDisplay.getRegionNameAsStringForDisplay(r, - regionServer.getConfiguration()); - - - <%if load != null %> - - - - - - - - -
<th>Region Name</th><th>Num. Compacting Cells</th><th>Num. Compacted Cells</th><th>Compaction Progress</th><th>Last Major Compaction</th>
<% displayName %><% String.format("%,1d", load.getTotalCompactingKVs()) %><% String.format("%,1d", load.getCurrentCompactedKVs()) %><% percentDone %><% compactTime %>
- - -<%def memstoreStats> -<%args> - List onlineRegions; - - - - - - - - - - - <%for RegionInfo r: onlineRegions %> - - - <%java> - final String ZEROMB = "0 MB"; - String memStoreSizeMBStr = ZEROMB; - RegionLoad load = regionServer.createRegionLoad(r.getEncodedName()); - String displayName = RegionInfoDisplay.getRegionNameAsStringForDisplay(r, - regionServer.getConfiguration()); - if (load != null) { - long memStoreSizeMB = load.getMemStoreSizeMB(); - if (memStoreSizeMB > 0) { - memStoreSizeMBStr = TraditionalBinaryPrefix.long2String( - memStoreSizeMB * TraditionalBinaryPrefix.MEGA.value, "B", 1); - } - } - - - <%if load != null %> - - - - - -
<th>Region Name</th><th>Memstore Size</th>
<% displayName %><% memStoreSizeMBStr %>
- diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ReplicationStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ReplicationStatusTmpl.jamon deleted file mode 100644 index 273b26aecd36..000000000000 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ReplicationStatusTmpl.jamon +++ /dev/null @@ -1,105 +0,0 @@ -<%doc> - - Licensed to the Apache Software Foundation (ASF) under one - or more contributor license agreements. See the NOTICE file - distributed with this work for additional information - regarding copyright ownership. The ASF licenses this file - to you under the Apache License, Version 2.0 (the - "License"); you may not use this file except in compliance - with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -<%args> - HRegionServer regionServer; - -<%import> - java.util.*; - java.util.Map.Entry; - org.apache.hadoop.hbase.procedure2.util.StringUtils; - org.apache.hadoop.hbase.regionserver.HRegionServer; - org.apache.hadoop.hbase.replication.regionserver.ReplicationStatus; - - -<%java> - Map walGroupsReplicationStatus = regionServer.getWalGroupsReplicationStatus(); - - -<%if (walGroupsReplicationStatus != null && walGroupsReplicationStatus.size() > 0) %> - -
- -
-
- <& currentLog; metrics = walGroupsReplicationStatus; &> -
-
- <& replicationDelay; metrics = walGroupsReplicationStatus; &> -
-
-
-

If the replication delay is UNKNOWN, that means this walGroup doesn't start replicate yet and it may get disabled. - If the size of log is 0, it means we are replicating current HLog, thus we can't get accurate size since it's not closed yet.

- -<%else> -

No Replication Metrics for Peers

- - -<%def currentLog> -<%args> - Map metrics; - - - - - - - - - - - <%for Map.Entry entry: metrics.entrySet() %> - - - - - - - - - -
<th>PeerId</th><th>WalGroup</th><th>Current Log</th><th>Size</th><th>Queue Size</th><th>Offset</th>
<% entry.getValue().getPeerId() %><% entry.getValue().getWalGroup() %><% entry.getValue().getCurrentPath() %> <% StringUtils.humanSize(entry.getValue().getFileSize()) %><% entry.getValue().getQueueSize() %><% StringUtils.humanSize(entry.getValue().getCurrentPosition()) %>
- - -<%def replicationDelay> -<%args> - Map metrics; - - - - - - - - - - <%for Map.Entry entry: metrics.entrySet() %> - - - - - - - - -
<th>PeerId</th><th>WalGroup</th><th>Current Log</th><th>Last Shipped Age</th><th>Replication Delay</th>
<% entry.getValue().getPeerId() %><% entry.getValue().getWalGroup() %><% entry.getValue().getCurrentPath() %> <% StringUtils.humanTimeDiff(entry.getValue().getAgeOfLastShippedOp()) %><% entry.getValue().getReplicationDelay() == Long.MAX_VALUE ? "UNKNOWN" : StringUtils.humanTimeDiff(entry.getValue().getReplicationDelay()) %>
- diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon deleted file mode 100644 index 7805a8b09087..000000000000 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon +++ /dev/null @@ -1,265 +0,0 @@ -<%doc> - -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -<%args> -MetricsRegionServerWrapper mWrap; -MetricsHBaseServerWrapper mServerWrap; -ByteBuffAllocator bbAllocator; - -<%import> -java.util.*; -org.apache.hadoop.hbase.regionserver.HRegionServer; -org.apache.hadoop.hbase.io.ByteBuffAllocator; -org.apache.hadoop.hbase.ipc.MetricsHBaseServerWrapper; -org.apache.hadoop.hbase.regionserver.MetricsRegionServerWrapper; -org.apache.hadoop.hbase.util.Bytes; -org.apache.hadoop.hbase.ServerName; -org.apache.hadoop.hbase.HBaseConfiguration; -org.apache.hadoop.hbase.util.DirectMemoryUtils; -org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; -java.lang.management.MemoryUsage; -org.apache.hadoop.hbase.io.util.MemorySizeUtil; - -
- -
-
- <& baseStats; mWrap = mWrap &> -
-
- <& memoryStats; mWrap = mWrap &> -
-
- <& requestStats; mWrap = mWrap &> -
-
- <& walStats; mWrap = mWrap &> -
-
- <& storeStats; mWrap = mWrap &> -
-
- <& queueStats; mWrap = mWrap; mServerWrap = mServerWrap; &> -
-
- <& byteBuffAllocatorStats; bbAllocator = bbAllocator; &> -
-
-
- -<%def baseStats> -<%args> - MetricsRegionServerWrapper mWrap; - - - - - - - - - - - - - - - - - -
<th>Requests Per Second</th><th>Num. Regions</th><th>Block locality</th><th>Block locality (Secondary replicas)</th><th>Slow WAL Append Count</th>
<% String.format("%.0f", mWrap.getRequestsPerSecond()) %><% mWrap.getNumOnlineRegions() %><% String.format("%.3f",mWrap.getPercentFileLocal()) %><% "%" %><% String.format("%.3f",mWrap.getPercentFileLocalSecondaryRegions()) %><% "%" %><% mWrap.getNumWALSlowAppend() %>
- - -<%def memoryStats> -<%args> -MetricsRegionServerWrapper mWrap; - -<%java - long usedHeap = -1L; - long maxHeap = -1L; - final MemoryUsage usage = MemorySizeUtil.safeGetHeapMemoryUsage(); - if (usage != null) { - maxHeap = usage.getMax(); - usedHeap = usage.getUsed(); - } -%> - - - - - - - - - - - - - - - - - - - - - -
<th>Used Heap</th><th>Max Heap</th><th>Direct Memory Used</th><th>Direct Memory Configured</th><th>Memstore On-Heap Size / Limit</th><th>Memstore Off-Heap Size / Limit</th><th>Memstore Data Size (On&&Off Heap)</th>
- <% TraditionalBinaryPrefix.long2String(usedHeap, "B", 1) %> - - <% TraditionalBinaryPrefix.long2String(maxHeap, "B", 1) %> - - <% TraditionalBinaryPrefix.long2String(DirectMemoryUtils.getDirectMemoryUsage(), "B", 1) %> - - <% TraditionalBinaryPrefix.long2String(DirectMemoryUtils.getDirectMemorySize(), "B", 1) %> - - <% TraditionalBinaryPrefix.long2String(mWrap.getOnHeapMemStoreSize(), "B", 1) + " / " - + TraditionalBinaryPrefix.long2String(mWrap.getOnHeapMemStoreLimit(), "B", 1) %> - - <% TraditionalBinaryPrefix.long2String(mWrap.getOffHeapMemStoreSize(), "B", 1) + " / " - + TraditionalBinaryPrefix.long2String(mWrap.getOffHeapMemStoreLimit(), "B", 1) %> - - <% TraditionalBinaryPrefix.long2String(mWrap.getMemStoreSize(), "B", 1) %> -
- - -<%def walStats> -<%args> -MetricsRegionServerWrapper mWrap; - - - - - - - - - - - - - - -
<th>Num. WAL Files</th><th>Size. WAL Files</th><th>WAL exclude DNs</th>
<% mWrap.getNumWALFiles() %><% TraditionalBinaryPrefix.long2String(mWrap.getWALFileSize(), "B", 1) %> - <%for String exclude: mWrap.getWALExcludeDNs() %> - <% exclude %>
- -
- - -<%def storeStats> -<%args> -MetricsRegionServerWrapper mWrap; - - - - - - - - - - - - - - - - - -
<th>Num. Stores</th><th>Num. Storefiles</th><th>Root Index Size</th><th>Index Size</th><th>Bloom Size</th>
<% mWrap.getNumStores() %><% mWrap.getNumStoreFiles() %><% TraditionalBinaryPrefix.long2String(mWrap.getStoreFileIndexSize(), "B", 1) %><% TraditionalBinaryPrefix.long2String(mWrap.getTotalStaticIndexSize(), "B", 1) %><% TraditionalBinaryPrefix.long2String(mWrap.getTotalStaticBloomSize(), "B", 1) %>
- - - -<%def requestStats> -<%args> -MetricsRegionServerWrapper mWrap; - - - - - - - - - - - - - - -
<th>Request Per Second</th><th>Read Request Count</th><th>Filtered Read Request Count</th><th>Write Request Count</th>
<% String.format("%.0f", mWrap.getRequestsPerSecond()) %><% mWrap.getReadRequestsCount() %><% mWrap.getFilteredReadRequestsCount() %><% mWrap.getWriteRequestsCount() %>
- - -<%def queueStats> -<%args> -MetricsRegionServerWrapper mWrap; -MetricsHBaseServerWrapper mServerWrap; - - - - - - - - - - - - - - - - - - - -
<th>Compaction Queue Length</th><th>Flush Queue Length</th><th>Priority Call Queue Length</th><th>General Call Queue Length</th><th>Replication Call Queue Length</th><th>Total Call Queue Size</th>
<% mWrap.getCompactionQueueSize() %><% mWrap.getFlushQueueSize() %><% mServerWrap.getPriorityQueueLength() %><% mServerWrap.getGeneralQueueLength() %><% mServerWrap.getReplicationQueueLength() %><% TraditionalBinaryPrefix.long2String(mServerWrap.getTotalQueueSize(), "B", 1) %>
- - -<%def byteBuffAllocatorStats> -<%args> -ByteBuffAllocator bbAllocator; - - - - - - - - - - - - - - - - - - -
<th>Total Heap Allocation</th><th>Total Pool Allocation</th><th>Heap Allocation Ratio</th><th>Total Buffer Count</th><th>Used Buffer Count</th><th>Buffer Size</th>
<% TraditionalBinaryPrefix.long2String(ByteBuffAllocator.getHeapAllocationBytes(bbAllocator, ByteBuffAllocator.HEAP), "B", 1) %><% TraditionalBinaryPrefix.long2String(bbAllocator.getPoolAllocationBytes(), "B", 1) %><% String.format("%.3f", ByteBuffAllocator.getHeapAllocationRatio(bbAllocator, ByteBuffAllocator.HEAP) * 100) %><% "%" %><% bbAllocator.getTotalBufferCount() %><% bbAllocator.getUsedBufferCount() %><% TraditionalBinaryPrefix.long2String(bbAllocator.getBufferSize(), "B", 1) %>
- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index 98750d38a7c3..05b049e27dbc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.filter.RowFilter; import org.apache.hadoop.hbase.filter.SubstringComparator; import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.regionserver.RSAnnotationReadingPriorityFunction; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.ExceptionUtil; @@ -185,6 +186,7 @@ public static HRegionLocation getRegionLocation(Connection connection, byte[] re } Get get = new Get(row); get.addFamily(HConstants.CATALOG_FAMILY); + get.setPriority(RSAnnotationReadingPriorityFunction.INTERNAL_READ_QOS); Result r; try (Table t = getMetaHTable(connection)) { r = t.get(get); @@ -213,6 +215,7 @@ public static Result getCatalogFamilyRow(Connection connection, RegionInfo ri) throws IOException { Get get = new Get(CatalogFamilyFormat.getMetaKeyForRegion(ri)); get.addFamily(HConstants.CATALOG_FAMILY); + get.setPriority(RSAnnotationReadingPriorityFunction.INTERNAL_READ_QOS); try (Table t = getMetaHTable(connection)) { return t.get(get); } @@ -226,11 +229,7 @@ public static Result getCatalogFamilyRow(Connection connection, RegionInfo ri) */ public static Result getRegionResult(Connection connection, RegionInfo regionInfo) throws IOException { - Get get = new Get(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo)); - get.addFamily(HConstants.CATALOG_FAMILY); - try (Table t = getMetaHTable(connection)) { - return t.get(get); - } + return getCatalogFamilyRow(connection, regionInfo); } /** @@ -341,6 +340,7 @@ private static Scan getMetaScan(Configuration conf, int rowUpperLimit) { scan.setReadType(Scan.ReadType.PREAD); } scan.setCaching(scannerCaching); + scan.setPriority(RSAnnotationReadingPriorityFunction.INTERNAL_READ_QOS); return scan; } @@ -368,7 +368,7 @@ public static List> getTableRegionsAndLocations( final boolean excludeOfflinedSplitParents) throws IOException { if (tableName != null && tableName.equals(TableName.META_TABLE_NAME)) { throw new IOException( - "This method can't be used to locate meta regions;" + " use MetaTableLocator instead"); + "This method can't be used to locate meta regions; use MetaTableLocator instead"); } // Make a version of CollectingVisitor that collects RegionInfo and ServerAddress ClientMetaTableAccessor.CollectRegionLocationsVisitor visitor = @@ -385,10 +385,10 @@ public static void fullScanMetaAndPrint(Connection connection) throws IOExceptio if (r == null || r.isEmpty()) { return true; } - LOG.info("fullScanMetaAndPrint.Current Meta Row: " + r); + LOG.info("fullScanMetaAndPrint.Current Meta Row: {}", r); TableState state = CatalogFamilyFormat.getTableState(r); if (state != null) { - LOG.info("fullScanMetaAndPrint.Table State={}" + state); + LOG.info("fullScanMetaAndPrint.Table State={}", state); } else { RegionLocations locations = CatalogFamilyFormat.getRegionLocations(r); if (locations == null) { @@ -461,6 +461,15 @@ public static void scanMeta(Connection connection, @Nullable final byte[] startR scanMeta(connection, startRow, stopRow, type, null, maxRows, visitor); } + /** + * Performs a scan of META table. 
+ * @param connection connection we're using + * @param startRow Where to start the scan. Pass null if want to begin scan at first row. + * @param stopRow Where to stop the scan. Pass null if want to scan all rows from the start one + * @param type scanned part of meta + * @param maxRows maximum rows to return + * @param visitor Visitor invoked against each row. + */ public static void scanMeta(Connection connection, @Nullable final byte[] startRow, @Nullable final byte[] stopRow, QueryType type, @Nullable Filter filter, int maxRows, final ClientMetaTableAccessor.Visitor visitor) throws IOException { @@ -481,9 +490,11 @@ public static void scanMeta(Connection connection, @Nullable final byte[] startR } if (LOG.isTraceEnabled()) { - LOG.trace("Scanning META" + " starting at row=" + Bytes.toStringBinary(startRow) - + " stopping at row=" + Bytes.toStringBinary(stopRow) + " for max=" + rowUpperLimit - + " with caching=" + scan.getCaching()); + LOG.trace( + "Scanning META starting at row={} stopping at row={} for max={} with caching={} " + + "priority={}", + Bytes.toStringBinary(startRow), Bytes.toStringBinary(stopRow), rowUpperLimit, + scan.getCaching(), scan.getPriority()); } int currentRow = 0; @@ -912,7 +923,7 @@ private static void updateLocation(Connection connection, RegionInfo regionInfo, addRegionInfo(put, regionInfo); addLocation(put, sn, openSeqNum, regionInfo.getReplicaId()); putToMetaTable(connection, put); - LOG.info("Updated row {} with server=", regionInfo.getRegionNameAsString(), sn); + LOG.info("Updated row {} with server = {}", regionInfo.getRegionNameAsString(), sn); } public static Put addRegionInfo(final Put p, final RegionInfo hri) throws IOException { @@ -937,7 +948,7 @@ public static Put addLocation(Put p, ServerName sn, long openSeqNum, int replica .add(builder.clear().setRow(p.getRow()).setFamily(HConstants.CATALOG_FAMILY) .setQualifier(CatalogFamilyFormat.getStartCodeColumn(replicaId)) .setTimestamp(p.getTimestamp()).setType(Cell.Type.Put) - .setValue(Bytes.toBytes(sn.getStartcode())).build()) + .setValue(Bytes.toBytes(sn.getStartCode())).build()) .add(builder.clear().setRow(p.getRow()).setFamily(HConstants.CATALOG_FAMILY) .setQualifier(CatalogFamilyFormat.getSeqNumColumn(replicaId)).setTimestamp(p.getTimestamp()) .setType(Type.Put).setValue(Bytes.toBytes(openSeqNum)).build()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java index 65fe524d0a49..7b7f7e208f51 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java @@ -1412,6 +1412,22 @@ default void postReplayWALs(ObserverContext ctx, + RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { + } + + /** + * Called after a {@link WALEdit} replayed for this region. + * @param ctx the environment provided by the region server + */ + default void postWALRestore(ObserverContext ctx, + RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { + } + /** * Called before bulkLoadHFile. Users can create a StoreFile instance to access the contents of a * HFile. 
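Since the hunk above only shows the new postWALRestore hook with its generics stripped by the diff rendering, here is a hedged sketch of how a coprocessor might use it; the class name is illustrative and the generic bound on ObserverContext is an assumption that must match the final interface declaration:

import java.io.IOException;
import java.util.Optional;
import java.util.concurrent.atomic.LongAdder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKey;

/** Illustrative only: counts WALEdits replayed for a region via the postWALRestore hook. */
public class WalRestoreCountingObserver implements RegionCoprocessor, RegionObserver {
  private final LongAdder replayedEdits = new LongAdder();

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void postWALRestore(ObserverContext<? extends RegionCoprocessorEnvironment> ctx,
    RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
    // Invoked after each WALEdit has been replayed for this region during recovery.
    replayedEdits.increment();
  }

  public long replayedEditCount() {
    return replayedEdits.sum();
  }
}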
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java index fce32333577d..fee132b7a4d3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java @@ -303,7 +303,13 @@ public enum EventType { * RS reload quotas.
* RS_RELOAD_QUOTAS */ - RS_RELOAD_QUOTAS(90, ExecutorType.RS_RELOAD_QUOTAS_OPERATIONS); + RS_RELOAD_QUOTAS(90, ExecutorType.RS_RELOAD_QUOTAS_OPERATIONS), + + /** + * RS log roll.
+ * RS_LOG_ROLL + */ + RS_LOG_ROLL(91, ExecutorType.RS_LOG_ROLL); private final int code; private final ExecutorType executor; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java index 1d689d276aa1..668cd701c0d9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java @@ -56,7 +56,8 @@ public enum ExecutorType { RS_CLAIM_REPLICATION_QUEUE(35), RS_SNAPSHOT_OPERATIONS(36), RS_FLUSH_OPERATIONS(37), - RS_RELOAD_QUOTAS_OPERATIONS(38); + RS_RELOAD_QUOTAS_OPERATIONS(38), + RS_LOG_ROLL(39); ExecutorType(int value) { } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java index 7cc919aa026d..e39cb21a422e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java @@ -285,7 +285,7 @@ public static HFileBlock getBlockForCaching(CacheConfig cacheConf, HFileBlock bl .withOnDiskSizeWithoutHeader(block.getOnDiskSizeWithoutHeader()) .withUncompressedSizeWithoutHeader(block.getUncompressedSizeWithoutHeader()) .withPrevBlockOffset(block.getPrevBlockOffset()).withByteBuff(buff) - .withFillHeader(FILL_HEADER).withOffset(block.getOffset()).withNextBlockOnDiskSize(-1) + .withFillHeader(FILL_HEADER).withOffset(block.getOffset()) .withOnDiskDataSizeWithHeader(block.getOnDiskDataSizeWithHeader() + numBytes) .withNextBlockOnDiskSize(block.getNextBlockOnDiskSize()) .withHFileContext(cloneContext(block.getHFileContext())) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index 72ca37c0557c..fc8f4d569176 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -132,7 +132,7 @@ public class CacheConfig implements PropagatingConfigurationObserver { private volatile boolean cacheDataOnRead; /** Whether blocks should be flagged as in-memory when being cached */ - private final boolean inMemory; + private boolean inMemory; /** Whether data blocks should be cached when new files are written */ private volatile boolean cacheDataOnWrite; @@ -147,29 +147,29 @@ public class CacheConfig implements PropagatingConfigurationObserver { private volatile boolean evictOnClose; /** Whether data blocks should be stored in compressed and/or encrypted form in the cache */ - private final boolean cacheDataCompressed; + private boolean cacheDataCompressed; /** Whether data blocks should be prefetched into the cache */ - private final boolean prefetchOnOpen; + private boolean prefetchOnOpen; /** * Whether data blocks should be cached when compacted file is written */ - private final boolean cacheCompactedDataOnWrite; + private boolean cacheCompactedDataOnWrite; /** * Determine threshold beyond which we do not cache blocks on compaction */ private long cacheCompactedDataOnWriteThreshold; - private final boolean dropBehindCompaction; + private boolean dropBehindCompaction; // Local reference to the block cache private final BlockCache blockCache; private final ByteBuffAllocator byteBuffAllocator; - private final double heapUsageThreshold; + 
private double heapUsageThreshold; /** * Create a cache configuration using the specified configuration object and defaults for family @@ -191,32 +191,34 @@ public CacheConfig(Configuration conf, BlockCache blockCache) { */ public CacheConfig(Configuration conf, ColumnFamilyDescriptor family, BlockCache blockCache, ByteBuffAllocator byteBuffAllocator) { - this.cacheDataOnRead = conf.getBoolean(CACHE_DATA_ON_READ_KEY, DEFAULT_CACHE_DATA_ON_READ) - && (family == null ? true : family.isBlockCacheEnabled()); - this.inMemory = family == null ? DEFAULT_IN_MEMORY : family.isInMemory(); - this.cacheDataCompressed = - conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED); - this.dropBehindCompaction = - conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT); - // For the following flags we enable them regardless of per-schema settings - // if they are enabled in the global configuration. - this.cacheDataOnWrite = conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_DATA_ON_WRITE) - || (family == null ? false : family.isCacheDataOnWrite()); - this.cacheIndexesOnWrite = - conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_INDEXES_ON_WRITE) - || (family == null ? false : family.isCacheIndexesOnWrite()); - this.cacheBloomsOnWrite = - conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_BLOOMS_ON_WRITE) - || (family == null ? false : family.isCacheBloomsOnWrite()); - this.evictOnClose = conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE) - || (family == null ? false : family.isEvictBlocksOnClose()); - this.prefetchOnOpen = conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN) - || (family == null ? false : family.isPrefetchBlocksOnOpen()); - this.cacheCompactedDataOnWrite = - conf.getBoolean(CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE); - this.cacheCompactedDataOnWriteThreshold = getCacheCompactedBlocksOnWriteThreshold(conf); - this.heapUsageThreshold = - conf.getDouble(PREFETCH_HEAP_USAGE_THRESHOLD, DEFAULT_PREFETCH_HEAP_USAGE_THRESHOLD); + if (family == null || family.isBlockCacheEnabled()) { + this.cacheDataOnRead = conf.getBoolean(CACHE_DATA_ON_READ_KEY, DEFAULT_CACHE_DATA_ON_READ); + this.inMemory = family == null ? DEFAULT_IN_MEMORY : family.isInMemory(); + this.cacheDataCompressed = + conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED); + this.dropBehindCompaction = + conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT); + // For the following flags we enable them regardless of per-schema settings + // if they are enabled in the global configuration. + this.cacheDataOnWrite = + conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_DATA_ON_WRITE) + || (family == null ? false : family.isCacheDataOnWrite()); + this.cacheIndexesOnWrite = + conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_INDEXES_ON_WRITE) + || (family == null ? false : family.isCacheIndexesOnWrite()); + this.cacheBloomsOnWrite = + conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_BLOOMS_ON_WRITE) + || (family == null ? false : family.isCacheBloomsOnWrite()); + this.evictOnClose = conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE) + || (family == null ? false : family.isEvictBlocksOnClose()); + this.prefetchOnOpen = conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN) + || (family == null ? 
false : family.isPrefetchBlocksOnOpen()); + this.cacheCompactedDataOnWrite = conf.getBoolean(CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, + DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE); + this.cacheCompactedDataOnWriteThreshold = getCacheCompactedBlocksOnWriteThreshold(conf); + this.heapUsageThreshold = + conf.getDouble(PREFETCH_HEAP_USAGE_THRESHOLD, DEFAULT_PREFETCH_HEAP_USAGE_THRESHOLD); + } this.blockCache = blockCache; this.byteBuffAllocator = byteBuffAllocator; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index a99eac4085e4..7080a865aa68 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.ReaderContext.ReaderType; -import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.monitoring.ThreadLocalServerSideScanMetrics; import org.apache.hadoop.hbase.regionserver.CellSink; import org.apache.hadoop.hbase.regionserver.ShipperListener; import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; @@ -190,7 +190,7 @@ public static final long getChecksumFailuresCount() { } public static final void updateReadLatency(long latencyMillis, boolean pread, boolean tooSlow) { - RpcServer.getCurrentCall().ifPresent(call -> call.updateFsReadTime(latencyMillis)); + ThreadLocalServerSideScanMetrics.addFsReadTime(latencyMillis); if (pread) { MetricsIO.getInstance().updateFsPreadTime(latencyMillis); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index 899a681131f4..8f1bb3be7a5b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -1172,6 +1172,32 @@ public HFileBlock getCachedBlock(BlockCacheKey cacheKey, boolean cacheBlock, boo } return cachedBlock; } + } catch (Exception e) { + if (cachedBlock != null) { + returnAndEvictBlock(cache, cacheKey, cachedBlock); + } + LOG.warn("Failed retrieving block from cache with key {}. " + + "\n Evicting this block from cache and will read it from file system. " + + "\n Exception details: ", cacheKey, e); + if (LOG.isDebugEnabled()) { + LOG.debug("Further tracing details for failed block cache retrieval:" + + "\n Complete File path - {}," + "\n Expected Block Type - {}, Actual Block Type - {}," + + "\n Cache compressed - {}" + "\n Header size (after deserialized from cache) - {}" + + "\n Size with header - {}" + "\n Uncompressed size without header - {} " + + "\n Total byte buffer size - {}" + "\n Encoding code - {}", this.path, + expectedBlockType, (cachedBlock != null ? cachedBlock.getBlockType() : "N/A"), + (expectedBlockType != null + ? cacheConf.shouldCacheCompressed(expectedBlockType.getCategory()) + : "N/A"), + (cachedBlock != null ? cachedBlock.headerSize() : "N/A"), + (cachedBlock != null ? cachedBlock.getOnDiskSizeWithHeader() : "N/A"), + (cachedBlock != null ? cachedBlock.getUncompressedSizeWithoutHeader() : "N/A"), + (cachedBlock != null ? cachedBlock.getBufferReadOnly().limit() : "N/A"), + (cachedBlock != null + ? 
cachedBlock.getBufferReadOnly().getShort(cachedBlock.headerSize()) + : "N/A")); + } + return null; } finally { // Count bytes read as cached block is being returned if (isScanMetricsEnabled && cachedBlock != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java index c93dac8a572b..6ee953717341 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java @@ -117,7 +117,7 @@ public class BucketEntry implements HBaseReferenceCounted { this.onDiskSizeWithHeader = onDiskSizeWithHeader; this.accessCounter = accessCounter; this.cachedTime = cachedTime; - this.priority = inMemory ? BlockPriority.MEMORY : BlockPriority.MULTI; + this.priority = inMemory ? BlockPriority.MEMORY : BlockPriority.SINGLE; this.refCnt = RefCnt.create(createRecycler.apply(this)); this.markedAsEvicted = new AtomicBoolean(false); this.allocator = allocator; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetaRWQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetaRWQueueRpcExecutor.java index a86e6554b1cc..97c3a8765256 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetaRWQueueRpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetaRWQueueRpcExecutor.java @@ -19,6 +19,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.regionserver.RSAnnotationReadingPriorityFunction; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -32,7 +33,10 @@ public class MetaRWQueueRpcExecutor extends RWQueueRpcExecutor { "hbase.ipc.server.metacallqueue.read.ratio"; public static final String META_CALL_QUEUE_SCAN_SHARE_CONF_KEY = "hbase.ipc.server.metacallqueue.scan.ratio"; - public static final float DEFAULT_META_CALL_QUEUE_READ_SHARE = 0.9f; + public static final String META_CALL_QUEUE_HANDLER_FACTOR_CONF_KEY = + "hbase.ipc.server.metacallqueue.handler.factor"; + public static final float DEFAULT_META_CALL_QUEUE_READ_SHARE = 0.8f; + private static final float DEFAULT_META_CALL_QUEUE_SCAN_SHARE = 0.2f; public MetaRWQueueRpcExecutor(final String name, final int handlerCount, final int maxQueueLength, final PriorityFunction priority, final Configuration conf, final Abortable abortable) { @@ -46,6 +50,23 @@ protected float getReadShare(final Configuration conf) { @Override protected float getScanShare(final Configuration conf) { - return conf.getFloat(META_CALL_QUEUE_SCAN_SHARE_CONF_KEY, 0); + return conf.getFloat(META_CALL_QUEUE_SCAN_SHARE_CONF_KEY, DEFAULT_META_CALL_QUEUE_SCAN_SHARE); + } + + @Override + public boolean dispatch(CallRunner callTask) { + RpcCall call = callTask.getRpcCall(); + int level = call.getHeader().getPriority(); + final boolean toWriteQueue = isWriteRequest(call.getHeader(), call.getParam()); + // dispatch client system read request to read handlers + // dispatch internal system read request to scan handlers + final boolean toScanQueue = + getNumScanQueues() > 0 && level == RSAnnotationReadingPriorityFunction.INTERNAL_READ_QOS; + return dispatchTo(toWriteQueue, toScanQueue, callTask); + } + + @Override + protected float getCallQueueHandlerFactor(Configuration conf) { + return conf.getFloat(META_CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0.5f); } } diff 
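As a side note on the MetaRWQueueRpcExecutor change just above, here is a hedged sketch (not part of the patch; the helper class is invented) that sets the three meta call queue keys it reads to their new defaults programmatically; the same keys can also be set in hbase-site.xml:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical tuning helper for the meta call queue settings referenced above.
public final class MetaQueueTuningSketch {
  private MetaQueueTuningSketch() {
  }

  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Share of meta handlers serving reads (default lowered to 0.8 in this patch).
    conf.setFloat("hbase.ipc.server.metacallqueue.read.ratio", 0.8f);
    // Share routed to the scan queues, which now take internal system reads (default 0.2).
    conf.setFloat("hbase.ipc.server.metacallqueue.scan.ratio", 0.2f);
    // New key in this patch; MetaRWQueueRpcExecutor defaults it to 0.5.
    conf.setFloat("hbase.ipc.server.metacallqueue.handler.factor", 0.5f);
    return conf;
  }
}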
--git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java index 298a9fc3aeb2..70a7b74b8e2a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java @@ -297,4 +297,8 @@ private void propagateBalancerConfigChange(QueueBalancer balancer, Configuration ((ConfigurationObserver) balancer).onConfigurationChange(conf); } } + + protected int getNumScanQueues() { + return numScanQueues; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java index 804d7b32bb42..ff3bae19e296 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java @@ -133,8 +133,4 @@ void setResponse(Message param, ExtendedCellScanner cells, Throwable errorThrowa /** Returns A short string format of this call without possibly lengthy params */ String toShortString(); - - void updateFsReadTime(long latencyMillis); - - long getFsReadTime(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java index 7e5bdfcc7d6f..15c9afe030c2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java @@ -130,7 +130,7 @@ public RpcExecutor(final String name, final int handlerCount, final String callQ this.conf = conf; this.abortable = abortable; - float callQueuesHandlersFactor = this.conf.getFloat(CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0.1f); + float callQueuesHandlersFactor = getCallQueueHandlerFactor(conf); if ( Float.compare(callQueuesHandlersFactor, 1.0f) > 0 || Float.compare(0.0f, callQueuesHandlersFactor) > 0 @@ -468,4 +468,8 @@ public void onConfigurationChange(Configuration conf) { } } } + + protected float getCallQueueHandlerFactor(Configuration conf) { + return conf.getFloat(CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0.1f); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index 2db08fd7398b..6dfb5bfb4113 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; import org.apache.hadoop.hbase.monitoring.TaskMonitor; +import org.apache.hadoop.hbase.monitoring.ThreadLocalServerSideScanMetrics; import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; import org.apache.hadoop.hbase.namequeues.RpcLogDetails; import org.apache.hadoop.hbase.regionserver.RSRpcServices; @@ -461,19 +462,18 @@ public Pair call(RpcCall call, MonitoredRPCHandler int processingTime = (int) (endTime - startTime); int qTime = (int) (startTime - receiveTime); int totalTime = (int) (endTime - receiveTime); + long fsReadTime = ThreadLocalServerSideScanMetrics.getFsReadTimeCounter().get(); if (LOG.isTraceEnabled()) { LOG.trace( "{}, response: {}, receiveTime: {}, queueTime: {}, processingTime: {}, " + "totalTime: {}, fsReadTime: {}", CurCall.get().toString(), TextFormat.shortDebugString(result), - 
CurCall.get().getReceiveTime(), qTime, processingTime, totalTime, - CurCall.get().getFsReadTime()); + CurCall.get().getReceiveTime(), qTime, processingTime, totalTime, fsReadTime); } // Use the raw request call size for now. long requestSize = call.getSize(); long responseSize = result.getSerializedSize(); long responseBlockSize = call.getBlockBytesScanned(); - long fsReadTime = call.getFsReadTime(); if (call.isClientCellBlockSupported()) { // Include the payload size in HBaseRpcController responseSize += call.getResponseCellSize(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java index fc3b6fc0a6b1..ed7e67edfaf0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java @@ -101,7 +101,6 @@ public abstract class ServerCall implements RpcCa private long responseCellSize = 0; private long responseBlockSize = 0; - private long fsReadTimeMillis = 0; // cumulative size of serialized exceptions private long exceptionSize = 0; private final boolean retryImmediatelySupported; @@ -604,14 +603,4 @@ public int getRemotePort() { public synchronized BufferChain getResponse() { return response; } - - @Override - public void updateFsReadTime(long latencyMillis) { - fsReadTimeMillis += latencyMillis; - } - - @Override - public long getFsReadTime() { - return fsReadTimeMillis; - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 1cda553a81dc..f8abca44e4c5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -160,6 +160,7 @@ import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure; import org.apache.hadoop.hbase.master.procedure.FlushTableProcedure; import org.apache.hadoop.hbase.master.procedure.InitMetaProcedure; +import org.apache.hadoop.hbase.master.procedure.LogRollProcedure; import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler; @@ -4201,11 +4202,11 @@ public SpaceQuotaSnapshotNotifier getSpaceQuotaSnapshotNotifier() { return (RemoteProcedure) procedure; } - public void remoteProcedureCompleted(long procId) { + public void remoteProcedureCompleted(long procId, byte[] remoteResultData) { LOG.debug("Remote procedure done, pid={}", procId); RemoteProcedure procedure = getRemoteProcedure(procId); if (procedure != null) { - procedure.remoteOperationCompleted(procedureExecutor.getEnvironment()); + procedure.remoteOperationCompleted(procedureExecutor.getEnvironment(), remoteResultData); } } @@ -4539,7 +4540,7 @@ public long flushTable(TableName tableName, List columnFamilies, long no @Override protected void run() throws IOException { getMaster().getMasterCoprocessorHost().preTableFlush(tableName); - LOG.info(getClientIdAuditPrefix() + " flush " + tableName); + LOG.info("{} flush {}", getClientIdAuditPrefix(), tableName); submitProcedure( new FlushTableProcedure(procedureExecutor.getEnvironment(), tableName, columnFamilies)); getMaster().getMasterCoprocessorHost().postTableFlush(tableName); @@ -4551,4 +4552,28 @@ protected String getDescription() { } }); } + + @Override + public long rollAllWALWriters(long 
nonceGroup, long nonce) throws IOException { + return MasterProcedureUtil + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() { + LOG.info("{} roll all wal writers", getClientIdAuditPrefix()); + submitProcedure(new LogRollProcedure()); + } + + @Override + protected String getDescription() { + return "RollAllWALWriters"; + } + }); + } + + @RestrictedApi(explanation = "Should only be called in tests", link = "", + allowedOnPath = ".*/src/test/.*") + public MobFileCleanerChore getMobFileCleanerChore() { + return mobFileCleanerChore; + } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index fc246d38d513..e9e0f970ef8d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -76,6 +76,7 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable; +import org.apache.hadoop.hbase.master.procedure.RestoreBackupSystemTableProcedure; import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; import org.apache.hadoop.hbase.master.replication.AbstractPeerNoLockProcedure; import org.apache.hadoop.hbase.mob.MobUtils; @@ -321,6 +322,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RegionSpecifierAndState; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RollAllWALWritersRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RollAllWALWritersResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest; @@ -1372,7 +1375,7 @@ public IsSnapshotDoneResponse isSnapshotDone(RpcController controller, @Override public GetProcedureResultResponse getProcedureResult(RpcController controller, GetProcedureResultRequest request) throws ServiceException { - LOG.debug("Checking to see if procedure is done pid=" + request.getProcId()); + LOG.debug("Checking to see if procedure is done pid={}", request.getProcId()); try { server.checkInitialized(); GetProcedureResultResponse.Builder builder = GetProcedureResultResponse.newBuilder(); @@ -2575,7 +2578,9 @@ public ReportProcedureDoneResponse reportProcedureDone(RpcController controller, } request.getResultList().forEach(result -> { if (result.getStatus() == RemoteProcedureResult.Status.SUCCESS) { - server.remoteProcedureCompleted(result.getProcId()); + byte[] remoteResultData = + result.hasProcResultData() ? 
result.getProcResultData().toByteArray() : null; + server.remoteProcedureCompleted(result.getProcId(), remoteResultData); } else { server.remoteProcedureFailed(result.getProcId(), RemoteProcedureException.fromProto(result.getError())); @@ -3662,4 +3667,33 @@ public FlushTableResponse flushTable(RpcController controller, FlushTableRequest throw new ServiceException(ioe); } } + + @Override + public MasterProtos.RestoreBackupSystemTableResponse restoreBackupSystemTable( + RpcController rpcController, + MasterProtos.RestoreBackupSystemTableRequest restoreBackupSystemTableRequest) + throws ServiceException { + try { + String snapshotName = restoreBackupSystemTableRequest.getSnapshotName(); + SnapshotDescription snapshot = server.snapshotManager.getCompletedSnapshots().stream() + .filter(s -> s.getName().equals(snapshotName)).findFirst() + .orElseThrow(() -> new ServiceException("Snapshot %s not found".formatted(snapshotName))); + long pid = server.getMasterProcedureExecutor() + .submitProcedure(new RestoreBackupSystemTableProcedure(snapshot)); + return MasterProtos.RestoreBackupSystemTableResponse.newBuilder().setProcId(pid).build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public RollAllWALWritersResponse rollAllWALWriters(RpcController rpcController, + RollAllWALWritersRequest request) throws ServiceException { + try { + long procId = server.rollAllWALWriters(request.getNonceGroup(), request.getNonce()); + return RollAllWALWritersResponse.newBuilder().setProcId(procId).build(); + } catch (IOException ioe) { + throw new ServiceException(ioe); + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index e9c98d624460..0573b1a75628 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -515,4 +515,10 @@ long flushTable(final TableName tableName, final List columnFamilies, * @return procedure Id */ long truncateRegion(RegionInfo regionInfo, long nonceGroup, long nonce) throws IOException; + + /** + * Roll WAL writer for all RegionServers + * @return procedure id + */ + long rollAllWALWriters(long nonceGroup, long nonce) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index 55cfc28bb53a..b99f0448e8f0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -236,6 +236,14 @@ public boolean unregisterListener(final ServerListener listener) { return this.listeners.remove(listener); } + /** + * Removes all of the ServerListeners of this collection that satisfy the given predicate. 
+ * @param filter a predicate which returns true for ServerListener to be removed + */ + public boolean unregisterListenerIf(final Predicate filter) { + return this.listeners.removeIf(filter); + } + /** * Let the server manager know a new regionserver has come online * @param request the startup request diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java index 18dfc7d493bf..32b2f4d21f29 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java @@ -26,7 +26,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; -import java.util.Optional; import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -35,7 +34,6 @@ import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler; import org.apache.hadoop.hbase.master.procedure.SplitWALProcedure; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureEvent; @@ -153,25 +151,19 @@ List createSplitWALProcedures(List splittingWALs, */ public ServerName acquireSplitWALWorker(Procedure procedure) throws ProcedureSuspendedException { - Optional worker = splitWorkerAssigner.acquire(); - if (worker.isPresent()) { - LOG.debug("Acquired split WAL worker={}", worker.get()); - return worker.get(); - } - splitWorkerAssigner.suspend(procedure); - throw new ProcedureSuspendedException(); + ServerName worker = splitWorkerAssigner.acquire(procedure); + LOG.debug("Acquired split WAL worker={}", worker); + return worker; } /** * After the worker finished the split WAL task, it will release the worker, and wake up all the * suspend procedures in the ProcedureEvent - * @param worker worker which is about to release - * @param scheduler scheduler which is to wake up the procedure event + * @param worker worker which is about to release */ - public void releaseSplitWALWorker(ServerName worker, MasterProcedureScheduler scheduler) { + public void releaseSplitWALWorker(ServerName worker) { LOG.debug("Release split WAL worker={}", worker); splitWorkerAssigner.release(worker); - splitWorkerAssigner.wake(scheduler); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/WorkerAssigner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/WorkerAssigner.java index b6df41acee23..7b1ec80cab4a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/WorkerAssigner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/WorkerAssigner.java @@ -23,9 +23,9 @@ import java.util.Map; import java.util.Optional; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureEvent; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; import org.apache.yetus.audience.InterfaceAudience; /** @@ -51,36 +51,37 @@ public WorkerAssigner(MasterServices master, int maxTasks, ProcedureEvent eve } } - public synchronized Optional acquire() { + public synchronized ServerName acquire(Procedure proc) throws ProcedureSuspendedException { 
List serverList = master.getServerManager().getOnlineServersList(); Collections.shuffle(serverList); Optional worker = serverList.stream() .filter( serverName -> !currentWorkers.containsKey(serverName) || currentWorkers.get(serverName) > 0) .findAny(); - worker.ifPresent(name -> currentWorkers.compute(name, (serverName, - availableWorker) -> availableWorker == null ? maxTasks - 1 : availableWorker - 1)); - return worker; + if (worker.isPresent()) { + ServerName sn = worker.get(); + currentWorkers.compute(sn, (serverName, + availableWorker) -> availableWorker == null ? maxTasks - 1 : availableWorker - 1); + return sn; + } else { + event.suspend(); + event.suspendIfNotReady(proc); + throw new ProcedureSuspendedException(); + } } public synchronized void release(ServerName serverName) { currentWorkers.compute(serverName, (k, v) -> v == null ? null : v + 1); - } - - public void suspend(Procedure proc) { - event.suspend(); - event.suspendIfNotReady(proc); - } - - public void wake(MasterProcedureScheduler scheduler) { if (!event.isReady()) { - event.wake(scheduler); + event.wake(master.getMasterProcedureExecutor().getEnvironment().getProcedureScheduler()); } } @Override - public void serverAdded(ServerName worker) { - this.wake(master.getMasterProcedureExecutor().getEnvironment().getProcedureScheduler()); + public synchronized void serverAdded(ServerName worker) { + if (!event.isReady()) { + event.wake(master.getMasterProcedureExecutor().getEnvironment().getProcedureScheduler()); + } } public synchronized void addUsedWorker(ServerName worker) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java index a828b5b668fc..cb3b91ca0e20 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java @@ -103,7 +103,7 @@ public Optional remoteCallBuild(Maste newRemoteOperation(MasterProcedureEnv env); @Override - public void remoteOperationCompleted(MasterProcedureEnv env) { + public void remoteOperationCompleted(MasterProcedureEnv env, byte[] remoteResultData) { // should not be called since we use reportRegionStateTransition to report the result throw new UnsupportedOperationException(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java index e0712f1d2aa3..4cf685f50a0a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java @@ -166,7 +166,7 @@ protected boolean abort(final MasterProcedureEnv env) { } @Override - public void remoteOperationCompleted(MasterProcedureEnv env) { + public void remoteOperationCompleted(MasterProcedureEnv env, byte[] remoteResultData) { // should not be called for region operation until we modified the open/close region procedure throw new UnsupportedOperationException(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java index 4c24ba1f81c5..700914f07b90 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java @@ -44,6 +44,10 @@ public void init(Map params) { /** * Should the master delete the file or keep it? + *
+ * This method can be called concurrently by multiple threads. Implementations must be thread + * safe. + *
* @param fStat file status of the file to check * @return true if the file is deletable, false if not */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/FileCleanerDelegate.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/FileCleanerDelegate.java index e08f53294336..714aaffaa052 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/FileCleanerDelegate.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/FileCleanerDelegate.java @@ -33,6 +33,10 @@ public interface FileCleanerDelegate extends Configurable, Stoppable { /** * Determines which of the given files are safe to delete + *
+ * This method can be called concurrently by multiple threads. Implementations must be thread + * safe. + *
* @param files files to check for deletion * @return files that are ok to delete according to this cleaner */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusConstants.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusConstants.java new file mode 100644 index 000000000000..7432e529dfc8 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusConstants.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.http; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Constants used by the web UI JSP pages. + */ +@InterfaceAudience.Private +public final class MasterStatusConstants { + + public static final String FRAGS = "frags"; + public static final String SERVER_NAMES = "serverNames"; + public static final String SERVER_NAME = "serverName"; + public static final String RS_GROUP_INFOS = "rsGroupInfos"; + public static final String COLLECT_SERVERS = "collectServers"; + public static final String FILTER = "filter"; + public static final String FORMAT = "format"; + public static final String PARENT = "parent"; + + private MasterStatusConstants() { + // Do not instantiate. + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusServlet.java index 09bb5375a5d5..564e5f01124b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusServlet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusServlet.java @@ -18,25 +18,13 @@ package org.apache.hadoop.hbase.master.http; import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.Set; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.RegionInfoBuilder; -import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.master.RegionState; -import org.apache.hadoop.hbase.master.ServerManager; -import org.apache.hadoop.hbase.master.assignment.RegionStateNode; -import org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl; -import org.apache.hadoop.hbase.util.FSUtils; import org.apache.yetus.audience.InterfaceAudience; /** - * The servlet responsible for rendering the index page of the master. + * Only kept for redirecting to master.jsp. 
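To illustrate the thread-safety contract added to BaseFileCleanerDelegate and FileCleanerDelegate above, a minimal sketch of a conforming delegate follows; it is not part of this patch, the class name and TTL value are invented, and it only assumes the Stoppable hooks the delegate interface already requires:

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hbase.master.cleaner.BaseFileCleanerDelegate;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

// Hypothetical example: safe under concurrent isFileDeletable() calls because the
// only shared state is an immutable constant and a volatile stop flag.
public class TtlFileCleanerSketch extends BaseFileCleanerDelegate {

  // Immutable after class load; any number of cleaner threads may read it.
  private static final long TTL_MS = 24L * 60 * 60 * 1000;

  private volatile boolean stopped = false;

  @Override
  public boolean isFileDeletable(FileStatus fStat) {
    // Pure function of the argument plus an immutable constant: thread safe.
    return EnvironmentEdgeManager.currentTime() - fStat.getModificationTime() > TTL_MS;
  }

  @Override
  public void stop(String why) {
    stopped = true;
  }

  @Override
  public boolean isStopped() {
    return stopped;
  }
}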
*/ @InterfaceAudience.Private public class MasterStatusServlet extends HttpServlet { @@ -44,52 +32,6 @@ public class MasterStatusServlet extends HttpServlet { @Override public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException { - HMaster master = (HMaster) getServletContext().getAttribute(HMaster.MASTER); - assert master != null : "No Master in context!"; - - response.setContentType("text/html"); - - Configuration conf = master.getConfiguration(); - - Map frags = getFragmentationInfo(master, conf); - ServerName metaLocation = null; - List servers = null; - Set deadServers = null; - - if (master.isActiveMaster()) { - metaLocation = getMetaLocationOrNull(master); - ServerManager serverManager = master.getServerManager(); - if (serverManager != null) { - deadServers = serverManager.getDeadServers().copyServerNames(); - servers = serverManager.getOnlineServersList(); - } - } - - MasterStatusTmpl tmpl = - new MasterStatusTmpl().setFrags(frags).setMetaLocation(metaLocation).setServers(servers) - .setDeadServers(deadServers).setCatalogJanitorEnabled(master.isCatalogJanitorEnabled()); - - if (request.getParameter("filter") != null) tmpl.setFilter(request.getParameter("filter")); - if (request.getParameter("format") != null) tmpl.setFormat(request.getParameter("format")); - tmpl.render(response.getWriter(), master); - } - - private ServerName getMetaLocationOrNull(HMaster master) { - RegionStateNode rsn = master.getAssignmentManager().getRegionStates() - .getRegionStateNode(RegionInfoBuilder.FIRST_META_REGIONINFO); - if (rsn != null) { - return rsn.isInState(RegionState.State.OPEN) ? rsn.getRegionLocation() : null; - } - return null; - } - - private Map getFragmentationInfo(HMaster master, Configuration conf) - throws IOException { - boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false); - if (showFragmentation) { - return FSUtils.getTableFragmentation(master); - } else { - return null; - } + response.sendRedirect(request.getContextPath() + "/master.jsp"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusUtil.java new file mode 100644 index 000000000000..c860e89681c9 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusUtil.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.http; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.master.assignment.RegionStateNode; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Utility used by the web UI JSP pages. + */ +@InterfaceAudience.Private +public final class MasterStatusUtil { + + private MasterStatusUtil() { + // Do not instantiate. + } + + public static String getUserTables(HMaster master, List tables) { + if (master.isInitialized()) { + try { + Map descriptorMap = master.getTableDescriptors().getAll(); + if (descriptorMap != null) { + for (TableDescriptor desc : descriptorMap.values()) { + if (!desc.getTableName().isSystemTable()) { + tables.add(desc); + } + } + } + } catch (IOException e) { + return "Got user tables error, " + e.getMessage(); + } + } + return null; + } + + public static Map getFragmentationInfo(HMaster master, Configuration conf) + throws IOException { + boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false); + if (showFragmentation) { + return FSUtils.getTableFragmentation(master); + } else { + return null; + } + } + + public static ServerName getMetaLocationOrNull(HMaster master) { + RegionStateNode rsn = master.getAssignmentManager().getRegionStates() + .getRegionStateNode(RegionInfoBuilder.FIRST_META_REGIONINFO); + if (rsn != null) { + return rsn.isInState(RegionState.State.OPEN) ? 
rsn.getRegionLocation() : null; + } + return null; + } + + public static String serverNameLink(HMaster master, ServerName serverName) { + int infoPort = master.getRegionServerInfoPort(serverName); + String url = "//" + serverName.getHostname() + ":" + infoPort + "/regionserver.jsp"; + if (infoPort > 0) { + return "" + serverName.getServerName() + ""; + } else { + return serverName.getServerName(); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/RegionVisualizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/RegionVisualizer.java index 09171c3a8c2f..ceb08bcedf2d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/RegionVisualizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/RegionVisualizer.java @@ -58,7 +58,7 @@ /** * Support class for the "Region Visualizer" rendered out of - * {@code src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionVisualizerTmpl.jamon} + * {@code src/main/resources/hbase-webapps/master/regionVisualizer.jsp} */ @InterfaceAudience.Private public class RegionVisualizer extends AbstractHBaseTool { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java index d2cc2e2bfdb4..1244d5bf3525 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java @@ -29,6 +29,7 @@ import java.util.SortedSet; import java.util.TreeSet; import java.util.stream.Collectors; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; @@ -37,9 +38,12 @@ import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.exceptions.MergeRegionException; +import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -102,6 +106,7 @@ void fixHoles(CatalogJanitorReport report) { final List newRegionInfos = createRegionInfosForHoles(holes); final List newMetaEntries = createMetaEntries(masterServices, newRegionInfos); + createRegionDirectories(masterServices, newMetaEntries); final TransitRegionStateProcedure[] assignProcedures = masterServices.getAssignmentManager().createRoundRobinAssignProcedures(newMetaEntries); @@ -217,6 +222,27 @@ private static List createMetaEntries(final MasterServices masterSer return createMetaEntriesSuccesses; } + private static void createRegionDirectories(final MasterServices masterServices, + final List regions) { + if (regions.isEmpty()) { + return; + } + final MasterFileSystem mfs = masterServices.getMasterFileSystem(); + final Path rootDir = mfs.getRootDir(); + for (RegionInfo regionInfo : regions) { + if (regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { + try { + Path tableDir = CommonFSUtils.getTableDir(rootDir, regionInfo.getTable()); + HRegionFileSystem.createRegionOnFileSystem(masterServices.getConfiguration(), + 
mfs.getFileSystem(), tableDir, regionInfo); + } catch (IOException e) { + LOG.warn("Failed to create region directory for {}: {}", + regionInfo.getRegionNameAsString(), e.getMessage(), e); + } + } + } + } + /** * Fix overlaps noted in CJ consistency report. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushRegionProcedure.java index 7c67f0e3ee90..af482aeff281 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/FlushRegionProcedure.java @@ -149,7 +149,7 @@ public void remoteCallFailed(MasterProcedureEnv env, ServerName serverName, IOEx } @Override - public void remoteOperationCompleted(MasterProcedureEnv env) { + public void remoteOperationCompleted(MasterProcedureEnv env, byte[] remoteResultData) { complete(env, null); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/LogRollProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/LogRollProcedure.java new file mode 100644 index 000000000000..a61b2c4afa55 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/LogRollProcedure.java @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.util.List; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.master.ServerListener; +import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; +import org.apache.hadoop.hbase.procedure2.ProcedureYieldException; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.LastHighestWalFilenum; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.LogRollProcedureState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.LogRollRemoteProcedureResult; + +/** + * The procedure to perform WAL rolling on all of RegionServers. 
+ */ +@InterfaceAudience.Private +public class LogRollProcedure + extends StateMachineProcedure + implements GlobalProcedureInterface { + + private static final Logger LOG = LoggerFactory.getLogger(LogRollProcedure.class); + + public LogRollProcedure() { + } + + @Override + protected Flow executeFromState(MasterProcedureEnv env, LogRollProcedureState state) + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + LOG.info("{} execute state={}", this, state); + + final ServerManager serverManager = env.getMasterServices().getServerManager(); + + try { + switch (state) { + case LOG_ROLL_ROLL_LOG_ON_RS: + // avoid potential new region server missing + serverManager.registerListener(new NewServerWALRoller(env)); + + final List subProcedures = + serverManager.getOnlineServersList().stream().map(LogRollRemoteProcedure::new).toList(); + addChildProcedure(subProcedures.toArray(new LogRollRemoteProcedure[0])); + setNextState(LogRollProcedureState.LOG_ROLL_COLLECT_RS_HIGHEST_WAL_FILENUM); + return Flow.HAS_MORE_STATE; + case LOG_ROLL_COLLECT_RS_HIGHEST_WAL_FILENUM: + // get children procedure + List children = + env.getMasterServices().getMasterProcedureExecutor().getProcedures().stream() + .filter(p -> p instanceof LogRollRemoteProcedure) + .filter(p -> p.getParentProcId() == getProcId()).map(p -> (LogRollRemoteProcedure) p) + .toList(); + LastHighestWalFilenum.Builder builder = LastHighestWalFilenum.newBuilder(); + for (Procedure child : children) { + LogRollRemoteProcedureResult result = + LogRollRemoteProcedureResult.parseFrom(child.getResult()); + builder.putFileNum(ProtobufUtil.toServerName(result.getServerName()).toString(), + result.getLastHighestWalFilenum()); + } + setResult(builder.build().toByteArray()); + setNextState(LogRollProcedureState.LOG_ROLL_UNREGISTER_SERVER_LISTENER); + return Flow.HAS_MORE_STATE; + case LOG_ROLL_UNREGISTER_SERVER_LISTENER: + serverManager.unregisterListenerIf(l -> l instanceof NewServerWALRoller); + return Flow.NO_MORE_STATE; + } + } catch (Exception e) { + setFailure("log-roll", e); + } + return Flow.NO_MORE_STATE; + } + + @Override + public String getGlobalId() { + return getClass().getSimpleName(); + } + + private static final class NewServerWALRoller implements ServerListener { + + private final MasterProcedureEnv env; + + public NewServerWALRoller(MasterProcedureEnv env) { + this.env = env; + } + + @Override + public void serverAdded(ServerName server) { + env.getMasterServices().getMasterProcedureExecutor() + .submitProcedure(new LogRollRemoteProcedure(server)); + } + } + + @Override + protected void rollbackState(MasterProcedureEnv env, LogRollProcedureState state) { + // nothing to rollback + } + + @Override + protected LogRollProcedureState getState(int stateId) { + return LogRollProcedureState.forNumber(stateId); + } + + @Override + protected int getStateId(LogRollProcedureState state) { + return state.getNumber(); + } + + @Override + protected LogRollProcedureState getInitialState() { + return LogRollProcedureState.LOG_ROLL_ROLL_LOG_ON_RS; + } + + @Override + protected boolean abort(MasterProcedureEnv env) { + return false; + } + + @Override + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { + super.serializeStateData(serializer); + + if (getResult() != null && getResult().length > 0) { + serializer.serialize(LastHighestWalFilenum.parseFrom(getResult())); + } else { + serializer.serialize(LastHighestWalFilenum.getDefaultInstance()); + } + } + + @Override + protected void 
deserializeStateData(ProcedureStateSerializer serializer) throws IOException { + super.deserializeStateData(serializer); + + if (getResult() == null) { + LastHighestWalFilenum lastHighestWalFilenum = + serializer.deserialize(LastHighestWalFilenum.class); + if (lastHighestWalFilenum != null) { + if ( + lastHighestWalFilenum.getFileNumMap().isEmpty() + && getCurrentState() == LogRollProcedureState.LOG_ROLL_UNREGISTER_SERVER_LISTENER + ) { + LOG.warn("pid = {}, current state is the last state, but rsHighestWalFilenumMap is " + + "empty, this should not happen. Are all region servers down ?", getProcId()); + } else { + setResult(lastHighestWalFilenum.toByteArray()); + } + } + } + } + + @Override + protected void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/LogRollRemoteProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/LogRollRemoteProcedure.java new file mode 100644 index 000000000000..df8e02ed6010 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/LogRollRemoteProcedure.java @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.util.Optional; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher.ServerOperation; +import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; +import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation; +import org.apache.hadoop.hbase.regionserver.LogRollCallable; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.LogRollRemoteProcedureStateData; + +/** + * The remote procedure to perform WAL rolling on the specific RegionServer without retrying. 
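For the two log roll procedures above, a hypothetical caller-side sketch may help; it is not part of the patch, the class and method names are invented, and how the caller waits for procedure completion is deliberately left out:

import java.io.IOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.LastHighestWalFilenum;

// Hypothetical sketch: submit the new master-side procedure, then decode the
// result bytes that LogRollProcedure stores via setResult(...).
public final class RollAllWalsSketch {
  private RollAllWalsSketch() {
  }

  public static long submit(MasterServices master) throws IOException {
    // New MasterServices method added in this patch; NO_NONCE skips request dedup.
    return master.rollAllWALWriters(HConstants.NO_NONCE, HConstants.NO_NONCE);
  }

  public static void printHighestWalFilenums(byte[] procedureResult) throws IOException {
    // LogRollProcedure serializes a LastHighestWalFilenum message keyed by server name.
    LastHighestWalFilenum filenums = LastHighestWalFilenum.parseFrom(procedureResult);
    filenums.getFileNumMap()
      .forEach((server, fileNum) -> System.out.println(server + " -> " + fileNum));
  }
}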
+ */ +@InterfaceAudience.Private +public class LogRollRemoteProcedure extends ServerRemoteProcedure + implements ServerProcedureInterface { + + public LogRollRemoteProcedure() { + } + + public LogRollRemoteProcedure(ServerName targetServer) { + this.targetServer = targetServer; + } + + @Override + protected void rollback(MasterProcedureEnv env) { + throw new UnsupportedOperationException(); + } + + @Override + protected boolean abort(MasterProcedureEnv env) { + return false; + } + + @Override + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { + serializer.serialize(LogRollRemoteProcedureStateData.newBuilder() + .setTargetServer(ProtobufUtil.toServerName(targetServer)).build()); + } + + @Override + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { + LogRollRemoteProcedureStateData data = + serializer.deserialize(LogRollRemoteProcedureStateData.class); + this.targetServer = ProtobufUtil.toServerName(data.getTargetServer()); + } + + @Override + public Optional remoteCallBuild(MasterProcedureEnv env, ServerName serverName) { + return Optional.of(new ServerOperation(this, getProcId(), LogRollCallable.class, + LogRollRemoteProcedureStateData.getDefaultInstance().toByteArray(), + env.getMasterServices().getMasterActiveTime())); + } + + @Override + public ServerName getServerName() { + return targetServer; + } + + @Override + public boolean hasMetaTableRegion() { + return false; + } + + @Override + public ServerOperationType getServerOperationType() { + return ServerOperationType.LOG_ROLL; + } + + @Override + protected boolean complete(MasterProcedureEnv env, Throwable error) { + // do not retry. just returns. + if (error != null) { + LOG.warn("Failed to roll wal for {}", targetServer, error); + return false; + } else { + return true; + } + } + + @Override + public synchronized void remoteOperationCompleted(MasterProcedureEnv env, + byte[] remoteResultData) { + setResult(remoteResultData); + super.remoteOperationCompleted(env, remoteResultData); + } + + @Override + protected void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()).append(" targetServer=").append(targetServer); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreBackupSystemTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreBackupSystemTableProcedure.java new file mode 100644 index 000000000000..3a204d42a2c8 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreBackupSystemTableProcedure.java @@ -0,0 +1,171 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.util.List; +import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; +import org.apache.hadoop.hbase.procedure2.ProcedureYieldException; +import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RestoreBackupSystemTableState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; + +@InterfaceAudience.Private +public class RestoreBackupSystemTableProcedure + extends AbstractStateMachineTableProcedure { + private static final Logger LOG = + LoggerFactory.getLogger(RestoreBackupSystemTableProcedure.class); + + private final SnapshotDescription snapshot; + private boolean enableOnRollback = false; + + // Necessary for the procedure framework. Do not remove. + public RestoreBackupSystemTableProcedure() { + this(null); + } + + public RestoreBackupSystemTableProcedure(SnapshotDescription snapshot) { + this.snapshot = snapshot; + } + + @Override + public TableName getTableName() { + return TableName.valueOf(snapshot.getTable()); + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.RESTORE_BACKUP_SYSTEM_TABLE; + } + + @Override + protected Flow executeFromState(MasterProcedureEnv env, RestoreBackupSystemTableState state) + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + LOG.info("{} execute state={}", this, state); + + try { + switch (state) { + case RESTORE_BACKUP_SYSTEM_TABLE_PREPARE: + prepare(env); + return moreState(RestoreBackupSystemTableState.RESTORE_BACKUP_SYSTEM_TABLE_DISABLE); + case RESTORE_BACKUP_SYSTEM_TABLE_DISABLE: + TableState tableState = + env.getMasterServices().getTableStateManager().getTableState(getTableName()); + if (tableState.isEnabled()) { + addChildProcedure(createDisableTableProcedure(env)); + } + return moreState(RestoreBackupSystemTableState.RESTORE_BACKUP_SYSTEM_TABLE_RESTORE); + case RESTORE_BACKUP_SYSTEM_TABLE_RESTORE: + addChildProcedure(createRestoreSnapshotProcedure(env)); + return moreState(RestoreBackupSystemTableState.RESTORE_BACKUP_SYSTEM_TABLE_ENABLE); + case RESTORE_BACKUP_SYSTEM_TABLE_ENABLE: + addChildProcedure(createEnableTableProcedure(env)); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (Exception e) { + setFailure("restore-backup-system-table", e); + LOG.warn("unexpected exception while execute {}. 
Mark procedure Failed.", this, e); + return Flow.NO_MORE_STATE; + } + } + + @Override + protected void rollbackState(MasterProcedureEnv env, RestoreBackupSystemTableState state) + throws IOException, InterruptedException { + switch (state) { + case RESTORE_BACKUP_SYSTEM_TABLE_DISABLE: + case RESTORE_BACKUP_SYSTEM_TABLE_PREPARE: + return; + case RESTORE_BACKUP_SYSTEM_TABLE_RESTORE: + case RESTORE_BACKUP_SYSTEM_TABLE_ENABLE: + if (enableOnRollback) { + addChildProcedure(createEnableTableProcedure(env)); + } + return; + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } + + @Override + protected RestoreBackupSystemTableState getState(int stateId) { + return RestoreBackupSystemTableState.forNumber(stateId); + } + + @Override + protected int getStateId(RestoreBackupSystemTableState state) { + return state.getNumber(); + } + + @Override + protected RestoreBackupSystemTableState getInitialState() { + return RestoreBackupSystemTableState.RESTORE_BACKUP_SYSTEM_TABLE_PREPARE; + } + + private Flow moreState(RestoreBackupSystemTableState next) { + setNextState(next); + return Flow.HAS_MORE_STATE; + } + + private Procedure[] createDisableTableProcedure(MasterProcedureEnv env) + throws HBaseIOException { + DisableTableProcedure disableTableProcedure = + new DisableTableProcedure(env, getTableName(), true); + return new DisableTableProcedure[] { disableTableProcedure }; + } + + private Procedure[] createEnableTableProcedure(MasterProcedureEnv env) { + EnableTableProcedure enableTableProcedure = new EnableTableProcedure(env, getTableName()); + return new EnableTableProcedure[] { enableTableProcedure }; + } + + private Procedure[] createRestoreSnapshotProcedure(MasterProcedureEnv env) + throws IOException { + TableDescriptor desc = env.getMasterServices().getTableDescriptors().get(getTableName()); + RestoreSnapshotProcedure restoreSnapshotProcedure = + new RestoreSnapshotProcedure(env, desc, snapshot); + return new RestoreSnapshotProcedure[] { restoreSnapshotProcedure }; + } + + private void prepare(MasterProcedureEnv env) throws IOException { + List snapshots = + env.getMasterServices().getSnapshotManager().getCompletedSnapshots(); + boolean exists = snapshots.stream().anyMatch(s -> s.getName().equals(snapshot.getName())); + if (!exists) { + throw new SnapshotDoesNotExistException(ProtobufUtil.createSnapshotDesc(snapshot)); + } + + TableState tableState = + env.getMasterServices().getTableStateManager().getTableState(getTableName()); + if (tableState.isEnabled()) { + enableOnRollback = true; + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerProcedureInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerProcedureInterface.java index e73b23a3f965..b7ff6db67dbb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerProcedureInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerProcedureInterface.java @@ -62,6 +62,11 @@ public enum ServerOperationType { * Re-read the hbase:quotas table and update {@link QuotaCache}. */ RELOAD_QUOTAS, + + /** + * send roll log request to region server and handle the response + */ + LOG_ROLL } /** Returns Name of this server instance. 
*/ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerQueue.java index 57912f419039..55920bd47b38 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerQueue.java @@ -44,6 +44,7 @@ public boolean requireExclusiveLock(Procedure proc) { case CLAIM_REPLICATION_QUEUE_REMOTE: case VERIFY_SNAPSHOT: case RELOAD_QUOTAS: + case LOG_ROLL: return false; default: break; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerRemoteProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerRemoteProcedure.java index 0c89b6396417..563961d765e5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerRemoteProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerRemoteProcedure.java @@ -123,7 +123,8 @@ public synchronized void remoteCallFailed(MasterProcedureEnv env, ServerName ser } @Override - public synchronized void remoteOperationCompleted(MasterProcedureEnv env) { + public synchronized void remoteOperationCompleted(MasterProcedureEnv env, + byte[] remoteResultData) { state = MasterProcedureProtos.ServerRemoteProcedureState.SERVER_REMOTE_PROCEDURE_REPORT_SUCCEED; remoteOperationDone(env, null); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotRegionProcedure.java index 05621767e7f8..f4df40b168f0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotRegionProcedure.java @@ -108,7 +108,7 @@ public void remoteCallFailed(MasterProcedureEnv env, ServerName serverName, IOEx } @Override - public void remoteOperationCompleted(MasterProcedureEnv env) { + public void remoteOperationCompleted(MasterProcedureEnv env, byte[] remoteResultData) { complete(env, null); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotVerifyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotVerifyProcedure.java index a3e126484c34..34a12ed52b1a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotVerifyProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotVerifyProcedure.java @@ -109,8 +109,7 @@ protected synchronized boolean complete(MasterProcedureEnv env, Throwable error) setFailure("verify-snapshot", e); } finally { // release the worker - env.getMasterServices().getSnapshotManager().releaseSnapshotVerifyWorker(this, targetServer, - env.getProcedureScheduler()); + env.getMasterServices().getSnapshotManager().releaseSnapshotVerifyWorker(this, targetServer); } return isProcedureCompleted; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitWALProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitWALProcedure.java index 699834f9c1d7..98c2c0ec6930 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitWALProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitWALProcedure.java @@ -90,7 +90,7 
@@ protected Flow executeFromState(MasterProcedureEnv env, MasterProcedureProtos.Sp skipPersistence(); throw new ProcedureSuspendedException(); } - splitWALManager.releaseSplitWALWorker(worker, env.getProcedureScheduler()); + splitWALManager.releaseSplitWALWorker(worker); if (!finished) { LOG.warn("Failed to split wal {} by server {}, retry...", walPath, worker); setNextState(MasterProcedureProtos.SplitWALState.ACQUIRE_SPLIT_WAL_WORKER); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java index 00b9776366d5..c5c7ec602eab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java @@ -50,7 +50,8 @@ public enum TableOperationType { REGION_UNASSIGN, REGION_GC, MERGED_REGIONS_GC/* region operations */, - REGION_TRUNCATE + REGION_TRUNCATE, + RESTORE_BACKUP_SYSTEM_TABLE } /** Returns the name of the table the procedure is operating on */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java index be66a28d275e..7be4c4b1810e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java @@ -54,6 +54,7 @@ static boolean requireTableExclusiveLock(TableProcedureInterface proc) { case DISABLE: case SNAPSHOT: case ENABLE: + case RESTORE_BACKUP_SYSTEM_TABLE: return true; case EDIT: // we allow concurrent edit on the ns family in meta table diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java index 2a3732c99984..ef11e68217a5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java @@ -109,6 +109,7 @@ assert getRegion().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID || isFailed() setNextState(TruncateRegionState.TRUNCATE_REGION_MAKE_ONLINE); break; case TRUNCATE_REGION_MAKE_ONLINE: + createRegionOnFileSystem(env); addChildProcedure(createAssignProcedures(env)); setNextState(TruncateRegionState.TRUNCATE_REGION_POST_OPERATION); break; @@ -130,6 +131,20 @@ assert getRegion().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID || isFailed() return Flow.HAS_MORE_STATE; } + private void createRegionOnFileSystem(final MasterProcedureEnv env) throws IOException { + RegionStateNode regionNode = + env.getAssignmentManager().getRegionStates().getRegionStateNode(getRegion()); + regionNode.lock(); + try { + final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); + final Path tableDir = CommonFSUtils.getTableDir(mfs.getRootDir(), getTableName()); + HRegionFileSystem.createRegionOnFileSystem(env.getMasterConfiguration(), mfs.getFileSystem(), + tableDir, getRegion()); + } finally { + regionNode.unlock(); + } + } + private void deleteRegionFromFileSystem(final MasterProcedureEnv env) throws IOException { RegionStateNode regionNode = env.getAssignmentManager().getRegionStates().getRegionStateNode(getRegion()); diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index ac9e654fcb34..c86af2bda5e7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -26,7 +26,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executors; @@ -66,7 +65,6 @@ import org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner; import org.apache.hadoop.hbase.master.procedure.CloneSnapshotProcedure; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; -import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler; import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; import org.apache.hadoop.hbase.master.procedure.RestoreSnapshotProcedure; import org.apache.hadoop.hbase.master.procedure.SnapshotProcedure; @@ -1474,20 +1472,14 @@ public boolean snapshotProcedureEnabled() { public ServerName acquireSnapshotVerifyWorker(SnapshotVerifyProcedure procedure) throws ProcedureSuspendedException { - Optional worker = verifyWorkerAssigner.acquire(); - if (worker.isPresent()) { - LOG.debug("{} Acquired verify snapshot worker={}", procedure, worker.get()); - return worker.get(); - } - verifyWorkerAssigner.suspend(procedure); - throw new ProcedureSuspendedException(); + ServerName worker = verifyWorkerAssigner.acquire(procedure); + LOG.debug("{} Acquired verify snapshot worker={}", procedure, worker); + return worker; } - public void releaseSnapshotVerifyWorker(SnapshotVerifyProcedure procedure, ServerName worker, - MasterProcedureScheduler scheduler) { + public void releaseSnapshotVerifyWorker(SnapshotVerifyProcedure procedure, ServerName worker) { LOG.debug("{} Release verify snapshot worker={}", procedure, worker); verifyWorkerAssigner.release(worker); - verifyWorkerAssigner.wake(scheduler); } private void restoreWorkers() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobConstants.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobConstants.java index 5efea69788ca..e8332cd0a014 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobConstants.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobConstants.java @@ -144,6 +144,12 @@ public final class MobConstants { public static final String MOB_COMPACTION_THREADS_MAX = "hbase.mob.compaction.threads.max"; public static final int DEFAULT_MOB_COMPACTION_THREADS_MAX = 1; + public static final String MOB_CLEANER_THREAD_COUNT = "hbase.master.mob.cleaner.threads"; + public static final int DEFAULT_MOB_CLEANER_THREAD_COUNT = 1; + public static final String MOB_FILE_CLEANER_CHORE_TIME_OUT = + "hbase.master.mob.cleaner.chore.timeout"; + public static final int DEFAULT_MOB_FILE_CLEANER_CHORE_TIME_OUT = 5 * 60; // 5 minutes + private MobConstants() { } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanerChore.java index 3144b71f11e9..3f9413bfe4d2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanerChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCleanerChore.java @@ -17,30 +17,49 @@ */ package 
org.apache.hadoop.hbase.mob; +import static org.apache.hadoop.hbase.mob.MobConstants.DEFAULT_MOB_FILE_CLEANER_CHORE_TIME_OUT; +import static org.apache.hadoop.hbase.mob.MobConstants.MOB_FILE_CLEANER_CHORE_TIME_OUT; + +import com.google.errorprone.annotations.RestrictedApi; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.conf.ConfigurationObserver; import org.apache.hadoop.hbase.master.HMaster; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + /** * The class MobFileCleanerChore for running cleaner regularly to remove the expired and obsolete * (files which have no active references to) mob files. */ @InterfaceAudience.Private -public class MobFileCleanerChore extends ScheduledChore { +public class MobFileCleanerChore extends ScheduledChore implements ConfigurationObserver { private static final Logger LOG = LoggerFactory.getLogger(MobFileCleanerChore.class); + private final HMaster master; - private ExpiredMobFileCleaner cleaner; + private final ExpiredMobFileCleaner cleaner; + private final ThreadPoolExecutor executor; + private final int cleanerFutureTimeout; + private int threadCount; public MobFileCleanerChore(HMaster master) { super(master.getServerName() + "-MobFileCleanerChore", master, @@ -52,7 +71,21 @@ public MobFileCleanerChore(HMaster master) { this.master = master; cleaner = new ExpiredMobFileCleaner(); cleaner.setConf(master.getConfiguration()); + threadCount = master.getConfiguration().getInt(MobConstants.MOB_CLEANER_THREAD_COUNT, + MobConstants.DEFAULT_MOB_CLEANER_THREAD_COUNT); + if (threadCount < 1) { + threadCount = 1; + } + + ThreadFactory threadFactory = + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("mobfile-cleaner-pool-%d").build(); + + executor = new ThreadPoolExecutor(threadCount, threadCount, 60, TimeUnit.SECONDS, + new LinkedBlockingQueue(), threadFactory); + checkObsoleteConfigurations(); + cleanerFutureTimeout = master.getConfiguration().getInt(MOB_FILE_CLEANER_CHORE_TIME_OUT, + DEFAULT_MOB_FILE_CLEANER_CHORE_TIME_OUT); } private void checkObsoleteConfigurations() { @@ -83,29 +116,93 @@ protected void chore() { LOG.error("MobFileCleanerChore failed", e); return; } + List> futureList = new ArrayList<>(map.size()); for (TableDescriptor htd : map.values()) { - for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) { - if (hcd.isMobEnabled() && hcd.getMinVersions() == 0) { - try { - cleaner.cleanExpiredMobFiles(htd, hcd); - } catch (IOException e) { - LOG.error("Failed to clean the expired mob files table={} family={}", - htd.getTableName().getNameAsString(), hcd.getNameAsString(), e); - } - } - } + Future future = executor.submit(() -> handleOneTable(htd)); + futureList.add(future); + } + + for (Future 
future : futureList) { try { - // Now clean obsolete files for a table - LOG.info("Cleaning obsolete MOB files from table={}", htd.getTableName()); - try (final Admin admin = master.getConnection().getAdmin()) { - MobFileCleanupUtil.cleanupObsoleteMobFiles(master.getConfiguration(), htd.getTableName(), - admin); + future.get(cleanerFutureTimeout, TimeUnit.SECONDS); + } catch (InterruptedException e) { + LOG.warn("MobFileCleanerChore interrupted while waiting for futures", e); + Thread.currentThread().interrupt(); + cancelAllFutures(futureList); + break; + } catch (ExecutionException e) { + LOG.error("Exception during execution of MobFileCleanerChore task", e); + } catch (TimeoutException e) { + LOG.error("MobFileCleanerChore timed out waiting for a task to complete", e); + } + } + } + + private void cancelAllFutures(List> futureList) { + long pendingTaskCounter = 0; + for (Future f : futureList) { + if (!f.isDone()) { + f.cancel(true); // interrupt running tasks + pendingTaskCounter++; + } + } + LOG.info("Cancelled {} pending mob file cleaner tasks", pendingTaskCounter); + } + + private void handleOneTable(TableDescriptor htd) { + for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) { + if (hcd.isMobEnabled() && hcd.getMinVersions() == 0) { + try { + cleaner.cleanExpiredMobFiles(htd, hcd); + } catch (IOException e) { + LOG.error("Failed to clean the expired mob files table={} family={}", + htd.getTableName().getNameAsString(), hcd.getNameAsString(), e); } - LOG.info("Cleaning obsolete MOB files finished for table={}", htd.getTableName()); - } catch (IOException e) { - LOG.error("Failed to clean the obsolete mob files for table={}", htd.getTableName(), e); } } + try { + // Now clean obsolete files for a table + LOG.info("Cleaning obsolete MOB files from table={}", htd.getTableName()); + try (final Admin admin = master.getConnection().getAdmin()) { + MobFileCleanupUtil.cleanupObsoleteMobFiles(master.getConfiguration(), htd.getTableName(), + admin); + } + LOG.info("Cleaning obsolete MOB files finished for table={}", htd.getTableName()); + } catch (IOException e) { + LOG.error("Failed to clean the obsolete mob files for table={}", htd.getTableName(), e); + } + } + + @Override + public void onConfigurationChange(Configuration conf) { + int newThreadCount = conf.getInt(MobConstants.MOB_CLEANER_THREAD_COUNT, + MobConstants.DEFAULT_MOB_CLEANER_THREAD_COUNT); + if (newThreadCount < 1) { + return; // invalid value , skip the config change + } + + if (newThreadCount != threadCount) { + resizeThreadPool(newThreadCount, newThreadCount); + threadCount = newThreadCount; + } } + private void resizeThreadPool(int newCoreSize, int newMaxSize) { + int currentCoreSize = executor.getCorePoolSize(); + if (newCoreSize > currentCoreSize) { + // Increasing the pool size: Set max first, then core + executor.setMaximumPoolSize(newMaxSize); + executor.setCorePoolSize(newCoreSize); + } else { + // Decreasing the pool size: Set core first, then max + executor.setCorePoolSize(newCoreSize); + executor.setMaximumPoolSize(newMaxSize); + } + } + + @RestrictedApi(explanation = "Should only be called in tests", link = "", + allowedOnPath = ".*/src/test/.*") + public ThreadPoolExecutor getExecutor() { + return executor; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/ThreadLocalServerSideScanMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/ThreadLocalServerSideScanMetrics.java index 8c9ec24e8662..e14761ab6e18 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/ThreadLocalServerSideScanMetrics.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/ThreadLocalServerSideScanMetrics.java @@ -18,10 +18,12 @@ package org.apache.hadoop.hbase.monitoring; import java.util.concurrent.atomic.AtomicLong; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics; import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.ScannerContext; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; /** * Thread-local storage for server-side scan metrics that captures performance data separately for @@ -61,7 +63,8 @@ * @see RegionScanner * @see org.apache.hadoop.hbase.regionserver.handler.ParallelSeekHandler */ -@InterfaceAudience.Private +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.PHOENIX) +@InterfaceStability.Evolving public final class ThreadLocalServerSideScanMetrics { private ThreadLocalServerSideScanMetrics() { } @@ -81,6 +84,9 @@ private ThreadLocalServerSideScanMetrics() { private static final ThreadLocal BLOCK_READ_OPS_COUNT = ThreadLocal.withInitial(() -> new AtomicLong(0)); + private static final ThreadLocal FS_READ_TIME = + ThreadLocal.withInitial(() -> new AtomicLong(0)); + public static void setScanMetricsEnabled(boolean enable) { IS_SCAN_METRICS_ENABLED.set(enable); } @@ -101,6 +107,10 @@ public static long addBlockReadOpsCount(long count) { return BLOCK_READ_OPS_COUNT.get().addAndGet(count); } + public static long addFsReadTime(long time) { + return FS_READ_TIME.get().addAndGet(time); + } + public static boolean isScanMetricsEnabled() { return IS_SCAN_METRICS_ENABLED.get(); } @@ -121,6 +131,10 @@ public static AtomicLong getBlockReadOpsCountCounter() { return BLOCK_READ_OPS_COUNT.get(); } + public static AtomicLong getFsReadTimeCounter() { + return FS_READ_TIME.get(); + } + public static long getBytesReadFromFsAndReset() { return getBytesReadFromFsCounter().getAndSet(0); } @@ -137,11 +151,16 @@ public static long getBlockReadOpsCountAndReset() { return getBlockReadOpsCountCounter().getAndSet(0); } + public static long getFsReadTimeAndReset() { + return getFsReadTimeCounter().getAndSet(0); + } + public static void reset() { getBytesReadFromFsAndReset(); getBytesReadFromBlockCacheAndReset(); getBytesReadFromMemstoreAndReset(); getBlockReadOpsCountAndReset(); + getFsReadTimeAndReset(); } public static void populateServerSideScanMetrics(ServerSideScanMetrics metrics) { @@ -156,5 +175,7 @@ public static void populateServerSideScanMetrics(ServerSideScanMetrics metrics) getBytesReadFromMemstoreCounter().get()); metrics.addToCounter(ServerSideScanMetrics.BLOCK_READ_OPS_COUNT_METRIC_NAME, getBlockReadOpsCountCounter().get()); + metrics.addToCounter(ServerSideScanMetrics.FS_READ_TIME_METRIC_NAME, + getFsReadTimeCounter().get()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/BaseRSProcedureCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/BaseRSProcedureCallable.java index 68aac1ef6e2d..7ea98d00cc7b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/BaseRSProcedureCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/BaseRSProcedureCallable.java @@ -28,12 +28,11 @@ public abstract class BaseRSProcedureCallable implements RSProcedureCallable { private Exception initError; @Override - public final 
Void call() throws Exception { + public final byte[] call() throws Exception { if (initError != null) { throw initError; } - doCall(); - return null; + return doCall(); } @Override @@ -46,7 +45,7 @@ public final void init(byte[] parameter, HRegionServer rs) { } } - protected abstract void doCall() throws Exception; + protected abstract byte[] doCall() throws Exception; protected abstract void initParameter(byte[] parameter) throws Exception; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/RSProcedureCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/RSProcedureCallable.java index 635d2b6f87a5..7ed9ff7664b4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/RSProcedureCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/RSProcedureCallable.java @@ -26,7 +26,7 @@ * A general interface for a sub procedure runs at RS side. */ @InterfaceAudience.Private -public interface RSProcedureCallable extends Callable { +public interface RSProcedureCallable extends Callable { /** * Initialize the callable diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FeedbackAdaptiveRateLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FeedbackAdaptiveRateLimiter.java new file mode 100644 index 000000000000..6acfd07328e8 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FeedbackAdaptiveRateLimiter.java @@ -0,0 +1,361 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.quotas; + +import java.util.concurrent.atomic.AtomicLong; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; + +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.AtomicDouble; + +/** + * An adaptive rate limiter that dynamically adjusts its behavior based on observed usage patterns + * to achieve stable, full utilization of configured quota allowances while managing client + * contention. + *

+ * Core Algorithm: This rate limiter divides time into fixed refill intervals (configurable + * via {@code hbase.quota.rate.limiter.refill.interval.ms}, default is 1 refill per TimeUnit of the + * RateLimiter). At the beginning of each interval, a fresh allocation of resources becomes + * available based on the configured limit. Clients consume resources as they make requests. When + * resources are exhausted, clients must wait until the next refill, or until enough resources + * become available. + *

+ * Adaptive Backpressure: When multiple threads compete for limited resources (contention), + * this limiter detects the contention and applies increasing backpressure by extending wait + * intervals. This prevents thundering herd behavior where many threads wake simultaneously and + * compete for the same resources. The backoff multiplier increases by a small increment (see + * {@link #FEEDBACK_ADAPTIVE_BACKOFF_MULTIPLIER_INCREMENT}) per interval when contention occurs, and + * decreases (see {@link #FEEDBACK_ADAPTIVE_BACKOFF_MULTIPLIER_DECREMENT}) when no contention is + * detected, converging toward optimal throughput. The multiplier is capped at a maximum value (see + * {@link #FEEDBACK_ADAPTIVE_MAX_BACKOFF_MULTIPLIER}) to prevent unbounded waits. + *

+ * Contention is detected when {@link #getWaitInterval} is called with insufficient available + * resources (i.e., {@code amount > available}), indicating a thread needs to wait for resources. If + * this occurs more than once in a refill interval, the limiter identifies it as contention + * requiring increased backpressure. + *

+ * Oversubscription for Full Utilization: In practice, synchronization overhead and timing + * variations often prevent clients from consuming exactly their full allowance, resulting in + * consistent under-utilization. This limiter addresses this by tracking utilization via an + * exponentially weighted moving average (EWMA). When average utilization falls below the target + * range (determined by {@link #FEEDBACK_ADAPTIVE_UTILIZATION_ERROR_BUDGET}), the limiter gradually + * increases the oversubscription proportion (see + * {@link #FEEDBACK_ADAPTIVE_OVERSUBSCRIPTION_INCREMENT}), allowing more resources per interval than + * the base limit. Conversely, when utilization exceeds the target range, oversubscription is + * decreased (see {@link #FEEDBACK_ADAPTIVE_OVERSUBSCRIPTION_DECREMENT}). Oversubscription is capped + * (see {@link #FEEDBACK_ADAPTIVE_MAX_OVERSUBSCRIPTION}) to prevent excessive bursts while still + * enabling consistent full utilization. + *

+ * Example Scenario: Consider a quota of 1000 requests per second with a 1-second refill + * interval. Without oversubscription, clients might typically achieve only 950 req/s due to + * coordination delays. This limiter would detect the under-utilization, gradually increase + * oversubscription, allowing slightly more resources per interval, which compensates for + * inefficiencies and achieves stable throughput closer to the configured quota. If multiple threads + * simultaneously try to consume resources and repeatedly wait, the backoff multiplier increases + * their wait times, spreading out their retry attempts and reducing wasted CPU cycles. + *

+ * Configuration Parameters: + *

    + *
+ * <ul>
+ * <li>{@link #FEEDBACK_ADAPTIVE_BACKOFF_MULTIPLIER_INCREMENT}: Controls rate of backpressure
+ * increase</li>
+ * <li>{@link #FEEDBACK_ADAPTIVE_BACKOFF_MULTIPLIER_DECREMENT}: Controls rate of backpressure
+ * decrease</li>
+ * <li>{@link #FEEDBACK_ADAPTIVE_MAX_BACKOFF_MULTIPLIER}: Caps the maximum wait time extension</li>
+ * <li>{@link #FEEDBACK_ADAPTIVE_OVERSUBSCRIPTION_INCREMENT}: Controls rate of oversubscription
+ * increase</li>
+ * <li>{@link #FEEDBACK_ADAPTIVE_OVERSUBSCRIPTION_DECREMENT}: Controls rate of oversubscription
+ * decrease</li>
+ * <li>{@link #FEEDBACK_ADAPTIVE_MAX_OVERSUBSCRIPTION}: Caps the maximum burst capacity</li>
+ * <li>{@link #FEEDBACK_ADAPTIVE_UTILIZATION_ERROR_BUDGET}: Defines the acceptable range around
+ * full utilization</li>
+ * </ul>
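To make the knobs listed above concrete, here is a minimal tuning sketch (editor's illustration only; the overridden values are arbitrary examples, not recommendations, and the remaining properties fall back to the defaults defined in this class):

// Illustrative only: override two of the adaptive-limiter properties listed above
// and build a limiter through the factory defined in this class.
Configuration conf = HBaseConfiguration.create();
conf.setDouble(FeedbackAdaptiveRateLimiter.FEEDBACK_ADAPTIVE_MAX_OVERSUBSCRIPTION, 0.10);
conf.setDouble(FeedbackAdaptiveRateLimiter.FEEDBACK_ADAPTIVE_UTILIZATION_ERROR_BUDGET, 0.05);
FeedbackAdaptiveRateLimiter limiter =
  new FeedbackAdaptiveRateLimiter.FeedbackAdaptiveRateLimiterFactory(conf).create();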
+ *

+ * This algorithm converges toward stable operation where: (1) wait intervals are just long enough + * to prevent excessive contention, and (2) oversubscription is just high enough to achieve + * consistent full utilization of the configured allowance. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class FeedbackAdaptiveRateLimiter extends RateLimiter { + + /** + * Amount to increase the backoff multiplier when contention is detected per refill interval. In + * other words, if we are throttling more than once per refill interval, then we will increase our + * wait intervals (increase backpressure, decrease throughput). + */ + public static final String FEEDBACK_ADAPTIVE_BACKOFF_MULTIPLIER_INCREMENT = + "hbase.quota.rate.limiter.feedback.adaptive.backoff.multiplier.increment"; + public static final double DEFAULT_BACKOFF_MULTIPLIER_INCREMENT = 0.0005; + + /** + * Amount to decrease the backoff multiplier when no contention is detected per refill interval. + * In other words, if we are only throttling once per refill interval, then we will decrease our + * wait interval (decrease backpressure, increase throughput). + */ + public static final String FEEDBACK_ADAPTIVE_BACKOFF_MULTIPLIER_DECREMENT = + "hbase.quota.rate.limiter.feedback.adaptive.backoff.multiplier.decrement"; + public static final double DEFAULT_BACKOFF_MULTIPLIER_DECREMENT = 0.0001; + + /** + * Maximum ceiling for the backoff multiplier to avoid unbounded waits. + */ + public static final String FEEDBACK_ADAPTIVE_MAX_BACKOFF_MULTIPLIER = + "hbase.quota.rate.limiter.feedback.adaptive.max.backoff.multiplier"; + public static final double DEFAULT_MAX_BACKOFF_MULTIPLIER = 10.0; + + /** + * Amount to increase the oversubscription proportion when utilization is below (1.0-errorBudget). + */ + public static final String FEEDBACK_ADAPTIVE_OVERSUBSCRIPTION_INCREMENT = + "hbase.quota.rate.limiter.feedback.adaptive.oversubscription.increment"; + public static final double DEFAULT_OVERSUBSCRIPTION_INCREMENT = 0.001; + + /** + * Amount to decrease the oversubscription proportion when utilization exceeds (1.0+errorBudget). + */ + public static final String FEEDBACK_ADAPTIVE_OVERSUBSCRIPTION_DECREMENT = + "hbase.quota.rate.limiter.feedback.adaptive.oversubscription.decrement"; + public static final double DEFAULT_OVERSUBSCRIPTION_DECREMENT = 0.00005; + + /** + * Maximum ceiling for oversubscription to prevent unbounded bursts. Some oversubscription can be + * nice, because it allows you to balance the inefficiency and latency of retries, landing on + * stable usage at approximately your configured allowance. Without adequate oversubscription, + * your steady state may often seem significantly, and suspiciously, lower than your configured + * allowance. + */ + public static final String FEEDBACK_ADAPTIVE_MAX_OVERSUBSCRIPTION = + "hbase.quota.rate.limiter.feedback.adaptive.max.oversubscription"; + public static final double DEFAULT_MAX_OVERSUBSCRIPTION = 0.25; + + /** + * Acceptable deviation around full utilization (1.0) for adjusting oversubscription. If stable + * throttle usage is typically under (1.0-errorBudget), then we will allow more oversubscription. + * If stable throttle usage is typically over (1.0+errorBudget), then we will pull back + * oversubscription. 
+ */ + public static final String FEEDBACK_ADAPTIVE_UTILIZATION_ERROR_BUDGET = + "hbase.quota.rate.limiter.feedback.adaptive.utilization.error.budget"; + public static final double DEFAULT_UTILIZATION_ERROR_BUDGET = 0.025; + + private static final int WINDOW_TIME_MS = 60_000; + + public static class FeedbackAdaptiveRateLimiterFactory { + + private final long refillInterval; + private final double backoffMultiplierIncrement; + private final double backoffMultiplierDecrement; + private final double maxBackoffMultiplier; + private final double oversubscriptionIncrement; + private final double oversubscriptionDecrement; + private final double maxOversubscription; + private final double utilizationErrorBudget; + + public FeedbackAdaptiveRateLimiterFactory(Configuration conf) { + refillInterval = conf.getLong(FixedIntervalRateLimiter.RATE_LIMITER_REFILL_INTERVAL_MS, + RateLimiter.DEFAULT_TIME_UNIT); + + maxBackoffMultiplier = + conf.getDouble(FEEDBACK_ADAPTIVE_MAX_BACKOFF_MULTIPLIER, DEFAULT_MAX_BACKOFF_MULTIPLIER); + + backoffMultiplierIncrement = conf.getDouble(FEEDBACK_ADAPTIVE_BACKOFF_MULTIPLIER_INCREMENT, + DEFAULT_BACKOFF_MULTIPLIER_INCREMENT); + backoffMultiplierDecrement = conf.getDouble(FEEDBACK_ADAPTIVE_BACKOFF_MULTIPLIER_DECREMENT, + DEFAULT_BACKOFF_MULTIPLIER_DECREMENT); + + oversubscriptionIncrement = conf.getDouble(FEEDBACK_ADAPTIVE_OVERSUBSCRIPTION_INCREMENT, + DEFAULT_OVERSUBSCRIPTION_INCREMENT); + oversubscriptionDecrement = conf.getDouble(FEEDBACK_ADAPTIVE_OVERSUBSCRIPTION_DECREMENT, + DEFAULT_OVERSUBSCRIPTION_DECREMENT); + + maxOversubscription = + conf.getDouble(FEEDBACK_ADAPTIVE_MAX_OVERSUBSCRIPTION, DEFAULT_MAX_OVERSUBSCRIPTION); + utilizationErrorBudget = conf.getDouble(FEEDBACK_ADAPTIVE_UTILIZATION_ERROR_BUDGET, + DEFAULT_UTILIZATION_ERROR_BUDGET); + } + + public FeedbackAdaptiveRateLimiter create() { + return new FeedbackAdaptiveRateLimiter(refillInterval, backoffMultiplierIncrement, + backoffMultiplierDecrement, maxBackoffMultiplier, oversubscriptionIncrement, + oversubscriptionDecrement, maxOversubscription, utilizationErrorBudget); + } + } + + private volatile long nextRefillTime = -1L; + private final long refillInterval; + private final double backoffMultiplierIncrement; + private final double backoffMultiplierDecrement; + private final double maxBackoffMultiplier; + private final double oversubscriptionIncrement; + private final double oversubscriptionDecrement; + private final double maxOversubscription; + private final double minTargetUtilization; + private final double maxTargetUtilization; + + // Adaptive backoff state + private final AtomicDouble currentBackoffMultiplier = new AtomicDouble(1.0); + private volatile boolean hadContentionThisInterval = false; + + // Over-subscription proportion state + private final AtomicDouble oversubscriptionProportion = new AtomicDouble(0.0); + + // EWMA tracking + private final double emaAlpha; + private volatile double utilizationEma = 0.0; + private final AtomicLong lastIntervalConsumed; + + FeedbackAdaptiveRateLimiter(long refillInterval, double backoffMultiplierIncrement, + double backoffMultiplierDecrement, double maxBackoffMultiplier, + double oversubscriptionIncrement, double oversubscriptionDecrement, double maxOversubscription, + double utilizationErrorBudget) { + super(); + Preconditions.checkArgument(getTimeUnitInMillis() >= refillInterval, String.format( + "Refill interval %s must be ≤ TimeUnit millis %s", refillInterval, getTimeUnitInMillis())); + + Preconditions.checkArgument(backoffMultiplierIncrement > 0.0, + 
String.format("Backoff multiplier increment %s must be > 0.0", backoffMultiplierIncrement)); + Preconditions.checkArgument(backoffMultiplierDecrement > 0.0, + String.format("Backoff multiplier decrement %s must be > 0.0", backoffMultiplierDecrement)); + Preconditions.checkArgument(maxBackoffMultiplier > 1.0, + String.format("Max backoff multiplier %s must be > 1.0", maxBackoffMultiplier)); + Preconditions.checkArgument(utilizationErrorBudget > 0.0 && utilizationErrorBudget <= 1.0, + String.format("Utilization error budget %s must be between 0.0 and 1.0", + utilizationErrorBudget)); + + this.refillInterval = refillInterval; + this.backoffMultiplierIncrement = backoffMultiplierIncrement; + this.backoffMultiplierDecrement = backoffMultiplierDecrement; + this.maxBackoffMultiplier = maxBackoffMultiplier; + this.oversubscriptionIncrement = oversubscriptionIncrement; + this.oversubscriptionDecrement = oversubscriptionDecrement; + this.maxOversubscription = maxOversubscription; + this.minTargetUtilization = 1.0 - utilizationErrorBudget; + this.maxTargetUtilization = 1.0 + utilizationErrorBudget; + + this.emaAlpha = refillInterval / (double) (WINDOW_TIME_MS + refillInterval); + this.lastIntervalConsumed = new AtomicLong(0); + } + + @Override + public long refill(long limit) { + final long now = EnvironmentEdgeManager.currentTime(); + if (nextRefillTime == -1) { + nextRefillTime = now + refillInterval; + hadContentionThisInterval = false; + return getOversubscribedLimit(limit); + } + if (now < nextRefillTime) { + return 0; + } + long diff = refillInterval + now - nextRefillTime; + long refills = diff / refillInterval; + nextRefillTime = now + refillInterval; + + long intendedUsage = getRefillIntervalAdjustedLimit(limit); + if (intendedUsage > 0) { + long consumed = lastIntervalConsumed.get(); + if (consumed > 0) { + double util = (double) consumed / intendedUsage; + utilizationEma = emaAlpha * util + (1.0 - emaAlpha) * utilizationEma; + } + } + + if (hadContentionThisInterval) { + currentBackoffMultiplier.set(Math + .min(currentBackoffMultiplier.get() + backoffMultiplierIncrement, maxBackoffMultiplier)); + } else { + currentBackoffMultiplier + .set(Math.max(currentBackoffMultiplier.get() - backoffMultiplierDecrement, 1.0)); + } + + double avgUtil = utilizationEma; + if (avgUtil < minTargetUtilization) { + oversubscriptionProportion.set(Math + .min(oversubscriptionProportion.get() + oversubscriptionIncrement, maxOversubscription)); + } else if (avgUtil >= maxTargetUtilization) { + oversubscriptionProportion + .set(Math.max(oversubscriptionProportion.get() - oversubscriptionDecrement, 0.0)); + } + + hadContentionThisInterval = false; + lastIntervalConsumed.set(0); + + long refillAmount = refills * getRefillIntervalAdjustedLimit(limit); + long maxRefill = getOversubscribedLimit(limit); + return Math.min(maxRefill, refillAmount); + } + + private long getOversubscribedLimit(long limit) { + return limit + (long) (limit * oversubscriptionProportion.get()); + } + + @Override + public void consume(long amount) { + super.consume(amount); + lastIntervalConsumed.addAndGet(amount); + } + + @Override + public long getWaitInterval(long limit, long available, long amount) { + limit = getRefillIntervalAdjustedLimit(limit); + if (nextRefillTime == -1) { + return 0; + } + + final long now = EnvironmentEdgeManager.currentTime(); + final long refillTime = nextRefillTime; + long diff = amount - available; + if (diff > 0) { + hadContentionThisInterval = true; + } + + long nextInterval = refillTime - now; + if (diff <= 
limit) { + return applyBackoffMultiplier(nextInterval); + } + + long extra = diff / limit; + if (diff % limit == 0) { + extra--; + } + long baseWait = nextInterval + (extra * refillInterval); + return applyBackoffMultiplier(baseWait); + } + + private long getRefillIntervalAdjustedLimit(long limit) { + return (long) Math.ceil(refillInterval / (double) getTimeUnitInMillis() * limit); + } + + private long applyBackoffMultiplier(long baseWaitInterval) { + return (long) (baseWaitInterval * currentBackoffMultiplier.get()); + } + + // strictly for testing + @Override + public void setNextRefillTime(long nextRefillTime) { + this.nextRefillTime = nextRefillTime; + } + + @Override + public long getNextRefillTime() { + return this.nextRefillTime; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FixedIntervalRateLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FixedIntervalRateLimiter.java index c5b2fc7f5d83..a71b5d4b2fba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FixedIntervalRateLimiter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FixedIntervalRateLimiter.java @@ -20,8 +20,8 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; - -import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * With this limiter resources will be refilled only after a fixed interval of time. @@ -43,6 +43,8 @@ public class FixedIntervalRateLimiter extends RateLimiter { public static final String RATE_LIMITER_REFILL_INTERVAL_MS = "hbase.quota.rate.limiter.refill.interval.ms"; + private static final Logger LOG = LoggerFactory.getLogger(FixedIntervalRateLimiter.class); + private long nextRefillTime = -1L; private final long refillInterval; @@ -52,10 +54,14 @@ public FixedIntervalRateLimiter() { public FixedIntervalRateLimiter(long refillInterval) { super(); - Preconditions.checkArgument(getTimeUnitInMillis() >= refillInterval, - String.format("Refill interval %s must be less than or equal to TimeUnit millis %s", - refillInterval, getTimeUnitInMillis())); - this.refillInterval = refillInterval; + long timeUnit = getTimeUnitInMillis(); + if (refillInterval > timeUnit) { + LOG.warn( + "Refill interval {} is larger than time unit {}. This is invalid. 
" + + "Instead, we will use the time unit {} as the refill interval", + refillInterval, timeUnit, timeUnit); + } + this.refillInterval = Math.min(timeUnit, refillInterval); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java index fb1b6e4b0d96..e6144de2c777 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java @@ -19,30 +19,22 @@ import java.io.IOException; import java.time.Duration; -import java.util.ArrayList; import java.util.EnumSet; -import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.Set; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetrics.Option; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.RegionStatesCount; import org.apache.hadoop.hbase.ipc.RpcCall; import org.apache.hadoop.hbase.ipc.RpcServer; -import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.security.UserGroupInformation; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -73,18 +65,15 @@ public class QuotaCache implements Stoppable { public static final String QUOTA_USER_REQUEST_ATTRIBUTE_OVERRIDE_KEY = "hbase.quota.user.override.key"; private static final int REFRESH_DEFAULT_PERIOD = 43_200_000; // 12 hours - private static final int EVICT_PERIOD_FACTOR = 5; - // for testing purpose only, enforce the cache to be always refreshed - static boolean TEST_FORCE_REFRESH = false; - // for testing purpose only, block cache refreshes to reliably verify state - static boolean TEST_BLOCK_REFRESH = false; + private final Object initializerLock = new Object(); + private volatile boolean initialized = false; + + private volatile Map namespaceQuotaCache = new ConcurrentHashMap<>(); + private volatile Map tableQuotaCache = new ConcurrentHashMap<>(); + private volatile Map userQuotaCache = new ConcurrentHashMap<>(); + private volatile Map regionServerQuotaCache = new ConcurrentHashMap<>(); - private final ConcurrentMap namespaceQuotaCache = new ConcurrentHashMap<>(); - private final ConcurrentMap tableQuotaCache = new ConcurrentHashMap<>(); - private final ConcurrentMap userQuotaCache = new ConcurrentHashMap<>(); - private final ConcurrentMap regionServerQuotaCache = - new ConcurrentHashMap<>(); private volatile boolean exceedThrottleQuotaEnabled = false; // factors used to divide cluster scope quota into machine scope quota private volatile double machineQuotaFactor = 1; @@ -96,57 +85,6 @@ public class QuotaCache implements Stoppable { private QuotaRefresherChore refreshChore; private boolean stopped = true; - private final Fetcher userQuotaStateFetcher = new Fetcher<>() { - @Override - public Get makeGet(final String user) { - final Set namespaces = QuotaCache.this.namespaceQuotaCache.keySet(); - final 
Set tables = QuotaCache.this.tableQuotaCache.keySet(); - return QuotaUtil.makeGetForUserQuotas(user, tables, namespaces); - } - - @Override - public Map fetchEntries(final List gets) throws IOException { - return QuotaUtil.fetchUserQuotas(rsServices.getConnection(), gets, tableMachineQuotaFactors, - machineQuotaFactor); - } - }; - - private final Fetcher regionServerQuotaStateFetcher = new Fetcher<>() { - @Override - public Get makeGet(final String regionServer) { - return QuotaUtil.makeGetForRegionServerQuotas(regionServer); - } - - @Override - public Map fetchEntries(final List gets) throws IOException { - return QuotaUtil.fetchRegionServerQuotas(rsServices.getConnection(), gets); - } - }; - - private final Fetcher tableQuotaStateFetcher = new Fetcher<>() { - @Override - public Get makeGet(final TableName table) { - return QuotaUtil.makeGetForTableQuotas(table); - } - - @Override - public Map fetchEntries(final List gets) throws IOException { - return QuotaUtil.fetchTableQuotas(rsServices.getConnection(), gets, tableMachineQuotaFactors); - } - }; - - private final Fetcher namespaceQuotaStateFetcher = new Fetcher<>() { - @Override - public Get makeGet(final String namespace) { - return QuotaUtil.makeGetForNamespaceQuotas(namespace); - } - - @Override - public Map fetchEntries(final List gets) throws IOException { - return QuotaUtil.fetchNamespaceQuotas(rsServices.getConnection(), gets, machineQuotaFactor); - } - }; - public QuotaCache(final RegionServerServices rsServices) { this.rsServices = rsServices; this.userOverrideRequestAttributeKey = @@ -158,10 +96,8 @@ public void start() throws IOException { Configuration conf = rsServices.getConfiguration(); // Refresh the cache every 12 hours, and every time a quota is changed, and every time a - // configuration - // reload is triggered. Periodic reloads are kept to a minimum to avoid flooding the - // RegionServer - // holding the hbase:quota table with requests. + // configuration reload is triggered. Periodic reloads are kept to a minimum to avoid + // flooding the RegionServer holding the hbase:quota table with requests. int period = conf.getInt(REFRESH_CONF_KEY, REFRESH_DEFAULT_PERIOD); refreshChore = new QuotaRefresherChore(conf, period, this); rsServices.getChoreService().scheduleChore(refreshChore); @@ -181,6 +117,37 @@ public boolean isStopped() { return stopped; } + private void ensureInitialized() { + if (!initialized) { + synchronized (initializerLock) { + if (!initialized) { + refreshChore.chore(); + initialized = true; + } + } + } + } + + private Map fetchUserQuotaStateEntries() throws IOException { + return QuotaUtil.fetchUserQuotas(rsServices.getConfiguration(), rsServices.getConnection(), + tableMachineQuotaFactors, machineQuotaFactor); + } + + private Map fetchRegionServerQuotaStateEntries() throws IOException { + return QuotaUtil.fetchRegionServerQuotas(rsServices.getConfiguration(), + rsServices.getConnection()); + } + + private Map fetchTableQuotaStateEntries() throws IOException { + return QuotaUtil.fetchTableQuotas(rsServices.getConfiguration(), rsServices.getConnection(), + tableMachineQuotaFactors); + } + + private Map fetchNamespaceQuotaStateEntries() throws IOException { + return QuotaUtil.fetchNamespaceQuotas(rsServices.getConfiguration(), rsServices.getConnection(), + machineQuotaFactor); + } + /** * Returns the limiter associated to the specified user/table. 
* @param ugi the user to limit @@ -201,12 +168,13 @@ public QuotaLimiter getUserLimiter(final UserGroupInformation ugi, final TableNa */ public UserQuotaState getUserQuotaState(final UserGroupInformation ugi) { String user = getQuotaUserName(ugi); - if (!userQuotaCache.containsKey(user)) { - userQuotaCache.put(user, - QuotaUtil.buildDefaultUserQuotaState(rsServices.getConfiguration(), 0L)); - fetch("user", userQuotaCache, userQuotaStateFetcher); + ensureInitialized(); + // local reference because the chore thread may assign to userQuotaCache + Map cache = userQuotaCache; + if (!cache.containsKey(user)) { + cache.put(user, QuotaUtil.buildDefaultUserQuotaState(rsServices.getConfiguration())); } - return userQuotaCache.get(user); + return cache.get(user); } /** @@ -215,11 +183,13 @@ public UserQuotaState getUserQuotaState(final UserGroupInformation ugi) { * @return the limiter associated to the specified table */ public QuotaLimiter getTableLimiter(final TableName table) { - if (!tableQuotaCache.containsKey(table)) { - tableQuotaCache.put(table, new QuotaState()); - fetch("table", tableQuotaCache, tableQuotaStateFetcher); + ensureInitialized(); + // local reference because the chore thread may assign to tableQuotaCache + Map cache = tableQuotaCache; + if (!cache.containsKey(table)) { + cache.put(table, new QuotaState()); } - return tableQuotaCache.get(table).getGlobalLimiter(); + return cache.get(table).getGlobalLimiter(); } /** @@ -228,11 +198,13 @@ public QuotaLimiter getTableLimiter(final TableName table) { * @return the limiter associated to the specified namespace */ public QuotaLimiter getNamespaceLimiter(final String namespace) { - if (!namespaceQuotaCache.containsKey(namespace)) { - namespaceQuotaCache.put(namespace, new QuotaState()); - fetch("namespace", namespaceQuotaCache, namespaceQuotaStateFetcher); + ensureInitialized(); + // local reference because the chore thread may assign to namespaceQuotaCache + Map cache = namespaceQuotaCache; + if (!cache.containsKey(namespace)) { + cache.put(namespace, new QuotaState()); } - return namespaceQuotaCache.get(namespace).getGlobalLimiter(); + return cache.get(namespace).getGlobalLimiter(); } /** @@ -241,47 +213,25 @@ public QuotaLimiter getNamespaceLimiter(final String namespace) { * @return the limiter associated to the specified region server */ public QuotaLimiter getRegionServerQuotaLimiter(final String regionServer) { - if (!regionServerQuotaCache.containsKey(regionServer)) { - regionServerQuotaCache.put(regionServer, new QuotaState()); - fetch("regionServer", regionServerQuotaCache, regionServerQuotaStateFetcher); + ensureInitialized(); + // local reference because the chore thread may assign to regionServerQuotaCache + Map cache = regionServerQuotaCache; + if (!cache.containsKey(regionServer)) { + cache.put(regionServer, new QuotaState()); } - return regionServerQuotaCache.get(regionServer).getGlobalLimiter(); + return cache.get(regionServer).getGlobalLimiter(); } protected boolean isExceedThrottleQuotaEnabled() { return exceedThrottleQuotaEnabled; } - private void fetch(final String type, final Map quotasMap, - final Fetcher fetcher) { - // Find the quota entries to update - List gets = quotasMap.keySet().stream().map(fetcher::makeGet).collect(Collectors.toList()); - - // fetch and update the quota entries - if (!gets.isEmpty()) { - try { - for (Map.Entry entry : fetcher.fetchEntries(gets).entrySet()) { - V quotaInfo = quotasMap.putIfAbsent(entry.getKey(), entry.getValue()); - if (quotaInfo != null) { - 
quotaInfo.update(entry.getValue()); - } - - if (LOG.isTraceEnabled()) { - LOG.trace("Loading {} key={} quotas={}", type, entry.getKey(), quotaInfo); - } - } - } catch (IOException e) { - LOG.warn("Unable to read {} from quota table", type, e); - } - } - } - /** * Applies a request attribute user override if available, otherwise returns the UGI's short * username * @param ugi The request's UserGroupInformation */ - private String getQuotaUserName(final UserGroupInformation ugi) { + String getQuotaUserName(final UserGroupInformation ugi) { if (userOverrideRequestAttributeKey == null) { return ugi.getShortUserName(); } @@ -306,18 +256,22 @@ void forceSynchronousCacheRefresh() { refreshChore.chore(); } + /** visible for testing */ Map getNamespaceQuotaCache() { return namespaceQuotaCache; } + /** visible for testing */ Map getRegionServerQuotaCache() { return regionServerQuotaCache; } + /** visible for testing */ Map getTableQuotaCache() { return tableQuotaCache; } + /** visible for testing */ Map getUserQuotaCache() { return userQuotaCache; } @@ -354,39 +308,49 @@ public synchronized boolean triggerNow() { } @Override - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "GC_UNRELATED_TYPES", - justification = "I do not understand why the complaints, it looks good to me -- FIX") protected void chore() { - while (TEST_BLOCK_REFRESH) { - LOG.info("TEST_BLOCK_REFRESH=true, so blocking QuotaCache refresh until it is false"); + synchronized (this) { + LOG.info("Reloading quota cache from hbase:quota table"); + updateQuotaFactors(); + try { - Thread.sleep(10); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - } - // Prefetch online tables/namespaces - for (TableName table : ((HRegionServer) QuotaCache.this.rsServices).getOnlineTables()) { - if (table.isSystemTable()) { - continue; + Map newUserQuotaCache = + new ConcurrentHashMap<>(fetchUserQuotaStateEntries()); + updateNewCacheFromOld(userQuotaCache, newUserQuotaCache); + userQuotaCache = newUserQuotaCache; + } catch (IOException e) { + LOG.error("Error while fetching user quotas", e); } - QuotaCache.this.tableQuotaCache.computeIfAbsent(table, key -> new QuotaState()); - final String ns = table.getNamespaceAsString(); + try { + Map newRegionServerQuotaCache = + new ConcurrentHashMap<>(fetchRegionServerQuotaStateEntries()); + updateNewCacheFromOld(regionServerQuotaCache, newRegionServerQuotaCache); + regionServerQuotaCache = newRegionServerQuotaCache; + } catch (IOException e) { + LOG.error("Error while fetching region server quotas", e); + } - QuotaCache.this.namespaceQuotaCache.computeIfAbsent(ns, key -> new QuotaState()); - } + try { + Map newTableQuotaCache = + new ConcurrentHashMap<>(fetchTableQuotaStateEntries()); + updateNewCacheFromOld(tableQuotaCache, newTableQuotaCache); + tableQuotaCache = newTableQuotaCache; + } catch (IOException e) { + LOG.error("Error while refreshing table quotas", e); + } - QuotaCache.this.regionServerQuotaCache - .computeIfAbsent(QuotaTableUtil.QUOTA_REGION_SERVER_ROW_KEY, key -> new QuotaState()); + try { + Map newNamespaceQuotaCache = + new ConcurrentHashMap<>(fetchNamespaceQuotaStateEntries()); + updateNewCacheFromOld(namespaceQuotaCache, newNamespaceQuotaCache); + namespaceQuotaCache = newNamespaceQuotaCache; + } catch (IOException e) { + LOG.error("Error while refreshing namespace quotas", e); + } - updateQuotaFactors(); - fetchAndEvict("namespace", QuotaCache.this.namespaceQuotaCache, namespaceQuotaStateFetcher); - fetchAndEvict("table", QuotaCache.this.tableQuotaCache, 
tableQuotaStateFetcher); - fetchAndEvict("user", QuotaCache.this.userQuotaCache, userQuotaStateFetcher); - fetchAndEvict("regionServer", QuotaCache.this.regionServerQuotaCache, - regionServerQuotaStateFetcher); - fetchExceedThrottleQuota(); + fetchExceedThrottleQuota(); + } } private void fetchExceedThrottleQuota() { @@ -398,48 +362,6 @@ private void fetchExceedThrottleQuota() { } } - private void fetchAndEvict(final String type, - final ConcurrentMap quotasMap, final Fetcher fetcher) { - long now = EnvironmentEdgeManager.currentTime(); - long evictPeriod = getPeriod() * EVICT_PERIOD_FACTOR; - // Find the quota entries to update - List gets = new ArrayList<>(); - List toRemove = new ArrayList<>(); - for (Map.Entry entry : quotasMap.entrySet()) { - long lastQuery = entry.getValue().getLastQuery(); - if (lastQuery > 0 && (now - lastQuery) >= evictPeriod) { - toRemove.add(entry.getKey()); - } else { - gets.add(fetcher.makeGet(entry.getKey())); - } - } - - for (final K key : toRemove) { - if (LOG.isTraceEnabled()) { - LOG.trace("evict " + type + " key=" + key); - } - quotasMap.remove(key); - } - - // fetch and update the quota entries - if (!gets.isEmpty()) { - try { - for (Map.Entry entry : fetcher.fetchEntries(gets).entrySet()) { - V quotaInfo = quotasMap.putIfAbsent(entry.getKey(), entry.getValue()); - if (quotaInfo != null) { - quotaInfo.update(entry.getValue()); - } - - if (LOG.isTraceEnabled()) { - LOG.trace("refresh " + type + " key=" + entry.getKey() + " quotas=" + quotaInfo); - } - } - } catch (IOException e) { - LOG.warn("Unable to read " + type + " from quota table", e); - } - } - } - /** * Update quota factors which is used to divide cluster scope quota into machine scope quota For * user/namespace/user over namespace quota, use [1 / RSNum] as machine factor. 
For table/user @@ -515,6 +437,20 @@ private void updateMachineQuotaFactors(int rsSize) { } } + /** visible for testing */ + static void updateNewCacheFromOld(Map oldCache, + Map newCache) { + for (Map.Entry entry : oldCache.entrySet()) { + K key = entry.getKey(); + if (newCache.containsKey(key)) { + V newState = newCache.get(key); + V oldState = entry.getValue(); + oldState.update(newState); + newCache.put(key, oldState); + } + } + } + static class RefreshableExpiringValueCache { private final String name; private final LoadingCache> cache; @@ -555,9 +491,4 @@ static interface ThrowingSupplier { T get() throws Exception; } - interface Fetcher { - Get makeGet(Key key); - - Map fetchEntries(List gets) throws IOException; - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiterFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiterFactory.java index 762896773fc7..63d8df65d25d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiterFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiterFactory.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.quotas; +import org.apache.hadoop.conf.Configuration; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -25,8 +26,8 @@ @InterfaceAudience.Private @InterfaceStability.Evolving public class QuotaLimiterFactory { - public static QuotaLimiter fromThrottle(final Throttle throttle) { - return TimeBasedLimiter.fromThrottle(throttle); + public static QuotaLimiter fromThrottle(Configuration conf, final Throttle throttle) { + return TimeBasedLimiter.fromThrottle(conf, throttle); } public static QuotaLimiter update(final QuotaLimiter a, final QuotaLimiter b) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaState.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaState.java index 7c9445e15587..4a0b634abec5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaState.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaState.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.quotas; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.conf.Configuration; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -32,33 +32,14 @@ justification = "FindBugs seems confused; says globalLimiter and lastUpdate " + "are mostly synchronized...but to me it looks like they are totally synchronized") public class QuotaState { - protected long lastUpdate = 0; - protected long lastQuery = 0; - protected QuotaLimiter globalLimiter = NoopQuotaLimiter.get(); - public QuotaState() { - this(0); - } - - public QuotaState(final long updateTs) { - lastUpdate = updateTs; - } - - public synchronized long getLastUpdate() { - return lastUpdate; - } - - public synchronized long getLastQuery() { - return lastQuery; - } - @Override public synchronized String toString() { StringBuilder builder = new StringBuilder(); - builder.append("QuotaState(ts=" + getLastUpdate()); + builder.append("QuotaState("); if (isBypass()) { - builder.append(" bypass"); + builder.append("bypass"); } else { if (globalLimiter != NoopQuotaLimiter.get()) { // builder.append(" global-limiter"); @@ -77,14 +58,19 @@ public synchronized boolean isBypass() { /** * Setup the global quota information. 
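The updateNewCacheFromOld helper introduced above keeps the existing state objects alive across a refresh: freshly fetched settings are folded into the old object, and that same old object is what ends up in the new map, so limiters referenced by in-flight requests are not reset. A generic sketch of that merge (the Updatable bound is a simplification standing in for QuotaState#update) is:

// Sketch of the cache-merge step: retain the old value object but apply the new
// settings to it, then carry it over into the freshly built cache.
import java.util.Map;

final class CacheMerge {
  interface Updatable<V> {
    void update(V other);
  }

  static <K, V extends Updatable<V>> void updateNewCacheFromOld(Map<K, V> oldCache,
      Map<K, V> newCache) {
    for (Map.Entry<K, V> entry : oldCache.entrySet()) {
      V fresh = newCache.get(entry.getKey());
      if (fresh != null) {
        V old = entry.getValue();
        old.update(fresh);                 // fold the new limits into the existing object
        newCache.put(entry.getKey(), old); // keep that object in the new cache
      }
    }
  }
}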
(This operation is part of the QuotaState setup) */ - public synchronized void setQuotas(final Quotas quotas) { + public synchronized void setQuotas(Configuration conf, final Quotas quotas) { if (quotas.hasThrottle()) { - globalLimiter = QuotaLimiterFactory.fromThrottle(quotas.getThrottle()); + globalLimiter = QuotaLimiterFactory.fromThrottle(conf, quotas.getThrottle()); } else { globalLimiter = NoopQuotaLimiter.get(); } } + /** visible for testing */ + void setGlobalLimiter(QuotaLimiter globalLimiter) { + this.globalLimiter = globalLimiter; + } + /** * Perform an update of the quota info based on the other quota info object. (This operation is * executed by the QuotaCache) @@ -97,7 +83,6 @@ public synchronized void update(final QuotaState other) { } else { globalLimiter = QuotaLimiterFactory.update(globalLimiter, other.globalLimiter); } - lastUpdate = other.lastUpdate; } /** @@ -105,15 +90,7 @@ public synchronized void update(final QuotaState other) { * @return the quota limiter */ public synchronized QuotaLimiter getGlobalLimiter() { - lastQuery = EnvironmentEdgeManager.currentTime(); return globalLimiter; } - /** - * Return the limiter associated with this quota without updating internal last query stats - * @return the quota limiter - */ - synchronized QuotaLimiter getGlobalLimiterWithoutUpdatingLastQuery() { - return globalLimiter; - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java index 687522783832..8497f861f70c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java @@ -38,12 +38,13 @@ import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -329,60 +330,58 @@ private static void deleteQuotas(final Connection connection, final byte[] rowKe doDelete(connection, delete); } - public static Map fetchUserQuotas(final Connection connection, - final List gets, Map tableMachineQuotaFactors, double factor) + public static Map fetchUserQuotas(final Configuration conf, + final Connection connection, Map tableMachineQuotaFactors, double factor) throws IOException { - long nowTs = EnvironmentEdgeManager.currentTime(); - Result[] results = doGet(connection, gets); - - Map userQuotas = new HashMap<>(results.length); - for (int i = 0; i < results.length; ++i) { - byte[] key = gets.get(i).getRow(); - assert isUserRowKey(key); - String user = getUserFromRowKey(key); - - if (results[i].isEmpty()) { - userQuotas.put(user, buildDefaultUserQuotaState(connection.getConfiguration(), nowTs)); - continue; - } - - final UserQuotaState quotaInfo = new UserQuotaState(nowTs); - userQuotas.put(user, quotaInfo); - - assert Bytes.equals(key, results[i].getRow()); - - try { - parseUserResult(user, results[i], new UserQuotasVisitor() { - @Override - public 
void visitUserQuotas(String userName, String namespace, Quotas quotas) { - quotas = updateClusterQuotaToMachineQuota(quotas, factor); - quotaInfo.setQuotas(namespace, quotas); - } - - @Override - public void visitUserQuotas(String userName, TableName table, Quotas quotas) { - quotas = updateClusterQuotaToMachineQuota(quotas, - tableMachineQuotaFactors.containsKey(table) - ? tableMachineQuotaFactors.get(table) - : 1); - quotaInfo.setQuotas(table, quotas); - } - - @Override - public void visitUserQuotas(String userName, Quotas quotas) { - quotas = updateClusterQuotaToMachineQuota(quotas, factor); - quotaInfo.setQuotas(quotas); + Map userQuotas = new HashMap<>(); + try (Table table = connection.getTable(QUOTA_TABLE_NAME)) { + Scan scan = new Scan(); + scan.addFamily(QUOTA_FAMILY_INFO); + scan.setStartStopRowForPrefixScan(QUOTA_USER_ROW_KEY_PREFIX); + try (ResultScanner resultScanner = table.getScanner(scan)) { + for (Result result : resultScanner) { + byte[] key = result.getRow(); + assert isUserRowKey(key); + String user = getUserFromRowKey(key); + + final UserQuotaState quotaInfo = new UserQuotaState(); + userQuotas.put(user, quotaInfo); + + try { + parseUserResult(user, result, new UserQuotasVisitor() { + @Override + public void visitUserQuotas(String userName, String namespace, Quotas quotas) { + quotas = updateClusterQuotaToMachineQuota(quotas, factor); + quotaInfo.setQuotas(conf, namespace, quotas); + } + + @Override + public void visitUserQuotas(String userName, TableName table, Quotas quotas) { + quotas = updateClusterQuotaToMachineQuota(quotas, + tableMachineQuotaFactors.containsKey(table) + ? tableMachineQuotaFactors.get(table) + : 1); + quotaInfo.setQuotas(conf, table, quotas); + } + + @Override + public void visitUserQuotas(String userName, Quotas quotas) { + quotas = updateClusterQuotaToMachineQuota(quotas, factor); + quotaInfo.setQuotas(conf, quotas); + } + }); + } catch (IOException e) { + LOG.error("Unable to parse user '" + user + "' quotas", e); + userQuotas.remove(user); } - }); - } catch (IOException e) { - LOG.error("Unable to parse user '" + user + "' quotas", e); - userQuotas.remove(user); + } } } + return userQuotas; } - protected static UserQuotaState buildDefaultUserQuotaState(Configuration conf, long nowTs) { + protected static UserQuotaState buildDefaultUserQuotaState(Configuration conf) { QuotaProtos.Throttle.Builder throttleBuilder = QuotaProtos.Throttle.newBuilder(); buildDefaultTimedQuota(conf, QUOTA_DEFAULT_USER_MACHINE_READ_NUM) @@ -406,10 +405,10 @@ protected static UserQuotaState buildDefaultUserQuotaState(Configuration conf, l buildDefaultTimedQuota(conf, QUOTA_DEFAULT_USER_MACHINE_REQUEST_HANDLER_USAGE_MS) .ifPresent(throttleBuilder::setReqHandlerUsageMs); - UserQuotaState state = new UserQuotaState(nowTs); + UserQuotaState state = new UserQuotaState(); QuotaProtos.Quotas defaultQuotas = QuotaProtos.Quotas.newBuilder().setThrottle(throttleBuilder.build()).build(); - state.setQuotas(defaultQuotas); + state.setQuotas(conf, defaultQuotas); return state; } @@ -422,9 +421,12 @@ private static Optional buildDefaultTimedQuota(Configuration conf, S java.util.concurrent.TimeUnit.SECONDS, org.apache.hadoop.hbase.quotas.QuotaScope.MACHINE)); } - public static Map fetchTableQuotas(final Connection connection, - final List gets, Map tableMachineFactors) throws IOException { - return fetchGlobalQuotas("table", connection, gets, new KeyFromRow() { + public static Map fetchTableQuotas(final Configuration conf, + final Connection connection, Map tableMachineFactors) 
throws IOException { + Scan scan = new Scan(); + scan.addFamily(QUOTA_FAMILY_INFO); + scan.setStartStopRowForPrefixScan(QUOTA_TABLE_ROW_KEY_PREFIX); + return fetchGlobalQuotas(conf, "table", scan, connection, new KeyFromRow() { @Override public TableName getKeyFromRow(final byte[] row) { assert isTableRowKey(row); @@ -438,9 +440,12 @@ public double getFactor(TableName tableName) { }); } - public static Map fetchNamespaceQuotas(final Connection connection, - final List gets, double factor) throws IOException { - return fetchGlobalQuotas("namespace", connection, gets, new KeyFromRow() { + public static Map fetchNamespaceQuotas(final Configuration conf, + final Connection connection, double factor) throws IOException { + Scan scan = new Scan(); + scan.addFamily(QUOTA_FAMILY_INFO); + scan.setStartStopRowForPrefixScan(QUOTA_NAMESPACE_ROW_KEY_PREFIX); + return fetchGlobalQuotas(conf, "namespace", scan, connection, new KeyFromRow() { @Override public String getKeyFromRow(final byte[] row) { assert isNamespaceRowKey(row); @@ -454,9 +459,12 @@ public double getFactor(String s) { }); } - public static Map fetchRegionServerQuotas(final Connection connection, - final List gets) throws IOException { - return fetchGlobalQuotas("regionServer", connection, gets, new KeyFromRow() { + public static Map fetchRegionServerQuotas(final Configuration conf, + final Connection connection) throws IOException { + Scan scan = new Scan(); + scan.addFamily(QUOTA_FAMILY_INFO); + scan.setStartStopRowForPrefixScan(QUOTA_REGION_SERVER_ROW_KEY_PREFIX); + return fetchGlobalQuotas(conf, "regionServer", scan, connection, new KeyFromRow() { @Override public String getKeyFromRow(final byte[] row) { assert isRegionServerRowKey(row); @@ -470,32 +478,35 @@ public double getFactor(String s) { }); } - public static Map fetchGlobalQuotas(final String type, - final Connection connection, final List gets, final KeyFromRow kfr) throws IOException { - long nowTs = EnvironmentEdgeManager.currentTime(); - Result[] results = doGet(connection, gets); + public static Map fetchGlobalQuotas(final Configuration conf, + final String type, final Scan scan, final Connection connection, final KeyFromRow kfr) + throws IOException { - Map globalQuotas = new HashMap<>(results.length); - for (int i = 0; i < results.length; ++i) { - byte[] row = gets.get(i).getRow(); - K key = kfr.getKeyFromRow(row); + Map globalQuotas = new HashMap<>(); + try (Table table = connection.getTable(QUOTA_TABLE_NAME)) { + try (ResultScanner resultScanner = table.getScanner(scan)) { + for (Result result : resultScanner) { - QuotaState quotaInfo = new QuotaState(nowTs); - globalQuotas.put(key, quotaInfo); + byte[] row = result.getRow(); + K key = kfr.getKeyFromRow(row); - if (results[i].isEmpty()) continue; - assert Bytes.equals(row, results[i].getRow()); + QuotaState quotaInfo = new QuotaState(); + globalQuotas.put(key, quotaInfo); - byte[] data = results[i].getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); - if (data == null) continue; + byte[] data = result.getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); + if (data == null) { + continue; + } - try { - Quotas quotas = quotasFromData(data); - quotas = updateClusterQuotaToMachineQuota(quotas, kfr.getFactor(key)); - quotaInfo.setQuotas(quotas); - } catch (IOException e) { - LOG.error("Unable to parse " + type + " '" + key + "' quotas", e); - globalQuotas.remove(key); + try { + Quotas quotas = quotasFromData(data); + quotas = updateClusterQuotaToMachineQuota(quotas, kfr.getFactor(key)); + quotaInfo.setQuotas(conf, 
quotas); + } catch (IOException e) { + LOG.error("Unable to parse {} '{}' quotas", type, key, e); + globalQuotas.remove(key); + } + } } } return globalQuotas; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java index 958793dcdf00..34fc57cb0814 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java @@ -91,7 +91,9 @@ public void stop() { } public void reload() { - quotaCache.forceSynchronousCacheRefresh(); + if (isQuotaEnabled()) { + quotaCache.forceSynchronousCacheRefresh(); + } } @Override @@ -197,7 +199,7 @@ public OperationQuota checkScanQuota(final Region region, LOG.debug("Throttling exception for user=" + ugi.getUserName() + " table=" + table + " scan=" + scanRequest.getScannerId() + ": " + e.getMessage()); - rsServices.getMetrics().recordThrottleException(e.getType(), ugi.getShortUserName(), + rsServices.getMetrics().recordThrottleException(e.getType(), quotaCache.getQuotaUserName(ugi), table.getNameAsString()); throw e; @@ -274,7 +276,7 @@ public OperationQuota checkBatchQuota(final Region region, final int numWrites, LOG.debug("Throttling exception for user=" + ugi.getUserName() + " table=" + table + " numWrites=" + numWrites + " numReads=" + numReads + ": " + e.getMessage()); - rsServices.getMetrics().recordThrottleException(e.getType(), ugi.getShortUserName(), + rsServices.getMetrics().recordThrottleException(e.getType(), quotaCache.getQuotaUserName(ugi), table.getNameAsString()); throw e; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java index 232471092c29..38d171f1bf9a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.quotas; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -32,7 +31,6 @@ @InterfaceAudience.Private @InterfaceStability.Evolving public class TimeBasedLimiter implements QuotaLimiter { - private static final Configuration conf = HBaseConfiguration.create(); private RateLimiter reqsLimiter = null; private RateLimiter reqSizeLimiter = null; private RateLimiter writeReqsLimiter = null; @@ -47,12 +45,11 @@ public class TimeBasedLimiter implements QuotaLimiter { private RateLimiter atomicWriteSizeLimiter = null; private RateLimiter reqHandlerUsageTimeLimiter = null; - private TimeBasedLimiter() { - if ( - FixedIntervalRateLimiter.class.getName().equals( - conf.getClass(RateLimiter.QUOTA_RATE_LIMITER_CONF_KEY, AverageIntervalRateLimiter.class) - .getName()) - ) { + private TimeBasedLimiter(Configuration conf) { + String limiterClassName = + conf.getClass(RateLimiter.QUOTA_RATE_LIMITER_CONF_KEY, AverageIntervalRateLimiter.class) + .getName(); + if (FixedIntervalRateLimiter.class.getName().equals(limiterClassName)) { long refillInterval = conf.getLong(FixedIntervalRateLimiter.RATE_LIMITER_REFILL_INTERVAL_MS, RateLimiter.DEFAULT_TIME_UNIT); reqsLimiter = new FixedIntervalRateLimiter(refillInterval); @@ -68,6 +65,22 @@ private 
TimeBasedLimiter() { atomicReadSizeLimiter = new FixedIntervalRateLimiter(refillInterval); atomicWriteSizeLimiter = new FixedIntervalRateLimiter(refillInterval); reqHandlerUsageTimeLimiter = new FixedIntervalRateLimiter(refillInterval); + } else if (FeedbackAdaptiveRateLimiter.class.getName().equals(limiterClassName)) { + FeedbackAdaptiveRateLimiter.FeedbackAdaptiveRateLimiterFactory feedbackLimiterFactory = + new FeedbackAdaptiveRateLimiter.FeedbackAdaptiveRateLimiterFactory(conf); + reqsLimiter = feedbackLimiterFactory.create(); + reqSizeLimiter = feedbackLimiterFactory.create(); + writeReqsLimiter = feedbackLimiterFactory.create(); + writeSizeLimiter = feedbackLimiterFactory.create(); + readReqsLimiter = feedbackLimiterFactory.create(); + readSizeLimiter = feedbackLimiterFactory.create(); + reqCapacityUnitLimiter = feedbackLimiterFactory.create(); + writeCapacityUnitLimiter = feedbackLimiterFactory.create(); + readCapacityUnitLimiter = feedbackLimiterFactory.create(); + atomicReqLimiter = feedbackLimiterFactory.create(); + atomicReadSizeLimiter = feedbackLimiterFactory.create(); + atomicWriteSizeLimiter = feedbackLimiterFactory.create(); + reqHandlerUsageTimeLimiter = feedbackLimiterFactory.create(); } else { reqsLimiter = new AverageIntervalRateLimiter(); reqSizeLimiter = new AverageIntervalRateLimiter(); @@ -85,8 +98,8 @@ private TimeBasedLimiter() { } } - static QuotaLimiter fromThrottle(final Throttle throttle) { - TimeBasedLimiter limiter = new TimeBasedLimiter(); + static QuotaLimiter fromThrottle(Configuration conf, final Throttle throttle) { + TimeBasedLimiter limiter = new TimeBasedLimiter(conf); boolean isBypass = true; if (throttle.hasReqNum()) { setFromTimedQuota(limiter.reqsLimiter, throttle.getReqNum()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java index a3ec97994363..0704e869239b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java @@ -21,8 +21,8 @@ import java.util.HashSet; import java.util.Map; import java.util.Set; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -42,24 +42,18 @@ public class UserQuotaState extends QuotaState { private Map tableLimiters = null; private boolean bypassGlobals = false; - public UserQuotaState() { - super(); - } - - public UserQuotaState(final long updateTs) { - super(updateTs); - } - @Override public synchronized String toString() { StringBuilder builder = new StringBuilder(); - builder.append("UserQuotaState(ts=" + getLastUpdate()); - if (bypassGlobals) builder.append(" bypass-globals"); + builder.append("UserQuotaState("); + if (bypassGlobals) { + builder.append("bypass-globals"); + } if (isBypass()) { builder.append(" bypass"); } else { - if (getGlobalLimiterWithoutUpdatingLastQuery() != NoopQuotaLimiter.get()) { + if (getGlobalLimiter() != NoopQuotaLimiter.get()) { builder.append(" global-limiter"); } @@ -86,7 +80,7 @@ public synchronized String toString() { /** Returns true if there is no quota information associated to this object */ @Override public synchronized boolean isBypass() { - return !bypassGlobals && getGlobalLimiterWithoutUpdatingLastQuery() == NoopQuotaLimiter.get() + 
return !bypassGlobals && getGlobalLimiter() == NoopQuotaLimiter.get() && (tableLimiters == null || tableLimiters.isEmpty()) && (namespaceLimiters == null || namespaceLimiters.isEmpty()); } @@ -96,8 +90,8 @@ public synchronized boolean hasBypassGlobals() { } @Override - public synchronized void setQuotas(final Quotas quotas) { - super.setQuotas(quotas); + public synchronized void setQuotas(Configuration conf, final Quotas quotas) { + super.setQuotas(conf, quotas); bypassGlobals = quotas.getBypassGlobals(); } @@ -105,30 +99,30 @@ public synchronized void setQuotas(final Quotas quotas) { * Add the quota information of the specified table. (This operation is part of the QuotaState * setup) */ - public synchronized void setQuotas(final TableName table, Quotas quotas) { - tableLimiters = setLimiter(tableLimiters, table, quotas); + public synchronized void setQuotas(Configuration conf, final TableName table, Quotas quotas) { + tableLimiters = setLimiter(conf, tableLimiters, table, quotas); } /** * Add the quota information of the specified namespace. (This operation is part of the QuotaState * setup) */ - public void setQuotas(final String namespace, Quotas quotas) { - namespaceLimiters = setLimiter(namespaceLimiters, namespace, quotas); + public void setQuotas(Configuration conf, final String namespace, Quotas quotas) { + namespaceLimiters = setLimiter(conf, namespaceLimiters, namespace, quotas); } public boolean hasTableLimiters() { return tableLimiters != null && !tableLimiters.isEmpty(); } - private Map setLimiter(Map limiters, final K key, - final Quotas quotas) { + private Map setLimiter(Configuration conf, Map limiters, + final K key, final Quotas quotas) { if (limiters == null) { limiters = new HashMap<>(); } QuotaLimiter limiter = - quotas.hasThrottle() ? QuotaLimiterFactory.fromThrottle(quotas.getThrottle()) : null; + quotas.hasThrottle() ? QuotaLimiterFactory.fromThrottle(conf, quotas.getThrottle()) : null; if (limiter != null && !limiter.isBypass()) { limiters.put(key, limiter); } else { @@ -191,7 +185,6 @@ private static Map updateLimiters(final Map columnFamilies; @Override - protected void doCall() throws Exception { + protected byte[] doCall() throws Exception { HRegion region = rs.getRegion(regionInfo.getEncodedName()); if (region == null) { throw new NotServingRegionException("region=" + regionInfo.getRegionNameAsString()); @@ -64,6 +64,7 @@ protected void doCall() throws Exception { LOG.debug("Closing region operation on {}", region); region.closeRegionOperation(); } + return null; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 7936197ff8d8..9b7daee0f668 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -5752,6 +5752,15 @@ private long replayRecoveredEdits(final Path edits, Map maxSeqIdIn currentReplaySeqId = (key.getOrigLogSeqNum() > 0) ? key.getOrigLogSeqNum() : currentEditSeqId; + // Start coprocessor replay here. The coprocessor is for each WALEdit + // instead of a KeyValue. + if (coprocessorHost != null) { + status.setStatus("Running pre-WAL-restore hook in coprocessors"); + if (coprocessorHost.preWALRestore(this.getRegionInfo(), key, val)) { + // if bypass this wal entry, ignore it ... + continue; + } + } boolean checkRowWithinBoundary = false; // Check this edit is for this region. 
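The replay loop above reintroduces the pre/post WAL-restore coprocessor hooks around each replayed edit, with the pre-hook able to bypass an edit entirely. A stripped-down sketch of that bypassable-hook control flow (Observer and the Object key/edit pair are placeholders, not the real RegionObserver/WALKey/WALEdit types) is:

// Placeholder sketch of the bypassable pre/post hook pattern applied per WALEdit.
import java.util.List;

final class ReplayHooks {
  interface Observer {
    boolean preRestore(Object key, Object edit);   // true => bypass this edit
    void postRestore(Object key, Object edit);
  }

  static int replay(List<Object[]> entries, Observer observer) {
    int applied = 0;
    for (Object[] entry : entries) {
      Object key = entry[0];
      Object edit = entry[1];
      if (observer != null && observer.preRestore(key, edit)) {
        continue;                       // coprocessor asked to skip this edit
      }
      applied++;                        // ... the real code applies the edit here ...
      if (observer != null) {
        observer.postRestore(key, edit);
      }
    }
    return applied;
  }
}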
if ( @@ -5822,6 +5831,10 @@ private long replayRecoveredEdits(final Path edits, Map maxSeqIdIn internalFlushcache(null, currentEditSeqId, stores.values(), status, false, FlushLifeCycleTracker.DUMMY); } + + if (coprocessorHost != null) { + coprocessorHost.postWALRestore(this.getRegionInfo(), key, val); + } } if (coprocessorHost != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java index 7f766c558409..a1bf09cba5d0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java @@ -764,7 +764,10 @@ private static void writeRegionInfoFileContent(final Configuration conf, final F // First check to get the permissions FsPermission perms = CommonFSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY); // Write the RegionInfo file content - try (FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null)) { + // HBASE-29662: Fail .regioninfo file creation, if the region directory doesn't exist, + // avoiding silent masking of missing region directories during region initialization. + // The region directory should already exist when this method is called. + try (FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null, false)) { out.write(content); } } @@ -848,6 +851,14 @@ private void writeRegionInfoOnFilesystem(final byte[] regionInfoContent, final b CommonFSUtils.delete(fs, tmpPath, true); } + // Check parent (region) directory exists first to maintain HBASE-29662 protection + if (!fs.exists(getRegionDir())) { + throw new IOException("Region directory does not exist: " + getRegionDir()); + } + if (!fs.exists(getTempDir())) { + fs.mkdirs(getTempDir()); + } + // Write HRI to a file in case we need to recover hbase:meta writeRegionInfoFileContent(conf, fs, tmpPath, regionInfoContent); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 350baca36f46..cd49ceb753ea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1969,6 +1969,9 @@ executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_SNAPSHOT_OP executorService.startExecutorService( executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_RELOAD_QUOTAS_OPERATIONS) .setCorePoolSize(rsRefreshQuotasThreads)); + final int logRollThreads = conf.getInt("hbase.regionserver.executor.log.roll.threads", 1); + executorService.startExecutorService(executorService.new ExecutorConfig() + .setExecutorType(ExecutorType.RS_LOG_ROLL).setCorePoolSize(logRollThreads)); Threads.setDaemonThreadRunning(this.walRoller, getName() + ".logRoller", uncaughtExceptionHandler); @@ -2203,7 +2206,7 @@ public void stop(final String msg) { */ public void stop(final String msg, final boolean force, final User user) { if (!this.stopped) { - LOG.info("***** STOPPING region server '" + this + "' *****"); + LOG.info("***** STOPPING region server '{}' *****", this); if (this.rsHost != null) { // when forced via abort don't allow CPs to override try { @@ -3551,9 +3554,9 @@ void executeProcedure(long procId, long initiatingMasterActiveTime, .submit(new 
RSProcedureHandler(this, procId, initiatingMasterActiveTime, callable)); } - public void remoteProcedureComplete(long procId, long initiatingMasterActiveTime, - Throwable error) { - procedureResultReporter.complete(procId, initiatingMasterActiveTime, error); + public void remoteProcedureComplete(long procId, long initiatingMasterActiveTime, Throwable error, + byte[] procResultData) { + procedureResultReporter.complete(procId, initiatingMasterActiveTime, error, procResultData); } void reportProcedureDone(ReportProcedureDoneRequest request) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRollCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRollCallable.java new file mode 100644 index 000000000000..11dc28c2a682 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRollCallable.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import java.io.IOException; +import org.apache.hadoop.hbase.executor.EventType; +import org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable; +import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.wal.AbstractWALRoller; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.LogRollRemoteProcedureResult; + +@InterfaceAudience.Private +public class LogRollCallable extends BaseRSProcedureCallable { + + private static final Logger LOG = LoggerFactory.getLogger(LogRollCallable.class); + + private int maxRollRetry; + + @Override + protected byte[] doCall() throws Exception { + for (int nAttempt = 0; nAttempt < maxRollRetry; nAttempt++) { + try { + Pair filenumPairBefore = getFilenumPair(); + + rs.getWalRoller().requestRollAll(); + rs.getWalRoller().waitUntilWalRollFinished(); + + Pair filenumPairAfter = getFilenumPair(); + LOG.info( + "Before rolling log, highest filenum = {} default WAL filenum = {}, After " + + "rolling log, highest filenum = {} default WAL filenum = {}", + filenumPairBefore.getFirst(), filenumPairBefore.getSecond(), filenumPairAfter.getFirst(), + filenumPairAfter.getSecond()); + return LogRollRemoteProcedureResult.newBuilder() + .setServerName(ProtobufUtil.toServerName(rs.getServerName())) + .setLastHighestWalFilenum(filenumPairBefore.getFirst()).build().toByteArray(); + } catch (Exception e) { + LOG.warn("Failed rolling log on attempt={}", nAttempt, e); + if (nAttempt == maxRollRetry - 1) { + throw e; + } + } + } + return null; + } 
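LogRollCallable#doCall above retries the roll up to maxRollRetry times, rethrows only when the final attempt fails, and otherwise returns the serialized LogRollRemoteProcedureResult. The retry shape on its own, with placeholder types rather than the HBase callable machinery, looks roughly like:

// Bounded-retry shape: surface the failure only on the last attempt,
// otherwise return the first successful result. Types are placeholders.
final class BoundedRetry {
  interface Task<T> {
    T run() throws Exception;
  }

  static <T> T callWithRetries(Task<T> task, int maxAttempts) throws Exception {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      try {
        return task.run();
      } catch (Exception e) {
        if (attempt == maxAttempts - 1) {
          throw e;                 // out of attempts: propagate to the caller
        }
        // otherwise fall through and try again
      }
    }
    return null;                   // only reached when maxAttempts <= 0
  }
}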
+ + private Pair getFilenumPair() throws IOException { + long highestFilenum = rs.getWALs().stream() + .mapToLong(wal -> ((AbstractFSWAL) wal).getFilenum()).max().orElse(-1L); + long defaultWALFilenum = ((AbstractFSWAL) rs.getWAL(null)).getFilenum(); + return Pair.newPair(highestFilenum, defaultWALFilenum); + } + + @Override + protected void initParameter(byte[] parameter) throws Exception { + this.maxRollRetry = rs.getConfiguration().getInt(AbstractWALRoller.WAL_ROLL_RETRIES, 1); + } + + @Override + public EventType getEventType() { + return EventType.RS_LOG_ROLL; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSAnnotationReadingPriorityFunction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSAnnotationReadingPriorityFunction.java index 1197f7b5359c..94c76cf55a4b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSAnnotationReadingPriorityFunction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSAnnotationReadingPriorityFunction.java @@ -46,7 +46,8 @@ * Priority function specifically for the region server. */ @InterfaceAudience.Private -class RSAnnotationReadingPriorityFunction extends AnnotationReadingPriorityFunction { +public class RSAnnotationReadingPriorityFunction + extends AnnotationReadingPriorityFunction { private static final Logger LOG = LoggerFactory.getLogger(RSAnnotationReadingPriorityFunction.class); @@ -54,6 +55,9 @@ class RSAnnotationReadingPriorityFunction extends AnnotationReadingPriorityFunct /** Used to control the scan delay, currently sqrt(numNextCall * weight) */ public static final String SCAN_VTIME_WEIGHT_CONF_KEY = "hbase.ipc.server.scan.vtime.weight"; + // QOS for internal meta read requests + public static final int INTERNAL_READ_QOS = 250; + @SuppressWarnings("unchecked") private final Class[] knownArgumentClasses = new Class[] { GetRegionInfoRequest.class, GetStoreFileRequest.class, CloseRegionRequest.class, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index bd232addcec5..fdfea375e096 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -103,6 +103,7 @@ import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.ipc.ServerRpcController; +import org.apache.hadoop.hbase.monitoring.ThreadLocalServerSideScanMetrics; import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.procedure2.RSProcedureCallable; import org.apache.hadoop.hbase.quotas.ActivePolicyEnforcement; @@ -2113,6 +2114,7 @@ public ReplicateWALEntryResponse replay(final RpcController controller, ServerRegionReplicaUtil.isDefaultReplica(region.getRegionInfo()) ? region.getCoprocessorHost() : null; // do not invoke coprocessors if this is a secondary region replica + List> walEntries = new ArrayList<>(); // Skip adding the edits to WAL if this is a secondary region replica boolean isPrimary = RegionReplicaUtil.isDefaultReplica(region.getRegionInfo()); @@ -2134,6 +2136,18 @@ public ReplicateWALEntryResponse replay(final RpcController controller, Pair walEntry = (coprocessorHost == null) ? 
null : new Pair<>(); List edits = WALSplitUtil.getMutationsFromWALEntry(entry, cells, walEntry, durability); + if (coprocessorHost != null) { + // Start coprocessor replay here. The coprocessor is for each WALEdit instead of a + // KeyValue. + if ( + coprocessorHost.preWALRestore(region.getRegionInfo(), walEntry.getFirst(), + walEntry.getSecond()) + ) { + // if bypass this log entry, ignore it ... + continue; + } + walEntries.add(walEntry); + } if (edits != null && !edits.isEmpty()) { // HBASE-17924 // sort to improve lock efficiency @@ -2156,6 +2170,13 @@ public ReplicateWALEntryResponse replay(final RpcController controller, if (wal != null) { wal.sync(); } + + if (coprocessorHost != null) { + for (Pair entry : walEntries) { + coprocessorHost.postWALRestore(region.getRegionInfo(), entry.getFirst(), + entry.getSecond()); + } + } return ReplicateWALEntryResponse.newBuilder().build(); } catch (IOException ie) { throw new ServiceException(ie); @@ -3519,10 +3540,6 @@ private void scan(HBaseRpcController controller, ScanRequest request, RegionScan // from block size progress before writing into the response scanMetrics.setCounter(ServerSideScanMetrics.BLOCK_BYTES_SCANNED_KEY_METRIC_NAME, scannerContext.getBlockSizeProgress()); - if (rpcCall != null) { - scanMetrics.setCounter(ServerSideScanMetrics.FS_READ_TIME_METRIC_NAME, - rpcCall.getFsReadTime()); - } } } } finally { @@ -3589,6 +3606,11 @@ public ScanResponse scan(final RpcController controller, final ScanRequest reque } throw new ServiceException(e); } + boolean trackMetrics = request.hasTrackScanMetrics() && request.getTrackScanMetrics(); + ThreadLocalServerSideScanMetrics.setScanMetricsEnabled(trackMetrics); + if (trackMetrics) { + ThreadLocalServerSideScanMetrics.reset(); + } requestCount.increment(); rpcScanRequestCount.increment(); RegionScannerContext rsx; @@ -3659,7 +3681,6 @@ public ScanResponse scan(final RpcController controller, final ScanRequest reque boolean scannerClosed = false; try { List results = new ArrayList<>(Math.min(rows, 512)); - boolean trackMetrics = request.hasTrackScanMetrics() && request.getTrackScanMetrics(); ServerSideScanMetrics scanMetrics = trackMetrics ? 
new ServerSideScanMetrics() : null; if (rows > 0) { boolean done = false; @@ -3741,6 +3762,7 @@ public ScanResponse scan(final RpcController controller, final ScanRequest reque scanMetrics.addToCounter(ServerSideScanMetrics.RPC_SCAN_QUEUE_WAIT_TIME_METRIC_NAME, rpcQueueWaitTime); } + ThreadLocalServerSideScanMetrics.populateServerSideScanMetrics(scanMetrics); Map metrics = scanMetrics.getMetricsMap(); ScanMetrics.Builder metricBuilder = ScanMetrics.newBuilder(); NameInt64Pair.Builder pairBuilder = NameInt64Pair.newBuilder(); @@ -3966,7 +3988,7 @@ private void executeProcedures(RemoteProcedureRequest request) { LOG.warn("Failed to instantiating remote procedure {}, pid={}", request.getProcClass(), request.getProcId(), e); server.remoteProcedureComplete(request.getProcId(), request.getInitiatingMasterActiveTime(), - e); + e, null); return; } callable.init(request.getProcData().toByteArray(), server); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index 703f06141bf4..b300496e1d7c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -1426,6 +1426,31 @@ public void call(RegionObserver observer) throws IOException { }); } + /** + * Supports Coprocessor 'bypass'. + * @return true if default behavior should be bypassed, false otherwise + */ + public boolean preWALRestore(final RegionInfo info, final WALKey logKey, final WALEdit logEdit) + throws IOException { + return execOperation( + coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult(true) { + @Override + public void call(RegionObserver observer) throws IOException { + observer.preWALRestore(this, info, logKey, logEdit); + } + }); + } + + public void postWALRestore(final RegionInfo info, final WALKey logKey, final WALEdit logEdit) + throws IOException { + execOperation(coprocEnvironments.isEmpty() ? 
null : new RegionObserverOperationWithoutResult() { + @Override + public void call(RegionObserver observer) throws IOException { + observer.postWALRestore(this, info, logKey, logEdit); + } + }); + } + /** * @param familyPaths pairs of { CF, file path } submitted for bulk load */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java index aa2809fece6e..c69dc6e2df6a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java @@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.ipc.RpcCall; import org.apache.hadoop.hbase.ipc.RpcCallback; import org.apache.hadoop.hbase.ipc.RpcServer; -import org.apache.hadoop.hbase.monitoring.ThreadLocalServerSideScanMetrics; import org.apache.hadoop.hbase.regionserver.Region.Operation; import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState; @@ -96,8 +95,6 @@ public class RegionScannerImpl implements RegionScanner, Shipper, RpcCallback { private RegionServerServices rsServices; - private ServerSideScanMetrics scannerInitMetrics = null; - @Override public RegionInfo getRegionInfo() { return region.getRegionInfo(); @@ -148,16 +145,7 @@ private static boolean hasNonce(HRegion region, long nonce) { } finally { region.smallestReadPointCalcLock.unlock(ReadPointCalculationLock.LockType.RECORDING_LOCK); } - boolean isScanMetricsEnabled = scan.isScanMetricsEnabled(); - ThreadLocalServerSideScanMetrics.setScanMetricsEnabled(isScanMetricsEnabled); - if (isScanMetricsEnabled) { - this.scannerInitMetrics = new ServerSideScanMetrics(); - ThreadLocalServerSideScanMetrics.reset(); - } initializeScanners(scan, additionalScanners); - if (isScanMetricsEnabled) { - ThreadLocalServerSideScanMetrics.populateServerSideScanMetrics(scannerInitMetrics); - } } public ScannerContext getContext() { @@ -291,16 +279,6 @@ public boolean nextRaw(List outResults, ScannerContext sca throw new UnknownScannerException("Scanner was closed"); } boolean moreValues = false; - boolean isScanMetricsEnabled = scannerContext.isTrackingMetrics(); - ThreadLocalServerSideScanMetrics.setScanMetricsEnabled(isScanMetricsEnabled); - if (isScanMetricsEnabled) { - ThreadLocalServerSideScanMetrics.reset(); - ServerSideScanMetrics scanMetrics = scannerContext.getMetrics(); - if (scannerInitMetrics != null) { - scannerInitMetrics.getMetricsMap().forEach(scanMetrics::addToCounter); - scannerInitMetrics = null; - } - } if (outResults.isEmpty()) { // Usually outResults is empty. This is true when next is called // to handle scan or get operation. 
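With the scanner-level plumbing removed above, the thread-local scan metrics are now enabled, reset, and drained once per scan RPC in RSRpcServices. A simplified stand-in for that enable/reset/populate lifecycle (not the actual ThreadLocalServerSideScanMetrics class or its counter names) is:

// Simplified stand-in for the per-RPC thread-local scan-metrics lifecycle:
// enable + reset before the scan work, then fold the counters into the response metrics.
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

final class ThreadLocalScanMetricsSketch {
  private static final ThreadLocal<Boolean> ENABLED = ThreadLocal.withInitial(() -> false);
  private static final ThreadLocal<AtomicLong> BYTES_READ =
    ThreadLocal.withInitial(AtomicLong::new);

  static void setEnabled(boolean enabled) {
    ENABLED.set(enabled);
  }

  static void reset() {
    BYTES_READ.get().set(0);
  }

  static void addBytesRead(long bytes) {
    if (ENABLED.get()) {
      BYTES_READ.get().addAndGet(bytes);   // called from the read path on the same thread
    }
  }

  static void populate(Map<String, Long> responseMetrics) {
    responseMetrics.merge("BYTES_READ", BYTES_READ.get().get(), Long::sum);
  }

  public static void main(String[] args) {
    // per-RPC lifecycle, mirroring the order used in the scan RPC: enable, reset, work, populate
    setEnabled(true);
    reset();
    addBytesRead(4096);
    Map<String, Long> metrics = new HashMap<>();
    populate(metrics);
    System.out.println(metrics);           // {BYTES_READ=4096}
  }
}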
@@ -310,10 +288,6 @@ public boolean nextRaw(List outResults, ScannerContext sca moreValues = nextInternal(tmpList, scannerContext); outResults.addAll(tmpList); } - if (isScanMetricsEnabled) { - ServerSideScanMetrics scanMetrics = scannerContext.getMetrics(); - ThreadLocalServerSideScanMetrics.populateServerSideScanMetrics(scanMetrics); - } region.addReadRequestsCount(1); if (region.getMetrics() != null) { region.getMetrics().updateReadRequestCount(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReloadQuotasCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReloadQuotasCallable.java index e134dfda7ac8..de23db37856a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReloadQuotasCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReloadQuotasCallable.java @@ -29,9 +29,10 @@ public class ReloadQuotasCallable extends BaseRSProcedureCallable { private static final Logger LOG = LoggerFactory.getLogger(ReloadQuotasCallable.class); @Override - protected void doCall() throws Exception { + protected byte[] doCall() throws Exception { LOG.info("Reloading quotas"); rs.getRegionServerRpcQuotaManager().reload(); + return null; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java index 21016fe59dd0..7fcf363a919c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java @@ -28,6 +28,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RemoteProcedureResult; @@ -51,7 +52,8 @@ public RemoteProcedureResultReporter(HRegionServer server) { this.server = server; } - public void complete(long procId, long initiatingMasterActiveTime, Throwable error) { + public void complete(long procId, long initiatingMasterActiveTime, Throwable error, + byte[] procReturnValue) { RemoteProcedureResult.Builder builder = RemoteProcedureResult.newBuilder().setProcId(procId) .setInitiatingMasterActiveTime(initiatingMasterActiveTime); if (error != null) { @@ -62,6 +64,9 @@ public void complete(long procId, long initiatingMasterActiveTime, Throwable err LOG.debug("Successfully complete execution of pid={}", procId); builder.setStatus(RemoteProcedureResult.Status.SUCCESS); } + if (procReturnValue != null) { + builder.setProcResultData(ByteString.copyFrom(procReturnValue)); + } results.add(builder.build()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotRegionCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotRegionCallable.java index 0693aee87508..7158671efb1b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotRegionCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotRegionCallable.java @@ -41,7 +41,7 @@ public class SnapshotRegionCallable extends BaseRSProcedureCallable { private ForeignExceptionDispatcher monitor; @Override - protected void doCall() throws Exception { + protected byte[] doCall() throws Exception { HRegion 
region = rs.getRegion(regionInfo.getEncodedName()); if (region == null) { throw new NotServingRegionException( @@ -78,6 +78,7 @@ protected void doCall() throws Exception { LOG.debug("Closing snapshot operation on {}", region); region.closeRegionOperation(Region.Operation.SNAPSHOT); } + return null; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotVerifyCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotVerifyCallable.java index db7908d81be8..76a3c1cf84e9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotVerifyCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotVerifyCallable.java @@ -32,8 +32,9 @@ public class SnapshotVerifyCallable extends BaseRSProcedureCallable { private RegionInfo region; @Override - protected void doCall() throws Exception { + protected byte[] doCall() throws Exception { rs.getRsSnapshotVerifier().verifyRegion(snapshot, region); + return null; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitWALCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitWALCallable.java index 151c865db794..e6ae50f6e9ab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitWALCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitWALCallable.java @@ -79,7 +79,7 @@ public static class ErrorWALSplitException extends HBaseIOException { } @Override - protected void doCall() throws Exception { + protected byte[] doCall() throws Exception { // grab a lock splitWALLock = splitWALLocks.acquireLock(walPath); try { @@ -97,6 +97,7 @@ protected void doCall() throws Exception { } finally { splitWALLock.unlock(); } + return null; } public String getWalPath() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RSProcedureHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RSProcedureHandler.java index 6eacc6b78e6a..3e150144f2c0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RSProcedureHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RSProcedureHandler.java @@ -51,14 +51,16 @@ public RSProcedureHandler(HRegionServer rs, long procId, long initiatingMasterAc @Override public void process() { Throwable error = null; + byte[] procResultData = null; try { MDC.put("pid", Long.toString(procId)); - callable.call(); + procResultData = callable.call(); } catch (Throwable t) { - LOG.error("pid=" + this.procId, t); + LOG.error("pid={}", this.procId, t); error = t; } finally { - ((HRegionServer) server).remoteProcedureComplete(procId, initiatingMasterActiveTime, error); + ((HRegionServer) server).remoteProcedureComplete(procId, initiatingMasterActiveTime, error, + procResultData); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSStatusConstants.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSStatusConstants.java new file mode 100644 index 000000000000..09b7c34e3318 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSStatusConstants.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver.http; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Constants used by the web UI JSP pages. + */ +@InterfaceAudience.Private +public final class RSStatusConstants { + public static final String FILTER = "filter"; + public static final String FILTER_GENERAL = "general"; + public static final String FORMAT = "format"; + public static final String FORMAT_JSON = "json"; + public static final String FORMAT_HTML = "html"; + public static final String PARENT = "parent"; + public static final String BLOCK_CACHE_NAME = "bcn"; + public static final String BLOCK_CACHE_NAME_L1 = "L1"; + public static final String BLOCK_CACHE_V = "bcv"; + public static final String BLOCK_CACHE_V_FILE = "file"; + + private RSStatusConstants() { + // Do not instantiate. + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSStatusServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSStatusServlet.java index 43bac8791b48..9b8f9833f0b8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSStatusServlet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSStatusServlet.java @@ -18,43 +18,20 @@ package org.apache.hadoop.hbase.regionserver.http; import java.io.IOException; -import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; -import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl; import org.apache.yetus.audience.InterfaceAudience; +/** + * Only kept for redirecting to regionserver.jsp. 
+ */ @InterfaceAudience.Private public class RSStatusServlet extends HttpServlet { private static final long serialVersionUID = 1L; @Override - protected void doGet(HttpServletRequest req, HttpServletResponse resp) - throws ServletException, IOException { - HRegionServer hrs = - (HRegionServer) getServletContext().getAttribute(HRegionServer.REGIONSERVER); - assert hrs != null : "No RS in context!"; - - String format = req.getParameter("format"); - if ("json".equals(format)) { - resp.setContentType("application/json"); - } else { - resp.setContentType("text/html"); - } - - if (!hrs.isOnline()) { - resp.getWriter().write("The RegionServer is initializing!"); - resp.getWriter().close(); - return; - } - - RSStatusTmpl tmpl = new RSStatusTmpl(); - if (format != null) tmpl.setFormat(format); - if (req.getParameter("filter") != null) tmpl.setFilter(req.getParameter("filter")); - if (req.getParameter("bcn") != null) tmpl.setBcn(req.getParameter("bcn")); - if (req.getParameter("bcv") != null) tmpl.setBcv(req.getParameter("bcv")); - tmpl.render(resp.getWriter(), hrs); + public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException { + response.sendRedirect(request.getContextPath() + "/regionserver.jsp"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ClaimReplicationQueueCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ClaimReplicationQueueCallable.java index 2b7e14f9f7aa..73fa29766186 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ClaimReplicationQueueCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ClaimReplicationQueueCallable.java @@ -39,9 +39,10 @@ public EventType getEventType() { } @Override - protected void doCall() throws Exception { + protected byte[] doCall() throws Exception { PeerProcedureHandler handler = rs.getReplicationSourceService().getPeerProcedureHandler(); handler.claimReplicationQueue(queueId); + return null; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java index 094a61dcdd1f..5d4454c14484 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java @@ -43,7 +43,7 @@ public class RefreshPeerCallable extends BaseRSProcedureCallable { private int stage; @Override - protected void doCall() throws Exception { + protected byte[] doCall() throws Exception { LOG.info("Received a peer change event, peerId=" + peerId + ", type=" + type); PeerProcedureHandler handler = rs.getReplicationSourceService().getPeerProcedureHandler(); switch (type) { @@ -68,6 +68,7 @@ protected void doCall() throws Exception { default: throw new IllegalArgumentException("Unknown peer modification type: " + type); } + return null; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java index 427fe80b0c36..ed368e18981d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java @@ -69,7 +69,7 @@ public class ReplaySyncReplicationWALCallable extends BaseRSProcedureCallable { private final KeyLocker peersLock = new KeyLocker<>(); @Override - protected void doCall() throws Exception { + protected byte[] doCall() throws Exception { LOG.info("Received a replay sync replication wals {} event, peerId={}", wals, peerId); if (rs.getReplicationSinkService() != null) { Lock peerLock = peersLock.acquireLock(wals.get(0)); @@ -81,6 +81,7 @@ protected void doCall() throws Exception { peerLock.unlock(); } } + return null; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SwitchRpcThrottleRemoteCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SwitchRpcThrottleRemoteCallable.java index d09c821b9edc..fd35464e686c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SwitchRpcThrottleRemoteCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SwitchRpcThrottleRemoteCallable.java @@ -34,8 +34,9 @@ public class SwitchRpcThrottleRemoteCallable extends BaseRSProcedureCallable { private boolean rpcThrottleEnabled; @Override - protected void doCall() throws Exception { + protected byte[] doCall() throws Exception { rs.getRegionServerRpcQuotaManager().switchRpcThrottle(rpcThrottleEnabled); + return null; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index 55b77b6aed1b..3b446826b775 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -212,6 +212,32 @@ public static boolean deleteRegionDir(final Configuration conf, final RegionInfo */ public static FSDataOutputStream create(Configuration conf, FileSystem fs, Path path, FsPermission perm, InetSocketAddress[] favoredNodes) throws IOException { + return create(conf, fs, path, perm, favoredNodes, true); + } + + /** + * Create the specified file on the filesystem. By default, this will: + *

+ * <ol>
+ * <li>overwrite the file if it exists</li>
+ * <li>apply the umask in the configuration (if it is enabled)</li>
+ * <li>use the fs configured buffer size (or 4096 if not set)</li>
+ * <li>use the configured column family replication or default replication if
+ * {@link ColumnFamilyDescriptorBuilder#DEFAULT_DFS_REPLICATION}</li>
+ * <li>use the default block size</li>
+ * <li>not track progress</li>
+ * </ol>
+ * @param conf configurations + * @param fs {@link FileSystem} on which to write the file + * @param path {@link Path} to the file to write + * @param perm permissions + * @param favoredNodes favored data nodes + * @param isRecursiveCreate recursively create parent directories + * @return output stream to the created file + * @throws IOException if the file cannot be created + */ + public static FSDataOutputStream create(Configuration conf, FileSystem fs, Path path, + FsPermission perm, InetSocketAddress[] favoredNodes, boolean isRecursiveCreate) + throws IOException { if (fs instanceof HFileSystem) { FileSystem backingFs = ((HFileSystem) fs).getBackingFs(); if (backingFs instanceof DistributedFileSystem) { @@ -230,7 +256,7 @@ public static FSDataOutputStream create(Configuration conf, FileSystem fs, Path } } - return CommonFSUtils.create(fs, path, perm, true); + return CommonFSUtils.create(fs, path, perm, true, isRecursiveCreate); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKStringFormatter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKStringFormatter.java new file mode 100644 index 000000000000..a83f07014b05 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKStringFormatter.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Utility used by both Master and Region Server web UI JSP pages. + */ +@InterfaceAudience.Private +public final class ZKStringFormatter { + + private ZKStringFormatter() { + // Do not instantiate. + } + + public static String formatZKString(ZKWatcher zookeeper) { + StringBuilder quorums = new StringBuilder(); + String zkQuorum = zookeeper.getQuorum(); + + if (null == zkQuorum) { + return quorums.toString(); + } + + String[] zks = zkQuorum.split(","); + + if (zks.length == 0) { + return quorums.toString(); + } + + for (int i = 0; i < zks.length; ++i) { + quorums.append(zks[i].trim()); + + if (i != (zks.length - 1)) { + quorums.append("
"); + } + } + + return quorums.toString(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java index c900333af9eb..5e6457211344 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java @@ -69,7 +69,7 @@ public abstract class AbstractWALRoller extends Thread impl * Configure for the max count of log rolling retry. The real retry count is also limited by the * timeout of log rolling via {@link #WAL_ROLL_WAIT_TIMEOUT} */ - protected static final String WAL_ROLL_RETRIES = "hbase.regionserver.logroll.retries"; + public static final String WAL_ROLL_RETRIES = "hbase.regionserver.logroll.retries"; protected final ConcurrentMap wals = new ConcurrentHashMap<>(); protected final T abortable; diff --git a/hbase-server/src/main/resources/hbase-webapps/common/taskMonitor.jsp b/hbase-server/src/main/resources/hbase-webapps/common/taskMonitor.jsp new file mode 100644 index 000000000000..3a84615e3c94 --- /dev/null +++ b/hbase-server/src/main/resources/hbase-webapps/common/taskMonitor.jsp @@ -0,0 +1,90 @@ +<%-- +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +--%> +<%@ page contentType="text/html;charset=UTF-8" + import="org.apache.hadoop.hbase.master.http.MasterStatusConstants" %> + +<% + String filter = (String) request.getAttribute(MasterStatusConstants.FILTER); + if (filter == null) { + filter = "general"; + } + String format = (String) request.getAttribute(MasterStatusConstants.FORMAT); + if (format == null) { + format = "html"; + } + String parent = (String) request.getAttribute(MasterStatusConstants.PARENT); + if (parent == null) { + parent = ""; + } +%> + +<% if (format.equals("json")) { %> + <% request.setAttribute(MasterStatusConstants.FILTER, filter); %> + +<% } else { %> +

Tasks

+ +
+ +
+
+ View as JSON + <% request.setAttribute(MasterStatusConstants.FILTER, "all"); %> + +
+
+ View as JSON + <% request.setAttribute(MasterStatusConstants.FILTER, "general"); %> + +
+
+ View as JSON + <% request.setAttribute(MasterStatusConstants.FILTER, "handler"); %> + +
+
+ View as JSON + <% request.setAttribute(MasterStatusConstants.FILTER, "rpc"); %> + +
+
+ View as JSON + <% request.setAttribute(MasterStatusConstants.FILTER, "operation"); %> + +
+
+
+<% } %> diff --git a/hbase-server/src/main/resources/hbase-webapps/common/taskMonitorRenderTasks.jsp b/hbase-server/src/main/resources/hbase-webapps/common/taskMonitorRenderTasks.jsp new file mode 100644 index 000000000000..ad606d8e332b --- /dev/null +++ b/hbase-server/src/main/resources/hbase-webapps/common/taskMonitorRenderTasks.jsp @@ -0,0 +1,87 @@ +<%-- +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +--%> +<%@ page contentType="text/html;charset=UTF-8" + import="java.util.*" + import="org.apache.hadoop.hbase.monitoring.*" + import="org.apache.hadoop.util.StringUtils" + import="org.apache.hadoop.hbase.master.http.MasterStatusConstants" %> + +<%! + public static String stateCss(MonitoredTask.State state) { + if (state == MonitoredTask.State.COMPLETE) { + return "alert alert-success"; + } else if (state == MonitoredTask.State.ABORTED) { + return "alert alert-danger"; + } else { + return ""; + } + } +%> + +<% + TaskMonitor taskMonitor = TaskMonitor.get(); + String filter = (String) request.getAttribute(MasterStatusConstants.FILTER); + String format = (String) request.getAttribute(MasterStatusConstants.FORMAT); + if (format == null) { + format = "html"; + } + + List tasks = taskMonitor.getTasks(filter); + long now = System.currentTimeMillis(); + Collections.sort(tasks, (t1, t2) -> Long.compare(t1.getStateTime(), t2.getStateTime())); + boolean first = true; + %> + +<% if (format.equals("json")) { %> +[<% for (MonitoredTask task : tasks) { %><% if (first) { %><% first = false;%><% } else { %>,<% } %><%= task.toJSON() %><% } %>] +<% } else { %> + <% if (tasks.isEmpty()) { %> +
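The taskMonitorRenderTasks.jsp fragment above drives both the HTML table and the JSON view off the same TaskMonitor data: fetch the tasks for the requested filter, sort them by state time, then either emit task.toJSON() or render rows. A plain-Java sketch of the JSON branch, restricted to the calls that appear in the JSP (the wrapper class and its output handling are illustrative):

```java
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;

public class TaskJsonExample {

  /** Builds the same JSON array the JSP's "json" branch writes out. */
  public static String tasksAsJson(String filter) throws Exception {
    List<MonitoredTask> tasks = TaskMonitor.get().getTasks(filter);
    // Oldest state change first, matching the JSP's comparator.
    Collections.sort(tasks, (t1, t2) -> Long.compare(t1.getStateTime(), t2.getStateTime()));

    StringBuilder json = new StringBuilder("[");
    boolean first = true;
    for (MonitoredTask task : tasks) {
      if (!first) {
        json.append(',');
      }
      first = false;
      json.append(task.toJSON());
    }
    return json.append(']').toString();
  }

  public static void main(String[] args) throws Exception {
    // "general" is the default filter the JSP falls back to.
    System.out.println(tasksAsJson("general"));
  }
}
```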

No tasks currently running on this node.

+ <% } else { %> + + + + + + + + + <% for (MonitoredTask task : tasks) { %> + + + + + + + + <% } %> +
Start TimeDescriptionStateStatusCompletion Time
<%= new Date(task.getStartTime()) %><%= task.getDescription() %><%= task.getState() %> + (since <%= StringUtils.formatTimeDiff(now, task.getStateTime()) %> ago) + <%= task.getStatus() %> + (since <%= StringUtils.formatTimeDiff(now, task.getStatusTime()) %> + ago) + <% if (task.getCompletionTimestamp() < 0) { %> + <%= task.getState() %> + <% } else { %> + <%= new Date(task.getCompletionTimestamp()) %> (since <%= StringUtils.formatTimeDiff(now, task.getCompletionTimestamp()) %> ago) + <% } %> +
+ <% } %> +<% } %> diff --git a/hbase-server/src/main/resources/hbase-webapps/master/assignmentManagerStatus.jsp b/hbase-server/src/main/resources/hbase-webapps/master/assignmentManagerStatus.jsp new file mode 100644 index 000000000000..0966d04316b4 --- /dev/null +++ b/hbase-server/src/main/resources/hbase-webapps/master/assignmentManagerStatus.jsp @@ -0,0 +1,117 @@ +<%-- +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ +--%> +<%@ page contentType="text/html;charset=UTF-8" + import="org.apache.hadoop.hbase.master.HMaster" + import="org.apache.hadoop.hbase.quotas.QuotaUtil" + import="org.apache.hadoop.hbase.HBaseConfiguration" + import="org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer" + import="org.apache.hadoop.hbase.master.assignment.AssignmentManager" + import="org.apache.hadoop.hbase.master.RegionState" + import="java.util.SortedSet" + import="org.apache.hadoop.hbase.master.assignment.RegionStates" + import="org.apache.hadoop.hbase.client.RegionInfoDisplay" %> +<% + HMaster master = (HMaster) getServletContext().getAttribute(HMaster.MASTER); + AssignmentManager assignmentManager = master.getAssignmentManager(); + int limit = 100; + + SortedSet rit = assignmentManager.getRegionStates().getRegionsInTransitionOrderedByTimestamp(); + +if (!rit.isEmpty()) { + long currentTime = System.currentTimeMillis(); + AssignmentManager.RegionInTransitionStat ritStat = assignmentManager.computeRegionInTransitionStat(); + + int numOfRITs = rit.size(); + int ritsPerPage = Math.min(5, numOfRITs); + int numOfPages = (int) Math.ceil(numOfRITs * 1.0 / ritsPerPage); +%> +
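The scriptlet above pages the regions-in-transition table five rows at a time: ritsPerPage = Math.min(5, numOfRITs) and numOfPages = ceil(numOfRITs / ritsPerPage). A quick worked example of that arithmetic (the counts are arbitrary):

```java
public class RitPagingExample {
  public static void main(String[] args) {
    int numOfRITs = 12; // e.g. twelve regions currently in transition
    int ritsPerPage = Math.min(5, numOfRITs);                        // 5
    int numOfPages = (int) Math.ceil(numOfRITs * 1.0 / ritsPerPage); // ceil(2.4) = 3
    System.out.println(ritsPerPage + " rows per page, " + numOfPages + " pages");
    // With fewer than five RITs everything fits on one page:
    // numOfRITs = 3 -> ritsPerPage = 3, numOfPages = 1.
  }
}
```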
+

Regions in Transition

+

<%= numOfRITs %> region(s) in transition. + <% if(ritStat.hasRegionsTwiceOverThreshold()) { %> + + <% } else if ( ritStat.hasRegionsOverThreshold()) { %> + + <% } else { %> + + <% } %> + <%= ritStat.getTotalRITsOverThreshold() %> region(s) in transition for + more than <%= ritStat.getRITThreshold() %> milliseconds. + +

+
+
+ <% int recordItr = 0; %> + <% for (RegionState rs : rit) { %> + <% if((recordItr % ritsPerPage) == 0 ) { %> + <% if(recordItr == 0) { %> +
+ <% } else { %> +
+ <% } %> + + + <% } %> + + <% if(ritStat.isRegionTwiceOverThreshold(rs.getRegion())) { %> + + <% } else if ( ritStat.isRegionOverThreshold(rs.getRegion())) { %> + + <% } else { %> + + <% } %> + <% + String retryStatus = "0"; + RegionStates.RegionFailedOpen regionFailedOpen = assignmentManager + .getRegionStates().getFailedOpen(rs.getRegion()); + if (regionFailedOpen != null) { + retryStatus = Integer.toString(regionFailedOpen.getRetries()); + } else if (rs.getState() == RegionState.State.FAILED_OPEN) { + retryStatus = "Failed"; + } + %> + + + + + <% recordItr++; %> + <% if((recordItr % ritsPerPage) == 0) { %> +
RegionStateRIT time (ms) Retries
<%= rs.getRegion().getEncodedName() %> + <%= RegionInfoDisplay.getDescriptiveNameFromRegionStateForDisplay(rs, + assignmentManager.getConfiguration()) %><%= (currentTime - rs.getStamp()) %> <%= retryStatus %>
+
+ <% } %> + <% } %> + + <% if((recordItr % ritsPerPage) != 0) { %> + <% for (; (recordItr % ritsPerPage) != 0 ; recordItr++) { %> + + <% } %> + +
+ <% } %> +
+ + + +
+
+<% } %> + diff --git a/hbase-server/src/main/resources/hbase-webapps/master/backupMasterStatus.jsp b/hbase-server/src/main/resources/hbase-webapps/master/backupMasterStatus.jsp new file mode 100644 index 000000000000..cada34472c95 --- /dev/null +++ b/hbase-server/src/main/resources/hbase-webapps/master/backupMasterStatus.jsp @@ -0,0 +1,66 @@ +<%-- +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +--%> +<%@ page contentType="text/html;charset=UTF-8" + import="java.util.*" + import="org.apache.hadoop.hbase.ServerName" + import="org.apache.hadoop.hbase.master.HMaster" + import="org.apache.hbase.thirdparty.com.google.common.base.Preconditions" %> +<% + HMaster master = (HMaster) getServletContext().getAttribute(HMaster.MASTER); + if (!master.isActiveMaster()) { + + ServerName active_master = master.getActiveMaster().orElse(null); + Preconditions.checkState(active_master != null, "Failed to retrieve active master's ServerName!"); + int activeInfoPort = master.getActiveMasterInfoPort(); +%> +
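backupMasterStatus.jsp branches on master.isActiveMaster(): a backup master only reports the current active master, while the active master lists its backups sorted by ServerName. A hedged sketch of that decision in plain Java, using the accessors the JSP itself calls (the helper class and its return string are made up for illustration):

```java
import java.util.Arrays;
import java.util.Collection;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.HMaster;

public final class BackupMasterSummary {

  /** Mirrors the JSP's branching; the text format is illustrative only. */
  public static String summarize(HMaster master) {
    if (!master.isActiveMaster()) {
      // A backup master just points at whoever is active right now.
      ServerName active = master.getActiveMaster().orElse(null);
      int activeInfoPort = master.getActiveMasterInfoPort();
      return "Current Active Master: " + (active == null ? "unknown" : active.getHostname())
        + " (info port " + activeInfoPort + ")";
    }
    // The active master lists its backups, sorted for stable display.
    Collection<ServerName> backups = master.getBackupMasters();
    ServerName[] sorted = backups.toArray(new ServerName[0]);
    Arrays.sort(sorted);
    StringBuilder sb = new StringBuilder("Backup Masters (" + sorted.length + "):");
    for (ServerName sn : sorted) {
      sb.append(' ').append(sn.getHostname()).append(':')
        .append(master.getBackupMasterInfoPort(sn));
    }
    return sb.toString();
  }

  private BackupMasterSummary() {
  }
}
```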
+ +
+

Current Active Master: <%= active_master.getHostname() %>

+ <% } else { %> +

Backup Masters

+ + + + + + + + <% + Collection backup_masters = master.getBackupMasters(); + ServerName [] backupServerNames = backup_masters.toArray(new ServerName[backup_masters.size()]); + Arrays.sort(backupServerNames); + for (ServerName serverName : backupServerNames) { + int infoPort = master.getBackupMasterInfoPort(serverName); + %> + + + + + + <% } %> + +
ServerNamePortStart Time
<%= serverName.getHostname() %> + <%= serverName.getPort() %><%= new Date(serverName.getStartCode()) %>
Total:<%= backupServerNames.length %>
+<% } %> diff --git a/hbase-server/src/main/resources/hbase-webapps/master/catalogTables.jsp b/hbase-server/src/main/resources/hbase-webapps/master/catalogTables.jsp new file mode 100644 index 000000000000..b965241afe2a --- /dev/null +++ b/hbase-server/src/main/resources/hbase-webapps/master/catalogTables.jsp @@ -0,0 +1,82 @@ +<%-- +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +--%> + +<%@ page contentType="text/html;charset=UTF-8" + import="java.util.*" + import="org.apache.hadoop.hbase.NamespaceDescriptor" + import="org.apache.hadoop.hbase.TableName" + import="org.apache.hadoop.hbase.master.HMaster" + import="org.apache.hadoop.hbase.quotas.QuotaUtil" + import="org.apache.hadoop.hbase.security.access.PermissionStorage" + import="org.apache.hadoop.hbase.security.visibility.VisibilityConstants" + import="org.apache.hadoop.hbase.tool.CanaryTool" + import="org.apache.hadoop.hbase.client.*" + import="org.apache.hadoop.hbase.master.http.MasterStatusConstants" %> + +<% + HMaster master = (HMaster) getServletContext().getAttribute(HMaster.MASTER); + + Map frags = (Map) request.getAttribute(MasterStatusConstants.FRAGS); + + List sysTables = master.isInitialized() ? + master.listTableDescriptorsByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR) : null; +%> + +<%if (sysTables != null && sysTables.size() > 0) { %> + + + + <% if (frags != null) { %> + + <% } %> + + + <% for (TableDescriptor systemTable : sysTables) { %> + + <% TableName tableName = systemTable.getTableName();%> + + <% if (frags != null) { %> + + <% } %> + <% String description = null; + if (tableName.equals(TableName.META_TABLE_NAME)){ + description = "The hbase:meta table holds references to all User Table regions."; + } else if (tableName.equals(CanaryTool.DEFAULT_WRITE_TABLE_NAME)){ + description = "The hbase:canary table is used to sniff the write availability of" + + " each regionserver."; + } else if (tableName.equals(PermissionStorage.ACL_TABLE_NAME)){ + description = "The hbase:acl table holds information about acl."; + } else if (tableName.equals(VisibilityConstants.LABELS_TABLE_NAME)){ + description = "The hbase:labels table holds information about visibility labels."; + } else if (tableName.equals(QuotaUtil.QUOTA_TABLE_NAME)){ + description = "The hbase:quota table holds quota information about number" + + " or size of requests in a given time frame."; + } else if (tableName.equals(TableName.valueOf("hbase:rsgroup"))){ + description = "The hbase:rsgroup table holds information about regionserver groups."; + } else if (tableName.equals(TableName.valueOf("hbase:replication"))) { + description = "The hbase:replication table tracks cross cluster replication through " + + "WAL file offsets."; + } + %> + + + <% } %> +
Table NameFrag.Description
<%= tableName %><%= frags.get(tableName.getNameAsString()) != null ? frags.get(tableName.getNameAsString()) + "%" : "n/a" %><%= description %>
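catalogTables.jsp hard-codes a one-line description per system table. The same mapping pulled into a small helper, purely to make the list easier to scan (the helper class is hypothetical; the descriptions and constants are the ones used in the JSP above):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.quotas.QuotaUtil;
import org.apache.hadoop.hbase.security.access.PermissionStorage;
import org.apache.hadoop.hbase.security.visibility.VisibilityConstants;
import org.apache.hadoop.hbase.tool.CanaryTool;

public final class SystemTableDescriptions {

  /** Returns the UI blurb for a system table, or null when there is none. */
  public static String describe(TableName tableName) {
    if (tableName.equals(TableName.META_TABLE_NAME)) {
      return "The hbase:meta table holds references to all User Table regions.";
    } else if (tableName.equals(CanaryTool.DEFAULT_WRITE_TABLE_NAME)) {
      return "The hbase:canary table is used to sniff the write availability of"
        + " each regionserver.";
    } else if (tableName.equals(PermissionStorage.ACL_TABLE_NAME)) {
      return "The hbase:acl table holds information about acl.";
    } else if (tableName.equals(VisibilityConstants.LABELS_TABLE_NAME)) {
      return "The hbase:labels table holds information about visibility labels.";
    } else if (tableName.equals(QuotaUtil.QUOTA_TABLE_NAME)) {
      return "The hbase:quota table holds quota information about number"
        + " or size of requests in a given time frame.";
    } else if (tableName.equals(TableName.valueOf("hbase:rsgroup"))) {
      return "The hbase:rsgroup table holds information about regionserver groups.";
    } else if (tableName.equals(TableName.valueOf("hbase:replication"))) {
      return "The hbase:replication table tracks cross cluster replication"
        + " through WAL file offsets.";
    }
    return null;
  }

  private SystemTableDescriptions() {
  }
}
```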
+<% } %> diff --git a/hbase-server/src/main/resources/hbase-webapps/master/deadRegionServers.jsp b/hbase-server/src/main/resources/hbase-webapps/master/deadRegionServers.jsp new file mode 100644 index 000000000000..436801f544af --- /dev/null +++ b/hbase-server/src/main/resources/hbase-webapps/master/deadRegionServers.jsp @@ -0,0 +1,89 @@ +<%-- +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +--%> +<%@ page contentType="text/html;charset=UTF-8" + import="org.apache.hadoop.hbase.ServerName" + import="java.util.*" + import="org.apache.hadoop.hbase.master.HMaster" + import="org.apache.hadoop.hbase.rsgroup.RSGroupUtil" + import="org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager" + import="org.apache.hadoop.hbase.master.DeadServer" + import="org.apache.hadoop.hbase.rsgroup.RSGroupInfo" + import="org.apache.hadoop.hbase.master.ServerManager" %> +<% + HMaster master = (HMaster) getServletContext().getAttribute(HMaster.MASTER); + + ServerManager serverManager = master.getServerManager(); + + Set deadServers = null; + + if (master.isActiveMaster()) { + if (serverManager != null) { + deadServers = serverManager.getDeadServers().copyServerNames(); + } + } +%> + +<% if (deadServers != null && deadServers.size() > 0) { %> +

Dead Region Servers

+ + + + + + <% if (!master.isInMaintenanceMode() && master.getMasterCoprocessorHost() != null) { %> + <% if (RSGroupUtil.isRSGroupEnabled(master.getConfiguration())) { %> + + <% } %> + <% } %> + +<% + RSGroupInfoManager inMgr = null; + DeadServer deadServerUtil = master.getServerManager().getDeadServers(); + ServerName [] deadServerNames = deadServers.toArray(new ServerName[deadServers.size()]); + Arrays.sort(deadServerNames); + if (!master.isInMaintenanceMode() && master.getMasterCoprocessorHost() != null + && RSGroupUtil.isRSGroupEnabled(master.getConfiguration())) { + inMgr = master.getRSGroupInfoManager(); + } + for (ServerName deadServerName: deadServerNames) { + String rsGroupName = null; + if (inMgr != null){ + RSGroupInfo groupInfo = inMgr.getRSGroupOfServer(deadServerName.getAddress()); + rsGroupName = groupInfo == null ? RSGroupInfo.DEFAULT_GROUP : groupInfo.getName(); + } + %> + + + + + <% if (rsGroupName != null) { %> + + <% } %> + + <% + } + %> + + + + + +
ServerNameStop timeRSGroup
<%= deadServerName %><%= deadServerUtil.getTimeOfDeath(deadServerName) %><%= rsGroupName %>
Total: servers: <%= deadServers.size() %>
+<% } %> diff --git a/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp b/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp index a6c6c2d17e66..210438ba7fed 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp @@ -383,7 +383,7 @@ private static String formatServerName(HMaster master, int infoPort = master.getRegionServerInfoPort(serverName); if (infoPort > 0) { return "" + sn + ""; + infoPort + "/regionserver.jsp>" + sn + ""; } else { return "" + sn + ""; } diff --git a/hbase-server/src/main/resources/hbase-webapps/master/header.jsp b/hbase-server/src/main/resources/hbase-webapps/master/header.jsp index f2656612cf86..dba1b611fc24 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/header.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/header.jsp @@ -43,13 +43,13 @@
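The hbck.jsp and header.jsp hunks above, like the slimmed-down RSStatusServlet earlier in the patch, point region server links at /regionserver.jsp instead of the old templated rs-status page. A hedged sketch of a redirect test (Mockito-style; the test class is not part of the patch and HBase's usual test category plumbing is omitted):

```java
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.hbase.regionserver.http.RSStatusServlet;
import org.junit.Test;

public class TestRSStatusServletRedirect {

  @Test
  public void redirectsToRegionServerJsp() throws Exception {
    HttpServletRequest request = mock(HttpServletRequest.class);
    HttpServletResponse response = mock(HttpServletResponse.class);
    when(request.getContextPath()).thenReturn("");

    new RSStatusServlet().doGet(request, response);

    // The servlet no longer renders RSStatusTmpl; it only redirects.
    verify(response).sendRedirect("/regionserver.jsp");
  }
}
```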