diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java
index b2e29ba7217c..197eacc03b62 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java
@@ -37,7 +37,7 @@
 import org.apache.hadoop.hbase.shaded.protobuf.generated.FSProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
 public final class ClusterMetricsBuilder {
 
   public static ClusterStatusProtos.ClusterStatus toClusterStatus(ClusterMetrics metrics) {
@@ -105,7 +105,7 @@ public static ClusterMetrics toClusterMetrics(ClusterStatusProtos.ClusterStatus
         .collect(Collectors.toList()))
       .setUnknownServerNames(proto.getUnknownServersList().stream().map(ProtobufUtil::toServerName)
         .collect(Collectors.toList()))
-      .setBackerMasterNames(proto.getBackupMastersList().stream().map(ProtobufUtil::toServerName)
+      .setBackupMasterNames(proto.getBackupMastersList().stream().map(ProtobufUtil::toServerName)
         .collect(Collectors.toList()))
       .setRegionsInTransition(proto.getRegionsInTransitionList().stream()
         .map(ClusterStatusProtos.RegionInTransition::getRegionState).map(RegionState::convert)
@@ -256,7 +256,7 @@ public static ClusterMetricsBuilder newBuilder() {
   private String hbaseVersion;
   private List<ServerName> deadServerNames = Collections.emptyList();
   private List<ServerName> unknownServerNames = Collections.emptyList();
-  private Map<ServerName, ServerMetrics> liveServerMetrics = new TreeMap<>();
+  private Map<ServerName, ServerMetrics> liveServerMetrics = Collections.emptyMap();
   @Nullable
   private ServerName masterName;
   private List<ServerName> backupMasterNames = Collections.emptyList();
@@ -292,7 +292,7 @@ public ClusterMetricsBuilder setUnknownServerNames(List<ServerName> value) {
   }
 
   public ClusterMetricsBuilder setLiveServerMetrics(Map<ServerName, ServerMetrics> value) {
-    liveServerMetrics.putAll(value);
+    this.liveServerMetrics = new TreeMap<>(value);
     return this;
   }
 
@@ -301,7 +301,7 @@ public ClusterMetricsBuilder setMasterName(ServerName value) {
     return this;
   }
 
-  public ClusterMetricsBuilder setBackerMasterNames(List<ServerName> value) {
+  public ClusterMetricsBuilder setBackupMasterNames(List<ServerName> value) {
     this.backupMasterNames = value;
     return this;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java
index d6d277808838..3f22f0f04e20 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.conf.ConfigurationObserver;
 import org.apache.hadoop.hbase.coprocessor.ClientMetaCoprocessorHost;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.io.ByteBuffAllocator;
 import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
 import org.apache.hadoop.hbase.ipc.PriorityFunction;
@@ -53,6 +54,7 @@
 import org.apache.hadoop.hbase.security.access.NoopAccessChecker;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher;
+import org.apache.hadoop.hbase.util.CoprocessorConfigurationUtil;
 import org.apache.hadoop.hbase.util.DNS;
 import org.apache.hadoop.hbase.util.OOMEChecker;
 import org.apache.hadoop.hbase.util.ReservoirSample;
@@ -389,7 +391,13 @@ public UpdateConfigurationResponse updateConfiguration(RpcController controller,
       requirePermission("updateConfiguration", Permission.Action.ADMIN);
       this.server.updateConfiguration();
-      clientMetaCoprocessorHost = new ClientMetaCoprocessorHost(getConfiguration());
+      if (
+        CoprocessorConfigurationUtil.checkConfigurationChange(clientMetaCoprocessorHost,
+          getConfiguration(), CoprocessorHost.CLIENT_META_COPROCESSOR_CONF_KEY)
+      ) {
+        LOG.info("Updating client meta coprocessors, because the configuration has changed.");
+        clientMetaCoprocessorHost = new ClientMetaCoprocessorHost(getConfiguration());
+      }
     } catch (Exception e) {
       throw new ServiceException(e);
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index d0e451508b43..60e484603dc5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -23,6 +23,7 @@
 import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterMetrics;
+import org.apache.hadoop.hbase.ClusterMetricsBuilder;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.MetaMutationAnnotation;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
@@ -1672,7 +1673,15 @@ default void preGetClusterMetrics(ObserverContext<MasterCoprocessorEnvironment>
    * Called after get cluster status.
    */
   default void postGetClusterMetrics(ObserverContext<MasterCoprocessorEnvironment> ctx,
-    ClusterMetrics status) throws IOException {
+    ClusterMetrics metrics) throws IOException {
+  }
+
+  /**
+   * Called after get cluster status.
+   */
+  default void postGetClusterMetrics(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    ClusterMetricsBuilder metricsBuilder) throws IOException {
+    postGetClusterMetrics(ctx, metricsBuilder.build());
   }
 
   /**
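The new default method above delegates to the existing ClusterMetrics hook, so current observers stay source- and behavior-compatible; an observer that wants to rewrite the result overrides the builder variant instead. A minimal sketch of such an observer — the class name and the masked version string are invented, only the hook signature comes from this patch:

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.ClusterMetricsBuilder;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

// Hypothetical downstream observer; not part of this patch.
public class VersionMaskingObserver implements MasterCoprocessor, MasterObserver {

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void postGetClusterMetrics(ObserverContext<MasterCoprocessorEnvironment> ctx,
    ClusterMetricsBuilder metricsBuilder) throws IOException {
    // Mutating the builder changes what getClusterMetrics() returns to clients;
    // the older ClusterMetrics overload could only observe the finished value.
    metricsBuilder.setHBaseVersion("masked");
  }
}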
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/kubernetes/ExternalKubernetesCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/kubernetes/ExternalKubernetesCoprocessor.java
new file mode 100644
index 000000000000..ae0136362c5f
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/kubernetes/ExternalKubernetesCoprocessor.java
@@ -0,0 +1,411 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.kubernetes;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.TreeMap;
+import java.util.stream.Collectors;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ClusterMetrics;
+import org.apache.hadoop.hbase.ClusterMetricsBuilder;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.ServerMetrics;
+import org.apache.hadoop.hbase.ServerMetricsBuilder;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.coprocessor.ClientMetaCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.ClientMetaCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.ClientMetaObserver;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.MasterObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.ObserverRpcCallContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.RegionObserver;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.net.Address;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.net.HostAndPort;
+
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
+@InterfaceStability.Evolving
+public class ExternalKubernetesCoprocessor implements ClientMetaCoprocessor, MasterCoprocessor,
+  RegionCoprocessor, ClientMetaObserver, MasterObserver, RegionObserver {
+  private static final Logger LOG = LoggerFactory.getLogger(ExternalKubernetesCoprocessor.class);
+
+  private static final String KUBERNETES_HEADER = HConstants.CLIENT_HEADER_PREFIX + "kubernetes";
+
+  private volatile ExternalMapping mapping;
+
+  public ExternalKubernetesCoprocessor() {
+  }
+
+  @Override
+  public Optional<ClientMetaObserver> getClientMetaObserver() {
+    return Optional.of(this);
+  }
+
+  @Override
+  public Optional<MasterObserver> getMasterObserver() {
+    return Optional.of(this);
+  }
+
+  @Override
+  public Optional<RegionObserver> getRegionObserver() {
+    return Optional.of(this);
+  }
+
+  @Override
+  public void start(CoprocessorEnvironment env) throws IOException {
+    mapping = new ExternalMapping(env.getConfiguration());
+  }
+
+  @Override
+  public void stop(CoprocessorEnvironment env) throws IOException {
+    ExternalMapping current = mapping;
+
+    if (current != null) {
+      current.close();
+    }
+  }
+
+  private static boolean isInternalClient(ObserverContext<?> ctx) {
+    Optional<ObserverRpcCallContext> rpcCallContext = ctx.getRpcCallContext();
+
+    if (rpcCallContext.isEmpty()) {
+      return true;
+    }
+
+    byte[] value = rpcCallContext.get().getAttributes().get(KUBERNETES_HEADER);
+
+    return !Objects.equals(Bytes.toString(value), "true");
+  }
+
+  private String map(String hostname) {
+    ExternalMapping current = mapping;
+
+    if (current == null) {
+      LOG.warn("External hostname mapping is not configured.");
+      return hostname;
+    }
+
+    String newHostname = current.get(hostname);
+
+    if (newHostname == null) {
+      LOG.warn("Missing external hostname mapping for '{}'.", hostname);
+      return hostname;
+    }
+
+    return newHostname;
+  }
+
+  private Address transformAddress(Address address) {
+    String newHostname = map(address.getHostName());
+    HostAndPort hostAndPort = HostAndPort.fromString(newHostname);
+
+    if (hostAndPort.hasPort()) {
+      return Address.fromParts(hostAndPort.getHost(), hostAndPort.getPort());
+    } else {
+      return Address.fromParts(hostAndPort.getHost(), address.getPort());
+    }
+  }
+
+  private ServerName transformServerName(ServerName serverName) {
+    Address newAddress = transformAddress(serverName.getAddress());
+    return ServerName.valueOf(newAddress, serverName.getStartCode());
+  }
+
+  private List<ServerName> transformServerNames(List<ServerName> serverNames) {
+    if (serverNames == null) {
+      return null;
+    }
+
+    List<ServerName> newServerNames = new ArrayList<>(serverNames.size());
+
+    for (ServerName serverName : serverNames) {
+      ServerName newServerName = transformServerName(serverName);
+      newServerNames.add(newServerName);
+    }
+
+    return newServerNames;
+  }
+
+  private HRegionLocation transformRegionLocation(HRegionLocation regionLocation) {
+    ServerName newServerName = transformServerName(regionLocation.getServerName());
+    return new HRegionLocation(regionLocation.getRegion(), newServerName,
+      regionLocation.getSeqNum());
+  }
+
+  @Override
+  public ServerName postGetActiveMaster(ObserverContext<ClientMetaCoprocessorEnvironment> ctx,
+    ServerName serverName) throws IOException {
+    if (isInternalClient(ctx)) {
+      return serverName;
+    }
+
+    return transformServerName(serverName);
+  }
+
+  @Override
+  public Map<ServerName, Boolean> postGetMasters(
+    ObserverContext<ClientMetaCoprocessorEnvironment> ctx, Map<ServerName, Boolean> serverNames)
+    throws IOException {
+    if (isInternalClient(ctx)) {
+      return serverNames;
+    }
+
+    Map<ServerName, Boolean> newServerNames = new LinkedHashMap<>(serverNames.size());
+
+    serverNames
+      .forEach((serverName, active) -> newServerNames.put(transformServerName(serverName), active));
+
+    return newServerNames;
+  }
+
+  @Override
+  public List<HRegionLocation> postGetMetaLocations(
+    ObserverContext<ClientMetaCoprocessorEnvironment> ctx, List<HRegionLocation> metaLocations)
+    throws IOException {
+    if (isInternalClient(ctx)) {
+      return metaLocations;
+    }
+
+    return metaLocations.stream().map(this::transformRegionLocation).collect(Collectors.toList());
+  }
+
+  @Override
+  public List<ServerName> postGetBootstrapNodes(
+    ObserverContext<ClientMetaCoprocessorEnvironment> ctx, List<ServerName> bootstrapNodes)
+    throws IOException {
+    if (isInternalClient(ctx)) {
+      return bootstrapNodes;
+    }
+
+    return bootstrapNodes.stream().map(this::transformServerName).collect(Collectors.toList());
+  }
+
+  @Override
+  public void postGetClusterMetrics(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    ClusterMetricsBuilder metricsBuilder) throws IOException {
+    if (isInternalClient(ctx)) {
+      return;
+    }
+
+    ClusterMetrics metrics = metricsBuilder.build();
+
+    ServerName masterName = metrics.getMasterName();
+
+    if (masterName != null) {
+      metricsBuilder.setMasterName(transformServerName(masterName));
+    }
+
+    List<ServerName> newBackupMasterNames = transformServerNames(metrics.getBackupMasterNames());
+    metricsBuilder.setBackupMasterNames(newBackupMasterNames);
+
+    List<ServerName> newServersName = transformServerNames(metrics.getServersName());
+    metricsBuilder.setServerNames(newServersName);
+
+    Map<ServerName, ServerMetrics> liveServerMetrics = metrics.getLiveServerMetrics();
+    Map<ServerName, ServerMetrics> newLiveServerMetrics = new TreeMap<>();
+
+    for (ServerMetrics liveServerMetric : liveServerMetrics.values()) {
+      ServerName newServerName = transformServerName(liveServerMetric.getServerName());
+      ServerMetrics newLiveServerMetric =
+        ServerMetricsBuilder.newBuilder(newServerName).setVersion(liveServerMetric.getVersion())
+          .setVersionNumber(liveServerMetric.getVersionNumber())
+          .setRequestCountPerSecond(liveServerMetric.getRequestCountPerSecond())
+          .setReadRequestCount(liveServerMetric.getReadRequestsCount())
+          .setWriteRequestCount(liveServerMetric.getWriteRequestsCount())
+          .setUsedHeapSize(liveServerMetric.getUsedHeapSize())
+          .setMaxHeapSize(liveServerMetric.getMaxHeapSize())
+          .setInfoServerPort(liveServerMetric.getInfoServerPort())
+          .setReplicationLoadSources(liveServerMetric.getReplicationLoadSourceList())
+          .setReplicationLoadSink(liveServerMetric.getReplicationLoadSink())
+          .setRegionMetrics(new ArrayList<>(liveServerMetric.getRegionMetrics().values()))
+          .setUserMetrics(new ArrayList<>(liveServerMetric.getUserMetrics().values()))
+          .setCoprocessorNames(new ArrayList<>(liveServerMetric.getCoprocessorNames()))
+          .setReportTimestamp(liveServerMetric.getReportTimestamp())
+          .setLastReportTimestamp(liveServerMetric.getLastReportTimestamp())
+          .setTasks(liveServerMetric.getTasks())
+          .setRegionCachedInfo(liveServerMetric.getRegionCachedInfo()).build();
+
+      newLiveServerMetrics.put(newServerName, newLiveServerMetric);
+    }
+
+    metricsBuilder.setLiveServerMetrics(newLiveServerMetrics);
+
+    List<ServerName> newDeadServerNames = transformServerNames(metrics.getDeadServerNames());
+    metricsBuilder.setDeadServerNames(newDeadServerNames);
+
+    List<ServerName> newDecommissionedServerNames =
+      transformServerNames(metrics.getDecommissionedServerNames());
+    metricsBuilder.setDecommissionedServerNames(newDecommissionedServerNames);
+
+    List<ServerName> newUnknownServerNames = transformServerNames(metrics.getUnknownServerNames());
+    metricsBuilder.setUnknownServerNames(newUnknownServerNames);
+
+    List<RegionState> regionStatesInTransition = metrics.getRegionStatesInTransition();
+    List<RegionState> newRegionStatesInTransition =
+      new ArrayList<>(regionStatesInTransition.size());
+
+    for (RegionState regionStateInTransition : regionStatesInTransition) {
+      ServerName serverName = regionStateInTransition.getServerName();
+      ServerName newServerName = transformServerName(serverName);
+
+      RegionState newRegionState = new RegionState(regionStateInTransition.getRegion(),
+        regionStateInTransition.getState(), regionStateInTransition.getStamp(), newServerName,
+        regionStateInTransition.getRitDuration());
+      newRegionStatesInTransition.add(newRegionState);
+    }
+
+    metricsBuilder.setRegionsInTransition(newRegionStatesInTransition);
+  }
+
+  private static boolean isInfoFamily(Cell cell) {
+    return Bytes.compareTo(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
+      HConstants.CATALOG_FAMILY, 0, HConstants.CATALOG_FAMILY.length) == 0;
+  }
+
+  private static boolean isServerQualifier(Cell cell) {
+    return Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(),
+      cell.getQualifierLength(), HConstants.SERVER_QUALIFIER, 0,
+      HConstants.SERVER_QUALIFIER.length) == 0;
+  }
+
+  private static boolean isSnQualifier(Cell cell) {
+    return Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(),
+      cell.getQualifierLength(), HConstants.SERVERNAME_QUALIFIER, 0,
+      HConstants.SERVERNAME_QUALIFIER.length) == 0;
+  }
+
+  private static byte[] copyRow(Cell cell) {
+    return Bytes.copy(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
+  }
+
+  private Cell transformServerCell(Cell cell) {
+    byte[] row = copyRow(cell);
+
+    String value =
+      Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
+    Address address = Address.fromString(value);
+
+    Address newAddress = transformAddress(address);
+    byte[] newValue = Bytes.toBytes(newAddress.toString());
+
+    return new KeyValue(row, HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
+      cell.getTimestamp(), newValue);
+  }
+
+  private Cell transformSnCell(Cell cell) {
+    byte[] row = copyRow(cell);
+
+    String value =
+      Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
+    ServerName serverName = ServerName.valueOf(value);
+
+    ServerName newServerName = transformServerName(serverName);
+    byte[] newValue = Bytes.toBytes(newServerName.toString());
+
+    return new KeyValue(row, HConstants.CATALOG_FAMILY, HConstants.SERVERNAME_QUALIFIER,
+      cell.getTimestamp(), newValue);
+  }
+
+  private Cell transformCell(Cell cell) {
+    if (!isInfoFamily(cell) || cell.getType() != Cell.Type.Put) {
+      return cell;
+    }
+
+    if (isServerQualifier(cell)) {
+      return transformServerCell(cell);
+    } else if (isSnQualifier(cell)) {
+      return transformSnCell(cell);
+    } else {
+      return cell;
+    }
+  }
+
+  @Override
+  public void postGetOp(ObserverContext<RegionCoprocessorEnvironment> ctx, Get get,
+    List<Cell> result) throws IOException {
+    if (isInternalClient(ctx)) {
+      return;
+    }
+
+    TableName tableName = ctx.getEnvironment().getRegionInfo().getTable();
+
+    if (!TableName.isMetaTableName(tableName)) {
+      return;
+    }
+
+    ListIterator<Cell> iterator = result.listIterator();
+
+    while (iterator.hasNext()) {
+      Cell cell = iterator.next();
+      Cell replacedCell = transformCell(cell);
+
+      if (cell != replacedCell) {
+        iterator.set(replacedCell);
+      }
+    }
+  }
+
+  @Override
+  public boolean postScannerNext(ObserverContext<RegionCoprocessorEnvironment> ctx,
+    InternalScanner s, List<Result> result, int limit, boolean hasNext) throws IOException {
+    if (isInternalClient(ctx)) {
+      return hasNext;
+    }
+
+    TableName tableName = ctx.getEnvironment().getRegionInfo().getTable();
+
+    if (!TableName.isMetaTableName(tableName)) {
+      return hasNext;
+    }
+
+    for (Result r : result) {
+      Cell[] cells = r.rawCells();
+
+      for (int i = 0; i < cells.length; i++) {
+        cells[i] = transformCell(cells[i]);
+      }
+    }
+
+    return hasNext;
+  }
+}
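ExternalMapping, added next, backs the map(hostname) calls above: it loads a java.util.Properties file named by hbase.kubernetes.external.mapping and re-reads it when a WatchService reports changes to that file. A hedged usage sketch — the path, file contents, and host names are invented for illustration; the port suffix in the value is parsed later by transformAddress(), not by ExternalMapping itself:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.kubernetes.ExternalMapping;

public class ExternalMappingExample {
  public static void main(String[] args) throws IOException {
    // Hypothetical mapping file, a plain properties file with lines such as:
    //   rs-0.hbase.svc.cluster.local=rs-0.example.com:16020
    Configuration conf = new Configuration();
    conf.set("hbase.kubernetes.external.mapping", "/etc/hbase/external-mapping.properties");

    try (ExternalMapping mapping = new ExternalMapping(conf)) {
      // get() first polls the watch service, so edits to the file are picked
      // up between calls; a missing key returns null and the coprocessor then
      // falls back to the internal hostname.
      String external = mapping.get("rs-0.hbase.svc.cluster.local");
      System.out.println(external);
    }
  }
}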
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/kubernetes/ExternalMapping.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/kubernetes/ExternalMapping.java
new file mode 100644
index 000000000000..cf59d70f5578
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/kubernetes/ExternalMapping.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.kubernetes;
+
+import com.google.errorprone.annotations.RestrictedApi;
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.Reader;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardWatchEventKinds;
+import java.nio.file.WatchEvent;
+import java.nio.file.WatchKey;
+import java.nio.file.WatchService;
+import java.util.Properties;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@InterfaceAudience.Private
+public class ExternalMapping implements Closeable {
+  private static final Logger LOG = LoggerFactory.getLogger(ExternalMapping.class);
+
+  private static final String MAPPING_KEY = "hbase.kubernetes.external.mapping";
+
+  private final Path mappingPath;
+  private final WatchService watchService;
+  private final WatchKey watchKey;
+
+  private volatile Properties mapping;
+
+  public ExternalMapping(Configuration configuration) throws IOException {
+    String mappingFile = configuration.get(MAPPING_KEY);
+
+    if (mappingFile == null) {
+      throw new IOException(ExternalKubernetesCoprocessor.class.getSimpleName()
+        + " is in use, but missing '" + MAPPING_KEY + "' configuration property.");
+    }
+
+    mappingPath = Paths.get(mappingFile);
+    Path parent = mappingPath.getParent();
+
+    if (parent == null) {
+      throw new IOException("Mapping file '" + mappingPath + "' must be absolute.");
+    }
+
+    watchService = mappingPath.getFileSystem().newWatchService();
+
+    try {
+      watchKey = parent.register(watchService, StandardWatchEventKinds.ENTRY_CREATE,
+        StandardWatchEventKinds.ENTRY_DELETE, StandardWatchEventKinds.ENTRY_MODIFY);
+
+      read();
+    } catch (Throwable e) {
+      watchService.close();
+      throw e;
+    }
+  }
+
+  @RestrictedApi(explanation = "Should only be called in tests",
+      allowedOnPath = ".*/TestExternalMapping.java")
+  protected WatchService getWatchService() {
+    return watchService;
+  }
+
+  private void read() throws IOException {
+    LOG.debug("Reading external mapping file '{}'.", mappingPath);
+
+    Properties mapping = new Properties();
+
+    try (Reader reader = Files.newBufferedReader(mappingPath, StandardCharsets.UTF_8)) {
+      mapping.load(reader);
+    }
+
+    this.mapping = mapping;
+  }
+
+  private void processEvents() {
+    boolean changed = false;
+
+    for (WatchEvent<?> event : watchKey.pollEvents()) {
+      Path path = (Path) event.context();
+
+      if (path == null) {
+        continue;
+      }
+
+      if (mappingPath.getFileName().equals(path.getFileName())) {
+        changed = true;
+        break;
+      }
+    }
+
+    watchKey.reset();
+
+    if (!changed) {
+      return;
+    }
+
+    LOG.debug("External mapping file '{}' has changed.", mappingPath);
+
+    if (!Files.isRegularFile(mappingPath)) {
+      LOG.warn("Could not refresh external mapping file '{}', "
+        + "because it is not a regular file anymore. Using previous mapping.", mappingPath);
+      return;
+    }
+
+    try {
+      read();
+    } catch (IOException e) {
+      LOG.warn("Could not refresh external mapping file '{}'. Using previous mapping.", mappingPath,
+        e);
+    }
+  }
+
+  public String get(String key) {
+    processEvents();
+
+    if (mapping == null) {
+      return null;
+    }
+
+    return mapping.getProperty(key);
+  }
+
+  @Override
+  public void close() throws IOException {
+    watchService.close();
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index c997f1c6e822..278c782c6c47 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -3021,12 +3021,15 @@ public void reloadRegionServerQuotas() {
       .forEach(sn -> procedureExecutor.submitProcedure(new ReloadQuotasProcedure(sn)));
   }
 
-  public ClusterMetrics getClusterMetricsWithoutCoprocessor() throws InterruptedIOException {
+  public ClusterMetrics getClusterMetricsWithoutCoprocessor() {
     return getClusterMetricsWithoutCoprocessor(EnumSet.allOf(Option.class));
   }
 
-  public ClusterMetrics getClusterMetricsWithoutCoprocessor(EnumSet