Skip to content

Commit efad65a

Browse files
committed
Merge branch 'master' into HDDS-13224
2 parents c721c6d + 2ff58b3 commit efad65a

File tree

12 files changed

+161
-56
lines changed

12 files changed

+161
-56
lines changed

hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneSecurityUtil.java

Lines changed: 38 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424

2525
import java.io.File;
2626
import java.io.IOException;
27+
import java.net.Inet6Address;
2728
import java.net.InetAddress;
2829
import java.net.NetworkInterface;
2930
import java.nio.file.Path;
@@ -108,8 +109,8 @@ public static List<InetAddress> getValidInetsForCurrentHost()
108109
InetAddress addr = enumAdds.nextElement();
109110

110111
String hostAddress = addr.getHostAddress();
111-
if (!INVALID_IPS.contains(hostAddress)
112-
&& ipValidator.isValid(hostAddress)) {
112+
if (!INVALID_IPS.contains(hostAddress) && ipValidator.isValid(hostAddress)
113+
&& !isScopedOrMaskingIPv6Address(addr)) {
113114
LOG.info("Adding ip:{},host:{}", hostAddress, addr.getHostName());
114115
hostIps.add(addr);
115116
} else {
@@ -122,6 +123,41 @@ public static List<InetAddress> getValidInetsForCurrentHost()
122123
return hostIps;
123124
}
124125

126+
/**
127+
* Determines if the supplied address is an IPv6 address, with a defined scope-id and/or with a defined prefix length.
128+
* <p>
129+
* This method became necessary after Commons Validator was upgraded from 1.6 version to 1.10. In 1.10 version the
130+
* IPv6 addresses with a scope-id and/or with a prefix specifier became valid IPv6 addresses, but as these features
131+
* are changing the string representation so that it no longer represents only the 16 octets that specify the address, the
132+
* string representation cannot be used as-is as a SAN extension in X.509 anymore, since per RFC-5280 this type of
133+
* Subject Alternative Name is exactly 4 octets in case of an IPv4 address, and 16 octets in case of an IPv6 address.
134+
* BouncyCastle does not have support to deal with these in an IPAddress typed GeneralName, so we need to keep the
135+
* previous behaviour, and skip IPv6 addresses with a prefix length and/or a scope-id.
136+
* <p>
137+
* According to RFC-4007 and the InetAddress contract the scope-id is at the end of the address' string
138+
* representation, separated by a '%' character from the address.
139+
* According to RFC-4632 there is a possibility to specify a prefix length at the end of the address to specify
140+
* routing related information. RFC-4007 specifies the prefix length to come after the scope-id.
141+
* <p>
142+
*
143+
* @param addr the InetAddress to check
144+
* @return if the InetAddress is an IPv6 address and if so it contains a scope-id and/or a prefix length.
145+
* @see <a href="https://datatracker.ietf.org/doc/html/rfc4007">RFC-4007 - Scoped IPv6 Addresses</a>
146+
* @see <a href="https://datatracker.ietf.org/doc/html/rfc4632#section-5.1">RFC-4632 - CIDR addressing strategy -
147+
* prefix length</a>
148+
* @see <a href="https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.6">RFC-5280 - SAN description</a>
149+
* @see <a href="https://issues.apache.org/jira/browse/VALIDATOR-445">VALIDATOR-445 - Commons Validator change</a>
150+
* @see <a href="https://github.com/bcgit/bc-java/issues/2024">BouncyCastle issue discussion about scoped IPv6
151+
* addresses</a>
152+
*/
153+
public static boolean isScopedOrMaskingIPv6Address(InetAddress addr) {
154+
if (addr instanceof Inet6Address) {
155+
String hostAddress = addr.getHostAddress();
156+
return hostAddress.contains("/") || hostAddress.contains("%");
157+
}
158+
return false;
159+
}
160+
125161
/**
126162
* Convert list of string encoded certificates to list of X509Certificate.
127163
* @param pemEncodedCerts

hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolumeScannerMetrics.java renamed to hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/BackgroundVolumeScannerMetrics.java

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -26,12 +26,12 @@
2626
import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
2727

2828
/**
29-
* This class captures the Storage Volume Scanner Metrics.
29+
* This class captures the Background Storage Volume Scanner Metrics.
3030
**/
3131
@InterfaceAudience.Private
32-
@Metrics(about = "Storage Volume Scanner Metrics", context = "dfs")
33-
public class StorageVolumeScannerMetrics {
34-
public static final String SOURCE_NAME = StorageVolumeScannerMetrics.class.getSimpleName();
32+
@Metrics(about = "Background Volume Scanner Metrics", context = "dfs")
33+
public class BackgroundVolumeScannerMetrics {
34+
public static final String SOURCE_NAME = BackgroundVolumeScannerMetrics.class.getSimpleName();
3535

3636
@Metric("number of volumes scanned in the last iteration")
3737
private MutableGaugeLong numVolumesScannedInLastIteration;
@@ -49,12 +49,12 @@ public class StorageVolumeScannerMetrics {
4949
"since the last iteration had not elapsed")
5050
private MutableCounterLong numIterationsSkipped;
5151

52-
public StorageVolumeScannerMetrics() {
52+
public BackgroundVolumeScannerMetrics() {
5353
}
5454

55-
public static StorageVolumeScannerMetrics create() {
55+
public static BackgroundVolumeScannerMetrics create() {
5656
MetricsSystem ms = DefaultMetricsSystem.instance();
57-
return ms.register(SOURCE_NAME, "Storage Volume Scanner Metrics", new StorageVolumeScannerMetrics());
57+
return ms.register(SOURCE_NAME, "Background Volume Scanner Metrics", new BackgroundVolumeScannerMetrics());
5858
}
5959

6060
/**

hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolumeChecker.java

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,7 @@ public class StorageVolumeChecker {
6565

6666
private AsyncChecker<Boolean, VolumeCheckResult> delegateChecker;
6767

68-
private final StorageVolumeScannerMetrics metrics;
68+
private final BackgroundVolumeScannerMetrics metrics;
6969

7070
/**
7171
* Max allowed time for a disk check in milliseconds. If the check
@@ -105,7 +105,7 @@ public class StorageVolumeChecker {
105105
public StorageVolumeChecker(ConfigurationSource conf, Timer timer,
106106
String threadNamePrefix) {
107107

108-
metrics = StorageVolumeScannerMetrics.create();
108+
metrics = BackgroundVolumeScannerMetrics.create();
109109

110110
this.timer = timer;
111111

@@ -441,7 +441,7 @@ void setDelegateChecker(
441441
}
442442

443443
@VisibleForTesting
444-
public StorageVolumeScannerMetrics getMetrics() {
444+
public BackgroundVolumeScannerMetrics getMetrics() {
445445
return metrics;
446446
}
447447
}

hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -79,7 +79,7 @@ public void testPeriodicVolumeChecker(TestInfo testInfo) throws Exception {
7979
FakeTimer timer = new FakeTimer();
8080

8181
StorageVolumeChecker volumeChecker = new StorageVolumeChecker(conf, timer, "");
82-
StorageVolumeScannerMetrics metrics = volumeChecker.getMetrics();
82+
BackgroundVolumeScannerMetrics metrics = volumeChecker.getMetrics();
8383

8484
try {
8585
volumeChecker.registerVolumeSet(new ImmutableVolumeSet(makeVolumes(2, HEALTHY)));

hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java

Lines changed: 26 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -308,16 +308,13 @@ private void startBalancingThread(int nextIterationIndex,
308308
}
309309

310310
/**
311-
* Validates balancer's state based on the specified expectedRunning.
311+
* Validates balancer's eligibility based on SCM state.
312312
* Confirms SCM is leader-ready and out of safe mode.
313313
*
314-
* @param expectedRunning true if ContainerBalancer is expected to be
315-
* running, else false
316314
* @throws IllegalContainerBalancerStateException if SCM is not
317-
* leader-ready, is in safe mode, or state does not match the specified
318-
* expected state
315+
* leader-ready or is in safe mode
319316
*/
320-
private void validateState(boolean expectedRunning)
317+
private void validateEligibility()
321318
throws IllegalContainerBalancerStateException {
322319
if (!scmContext.isLeaderReady()) {
323320
LOG.warn("SCM is not leader ready");
@@ -328,6 +325,19 @@ private void validateState(boolean expectedRunning)
328325
LOG.warn("SCM is in safe mode");
329326
throw new IllegalContainerBalancerStateException("SCM is in safe mode");
330327
}
328+
}
329+
330+
/**
331+
* Validates balancer's state based on the specified expectedRunning.
332+
*
333+
* @param expectedRunning true if ContainerBalancer is expected to be
334+
* running, else false
335+
* @throws IllegalContainerBalancerStateException if state does not
336+
* match the specified expected state
337+
*/
338+
private void validateState(boolean expectedRunning)
339+
throws IllegalContainerBalancerStateException {
340+
validateEligibility();
331341
if (!expectedRunning && !canBalancerStart()) {
332342
throw new IllegalContainerBalancerStateException(
333343
"Expect ContainerBalancer as not running state" +
@@ -387,18 +397,22 @@ private static void blockTillTaskStop(Thread balancingThread) {
387397
*/
388398
public void stopBalancer()
389399
throws IOException, IllegalContainerBalancerStateException {
390-
Thread balancingThread;
400+
Thread balancingThread = null;
391401
lock.lock();
392402
try {
393-
validateState(true);
403+
validateEligibility();
394404
saveConfiguration(config, false, 0);
395-
LOG.info("Trying to stop ContainerBalancer service.");
396-
task.stop();
397-
balancingThread = currentBalancingThread;
405+
if (isBalancerRunning()) {
406+
LOG.info("Trying to stop ContainerBalancer service.");
407+
task.stop();
408+
balancingThread = currentBalancingThread;
409+
}
398410
} finally {
399411
lock.unlock();
400412
}
401-
blockTillTaskStop(balancingThread);
413+
if (balancingThread != null) {
414+
blockTillTaskStop(balancingThread);
415+
}
402416
}
403417

404418
public void saveConfiguration(ContainerBalancerConfiguration configuration,

hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL;
2121
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT;
2222
import static org.assertj.core.api.Assertions.assertThat;
23+
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
2324
import static org.junit.jupiter.api.Assertions.assertEquals;
2425
import static org.junit.jupiter.api.Assertions.assertFalse;
2526
import static org.junit.jupiter.api.Assertions.assertSame;
@@ -128,6 +129,8 @@ public void testShouldRun() throws Exception {
128129

129130
@Test
130131
public void testStartBalancerStop() throws Exception {
132+
//stop should not throw an exception as it is idempotent
133+
assertDoesNotThrow(() -> containerBalancer.stopBalancer());
131134
startBalancer(balancerConfiguration);
132135
assertThrows(IllegalContainerBalancerStateException.class,
133136
() -> containerBalancer.startBalancer(balancerConfiguration),
@@ -142,9 +145,9 @@ public void testStartBalancerStop() throws Exception {
142145
stopBalancer();
143146
assertSame(ContainerBalancerTask.Status.STOPPED, containerBalancer.getBalancerStatus());
144147

145-
assertThrows(Exception.class,
146-
() -> containerBalancer.stopBalancer(),
147-
"Exception should be thrown when stop again");
148+
// If the balancer is already stopped, the stop command should do nothing
149+
// and return successfully as stopBalancer is idempotent
150+
assertDoesNotThrow(() -> containerBalancer.stopBalancer());
148151
}
149152

150153
@Test

hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -26,8 +26,6 @@
2626
import io.opentelemetry.api.trace.StatusCode;
2727
import java.io.IOException;
2828
import java.io.InputStream;
29-
import java.time.Duration;
30-
import java.time.Instant;
3129
import java.util.LinkedList;
3230
import java.util.List;
3331
import java.util.concurrent.ExecutorService;
@@ -327,8 +325,7 @@ public void init() {
327325
LongSupplier supplier;
328326
if (duration != null) {
329327
maxValue = durationInSecond;
330-
supplier = () -> Duration.between(
331-
Instant.ofEpochMilli(startTime), Instant.now()).getSeconds();
328+
supplier = () -> (Time.monotonicNow() - startTime) / 1000;
332329
} else {
333330
maxValue = testNo;
334331
supplier = () -> successCounter.get() + failureCounter.get();

hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java

Lines changed: 31 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -97,6 +97,7 @@
9797
import java.util.Map;
9898
import java.util.Set;
9999
import java.util.stream.Collectors;
100+
import java.util.stream.IntStream;
100101
import javax.xml.bind.DatatypeConverter;
101102
import org.apache.commons.io.IOUtils;
102103
import org.apache.commons.lang3.RandomStringUtils;
@@ -128,6 +129,8 @@
128129
import org.junit.jupiter.api.TestInstance;
129130
import org.junit.jupiter.api.TestMethodOrder;
130131
import org.junit.jupiter.api.io.TempDir;
132+
import org.junit.jupiter.params.ParameterizedTest;
133+
import org.junit.jupiter.params.provider.ValueSource;
131134

132135
/**
133136
* This is an abstract class to test the AWS Java S3 SDK operations.
@@ -141,6 +144,9 @@
141144
@TestMethodOrder(MethodOrderer.MethodName.class)
142145
public abstract class AbstractS3SDKV1Tests extends OzoneTestBase {
143146

147+
// server-side limitation
148+
private static final int MAX_UPLOADS_LIMIT = 1000;
149+
144150
/**
145151
* There are still some unsupported S3 operations.
146152
* Current unsupported S3 operations (non-exhaustive):
@@ -758,6 +764,7 @@ public void testListMultipartUploads() {
758764
uploadIds.add(uploadId3);
759765

760766
ListMultipartUploadsRequest listMultipartUploadsRequest = new ListMultipartUploadsRequest(bucketName);
767+
listMultipartUploadsRequest.setMaxUploads(5000);
761768

762769
MultipartUploadListing result = s3Client.listMultipartUploads(listMultipartUploadsRequest);
763770

@@ -768,26 +775,31 @@ public void testListMultipartUploads() {
768775
assertEquals(uploadIds, listUploadIds);
769776
}
770777

771-
@Test
772-
public void testListMultipartUploadsPagination() {
773-
final String bucketName = getBucketName();
778+
@ParameterizedTest
779+
@ValueSource(ints = {10, 5000})
780+
public void testListMultipartUploadsPagination(int requestedMaxUploads) {
781+
final String bucketName = getBucketName() + "-" + requestedMaxUploads;
774782
final String multipartKeyPrefix = getKeyName("multipart");
775783

776784
s3Client.createBucket(bucketName);
777785

778-
// Create 25 multipart uploads to test pagination
786+
// Create multipart uploads to test pagination
779787
List<String> allKeys = new ArrayList<>();
780788
Map<String, String> keyToUploadId = new HashMap<>();
781789

782-
for (int i = 0; i < 25; i++) {
783-
String key = String.format("%s-%03d", multipartKeyPrefix, i);
790+
final int effectiveMaxUploads = Math.min(requestedMaxUploads, MAX_UPLOADS_LIMIT);
791+
final int uploadsCreated = 2 * effectiveMaxUploads + 5;
792+
final int expectedPages = uploadsCreated / effectiveMaxUploads + 1;
793+
794+
for (int i = 0; i < uploadsCreated; i++) {
795+
String key = String.format("%s-%04d", multipartKeyPrefix, i);
784796
allKeys.add(key);
785797
String uploadId = initiateMultipartUpload(bucketName, key, null, null, null);
786798
keyToUploadId.put(key, uploadId);
787799
}
788800
Collections.sort(allKeys);
789801

790-
// Test pagination with maxUploads=10
802+
// Test pagination
791803
Set<String> retrievedKeys = new HashSet<>();
792804
String keyMarker = null;
793805
String uploadIdMarker = null;
@@ -796,18 +808,19 @@ public void testListMultipartUploadsPagination() {
796808

797809
do {
798810
ListMultipartUploadsRequest request = new ListMultipartUploadsRequest(bucketName)
799-
.withMaxUploads(10)
811+
.withMaxUploads(requestedMaxUploads)
800812
.withKeyMarker(keyMarker)
801813
.withUploadIdMarker(uploadIdMarker);
802814

803815
MultipartUploadListing result = s3Client.listMultipartUploads(request);
816+
pageCount++;
804817

805818
// Verify page size
806-
if (pageCount < 2) {
807-
assertEquals(10, result.getMultipartUploads().size());
819+
if (pageCount < expectedPages) {
820+
assertEquals(effectiveMaxUploads, result.getMultipartUploads().size());
808821
assertTrue(result.isTruncated());
809822
} else {
810-
assertEquals(5, result.getMultipartUploads().size());
823+
assertEquals(uploadsCreated % effectiveMaxUploads, result.getMultipartUploads().size());
811824
assertFalse(result.isTruncated());
812825
}
813826

@@ -822,7 +835,7 @@ public void testListMultipartUploadsPagination() {
822835
assertNull(result.getPrefix());
823836
assertEquals(result.getUploadIdMarker(), uploadIdMarker);
824837
assertEquals(result.getKeyMarker(), keyMarker);
825-
assertEquals(result.getMaxUploads(), 10);
838+
assertEquals(effectiveMaxUploads, result.getMaxUploads());
826839

827840
// Verify next markers content
828841
if (result.isTruncated()) {
@@ -840,32 +853,29 @@ public void testListMultipartUploadsPagination() {
840853
uploadIdMarker = result.getNextUploadIdMarker();
841854

842855
truncated = result.isTruncated();
843-
pageCount++;
844856

845857
} while (truncated);
846858

847859
// Verify pagination results
848-
assertEquals(3, pageCount, "Should have exactly 3 pages");
849-
assertEquals(25, retrievedKeys.size(), "Should retrieve all uploads");
860+
assertEquals(expectedPages, pageCount);
861+
assertEquals(uploadsCreated, retrievedKeys.size(), "Should retrieve all uploads");
850862
assertEquals(
851863
allKeys,
852864
retrievedKeys.stream().sorted().collect(Collectors.toList()),
853865
"Retrieved keys should match expected keys in order");
854866

855867
// Test with prefix
856-
String prefix = multipartKeyPrefix + "-01";
868+
String prefix = multipartKeyPrefix + "-001";
857869
ListMultipartUploadsRequest prefixRequest = new ListMultipartUploadsRequest(bucketName)
858870
.withPrefix(prefix);
859871

860872
MultipartUploadListing prefixResult = s3Client.listMultipartUploads(prefixRequest);
861873

862874
assertEquals(prefix, prefixResult.getPrefix());
863875
assertEquals(
864-
Arrays.asList(multipartKeyPrefix + "-010", multipartKeyPrefix + "-011",
865-
multipartKeyPrefix + "-012", multipartKeyPrefix + "-013",
866-
multipartKeyPrefix + "-014", multipartKeyPrefix + "-015",
867-
multipartKeyPrefix + "-016", multipartKeyPrefix + "-017",
868-
multipartKeyPrefix + "-018", multipartKeyPrefix + "-019"),
876+
IntStream.rangeClosed(0, 9)
877+
.mapToObj(i -> prefix + i)
878+
.collect(Collectors.toList()),
869879
prefixResult.getMultipartUploads().stream()
870880
.map(MultipartUpload::getKey)
871881
.collect(Collectors.toList()));

0 commit comments

Comments
 (0)