Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -17,11 +17,15 @@

package org.apache.hadoop.ozone.container.diskbalancer;

import java.util.Collections;
import java.util.List;
import java.util.Objects;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DiskBalancerRunningStatus;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.VolumeReportProto;

/**
* DiskBalancer's information to persist.
* DiskBalancer's information to persist and for report.
* Report-only fields (idealUsage, volumeInfo) are NOT persisted to YAML.
*/
public class DiskBalancerInfo {
private DiskBalancerRunningStatus operationalState;
Expand All @@ -35,6 +39,10 @@ public class DiskBalancerInfo {
private long bytesToMove;
private long balancedBytes;
private double volumeDataDensity;
// Report-only: ideal usage from volume snapshot. NOT persisted.
private double idealUsage;
// Report-only: per-volume info. NOT persisted.
private List<VolumeReportProto> volumeInfo;

public DiskBalancerInfo(DiskBalancerRunningStatus operationalState, double threshold,
long bandwidthInMB, int parallelThread, boolean stopAfterDiskEven) {
Expand Down Expand Up @@ -208,6 +216,22 @@ public void setVolumeDataDensity(double volumeDataDensity) {
this.volumeDataDensity = volumeDataDensity;
}

/**
 * Returns the report-only ideal usage computed from the volume snapshot.
 * This value is not persisted to YAML.
 *
 * @return the ideal usage for reporting
 */
public double getIdealUsage() {
  return this.idealUsage;
}

/**
 * Sets the report-only ideal usage. Not persisted to YAML.
 *
 * @param value ideal usage derived from the volume snapshot
 */
public void setIdealUsage(double value) {
  this.idealUsage = value;
}

/**
 * Returns the report-only per-volume report entries. Never null: when no
 * volume info has been set, an immutable empty list is returned instead.
 *
 * @return per-volume report entries, possibly empty
 */
public List<VolumeReportProto> getVolumeInfo() {
  if (volumeInfo == null) {
    return Collections.emptyList();
  }
  return volumeInfo;
}

/**
 * Sets the report-only per-volume report entries. Not persisted to YAML.
 *
 * @param reports per-volume report entries; may be null
 */
public void setVolumeInfo(List<VolumeReportProto> reports) {
  this.volumeInfo = reports;
}

@Override
public boolean equals(Object o) {
if (this == o) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,8 @@ public DatanodeDiskBalancerInfoProto getDiskBalancerInfo(GetDiskBalancerInfoRequ
.setBytesToMove(info.getBytesToMove())
.setBytesMoved(info.getBalancedBytes())
.setRunningStatus(info.getOperationalState())
.setIdealUsage(info.getIdealUsage())
.addAllVolumeInfo(info.getVolumeInfo())
.build();
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,8 @@
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
Expand All @@ -47,6 +49,7 @@
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DiskBalancerRunningStatus;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.VolumeReportProto;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.hdds.server.ServerUtils;
Expand Down Expand Up @@ -685,9 +688,39 @@ public DiskBalancerInfo getDiskBalancerInfo() {
bytesToMove = calculateBytesToMove(volumeUsages);
}

return new DiskBalancerInfo(operationalState, threshold, bandwidthInMB,
DiskBalancerInfo info = new DiskBalancerInfo(operationalState, threshold, bandwidthInMB,
parallelThread, stopAfterDiskEven, version, metrics.getSuccessCount(),
metrics.getFailureCount(), bytesToMove, metrics.getSuccessBytes(), volumeDataDensity);
info.setIdealUsage(getIdealUsage(volumeUsages));
info.setVolumeInfo(buildVolumeReportProto(volumeUsages));
return info;
}

/**
 * Builds a list of {@link VolumeReportProto} from a list of VolumeFixedUsage.
 * Each entry carries the volume's storage ID, storage path, utilization and
 * committed bytes, for reporting per-volume status to clients.
 *
 * @param volumeSet snapshot of VolumeFixedUsage entries, one per volume; may
 *                  be null or empty
 * @return a list of VolumeReportProto to be sent to clients; empty when there
 *         are no volumes to report
 */
public static List<VolumeReportProto> buildVolumeReportProto(List<VolumeFixedUsage> volumeSet) {
  if (volumeSet == null || volumeSet.isEmpty()) {
    return Collections.emptyList();
  }

  // One report entry per volume; presize to avoid intermediate resizes.
  List<VolumeReportProto> result = new ArrayList<>(volumeSet.size());
  for (VolumeFixedUsage v : volumeSet) {
    HddsVolume volume = v.getVolume();
    // getStorageDir() can be null here; fall back to an empty path rather
    // than NPE. NOTE(review): confirm consumers of storagePath accept "".
    String path = volume.getStorageDir() != null ? volume.getStorageDir().getPath() : "";
    result.add(VolumeReportProto.newBuilder()
        .setStorageId(volume.getStorageID())
        .setUtilization(v.getUtilization())
        .setCommittedBytes(volume.getCommittedBytes())
        .setStoragePath(path)
        .build());
  }
  return result;
}

public long calculateBytesToMove(List<VolumeFixedUsage> inputVolumeSet) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,12 +28,14 @@
import static org.mockito.Mockito.when;

import java.io.IOException;
import java.util.Arrays;
import java.util.UUID;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.DiskBalancerProtocolProtos.GetDiskBalancerInfoRequestProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDiskBalancerInfoProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DiskBalancerConfigurationProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DiskBalancerRunningStatus;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.VolumeReportProto;
import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
import org.apache.hadoop.ozone.container.diskbalancer.DiskBalancerProtocolServer.PrivilegedOperation;
import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
Expand All @@ -51,6 +53,16 @@ class TestDiskBalancerProtocolServer {
private static final int TEST_THREADS = 5;
private static final boolean TEST_STOP_AFTER_DISK_EVEN = true;
private static final double TEST_VOLUME_DENSITY = 15.5;
private static final double TEST_IDEAL_USAGE = 0.6;
private static final String TEST_STORAGE_ID_1 = "vol1";
private static final String TEST_STORAGE_PATH_1 = "/data/hdds1";
private static final String TEST_STORAGE_ID_2 = "vol2";
private static final String TEST_STORAGE_PATH_2 = "/data/hdds2";
private static final double TEST_UTILIZATION_1 = 0.57;
private static final double TEST_UTILIZATION_2 = 0.63;
private static final long TEST_COMMITTED_BYTES_1 = 10L * 1024 * 1024;
private static final long TEST_COMMITTED_BYTES_2 = 25L * 1024 * 1024;
private static final int TEST_VOLUME_INFO_COUNT = 2;

private DatanodeStateMachine datanodeStateMachine;
private DiskBalancerService diskBalancerService;
Expand Down Expand Up @@ -80,6 +92,21 @@ void setup() throws IOException {
0L, // balancedBytes
TEST_VOLUME_DENSITY
);
diskBalancerInfo.setIdealUsage(TEST_IDEAL_USAGE);
diskBalancerInfo.setVolumeInfo(Arrays.asList(
VolumeReportProto.newBuilder()
.setStorageId(TEST_STORAGE_ID_1)
.setStoragePath(TEST_STORAGE_PATH_1)
.setUtilization(TEST_UTILIZATION_1)
.setCommittedBytes(TEST_COMMITTED_BYTES_1)
.build(),
VolumeReportProto.newBuilder()
.setStorageId(TEST_STORAGE_ID_2)
.setStoragePath(TEST_STORAGE_PATH_2)
.setUtilization(TEST_UTILIZATION_2)
.setCommittedBytes(TEST_COMMITTED_BYTES_2)
.build()));

when(ozoneContainer.getDiskBalancerInfo()).thenReturn(diskBalancerInfo);

// Create datanode details
Expand All @@ -102,12 +129,23 @@ void setup() throws IOException {

@Test
void testGetDiskBalancerInfoReport() throws IOException {
  // REPORT for an old client version: includes the volume density sum plus
  // the new report-only fields (ideal usage and per-volume entries).
  DatanodeDiskBalancerInfoProto report = server.getDiskBalancerInfo(REQUEST_WITH_OLD_CLIENT_VERSION);

  // Validate the report is present BEFORE dereferencing its repeated fields,
  // otherwise a null report surfaces as an NPE instead of a clear assertion.
  assertNotNull(report);
  assertNotNull(report.getNode());
  assertEquals(TEST_VOLUME_DENSITY, report.getCurrentVolumeDensitySum());
  assertEquals(TEST_IDEAL_USAGE, report.getIdealUsage());
  assertEquals(TEST_VOLUME_INFO_COUNT, report.getVolumeInfoCount());

  VolumeReportProto volReport0 = report.getVolumeInfo(0);
  VolumeReportProto volReport1 = report.getVolumeInfo(1);

  assertEquals(TEST_STORAGE_ID_1, volReport0.getStorageId());
  assertEquals(TEST_STORAGE_PATH_1, volReport0.getStoragePath());
  assertEquals(TEST_UTILIZATION_1, volReport0.getUtilization());
  assertEquals(TEST_COMMITTED_BYTES_1, volReport0.getCommittedBytes());
  assertEquals(TEST_STORAGE_ID_2, volReport1.getStorageId());
  assertEquals(TEST_STORAGE_PATH_2, volReport1.getStoragePath());
  assertEquals(TEST_UTILIZATION_2, volReport1.getUtilization());
  assertEquals(TEST_COMMITTED_BYTES_2, volReport1.getCommittedBytes());
}

@Test
Expand Down
9 changes: 9 additions & 0 deletions hadoop-hdds/interface-client/src/main/proto/hdds.proto
Original file line number Diff line number Diff line change
Expand Up @@ -579,6 +579,13 @@ enum DiskBalancerRunningStatus {
PAUSED = 3;
}

// Per-volume status entry carried in DatanodeDiskBalancerInfoProto reports.
// Report-only data; not persisted by the datanode.
message VolumeReportProto {
  // Storage ID of the volume.
  optional string storageId = 1;
  // Filesystem path of the volume's storage directory; may be empty.
  optional string storagePath = 2;
  // Utilization of the volume (a fraction, e.g. 0.57, per test fixtures).
  optional double utilization = 3;
  // Bytes committed on this volume.
  optional uint64 committedBytes = 4;
}

message DatanodeDiskBalancerInfoProto {
required DatanodeDetailsProto node = 1;
required double currentVolumeDensitySum = 2;
Expand All @@ -588,4 +595,6 @@ message DatanodeDiskBalancerInfoProto {
optional uint64 failureMoveCount = 6;
optional uint64 bytesToMove = 7;
optional uint64 bytesMoved = 8;
optional double idealUsage = 9;
repeated VolumeReportProto volumeInfo = 10;
}
Loading