Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,10 @@ private boolean tryAccessCluster() {
RssConf rssConf = RssSparkConfig.toRssConf(sparkConf);
List<String> excludeProperties =
rssConf.get(RssClientConf.RSS_CLIENT_REPORT_EXCLUDE_PROPERTIES);
List<String> includeProperties =
rssConf.get(RssClientConf.RSS_CLIENT_REPORT_INCLUDE_PROPERTIES);
rssConf.getAll().stream()
.filter(entry -> includeProperties == null || includeProperties.contains(entry.getKey()))
.filter(entry -> !excludeProperties.contains(entry.getKey()))
.forEach(entry -> extraProperties.put(entry.getKey(), (String) entry.getValue()));

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -137,7 +137,10 @@ private boolean tryAccessCluster() {
RssConf rssConf = RssSparkConfig.toRssConf(sparkConf);
List<String> excludeProperties =
rssConf.get(RssClientConf.RSS_CLIENT_REPORT_EXCLUDE_PROPERTIES);
List<String> includeProperties =
rssConf.get(RssClientConf.RSS_CLIENT_REPORT_INCLUDE_PROPERTIES);
rssConf.getAll().stream()
.filter(entry -> includeProperties == null || includeProperties.contains(entry.getKey()))
.filter(entry -> !excludeProperties.contains(entry.getKey()))
.forEach(entry -> extraProperties.put(entry.getKey(), (String) entry.getValue()));

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,14 +22,19 @@
import org.apache.spark.SparkConf;
import org.apache.spark.shuffle.sort.SortShuffleManager;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;

import org.apache.uniffle.client.api.CoordinatorClient;
import org.apache.uniffle.client.request.RssAccessClusterRequest;
import org.apache.uniffle.common.config.RssClientConf;
import org.apache.uniffle.storage.util.StorageType;

import static org.apache.uniffle.common.rpc.StatusCode.ACCESS_DENIED;
import static org.apache.uniffle.common.rpc.StatusCode.SUCCESS;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.verify;

public class DelegationRssShuffleManagerTest extends RssShuffleManagerTestBase {

Expand Down Expand Up @@ -131,6 +136,87 @@ public void testTryAccessCluster() throws Exception {
assertCreateSortShuffleManager(secondConf);
}

@Test
public void testDefaultIncludeExcludeProperties() throws Exception {
  // Neither include nor exclude list is configured, so every property in the
  // SparkConf should be forwarded to the coordinator on access.
  final CoordinatorClient coordinatorClient = setupMockedRssShuffleUtils(SUCCESS);

  SparkConf conf = new SparkConf();
  conf.set(RssSparkConfig.RSS_ACCESS_ID.key(), "mockId");
  conf.set(RssSparkConfig.RSS_COORDINATOR_QUORUM.key(), "m1:8001,m2:8002");
  conf.set(RssSparkConfig.RSS_CLIENT_ACCESS_RETRY_INTERVAL_MS, 3000L);
  conf.set(RssSparkConfig.RSS_CLIENT_ACCESS_RETRY_TIMES, 3);
  conf.set(RssSparkConfig.RSS_DYNAMIC_CLIENT_CONF_ENABLED.key(), "false");
  conf.set(RssSparkConfig.RSS_TEST_MODE_ENABLE, true);
  conf.set("spark.rss.storage.type", StorageType.LOCALFILE.name());
  // Snapshot the key count before the manager mutates the conf.
  final int confInitKeyCount = conf.getAll().length;

  assertCreateRssShuffleManager(conf);

  ArgumentCaptor<RssAccessClusterRequest> requestCaptor =
      ArgumentCaptor.forClass(RssAccessClusterRequest.class);
  verify(coordinatorClient).accessCluster(requestCaptor.capture());
  RssAccessClusterRequest captured = requestCaptor.getValue();
  // Default behavior: all initial properties are reported, plus one extra
  // property added internally by the shuffle manager.
  assertEquals(confInitKeyCount + 1, captured.getExtraProperties().size());
}

@Test
public void testIncludeProperties() throws Exception {
  // When an include list is configured, only the listed properties (plus the
  // internally added extra one) should reach the coordinator.
  final CoordinatorClient coordinatorClient = setupMockedRssShuffleUtils(SUCCESS);

  SparkConf conf = new SparkConf();
  conf.set(RssSparkConfig.RSS_ACCESS_ID.key(), "mockId");
  conf.set(RssSparkConfig.RSS_COORDINATOR_QUORUM.key(), "m1:8001,m2:8002");
  conf.set(RssSparkConfig.RSS_CLIENT_ACCESS_RETRY_INTERVAL_MS, 3000L);
  conf.set(RssSparkConfig.RSS_CLIENT_ACCESS_RETRY_TIMES, 3);
  conf.set(RssSparkConfig.RSS_DYNAMIC_CLIENT_CONF_ENABLED.key(), "false");
  conf.set(RssSparkConfig.RSS_TEST_MODE_ENABLE, true);
  conf.set("spark.rss.storage.type", StorageType.LOCALFILE.name());

  // The include list is keyed without the "spark.rss." prefix, so strip it
  // from the access-id key before using it as the include value.
  String accessIdKeyWithoutPrefix =
      RssSparkConfig.RSS_ACCESS_ID
          .key()
          .substring(RssSparkConfig.SPARK_RSS_CONFIG_PREFIX.length());
  conf.set(
      RssSparkConfig.SPARK_RSS_CONFIG_PREFIX
          + RssClientConf.RSS_CLIENT_REPORT_INCLUDE_PROPERTIES.key(),
      accessIdKeyWithoutPrefix);

  assertCreateRssShuffleManager(conf);

  ArgumentCaptor<RssAccessClusterRequest> requestCaptor =
      ArgumentCaptor.forClass(RssAccessClusterRequest.class);
  verify(coordinatorClient).accessCluster(requestCaptor.capture());
  RssAccessClusterRequest captured = requestCaptor.getValue();
  // Exactly two properties survive: the included accessId and the extra one.
  assertEquals(2, captured.getExtraProperties().size());
}

@Test
public void testExcludeProperties() throws Exception {
  // When an exclude list is configured, the listed properties must be
  // dropped from the report sent to the coordinator.
  final CoordinatorClient coordinatorClient = setupMockedRssShuffleUtils(SUCCESS);

  SparkConf conf = new SparkConf();
  conf.set(RssSparkConfig.RSS_ACCESS_ID.key(), "mockId");
  conf.set(RssSparkConfig.RSS_COORDINATOR_QUORUM.key(), "m1:8001,m2:8002");
  conf.set(RssSparkConfig.RSS_CLIENT_ACCESS_RETRY_INTERVAL_MS, 3000L);
  conf.set(RssSparkConfig.RSS_CLIENT_ACCESS_RETRY_TIMES, 3);
  conf.set(RssSparkConfig.RSS_DYNAMIC_CLIENT_CONF_ENABLED.key(), "false");
  conf.set(RssSparkConfig.RSS_TEST_MODE_ENABLE, true);
  conf.set("spark.rss.storage.type", StorageType.LOCALFILE.name());

  // Exclude the access-id key (listed without the "spark.rss." prefix).
  conf.set(
      RssSparkConfig.SPARK_RSS_CONFIG_PREFIX
          + RssClientConf.RSS_CLIENT_REPORT_EXCLUDE_PROPERTIES.key(),
      RssSparkConfig.RSS_ACCESS_ID
          .key()
          .substring(RssSparkConfig.SPARK_RSS_CONFIG_PREFIX.length()));
  // Snapshot the key count after the exclude entry has been added.
  final int confInitKeyCount = conf.getAll().length;

  assertCreateRssShuffleManager(conf);

  ArgumentCaptor<RssAccessClusterRequest> requestCaptor =
      ArgumentCaptor.forClass(RssAccessClusterRequest.class);
  verify(coordinatorClient).accessCluster(requestCaptor.capture());
  RssAccessClusterRequest captured = requestCaptor.getValue();
  // One extra property is added and one (accessId) is excluded, so the size
  // equals the initial key count: confInitKeyCount + 1 - 1.
  assertEquals(confInitKeyCount, captured.getExtraProperties().size());
}

private void assertCreateSortShuffleManager(SparkConf conf) throws Exception {
DelegationRssShuffleManager delegationRssShuffleManager =
new DelegationRssShuffleManager(conf, true);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ protected CoordinatorClient createCoordinatorClient(StatusCode status) {
return mockedCoordinatorClient;
}

void setupMockedRssShuffleUtils(StatusCode status) {
CoordinatorClient setupMockedRssShuffleUtils(StatusCode status) {
CoordinatorClient mockCoordinatorClient = createCoordinatorClient(status);
List<CoordinatorClient> coordinatorClients = Lists.newArrayList();
coordinatorClients.add(mockCoordinatorClient);
Expand All @@ -65,5 +65,6 @@ void setupMockedRssShuffleUtils(StatusCode status) {
mockedStaticRssShuffleUtils
.when(() -> RssSparkShuffleUtils.createCoordinatorClients(any()))
.thenReturn(client);
return mockCoordinatorClient;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -310,4 +310,11 @@ public class RssClientConf {
.asList()
.defaultValues()
.withDescription("the report exclude properties could be configured by this option");

/**
 * Optional allow-list of client property keys to report to the coordinator.
 * Declared with {@code noDefaultValue()}, so when it is unset the option
 * resolves to {@code null} — callers appear to treat that as "report
 * everything" (NOTE(review): confirm against the filtering code in
 * DelegationRssShuffleManager).
 */
public static final ConfigOption<List<String>> RSS_CLIENT_REPORT_INCLUDE_PROPERTIES =
ConfigOptions.key("rss.client.reportIncludeProperties")
.stringType()
.asList()
.noDefaultValue()
.withDescription("the report include properties could be configured by this option");
}
2 changes: 2 additions & 0 deletions docs/client_guide/client_guide.md
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,8 @@ The important configuration of client is listed as following. These configuratio
| <client_type>.rss.client.rpc.netty.maxOrder | 3 | The value of maxOrder for PooledByteBufAllocator when using gRPC internal Netty on the client-side. This configuration will only take effect when rss.rpc.server.type is set to GRPC_NETTY. |
| <client_type>.rss.client.rpc.netty.smallCacheSize | 1024 | The value of smallCacheSize for PooledByteBufAllocator when using gRPC internal Netty on the client-side. This configuration will only take effect when rss.rpc.server.type is set to GRPC_NETTY. |
| <client_type>.rss.client.blockIdManagerClass | - | The block id manager class of server for this application, the implementation of this interface to manage the shuffle block ids |
| <client_type>.rss.client.reportExcludeProperties | - | A comma-separated list of client configuration properties that the DelegationRssShuffleManager should NOT report to the coordinator. |
| <client_type>.rss.client.reportIncludeProperties | - | A comma-separated list of client configuration properties; if set, the DelegationRssShuffleManager reports only these properties to the coordinator. |

Notice:

Expand Down
Loading