diff --git a/cpp/velox/substrait/SubstraitToVeloxPlan.cc b/cpp/velox/substrait/SubstraitToVeloxPlan.cc
index 6f32c5237ec9..ab76f2c56ca1 100644
--- a/cpp/velox/substrait/SubstraitToVeloxPlan.cc
+++ b/cpp/velox/substrait/SubstraitToVeloxPlan.cc
@@ -740,23 +740,7 @@ core::PlanNodePtr SubstraitToVeloxPlanConverter::toVeloxPlan(const ::substrait::
   // Spark's default compression code is snappy.
   const auto& compressionKind =
       writerOptions->compressionKind.value_or(common::CompressionKind::CompressionKind_SNAPPY);
-  std::shared_ptr<core::InsertTableHandle> tableHandle;
-  if (useCudfTableHandle(splitInfos_) && veloxCfg_->get<bool>(kCudfEnableTableScan, kCudfEnableTableScanDefault) &&
-      veloxCfg_->get<bool>(kCudfEnabled, kCudfEnabledDefault)) {
-#ifdef GLUTEN_ENABLE_GPU
-    tableHandle = std::make_shared<core::InsertTableHandle>(
-        kCudfHiveConnectorId,
-        makeCudfHiveInsertTableHandle(
-            tableColumnNames, /*inputType->names() column name is different*/
-            inputType->children(),
-            std::make_shared<cudf_velox::connector::hive::LocationHandle>(
-                writePath, cudf_velox::connector::hive::LocationHandle::TableType::kNew, fileName),
-            compressionKind,
-            {},
-            writerOptions));
-#endif
-  } else {
-    tableHandle = std::make_shared<core::InsertTableHandle>(
+  std::shared_ptr<core::InsertTableHandle> tableHandle = std::make_shared<core::InsertTableHandle>(
       kHiveConnectorId,
       makeHiveInsertTableHandle(
           tableColumnNames, /*inputType->names() column name is different*/
@@ -767,7 +751,6 @@ core::PlanNodePtr SubstraitToVeloxPlanConverter::toVeloxPlan(const ::substrait::
           writerOptions,
           fileFormat,
           compressionKind));
-  }
   return std::make_shared<core::TableWriteNode>(
       nextPlanNodeId(),
       inputType,
@@ -1358,21 +1341,21 @@ core::PlanNodePtr SubstraitToVeloxPlanConverter::toVeloxPlan(const ::substrait::
   auto dataColumns = ROW(std::move(names), std::move(types));
   connector::ConnectorTableHandlePtr tableHandle;
   auto remainingFilter = readRel.has_filter() ? exprConverter_->toVeloxExpr(readRel.filter(), dataColumns) : nullptr;
-  if (useCudfTableHandle(splitInfos_)) {
+  auto connectorId = kHiveConnectorId;
+  if (useCudfTableHandle(splitInfos_) && veloxCfg_->get<bool>(kCudfEnableTableScan, kCudfEnableTableScanDefault) &&
+      veloxCfg_->get<bool>(kCudfEnabled, kCudfEnabledDefault)) {
 #ifdef GLUTEN_ENABLE_GPU
-    tableHandle = std::make_shared<cudf_velox::connector::hive::HiveTableHandle>(
-        kCudfHiveConnectorId, "cudf_hive_table", filterPushdownEnabled, nullptr, remainingFilter, dataColumns);
+    connectorId = kCudfHiveConnectorId;
 #endif
-  } else {
-    common::SubfieldFilters subfieldFilters;
-    tableHandle = std::make_shared<connector::hive::HiveTableHandle>(
-        kHiveConnectorId,
-        "hive_table",
-        filterPushdownEnabled,
-        std::move(subfieldFilters),
-        remainingFilter,
-        dataColumns);
   }
+  common::SubfieldFilters subfieldFilters;
+  tableHandle = std::make_shared<connector::hive::HiveTableHandle>(
+      connectorId,
+      "hive_table",
+      filterPushdownEnabled,
+      std::move(subfieldFilters),
+      remainingFilter,
+      dataColumns);
 
   // Get assignments and out names.
   std::vector<std::string> outNames;
diff --git a/gluten-substrait/src/main/scala/org/apache/gluten/config/GlutenConfig.scala b/gluten-substrait/src/main/scala/org/apache/gluten/config/GlutenConfig.scala
index b8776853a6b0..5cdaaeda1adb 100644
--- a/gluten-substrait/src/main/scala/org/apache/gluten/config/GlutenConfig.scala
+++ b/gluten-substrait/src/main/scala/org/apache/gluten/config/GlutenConfig.scala
@@ -483,7 +483,8 @@ object GlutenConfig extends ConfigRegistry {
     "spark.gluten.sql.columnar.backend.velox.memoryUseHugePages",
    "spark.gluten.sql.columnar.backend.velox.cachePrefetchMinPct",
     "spark.gluten.sql.columnar.backend.velox.memoryPoolCapacityTransferAcrossTasks",
-    "spark.gluten.sql.columnar.backend.velox.preferredBatchBytes"
+    "spark.gluten.sql.columnar.backend.velox.preferredBatchBytes",
+    "spark.gluten.sql.columnar.backend.velox.cudf.enableTableScan"
   )
 
   /**
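
Note: after this change, the cudf path in the table-scan conversion only swaps the connector ID; whether it is taken is gated by the kCudfEnabled and kCudfEnableTableScan values read from veloxCfg_, and the newly registered key above is the Spark-side name forwarded for the table-scan toggle. Below is a minimal usage sketch for turning it on from a Spark session. The session-builder boilerplate is illustrative rather than part of this diff, and the Spark-side key backing kCudfEnabled is not shown in this change, so it is omitted here.

    // Sketch only: enables the cudf table-scan toggle registered in
    // GlutenConfig above. Standard Spark API; only the config key comes
    // from this diff. A GPU-enabled build (GLUTEN_ENABLE_GPU) and the
    // overall cudf enable flag (not shown in this diff) are also needed
    // before the native side selects kCudfHiveConnectorId.
    import org.apache.spark.sql.SparkSession

    val spark = SparkSession
      .builder()
      .appName("gluten-cudf-table-scan")
      .config("spark.gluten.sql.columnar.backend.velox.cudf.enableTableScan", "true")
      .getOrCreate()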