diff --git a/gluten-substrait/src/main/scala/org/apache/spark/sql/execution/ColumnarShuffleExchangeExec.scala b/gluten-substrait/src/main/scala/org/apache/spark/sql/execution/ColumnarShuffleExchangeExec.scala
index 7419d595f3b4..113097b8162c 100644
--- a/gluten-substrait/src/main/scala/org/apache/spark/sql/execution/ColumnarShuffleExchangeExec.scala
+++ b/gluten-substrait/src/main/scala/org/apache/spark/sql/execution/ColumnarShuffleExchangeExec.scala
@@ -31,6 +31,7 @@ import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.expressions.Attribute
 import org.apache.spark.sql.catalyst.plans.logical.Statistics
 import org.apache.spark.sql.catalyst.plans.physical._
+import org.apache.spark.sql.catalyst.util.truncatedString
 import org.apache.spark.sql.execution.CoalesceExec.EmptyPartition
 import org.apache.spark.sql.execution.exchange._
 import org.apache.spark.sql.execution.metric.SQLShuffleWriteMetricsReporter
@@ -148,12 +149,16 @@ case class ColumnarShuffleExchangeExec(
     cachedShuffleRDD
   }
 
-  override def verboseString(maxFields: Int): String = toString(super.verboseString(maxFields))
+  override def verboseString(maxFields: Int): String =
+    toString(super.verboseString(maxFields), maxFields)
 
-  override def simpleString(maxFields: Int): String = toString(super.simpleString(maxFields))
-
-  private def toString(original: String): String = {
-    original + ", [OUTPUT] " + output.map(attr => attr.name + ":" + attr.dataType).toString()
+  private def toString(original: String, maxFields: Int): String = {
+    original + ", [output=" + truncatedString(
+      output.map(_.verboseString(maxFields)),
+      "[",
+      ", ",
+      "]",
+      maxFields) + "]"
   }
 
   override def output: Seq[Attribute] = if (projectOutputAttributes != null) {