@@ -29,7 +29,7 @@ import org.apache.spark.sql.catalyst.optimizer.{BuildLeft, BuildRight, Normalize
 import org.apache.spark.sql.catalyst.plans._
 import org.apache.spark.sql.catalyst.plans.physical.{HashPartitioning, Partitioning, RangePartitioning, RoundRobinPartitioning, SinglePartition}
 import org.apache.spark.sql.catalyst.util.CharVarcharCodegenUtils
-import org.apache.spark.sql.comet.{CometBroadcastExchangeExec, CometNativeScanExec, CometScanExec, CometSinkPlaceHolder, CometSparkToColumnarExec, DecimalPrecision}
+import org.apache.spark.sql.comet.{CometBroadcastExchangeExec, CometScanExec, CometSinkPlaceHolder, CometSparkToColumnarExec, DecimalPrecision}
 import org.apache.spark.sql.comet.execution.shuffle.CometShuffleExchangeExec
 import org.apache.spark.sql.execution
 import org.apache.spark.sql.execution._
@@ -2507,12 +2507,15 @@ object QueryPlanSerde extends Logging with ShimQueryPlanSerde with CometExprShim
               partitions.foreach(p => {
                 val inputPartitions = p.asInstanceOf[DataSourceRDDPartition].inputPartitions
                 inputPartitions.foreach(partition => {
-                  partition2Proto(partition.asInstanceOf[FilePartition], nativeScanBuilder)
+                  partition2Proto(
+                    partition.asInstanceOf[FilePartition],
+                    nativeScanBuilder,
+                    scan.relation.partitionSchema)
                 })
               })
             case rdd: FileScanRDD =>
               rdd.filePartitions.foreach(partition => {
-                partition2Proto(partition, nativeScanBuilder)
+                partition2Proto(partition, nativeScanBuilder, scan.relation.partitionSchema)
               })
             case _ =>
           }
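
Both call sites above now thread `scan.relation.partitionSchema` through to `partition2Proto`. As a reminder of why that schema exists at all: in a Hive-style layout the partition columns live in the directory names rather than inside the Parquet files, so the native scan must receive their types (and, per file, their values) separately. A minimal illustration with a hypothetical table layout, not taken from this PR:

```scala
import org.apache.spark.sql.types._

// For files laid out as .../date=2024-01-01/country=US/part-0.parquet,
// Spark exposes the directory-encoded columns as relation.partitionSchema:
val partitionSchema: StructType = StructType(Seq(
  StructField("date", DateType),
  StructField("country", StringType)))
```
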
@@ -2521,9 +2524,15 @@ object QueryPlanSerde extends Logging with ShimQueryPlanSerde with CometExprShim
             new SparkToParquetSchemaConverter(conf).convert(scan.requiredSchema)
           val dataSchemaParquet =
             new SparkToParquetSchemaConverter(conf).convert(scan.relation.dataSchema)
+          val partitionSchema = scan.relation.partitionSchema.fields.flatMap { field =>
+            serializeDataType(field.dataType)
+          }
+          // In `CometScanRule`, we ensure partitionSchema is supported.
+          assert(partitionSchema.length == scan.relation.partitionSchema.fields.length)
 
           nativeScanBuilder.setRequiredSchema(requiredSchemaParquet.toString)
           nativeScanBuilder.setDataSchema(dataSchemaParquet.toString)
+          nativeScanBuilder.addAllPartitionSchema(partitionSchema.toIterable.asJava)
 
           Some(result.setNativeScan(nativeScanBuilder).build())
 
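
A note on the hunk above: `serializeDataType` returns an `Option`, so the `flatMap` silently drops any field whose type it cannot handle; the length assertion is what turns such a silent drop into a hard failure (as the comment notes, `CometScanRule` should have rejected unsupported partition types earlier). A self-contained sketch of that pattern, where `serialize` is a hypothetical stand-in for Comet's `serializeDataType`:

```scala
import org.apache.spark.sql.types._

// Hypothetical stand-in serializer: like the real one, it returns None
// for any type it cannot represent.
def serialize(dt: DataType): Option[String] = dt match {
  case IntegerType | LongType | StringType | DateType => Some(dt.typeName)
  case _ => None
}

val partitionSchema = StructType(Seq(
  StructField("date", DateType),
  StructField("country", StringType)))

// flatMap drops unsupported fields without raising an error...
val serialized = partitionSchema.fields.flatMap(f => serialize(f.dataType))
// ...so the length check is what surfaces a mismatch loudly.
assert(serialized.length == partitionSchema.fields.length)
```
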
@@ -3191,10 +3200,27 @@ object QueryPlanSerde extends Logging with ShimQueryPlanSerde with CometExprShim
 
   private def partition2Proto(
       partition: FilePartition,
-      nativeScanBuilder: OperatorOuterClass.NativeScan.Builder): Unit = {
+      nativeScanBuilder: OperatorOuterClass.NativeScan.Builder,
+      partitionSchema: StructType): Unit = {
     val partitionBuilder = OperatorOuterClass.SparkFilePartition.newBuilder()
     partition.files.foreach(file => {
+      // Process the partition values
+      val partitionValues = file.partitionValues
+      assert(partitionValues.numFields == partitionSchema.length)
+      val partitionVals =
+        partitionValues.toSeq(partitionSchema).zipWithIndex.map { case (value, i) =>
+          val attr = partitionSchema(i)
+          val valueProto = exprToProto(Literal(value, attr.dataType), Seq.empty)
+          // In `CometScanRule`, we have already checked that all partition values are
+          // supported. So, we can safely use `get` here.
+          assert(
+            valueProto.isDefined,
+            s"Unsupported partition value: $value, type: ${attr.dataType}")
+          valueProto.get
+        }
+
       val fileBuilder = OperatorOuterClass.SparkPartitionedFile.newBuilder()
+      partitionVals.foreach(fileBuilder.addPartitionValues)
       fileBuilder
         .setFilePath(file.pathUri.toString)
         .setStart(file.start)
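
For context on the conversion in `partition2Proto`: `file.partitionValues` is an `InternalRow` holding values in Catalyst's internal representation (dates as days-since-epoch `Int`s, strings as `UTF8String`), so each value has to be re-paired with its `DataType` via `Literal(value, dataType)` before `exprToProto` can serialize it. A minimal sketch of that step under an assumed two-column partition schema:

```scala
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Literal
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String

val partitionSchema = StructType(Seq(
  StructField("date", DateType),
  StructField("country", StringType)))

// Internal representation of date=2024-01-01/country=US:
// DateType is stored as days since the epoch, StringType as UTF8String.
val partitionValues = InternalRow(19723, UTF8String.fromString("US"))

// toSeq(schema) decodes the row positionally; Literal re-attaches the type.
val literals = partitionValues.toSeq(partitionSchema).zipWithIndex.map {
  case (value, i) => Literal(value, partitionSchema(i).dataType)
}
// literals: Seq(Literal(19723, DateType), Literal(US, StringType))
```
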