Skip to content

Commit f24e873

Browse files
committed
Add equals(), hashCode() and description() to CSVScan
1 parent e7b3304 commit f24e873

File tree

1 file changed

+16
-2
lines changed
  • sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/csv/CSVScan.scala

1 file changed

+16
-2
lines changed

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/csv/CSVScan.scala

Lines changed: 16 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ case class CSVScan(
     readDataSchema: StructType,
     readPartitionSchema: StructType,
     options: CaseInsensitiveStringMap,
-    filters: Seq[Filter])
+    pushedFilters: Array[Filter])
   extends TextBasedFileScan(sparkSession, fileIndex, readDataSchema, readPartitionSchema, options) {

   private lazy val parsedOptions: CSVOptions = new CSVOptions(
@@ -87,6 +87,20 @@ case class CSVScan(
     // The partition values are already truncated in `FileScan.partitions`.
     // We should use `readPartitionSchema` as the partition schema here.
     CSVPartitionReaderFactory(sparkSession.sessionState.conf, broadcastedConf,
-      dataSchema, readDataSchema, readPartitionSchema, parsedOptions, filters)
+      dataSchema, readDataSchema, readPartitionSchema, parsedOptions, pushedFilters)
+  }
+
+  override def equals(obj: Any): Boolean = obj match {
+    case o: CSVScan =>
+      fileIndex == o.fileIndex && dataSchema == o.dataSchema &&
+        readDataSchema == o.readDataSchema && readPartitionSchema == o.readPartitionSchema &&
+        options == o.options && equivalentFilters(pushedFilters, o.pushedFilters)
+    case _ => false
+  }
+
+  override def hashCode(): Int = getClass.hashCode()
+
+  override def description(): String = {
+    super.description() + ", PushedFilters: " + pushedFilters.mkString("[", ", ", "]")
   }
 }

0 commit comments

Comments (0)