File tree Expand file tree Collapse file tree 2 files changed +4
-6
lines changed
v0.12.0/src/main/scala/org/apache/spark/sql/hive/thriftserver
v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver Expand file tree Collapse file tree 2 files changed +4
-6
lines changed Original file line number Diff line number Diff line change @@ -202,13 +202,12 @@ private[hive] class SparkExecuteStatementOperation(
202
202
hiveContext.sparkContext.setLocalProperty("spark.scheduler.pool", pool)
203
203
}
204
204
iter = {
205
- val resultRdd = result.queryExecution.toRdd
206
205
val useIncrementalCollect =
207
206
hiveContext.getConf("spark.sql.thriftServer.incrementalCollect", "false").toBoolean
208
207
if (useIncrementalCollect) {
209
- resultRdd.map(_.copy()).toLocalIterator
208
+ result.toLocalIterator
210
209
} else {
211
- resultRdd.map(_.copy()).collect().iterator
210
+ result.collect().iterator
212
211
}
213
212
}
214
213
dataTypes = result.queryExecution.analyzed.output.map(_.dataType).toArray
Original file line number Diff line number Diff line change @@ -87,13 +87,12 @@ private[hive] class SparkExecuteStatementOperation(
87
87
val groupId = round(random * 1000000).toString
88
88
hiveContext.sparkContext.setJobGroup(groupId, statement)
89
89
iter = {
90
- val resultRdd = result.queryExecution.toRdd
91
90
val useIncrementalCollect =
92
91
hiveContext.getConf("spark.sql.thriftServer.incrementalCollect", "false").toBoolean
93
92
if (useIncrementalCollect) {
94
- resultRdd.map(_.copy()).toLocalIterator
93
+ result.toLocalIterator
95
94
} else {
96
- resultRdd.map(_.copy()).collect().iterator
95
+ result.collect().iterator
97
96
}
98
97
}
99
98
dataTypes = result.queryExecution.analyzed.output.map(_.dataType).toArray
You can’t perform that action at this time.
0 commit comments