@@ -37,7 +37,7 @@ import org.apache.spark.sql.catalyst.plans.QueryPlan
 import org.apache.spark.sql.execution._
 import org.apache.spark.sql.execution.metric.SQLMetrics
 import org.apache.spark.sql.hive._
-import org.apache.spark.sql.hive.client.HiveClientImpl
+import org.apache.spark.sql.hive.client.{HiveClient, HiveClientImpl}
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.types.{BooleanType, DataType}
 import org.apache.spark.util.Utils
@@ -87,6 +87,8 @@ case class HiveTableScanExec(
     BindReferences.bindReference(pred, relation.partitionCols)
   }
 
+  @transient private lazy val hiveClient: HiveClient = sparkSession.sharedState.externalCatalog
+    .unwrapped.asInstanceOf[HiveExternalCatalog].client
   @transient private lazy val hiveQlTable = HiveClientImpl.toHiveTable(relation.tableMeta)
   @transient private lazy val tableDesc = new TableDesc(
     hiveQlTable.getInputFormatClass,
@@ -95,7 +97,7 @@ case class HiveTableScanExec(
 
   // Create a local copy of hadoopConf, so that scan-specific modifications do not impact
   // other queries
-  @transient private lazy val hadoopConf = {
+  @transient private lazy val hadoopConf = hiveClient.withHiveState {
     val c = sparkSession.sessionState.newHadoopConf()
     // append column ids and names before broadcast
     addColumnMetadataToConf(c)
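
Context for the change: `withHiveState` in Spark's Hive client runs the given block with the client's thread-local session state and class loader installed, so Hive-side lookups made while building the scan's local `hadoopConf` see a consistent environment and are restored afterwards. Below is a minimal, self-contained Scala sketch of that save/run/restore pattern; `StateHolder` and `withState` are hypothetical names for illustration, not Spark's actual implementation.

// Minimal sketch of the thread-local save/run/restore pattern that
// withHiveState-style helpers follow (hypothetical names, not Spark code).
object StateHolder {
  private val current = new ThreadLocal[String] {
    override def initialValue(): String = "default"
  }

  // Install `state` for the current thread, run `body`, then restore the
  // previous value so the caller's environment is left untouched.
  def withState[A](state: String)(body: => A): A = {
    val previous = current.get()
    current.set(state)
    try body
    finally current.set(previous)
  }

  def get: String = current.get()
}

object Demo extends App {
  val result = StateHolder.withState("hive-session") {
    // Inside the block, the thread-local reflects the managed session.
    StateHolder.get
  }
  println(result)          // hive-session
  println(StateHolder.get) // default (restored after the block)
}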