@@ -48,28 +48,6 @@ private[sql] class SharedState(
     initialConfigs: scala.collection.Map[String, String])
   extends Logging {
 
-  // This variable should be lazy, because in the first place we need to load hive-site.xml into
-  // hadoopConf and determine the warehouse path which will be set into both spark conf and hadoop
-  // conf avoiding be affected by any SparkSession level options
-  private lazy val (conf, hadoopConf) = {
-    val confClone = sparkContext.conf.clone()
-    val hadoopConfClone = new Configuration(sparkContext.hadoopConfiguration)
-    // If `SparkSession` is instantiated using an existing `SparkContext` instance and no existing
-    // `SharedState`, all `SparkSession` level configurations have higher priority to generate a
-    // `SharedState` instance. This will be done only once then shared across `SparkSession`s
-    initialConfigs.foreach {
-      case (k, _) if k == "hive.metastore.warehouse.dir" || k == WAREHOUSE_PATH.key =>
-        logWarning(s"Not allowing to set ${WAREHOUSE_PATH.key} or hive.metastore.warehouse.dir " +
-          s"in SparkSession's options, it should be set statically for cross-session usages")
-      case (k, v) =>
-        logDebug(s"Applying initial SparkSession options to SparkConf/HadoopConf: $k -> $v")
-        confClone.set(k, v)
-        hadoopConfClone.set(k, v)
-
-    }
-    (confClone, hadoopConfClone)
-  }
-
   // Load hive-site.xml into hadoopConf and determine the warehouse path we want to use, based on
   // the config from both hive and Spark SQL. Finally set the warehouse config value to sparkConf.
   val warehousePath: String = {
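
The move matters because Scala initializes plain `val`s strictly in declaration order: placing `(conf, hadoopConf)` below `warehousePath` guarantees that computing the warehouse path (which loads hive-site.xml and writes the setting into the SparkContext's conf) has already run before the clones are taken. For contrast, a minimal sketch of what goes wrong when a `val` depends on a member declared below it; the names here are hypothetical and not from this file:

object InitOrderDemo {
  class Misordered {
    // Runs first: `second` has not been assigned yet, so this reads its default, null.
    val first: String = s"second is: $second"
    val second: String = "ready"
  }

  def main(args: Array[String]): Unit = {
    println(new Misordered().first) // prints "second is: null"
  }
}
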
@@ -105,6 +83,27 @@ private[sql] class SharedState(
   }
   logInfo(s"Warehouse path is '$warehousePath'.")
 
+  // This variable should be initiated after `warehousePath`, because in the first place we need
+  // to load hive-site.xml into hadoopConf and determine the warehouse path which will be set into
+  // both spark conf and hadoop conf avoiding be affected by any SparkSession level options
+  private val (conf, hadoopConf) = {
+    val confClone = sparkContext.conf.clone()
+    val hadoopConfClone = new Configuration(sparkContext.hadoopConfiguration)
+    // If `SparkSession` is instantiated using an existing `SparkContext` instance and no existing
+    // `SharedState`, all `SparkSession` level configurations have higher priority to generate a
+    // `SharedState` instance. This will be done only once then shared across `SparkSession`s
+    initialConfigs.foreach {
+      case (k, _) if k == "hive.metastore.warehouse.dir" || k == WAREHOUSE_PATH.key =>
+        logWarning(s"Not allowing to set ${WAREHOUSE_PATH.key} or hive.metastore.warehouse.dir " +
+          s"in SparkSession's options, it should be set statically for cross-session usages")
+      case (k, v) =>
+        logDebug(s"Applying initial SparkSession options to SparkConf/HadoopConf: $k -> $v")
+        confClone.set(k, v)
+        hadoopConfClone.set(k, v)
+
+    }
+    (confClone, hadoopConfClone)
+  }
 
   /**
    * Class for caching query results reused in future executions.
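
Note that the added block clones both configurations before overlaying the `SparkSession`-level options, so later mutations never leak back into the shared `SparkContext`. A minimal, self-contained sketch of that copy-then-overlay behavior; the keys and the metastore URI below are illustrative values only:

import org.apache.hadoop.conf.Configuration
import org.apache.spark.SparkConf

object CloneOverlayDemo {
  def main(args: Array[String]): Unit = {
    // SparkConf.clone() yields an independent copy; setting on it leaves the base untouched.
    val base = new SparkConf().set("spark.app.name", "demo")
    val overlaid = base.clone().set("spark.sql.shuffle.partitions", "4")
    println(base.contains("spark.sql.shuffle.partitions")) // false: base unaffected
    println(overlaid.get("spark.sql.shuffle.partitions"))  // 4

    // Configuration's copy constructor snapshots the base entries the same way.
    val baseHadoop = new Configuration()
    val hadoopCopy = new Configuration(baseHadoop)
    hadoopCopy.set("hive.metastore.uris", "thrift://localhost:9083") // illustrative URI
    println(baseHadoop.get("hive.metastore.uris")) // null: base unaffected
  }
}
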
@@ -115,7 +114,7 @@ private[sql] class SharedState(
    * A status store to query SQL status/metrics of this Spark application, based on SQL-specific
    * [[org.apache.spark.scheduler.SparkListenerEvent]]s.
    */
-  lazy val statusStore: SQLAppStatusStore = {
+  val statusStore: SQLAppStatusStore = {
     val kvStore = sparkContext.statusStore.store.asInstanceOf[ElementTrackingStore]
     val listener = new SQLAppStatusListener(conf, kvStore, live = true)
     sparkContext.listenerBus.addToStatusQueue(listener)
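
Finally, dropping `lazy` from `statusStore` means the SQL listener is registered on the bus when `SharedState` is constructed, rather than whenever the store happens to be touched first. A minimal sketch of that difference, again with hypothetical names rather than Spark code:

object LazinessDemo {
  class EagerState {
    val store: String = { println("listener registered at construction"); "store" }
  }

  class LazyState {
    lazy val store: String = { println("listener registered on first access"); "store" }
  }

  def main(args: Array[String]): Unit = {
    new EagerState()        // side effect runs here, during construction
    val s = new LazyState() // nothing printed yet
    s.store                 // side effect runs now, exactly once
  }
}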