@@ -22,7 +22,6 @@ import scala.collection.JavaConverters._
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.hive.ql.metadata.{Partition => HivePartition}
 import org.apache.hadoop.hive.ql.plan.TableDesc
-import org.apache.hadoop.hive.ql.session.SessionState
 import org.apache.hadoop.hive.serde.serdeConstants
 import org.apache.hadoop.hive.serde2.objectinspector._
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption
@@ -121,16 +120,7 @@ case class HiveTableScanExec(
 
     HiveShim.appendReadColumns(hiveConf, neededColumnIDs, output.map(_.name))
 
-    val currentState = SessionState.get()
-    val deserializer = if (currentState != null) {
-      val originClassLoader = currentState.getConf.getClassLoader
-      currentState.getConf.setClassLoader(sparkSession.sharedState.jarClassLoader)
-      val instance = tableDesc.getDeserializerClass.getConstructor().newInstance()
-      currentState.getConf.setClassLoader(originClassLoader)
-      instance
-    } else {
-      tableDesc.getDeserializerClass.getConstructor().newInstance()
-    }
+    val deserializer = tableDesc.getDeserializerClass.getConstructor().newInstance()
     deserializer.initialize(hiveConf, tableDesc.getProperties)
 
     // Specifies types and object inspectors of columns to be scanned.
@@ -191,7 +181,6 @@ case class HiveTableScanExec(
   }
 
   protected override def doExecute(): RDD[InternalRow] = {
-    Thread.currentThread().setContextClassLoader(sparkSession.sharedState.jarClassLoader)
     // Using dummyCallSite, as getCallSite can turn out to be expensive with
     // multiple partitions.
     val rdd = if (!relation.isPartitioned) {
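
Note: the block removed in the second hunk swapped the SessionState configuration's classloader to the shared jarClassLoader before instantiating the Hive deserializer, then restored the original loader. A minimal sketch of that swap-and-restore pattern, written as a reusable helper (the name withClassLoader is hypothetical, not part of this patch), might look like:

import org.apache.hadoop.conf.Configuration

// Hypothetical helper illustrating the pattern the diff removes: point the
// Hadoop Configuration at a given classloader while running `body`, then
// restore the original loader afterwards.
def withClassLoader[T](conf: Configuration, loader: ClassLoader)(body: => T): T = {
  val original = conf.getClassLoader
  conf.setClassLoader(loader)
  try body
  finally conf.setClassLoader(original) // restore even if body throws
}

Unlike this sketch, the removed code restored the loader without a try/finally, so a failure in newInstance() could leave the swapped loader in place; the revert sidesteps the question entirely by instantiating the deserializer directly.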