Skip to content

Commit eb60d1e

Browse files
committed
initial commit
1 parent a99a47c commit eb60d1e

File tree

2 files changed

+29
-4
lines changed

2 files changed

+29
-4
lines changed

sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -195,16 +195,21 @@ case class AlterTableRenameCommand(
195195
DDLUtils.verifyAlterTableType(catalog, table, isView)
196196
// If an exception is thrown here we can just assume the table is uncached;
197197
// this can happen with Hive tables when the underlying catalog is in-memory.
198-
val wasCached = Try(sparkSession.catalog.isCached(oldName.unquotedString)).getOrElse(false)
199-
if (wasCached) {
198+
// If `optStorageLevel` is defined, the old table was cached.
199+
val optStorageLevel = Try {
200+
val optCachedData = sparkSession.sharedState.cacheManager.lookupCachedData(
201+
sparkSession.table(oldName.unquotedString))
202+
optCachedData.map(_.cachedRepresentation.cacheBuilder.storageLevel)
203+
}.getOrElse(None)
204+
optStorageLevel.foreach { _ =>
200205
CommandUtils.uncacheTableOrView(sparkSession, oldName.unquotedString)
201206
}
202207
// Invalidate the table last, otherwise uncaching the table would load the logical plan
203208
// back into the hive metastore cache
204209
catalog.refreshTable(oldName)
205210
catalog.renameTable(oldName, newName)
206-
if (wasCached) {
207-
sparkSession.catalog.cacheTable(newName.unquotedString)
211+
optStorageLevel.foreach { storageLevel =>
212+
sparkSession.catalog.cacheTable(newName.unquotedString, storageLevel)
208213
}
209214
}
210215
Seq.empty[Row]

sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1285,4 +1285,24 @@ class CachedTableSuite extends QueryTest with SQLTestUtils
12851285
assert(spark.sharedState.cacheManager.lookupCachedData(sql("select 1, 2")).isDefined)
12861286
}
12871287
}
1288+
1289+
test("SPARK-33786: Cache's storage level should be respected when a table name is altered.") {
  withTable("old", "new") {
    withTempPath { path =>
      // Reads the storage level recorded for the cached plan of `tableName`.
      // Uses `.get` deliberately: the table is expected to be cached at every
      // call site below, and a missing cache entry should fail the test loudly.
      def storageLevelOf(tableName: String): StorageLevel =
        spark.sharedState.cacheManager
          .lookupCachedData(spark.table(tableName)).get
          .cachedRepresentation.cacheBuilder.storageLevel

      // Set up a parquet-backed table and cache it with a non-default level,
      // so a rename that fell back to the default level would be detectable.
      Seq(1 -> "a").toDF("i", "j").write.parquet(path.getCanonicalPath)
      sql(s"CREATE TABLE old USING parquet LOCATION '${path.toURI}'")
      sql("CACHE TABLE old OPTIONS('storageLevel' 'MEMORY_ONLY')")
      val levelBeforeRename = storageLevelOf("old")

      // Renaming must re-cache the data under the new name with the same
      // storage level the old table was cached with.
      sql("ALTER TABLE old RENAME TO new")
      val levelAfterRename = storageLevelOf("new")
      assert(levelBeforeRename === levelAfterRename)
    }
  }
}
12881308
}

0 commit comments

Comments
 (0)