Skip to content

Commit 5ec4563

Browse files
Udbhav30 authored and dongjoon-hyun committed
[SPARK-24669][SQL] Invalidate tables in case of DROP DATABASE CASCADE
## What changes were proposed in this pull request? Before dropping database refresh the tables of that database, so as to refresh all cached entries associated with those tables. We follow the same when dropping a table. UT is added Closes #23905 from Udbhav30/SPARK-24669. Authored-by: Udbhav30 <u.agrawal30@gmail.com> Signed-off-by: Dongjoon Hyun <dhyun@apple.com> (cherry picked from commit 9bddf71) Signed-off-by: Dongjoon Hyun <dhyun@apple.com>
1 parent b583bfe commit 5ec4563

File tree

2 files changed

+42
-1
lines changed

2 files changed

+42
-1
lines changed

sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -218,6 +218,11 @@ class SessionCatalog(
218218
if (dbName == DEFAULT_DATABASE) {
219219
throw new AnalysisException(s"Can not drop default database")
220220
}
221+
if (cascade && databaseExists(dbName)) {
222+
listTables(dbName).foreach { t =>
223+
invalidateCachedTable(QualifiedTableName(dbName, t.table))
224+
}
225+
}
221226
externalCatalog.dropDatabase(dbName, ignoreIfNotExists, cascade)
222227
}
223228

sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala

Lines changed: 37 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717

1818
package org.apache.spark.sql.execution.command
1919

20-
import java.io.File
20+
import java.io.{File, PrintWriter}
2121
import java.net.URI
2222
import java.util.Locale
2323

@@ -2715,4 +2715,40 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils {
27152715
}
27162716
}
27172717
}
2718+
2719+
test("Refresh table before drop database cascade") {
2720+
withTempDir { tempDir =>
2721+
val file1 = new File(tempDir + "/first.csv")
2722+
val writer1 = new PrintWriter(file1)
2723+
writer1.write("first")
2724+
writer1.close()
2725+
2726+
val file2 = new File(tempDir + "/second.csv")
2727+
val writer2 = new PrintWriter(file2)
2728+
writer2.write("second")
2729+
writer2.close()
2730+
2731+
withDatabase("foo") {
2732+
withTable("foo.first") {
2733+
sql("CREATE DATABASE foo")
2734+
sql(
2735+
s"""CREATE TABLE foo.first (id STRING)
2736+
|USING csv OPTIONS (path='${file1.toURI}')
2737+
""".stripMargin)
2738+
sql("SELECT * FROM foo.first")
2739+
checkAnswer(spark.table("foo.first"), Row("first"))
2740+
2741+
// Dropping the database and again creating same table with different path
2742+
sql("DROP DATABASE foo CASCADE")
2743+
sql("CREATE DATABASE foo")
2744+
sql(
2745+
s"""CREATE TABLE foo.first (id STRING)
2746+
|USING csv OPTIONS (path='${file2.toURI}')
2747+
""".stripMargin)
2748+
sql("SELECT * FROM foo.first")
2749+
checkAnswer(spark.table("foo.first"), Row("second"))
2750+
}
2751+
}
2752+
}
2753+
}
27182754
}

0 commit comments

Comments (0)