From 91baab77f7e0a5102ac069846f0e2920bb2dd15a Mon Sep 17 00:00:00 2001
From: Kousuke Saruta
Date: Thu, 3 Dec 2020 23:47:43 -0800
Subject: [PATCH] [SPARK-33656][TESTS] Add option to keep container after tests finish for DockerJDBCIntegrationSuites for debug

### What changes were proposed in this pull request?

This PR adds an option to keep containers running after the DockerJDBCIntegrationSuites (e.g. DB2IntegrationSuite, PostgresIntegrationSuite) finish. The option is enabled by setting the system property `spark.test.docker.keepContainer` to `true`.

### Why are the changes needed?

If an error occurs during the tests, it is useful to keep the container around for debugging.

### Does this PR introduce _any_ user-facing change?

No.

### How was this patch tested?

I confirmed that the container is kept after the test with the following commands.
```
# With sbt
$ build/sbt -Dspark.test.docker.keepContainer=true -Pdocker-integration-tests -Phive -Phive-thriftserver package "testOnly org.apache.spark.sql.jdbc.MariaDBKrbIntegrationSuite"

# With Maven
$ build/mvn -Dspark.test.docker.keepContainer=true -Pdocker-integration-tests -Phive -Phive-thriftserver -Dtest=none -DwildcardSuites=org.apache.spark.sql.jdbc.MariaDBKrbIntegrationSuite test

$ docker container ls
```

I also confirmed that there are no regressions for all the subclasses of `DockerJDBCIntegrationSuite` with sbt/Maven:

* MariaDBKrbIntegrationSuite
* DB2KrbIntegrationSuite
* PostgresKrbIntegrationSuite
* MySQLIntegrationSuite
* PostgresIntegrationSuite
* DB2IntegrationSuite
* MsSqlServerIntegrationSuite
* OracleIntegrationSuite
* v2.MySQLIntegrationSuite
* v2.PostgresIntegrationSuite
* v2.DB2IntegrationSuite
* v2.MsSqlServerIntegrationSuite
* v2.OracleIntegrationSuite

NOTE: `DB2IntegrationSuite`, `v2.DB2IntegrationSuite` and `DB2KrbIntegrationSuite` can fail due to a connection timeout that is too short. That is a separate issue and I'll fix it in #30583.

Closes #30601 from sarutak/keepContainer.
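For quick reference, the mechanism boils down to the condensed sketch below: the flag is read once from the JVM system properties (so `-Dspark.test.docker.keepContainer=true` works with both sbt and Maven), and container cleanup is simply skipped when it is set. The object name and the `killAndRemove` callback here are illustrative only; the real change lives in `DockerJDBCIntegrationSuite.scala` (see the diff below).
```
// Condensed sketch of the new flag (illustrative names; the actual change is in
// DockerJDBCIntegrationSuite.scala).
object KeepContainerSketch {
  // -Dspark.test.docker.keepContainer=true leaves the container running after the suite.
  val keepContainer: Boolean =
    sys.props.getOrElse("spark.test.docker.keepContainer", "false").toBoolean

  // `killAndRemove` stands in for the docker-client calls made by the real suite.
  def cleanupContainer(killAndRemove: () => Unit): Unit = {
    if (!keepContainer) {
      killAndRemove()
    }
  }
}
```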
Authored-by: Kousuke Saruta
Signed-off-by: Dongjoon Hyun
---
 .../sql/jdbc/DockerJDBCIntegrationSuite.scala | 39 ++++++++++++-------
 pom.xml                                       |  2 +
 2 files changed, 28 insertions(+), 13 deletions(-)

diff --git a/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/DockerJDBCIntegrationSuite.scala b/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/DockerJDBCIntegrationSuite.scala
index 00b7b413a964d..d6270313cabea 100644
--- a/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/DockerJDBCIntegrationSuite.scala
+++ b/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/DockerJDBCIntegrationSuite.scala
@@ -25,6 +25,7 @@ import scala.collection.JavaConverters._
 import scala.util.control.NonFatal
 
 import com.spotify.docker.client._
+import com.spotify.docker.client.DockerClient.ListContainersParam
 import com.spotify.docker.client.exceptions.ImageNotFoundException
 import com.spotify.docker.client.messages.{ContainerConfig, HostConfig, PortBinding}
 import org.scalatest.concurrent.Eventually
@@ -95,7 +96,9 @@ abstract class DockerJDBCIntegrationSuite extends SharedSparkSession with Eventu
 
   protected val dockerIp = DockerUtils.getDockerIp()
   val db: DatabaseOnDocker
-  val connectionTimeout = timeout(2.minutes)
+  val connectionTimeout = timeout(5.minutes)
+  val keepContainer =
+    sys.props.getOrElse("spark.test.docker.keepContainer", "false").toBoolean
 
   private var docker: DockerClient = _
   // Configure networking (necessary for boot2docker / Docker Machine)
@@ -176,20 +179,11 @@ abstract class DockerJDBCIntegrationSuite extends SharedSparkSession with Eventu
 
   override def afterAll(): Unit = {
     try {
+      cleanupContainer()
+    } finally {
       if (docker != null) {
-        try {
-          if (containerId != null) {
-            docker.killContainer(containerId)
-            docker.removeContainer(containerId)
-          }
-        } catch {
-          case NonFatal(e) =>
-            logWarning(s"Could not stop container $containerId", e)
-        } finally {
-          docker.close()
-        }
+        docker.close()
       }
-    } finally {
       super.afterAll()
     }
   }
@@ -205,4 +199,23 @@
    * Prepare databases and tables for testing.
    */
   def dataPreparation(connection: Connection): Unit
+
+  private def cleanupContainer(): Unit = {
+    if (docker != null && containerId != null && !keepContainer) {
+      try {
+        docker.killContainer(containerId)
+      } catch {
+        case NonFatal(e) =>
+          val exitContainerIds =
+            docker.listContainers(ListContainersParam.withStatusExited()).asScala.map(_.id())
+          if (exitContainerIds.contains(containerId)) {
+            logWarning(s"Container $containerId already stopped")
+          } else {
+            logWarning(s"Could not stop container $containerId", e)
+          }
+      } finally {
+        docker.removeContainer(containerId)
+      }
+    }
+  }
 }
diff --git a/pom.xml b/pom.xml
index 4d6e3bbc95378..80097aec0f429 100644
--- a/pom.xml
+++ b/pom.xml
@@ -250,6 +250,7 @@
     -->
     <spark.test.home>${session.executionRootDirectory}</spark.test.home>
+    <spark.test.docker.keepContainer>false</spark.test.docker.keepContainer>
     <CodeCacheSize>1g</CodeCacheSize>
@@ -2626,6 +2627,7 @@
               <spark.ui.showConsoleProgress>false</spark.ui.showConsoleProgress>
               <spark.unsafe.exceptionOnMemoryLeak>true</spark.unsafe.exceptionOnMemoryLeak>
               <spark.test.webdriver.chrome.driver>${spark.test.webdriver.chrome.driver}</spark.test.webdriver.chrome.driver>
+              <spark.test.docker.keepContainer>${spark.test.docker.keepContainer}</spark.test.docker.keepContainer>
               <test.src.tables>__not_used__</test.src.tables>
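For readability, here is how the cleanup path reads once the hunks above are applied. This is an excerpt reassembled from the added lines, not standalone code: `docker`, `containerId`, `keepContainer` and `logWarning` are members of `DockerJDBCIntegrationSuite`.
```
  override def afterAll(): Unit = {
    try {
      cleanupContainer()
    } finally {
      // Close the docker-client connection whether or not the container was removed.
      if (docker != null) {
        docker.close()
      }
      super.afterAll()
    }
  }

  private def cleanupContainer(): Unit = {
    // Skipped entirely when -Dspark.test.docker.keepContainer=true is set.
    if (docker != null && containerId != null && !keepContainer) {
      try {
        docker.killContainer(containerId)
      } catch {
        case NonFatal(e) =>
          // If the container already exited, killing it fails; only warn about that.
          val exitContainerIds =
            docker.listContainers(ListContainersParam.withStatusExited()).asScala.map(_.id())
          if (exitContainerIds.contains(containerId)) {
            logWarning(s"Container $containerId already stopped")
          } else {
            logWarning(s"Could not stop container $containerId", e)
          }
      } finally {
        docker.removeContainer(containerId)
      }
    }
  }
```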