Skip to content

Commit

Permalink
[HUDI-8547] Disable flaky tests (apache#12286)
Browse files Browse the repository at this point in the history
Co-authored-by: Jonathan Vexler <=>
  • Loading branch information
jonvex authored Nov 19, 2024
1 parent b31c858 commit a92c21e
Show file tree
Hide file tree
Showing 2 changed files with 7 additions and 2 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -39,18 +39,20 @@ import org.apache.hudi.testutils.SparkClientFunctionalTestHarness
import org.apache.hudi.testutils.SparkClientFunctionalTestHarness.getSparkSqlConf
import org.apache.hudi.util.{JFunction, JavaConversions}
import org.apache.hudi.{DataSourceReadOptions, DataSourceWriteOptions, HoodieFileIndex, HoodieSparkUtils}

import org.apache.spark.SparkConf
import org.apache.spark.sql.catalyst.expressions.{AttributeReference, EqualTo, Expression, Literal}
import org.apache.spark.sql.types.StringType
import org.apache.spark.sql.{DataFrame, Row}
import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertTrue}
import org.junit.jupiter.api.{Tag, Test}
import org.junit.jupiter.api.{Disabled, Tag, Test}
import org.junit.jupiter.params.ParameterizedTest
import org.junit.jupiter.params.provider.Arguments.arguments
import org.junit.jupiter.params.provider.{Arguments, EnumSource, MethodSource}
import org.scalatest.Assertions.{assertResult, assertThrows}

import java.util.concurrent.Executors

import scala.collection.JavaConverters
import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future}
Expand Down Expand Up @@ -990,6 +992,7 @@ class TestSecondaryIndexPruning extends SparkClientFunctionalTestHarness {
/**
* Test case to write with updates and validate secondary index with clustering.
*/
@Disabled("[HUDI-8549]")
@ParameterizedTest
@EnumSource(classOf[HoodieTableType])
def testSecondaryIndexWithClusteringAndCleaning(tableType: HoodieTableType): Unit = {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ import org.apache.hudi.testutils.{DataSourceTestUtils, HoodieSparkClientTestBase
import org.apache.spark.sql._
import org.apache.spark.sql.streaming.{OutputMode, StreamingQuery, Trigger}
import org.apache.spark.sql.types.StructType
import org.junit.jupiter.api.{BeforeEach, Test}
import org.junit.jupiter.api.{BeforeEach, Disabled, Test}
import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue}
import org.junit.jupiter.params.ParameterizedTest
import org.junit.jupiter.params.provider.{EnumSource, ValueSource}
Expand Down Expand Up @@ -203,6 +203,7 @@ class TestStructuredStreaming extends HoodieSparkClientTestBase {
Await.result(f2, Duration("120s"))
}

@Disabled("[HUDI-8548]")
@ParameterizedTest
@EnumSource(value = classOf[HoodieTableType])
def testStructuredStreaming(tableType: HoodieTableType): Unit = {
Expand Down Expand Up @@ -256,6 +257,7 @@ class TestStructuredStreaming extends HoodieSparkClientTestBase {
!isAsyncClustering, isAsyncClustering, HoodieTestDataGenerator.DEFAULT_FIRST_PARTITION_PATH, checkClusteringResult)
}

@Disabled("[HUDI-8548]")
@ParameterizedTest
@ValueSource(booleans = Array(true, false))
def testStructuredStreamingWithCompaction(isAsyncCompaction: Boolean): Unit = {
Expand Down

0 comments on commit a92c21e

Please sign in to comment.