diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 0245282bc5..83dc4afd78 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,36 +1,83 @@ version: 2 updates: - - package-ecosystem: "gradle" - directory: "/" + - package-ecosystem: gradle + directory: / schedule: - interval: "weekly" - day: "saturday" + interval: weekly + day: saturday ignore: - - dependency-name: "*" - update-types: ["version-update:semver-major", "version-update:semver-minor"] + - dependency-name: '*' + update-types: + - version-update:semver-major + - version-update:semver-minor open-pull-requests-limit: 10 - labels: ["type: dependency-upgrade"] + labels: + - 'type: dependency-upgrade' groups: - development-dependencies: - update-types: ["patch"] - patterns: - - "com.gradle.enterprise" - - "io.spring.*" - - "org.ajoberstar.grgit" - - "org.antora" - - "io.micrometer:micrometer-docs-generator" - - "org.hibernate.validator:hibernate-validator" - - "org.awaitility:awaitility" - - "com.google.code.findbugs:jsr305" - - "org.springframework.boot*" + development-dependencies: + update-types: + - patch + patterns: + - com.gradle.* + - io.spring.* + - org.ajoberstar.grgit + - org.antora + - io.micrometer:micrometer-docs-generator + - org.hibernate.validator:hibernate-validator + - org.awaitility:awaitility + - com.github.spotbugs + - org.springframework.boot* + + - package-ecosystem: gradle + target-branch: 3.2.x + directory: / + schedule: + interval: weekly + day: saturday + ignore: + - dependency-name: '*' + update-types: + - version-update:semver-major + - version-update:semver-minor + open-pull-requests-limit: 10 + labels: + - 'type: dependency-upgrade' + groups: + development-dependencies: + update-types: + - patch + patterns: + - com.gradle.* + - io.spring.* + - org.ajoberstar.grgit + - org.antora + - io.micrometer:micrometer-docs-generator + - org.hibernate.validator:hibernate-validator + - org.awaitility:awaitility + - com.github.spotbugs + - org.springframework.boot* + + - package-ecosystem: github-actions + directory: / + schedule: + interval: weekly + day: saturday + labels: + - 'type: task' + groups: + development-dependencies: + patterns: + - '*' - - package-ecosystem: "github-actions" - directory: "/" + - package-ecosystem: github-actions + target-branch: 3.2.x + directory: / schedule: - interval: "weekly" - day: "saturday" - labels: ["type: task"] + interval: weekly + day: saturday + labels: + - 'type: task' groups: development-dependencies: patterns: - - "*" \ No newline at end of file + - '*' diff --git a/.github/workflows/announce-milestone-planning.yml b/.github/workflows/announce-milestone-planning.yml new file mode 100644 index 0000000000..f05fb42337 --- /dev/null +++ b/.github/workflows/announce-milestone-planning.yml @@ -0,0 +1,11 @@ +name: Announce Milestone Planning in Chat + +on: + milestone: + types: [ created, edited ] + +jobs: + announce-milestone-planning: + uses: spring-io/spring-github-workflows/.github/workflows/spring-announce-milestone-planning.yml@v5 + secrets: + SPRING_RELEASE_CHAT_WEBHOOK_URL: ${{ secrets.SPRING_RELEASE_GCHAT_WEBHOOK_URL }} diff --git a/.github/workflows/auto-cherry-pick.yml b/.github/workflows/auto-cherry-pick.yml index 4a9c4479ef..6ba14dde29 100644 --- a/.github/workflows/auto-cherry-pick.yml +++ b/.github/workflows/auto-cherry-pick.yml @@ -8,6 +8,6 @@ on: jobs: cherry-pick-commit: - uses: spring-io/spring-github-workflows/.github/workflows/spring-cherry-pick.yml@main + uses: 
spring-io/spring-github-workflows/.github/workflows/spring-cherry-pick.yml@v5 secrets: GH_ACTIONS_REPO_TOKEN: ${{ secrets.GH_ACTIONS_REPO_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/backport-issue.yml b/.github/workflows/backport-issue.yml index ae3ea05130..71e42771d5 100644 --- a/.github/workflows/backport-issue.yml +++ b/.github/workflows/backport-issue.yml @@ -7,6 +7,6 @@ on: jobs: backport-issue: - uses: spring-io/spring-github-workflows/.github/workflows/spring-backport-issue.yml@main + uses: spring-io/spring-github-workflows/.github/workflows/spring-backport-issue.yml@v5 secrets: GH_ACTIONS_REPO_TOKEN: ${{ secrets.GH_ACTIONS_REPO_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/ci-snapshot.yml b/.github/workflows/ci-snapshot.yml index 98e223d8ca..6bb7b785fb 100644 --- a/.github/workflows/ci-snapshot.yml +++ b/.github/workflows/ci-snapshot.yml @@ -17,12 +17,10 @@ concurrency: jobs: build-snapshot: - uses: spring-io/spring-github-workflows/.github/workflows/spring-artifactory-gradle-snapshot.yml@main + uses: spring-io/spring-github-workflows/.github/workflows/spring-artifactory-gradle-snapshot.yml@v5 with: gradleTasks: ${{ github.event_name == 'schedule' && '--rerun-tasks' || '' }} secrets: - GRADLE_ENTERPRISE_CACHE_USER: ${{ secrets.GRADLE_ENTERPRISE_CACHE_USER }} - GRADLE_ENTERPRISE_CACHE_PASSWORD: ${{ secrets.GRADLE_ENTERPRISE_CACHE_PASSWORD }} - GRADLE_ENTERPRISE_SECRET_ACCESS_KEY: ${{ secrets.GRADLE_ENTERPRISE_SECRET_ACCESS_KEY }} + DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }} ARTIFACTORY_USERNAME: ${{ secrets.ARTIFACTORY_USERNAME }} ARTIFACTORY_PASSWORD: ${{ secrets.ARTIFACTORY_PASSWORD }} \ No newline at end of file diff --git a/.github/workflows/deploy-docs.yml b/.github/workflows/deploy-docs.yml index 1771c58265..2065ee7187 100644 --- a/.github/workflows/deploy-docs.yml +++ b/.github/workflows/deploy-docs.yml @@ -16,4 +16,4 @@ permissions: jobs: dispatch-docs-build: if: github.repository_owner == 'spring-projects' - uses: spring-io/spring-github-workflows/.github/workflows/spring-dispatch-docs-build.yml@main + uses: spring-io/spring-github-workflows/.github/workflows/spring-dispatch-docs-build.yml@v5 diff --git a/.github/workflows/merge-dependabot-pr.yml b/.github/workflows/merge-dependabot-pr.yml index da9e0bf656..f513c72567 100644 --- a/.github/workflows/merge-dependabot-pr.yml +++ b/.github/workflows/merge-dependabot-pr.yml @@ -4,6 +4,7 @@ on: pull_request: branches: - main + - '*.x' run-name: Merge Dependabot PR ${{ github.ref_name }} @@ -11,6 +12,7 @@ jobs: merge-dependabot-pr: permissions: write-all - uses: spring-io/spring-github-workflows/.github/workflows/spring-merge-dependabot-pr.yml@main + uses: spring-io/spring-github-workflows/.github/workflows/spring-merge-dependabot-pr.yml@v5 with: - mergeArguments: --auto --squash \ No newline at end of file + mergeArguments: --auto --squash + autoMergeSnapshots: true \ No newline at end of file diff --git a/.github/workflows/pr-build.yml b/.github/workflows/pr-build.yml index e5457892cc..c9f71e8bdf 100644 --- a/.github/workflows/pr-build.yml +++ b/.github/workflows/pr-build.yml @@ -4,7 +4,8 @@ on: pull_request: branches: - main + - '*.x' jobs: build-pull-request: - uses: spring-io/spring-github-workflows/.github/workflows/spring-gradle-pull-request-build.yml@main + uses: spring-io/spring-github-workflows/.github/workflows/spring-gradle-pull-request-build.yml@v5 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index fd2fbeb864..be8a9e40c6 
100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -12,19 +12,16 @@ jobs: contents: write issues: write - uses: spring-io/spring-github-workflows/.github/workflows/spring-artifactory-gradle-release.yml@main + uses: spring-io/spring-github-workflows/.github/workflows/spring-artifactory-gradle-release.yml@v5 secrets: GH_ACTIONS_REPO_TOKEN: ${{ secrets.GH_ACTIONS_REPO_TOKEN }} - GRADLE_ENTERPRISE_CACHE_USER: ${{ secrets.GRADLE_ENTERPRISE_CACHE_USER }} - GRADLE_ENTERPRISE_CACHE_PASSWORD: ${{ secrets.GRADLE_ENTERPRISE_CACHE_PASSWORD }} - GRADLE_ENTERPRISE_SECRET_ACCESS_KEY: ${{ secrets.GRADLE_ENTERPRISE_SECRET_ACCESS_KEY }} + DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }} JF_ARTIFACTORY_SPRING: ${{ secrets.JF_ARTIFACTORY_SPRING }} ARTIFACTORY_USERNAME: ${{ secrets.ARTIFACTORY_USERNAME }} ARTIFACTORY_PASSWORD: ${{ secrets.ARTIFACTORY_PASSWORD }} - OSSRH_URL: ${{ secrets.OSSRH_URL }} OSSRH_S01_TOKEN_USERNAME: ${{ secrets.OSSRH_S01_TOKEN_USERNAME }} OSSRH_S01_TOKEN_PASSWORD: ${{ secrets.OSSRH_S01_TOKEN_PASSWORD }} OSSRH_STAGING_PROFILE_NAME: ${{ secrets.OSSRH_STAGING_PROFILE_NAME }} GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }} - SPRING_RELEASE_SLACK_WEBHOOK_URL: ${{ secrets.SPRING_RELEASE_SLACK_WEBHOOK_URL }} \ No newline at end of file + SPRING_RELEASE_CHAT_WEBHOOK_URL: ${{ secrets.SPRING_RELEASE_GCHAT_WEBHOOK_URL }} \ No newline at end of file diff --git a/.github/workflows/verify-staged-artifacts.yml b/.github/workflows/verify-staged-artifacts.yml index 19a42e5bdd..c3d36136fd 100644 --- a/.github/workflows/verify-staged-artifacts.yml +++ b/.github/workflows/verify-staged-artifacts.yml @@ -9,9 +9,7 @@ on: type: string env: - GRADLE_ENTERPRISE_CACHE_USERNAME: ${{ secrets.GRADLE_ENTERPRISE_CACHE_USER }} - GRADLE_ENTERPRISE_CACHE_PASSWORD: ${{ secrets.GRADLE_ENTERPRISE_CACHE_PASSWORD }} - GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GRADLE_ENTERPRISE_SECRET_ACCESS_KEY }} + DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }} ARTIFACTORY_USERNAME: ${{ secrets.ARTIFACTORY_USERNAME }} ARTIFACTORY_PASSWORD: ${{ secrets.ARTIFACTORY_PASSWORD }} diff --git a/CONTRIBUTING.adoc b/CONTRIBUTING.adoc index a864e3c614..4ac0afa449 100644 --- a/CONTRIBUTING.adoc +++ b/CONTRIBUTING.adoc @@ -56,7 +56,7 @@ _you should see branches on origin as well as upstream, including 'main'_ - For example, to create and switch to a new branch for issue GH-123: `git checkout -b GH-123` * You might be working on several different topic branches at any given time, but when at a stopping point for one of those branches, commit (a local operation). * Please follow the "Commit Guidelines" described in -https://git-scm.com/book/en/Distributed-Git-Contributing-to-a-Project[this chapter of Pro Git]. +https://git-scm.com/book/ms/v2/Distributed-Git-Contributing-to-a-Project[this chapter of Pro Git]. * Then to begin working on another issue (say GH-101): `git checkout GH-101`. The _-b_ flag is not needed if that branch already exists in your local repository. 
* When ready to resolve an issue or to collaborate with others, you can push your branch to origin (your fork), diff --git a/build.gradle b/build.gradle index d1df1c8481..9de0d05c3a 100644 --- a/build.gradle +++ b/build.gradle @@ -1,10 +1,9 @@ buildscript { - ext.kotlinVersion = '1.9.22' + ext.kotlinVersion = '1.9.25' ext.isCI = System.getenv('GITHUB_ACTION') repositories { - mavenCentral() gradlePluginPortal() - maven { url 'https://repo.spring.io/plugins-release-local' } + mavenCentral() if (version.endsWith('SNAPSHOT')) { maven { url 'https://repo.spring.io/snapshot' } } @@ -17,12 +16,12 @@ buildscript { plugins { id 'base' - id 'project-report' id 'idea' - id 'org.ajoberstar.grgit' version '5.2.1' + id 'org.ajoberstar.grgit' version '5.3.0' id 'io.spring.nohttp' version '0.0.11' - id 'io.spring.dependency-management' version '1.1.4' apply false - id 'com.github.spotbugs' version '6.0.7' + id 'io.spring.dependency-management' version '1.1.7' apply false + id 'com.github.spotbugs' version '6.0.27' + id 'io.freefair.aggregate-javadoc' version '8.10.2' } apply plugin: 'io.spring.nohttp' @@ -51,28 +50,28 @@ ext { } modifiedFiles.finalizeValueOnRead() - assertjVersion = '3.24.2' - awaitilityVersion = '4.2.0' - hamcrestVersion = '2.2' - hibernateValidationVersion = '8.0.1.Final' - jacksonBomVersion = '2.15.4' - jaywayJsonPathVersion = '2.8.0' + assertjVersion = '3.25.3' + awaitilityVersion = '4.2.2' + hamcrestVersion = '3.0' + hibernateValidationVersion = '8.0.2.Final' + jacksonBomVersion = '2.18.2' + jaywayJsonPathVersion = '2.9.0' junit4Version = '4.13.2' - junitJupiterVersion = '5.10.2' - kafkaVersion = '3.6.1' - kotlinCoroutinesVersion = '1.7.3' - log4jVersion = '2.22.1' - micrometerDocsVersion = '1.0.2' - micrometerVersion = '1.13.0-M1' - micrometerTracingVersion = '1.3.0-M1' - mockitoVersion = '5.8.0' - reactorVersion = '2023.0.3' + junitJupiterVersion = '5.11.4' + kafkaVersion = '3.8.1' + kotlinCoroutinesVersion = '1.8.1' + log4jVersion = '2.24.3' + micrometerDocsVersion = '1.0.4' + micrometerVersion = '1.14.2' + micrometerTracingVersion = '1.4.1' + mockitoVersion = '5.14.2' + reactorVersion = '2024.0.1' scalaVersion = '2.13' - springBootVersion = '3.2.2' // docs module - springDataVersion = '2024.0.0-M1' - springRetryVersion = '2.0.5' - springVersion = '6.1.4' - zookeeperVersion = '3.8.3' + springBootVersion = '3.3.7' // docs module + springDataVersion = '2024.1.1' + springRetryVersion = '2.0.11' + springVersion = '6.2.1' + zookeeperVersion = '3.8.4' idPrefix = 'kafka' @@ -122,14 +121,14 @@ allprojects { configure(javaProjects) { subproject -> apply plugin: 'java-library' - apply plugin: 'java' - apply from: "${rootProject.projectDir}/gradle/publish-maven.gradle" apply plugin: 'eclipse' apply plugin: 'idea' apply plugin: 'checkstyle' apply plugin: 'kotlin' apply plugin: 'kotlin-spring' + apply from: "${rootProject.projectDir}/gradle/publish-maven.gradle" + java { withJavadocJar() withSourcesJar() @@ -139,12 +138,12 @@ configure(javaProjects) { subproject -> } compileJava { - sourceCompatibility = 17 - targetCompatibility = 17 + options.release = 17 } compileTestJava { - sourceCompatibility = 17 + sourceCompatibility = JavaVersion.VERSION_17 + targetCompatibility = JavaVersion.VERSION_17 options.encoding = 'UTF-8' } @@ -179,8 +178,8 @@ configure(javaProjects) { subproject -> test { testLogging { - events "skipped", "failed" - showStandardStreams = project.hasProperty("showStandardStreams") ?: false + events 'skipped', 'failed' + showStandardStreams = 
project.hasProperty('showStandardStreams') ?: false showExceptions = true showStackTraces = true exceptionFormat = 'full' @@ -188,12 +187,11 @@ configure(javaProjects) { subproject -> maxHeapSize = '1536m' useJUnitPlatform() - } checkstyle { - configDirectory.set(rootProject.file("src/checkstyle")) - toolVersion = '10.12.5' + configDirectory.set(rootProject.file('src/checkstyle')) + toolVersion = '10.18.2' } publishing { @@ -205,7 +203,7 @@ configure(javaProjects) { subproject -> } } - task updateCopyrights { + tasks.register('updateCopyrights') { onlyIf { !isCI } inputs.files(modifiedFiles.filter { f -> f.path.contains(subproject.name) }) outputs.dir('build/classes') @@ -243,7 +241,7 @@ configure(javaProjects) { subproject -> 'Created-By': "JDK ${System.properties['java.version']} (${System.properties['java.specification.vendor']})", 'Implementation-Title': subproject.name, 'Implementation-Vendor-Id': subproject.group, - 'Implementation-Vendor': 'VMware Inc.', + 'Implementation-Vendor': 'Broadcom Inc.', 'Implementation-URL': linkHomepage, 'Automatic-Module-Name': subproject.name.replace('-', '.') // for Jigsaw ) @@ -278,6 +276,7 @@ project ('spring-kafka') { exclude group: 'org.springframework' } api "org.apache.kafka:kafka-clients:$kafkaVersion" + api 'io.micrometer:micrometer-observation' optionalApi "org.apache.kafka:kafka-streams:$kafkaVersion" optionalApi "org.jetbrains.kotlinx:kotlinx-coroutines-reactor:$kotlinCoroutinesVersion" optionalApi 'com.fasterxml.jackson.core:jackson-core' @@ -290,7 +289,7 @@ project ('spring-kafka') { } // Spring Data projection message binding support - optionalApi ("org.springframework.data:spring-data-commons") { + optionalApi ('org.springframework.data:spring-data-commons') { exclude group: 'org.springframework' exclude group: 'io.micrometer' } @@ -299,20 +298,14 @@ project ('spring-kafka') { optionalApi 'io.projectreactor:reactor-core' optionalApi 'io.projectreactor.kafka:reactor-kafka' optionalApi 'io.micrometer:micrometer-core' - api 'io.micrometer:micrometer-observation' optionalApi 'io.micrometer:micrometer-tracing' testImplementation project (':spring-kafka-test') testImplementation 'io.projectreactor:reactor-test' testImplementation "org.mockito:mockito-junit-jupiter:$mockitoVersion" testImplementation "org.hibernate.validator:hibernate-validator:$hibernateValidationVersion" - testImplementation ('io.micrometer:micrometer-observation-test') { - exclude group: "org.mockito" - } - testImplementation 'io.micrometer:micrometer-tracing-bridge-brave' - testImplementation 'io.micrometer:micrometer-tracing-test' testImplementation ('io.micrometer:micrometer-tracing-integration-test') { - exclude group: "org.mockito" + exclude group: 'org.mockito' } } } @@ -326,9 +319,7 @@ project('spring-kafka-bom') { dependencies { constraints { javaProjects.sort { "$it.name" }.each { - if (it.name != 'spring-kafka-docs') { - api it - } + api it } } } @@ -348,15 +339,14 @@ project ('spring-kafka-test') { dependencies { api 'org.springframework:spring-context' api 'org.springframework:spring-test' - api ("org.springframework.retry:spring-retry:$springRetryVersion") { - exclude group: 'org.springframework' - } + api "org.springframework.retry:spring-retry:$springRetryVersion" api ("org.apache.zookeeper:zookeeper:$zookeeperVersion") { exclude group: 'org.slf4j', module: 'slf4j-log4j12' exclude group: 'log4j' } api "org.apache.kafka:kafka-clients:$kafkaVersion:test" + api "org.apache.kafka:kafka-server:$kafkaVersion" api "org.apache.kafka:kafka-metadata:$kafkaVersion" api 
"org.apache.kafka:kafka-server-common:$kafkaVersion" api "org.apache.kafka:kafka-server-common:$kafkaVersion:test" @@ -406,15 +396,19 @@ tasks.register('filterMetricsDocsContent', Copy) { filter { line -> line.replaceAll('org.springframework.kafka.support.micrometer.', '').replaceAll('^Fully qualified n', 'N') } } -tasks.register('api', Javadoc) { - group = 'Documentation' - description = 'Generates aggregated Javadoc API documentation.' +dependencies { + javaProjects.each { + javadoc it + } +} + +javadoc { title = "${rootProject.description} ${version} API" options { encoding = 'UTF-8' - memberLevel = org.gradle.external.javadoc.JavadocMemberLevel.PROTECTED + memberLevel = JavadocMemberLevel.PROTECTED author = true - header = rootProject.description + header = project.description use = true overview = 'src/api/overview.html' splitIndex = true @@ -422,14 +416,14 @@ tasks.register('api', Javadoc) { addBooleanOption('Xdoclint:syntax', true) // only check syntax with doclint } - source javaProjects.collect { project -> - project.sourceSets.main.allJava - } - - classpath = files(javaProjects.collect { project -> - project.sourceSets.main.compileClasspath - }) destinationDir = file('build/api') + classpath = files().from { files(javaProjects.collect { it.sourceSets.main.compileClasspath }) } +} + +tasks.register('api') { + group = 'Documentation' + description = 'Generates aggregated Javadoc API documentation.' + dependsOn javadoc } tasks.register('docsZip', Zip) { @@ -442,7 +436,7 @@ tasks.register('docsZip', Zip) { include 'changelog.txt' } - from(api) { + from(javadoc) { into 'api' } } diff --git a/gradle.properties b/gradle.properties index a1412f9432..c3ccf6d51b 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,4 +1,4 @@ -version=3.2.0-M1 +version=3.3.2-SNAPSHOT org.gradle.jvmargs=-Xmx1536M -Dfile.encoding=UTF-8 org.gradle.caching=true org.gradle.parallel=true diff --git a/gradle/docs.gradle b/gradle/docs.gradle index 11a679bf49..cd8b83a708 100644 --- a/gradle/docs.gradle +++ b/gradle/docs.gradle @@ -1,36 +1,37 @@ -node { - version = '16.16.0' -} - antora { version = '3.2.0-alpha.2' playbook = file('src/main/antora/antora-playbook.yml') - options = ['to-dir' : project.layout.buildDirectory.dir('site').get().toString(), clean: true, fetch: !project.gradle.startParameter.offline, stacktrace: true] + options = [ + 'to-dir' : project.layout.buildDirectory.dir('site').get().toString(), + clean : true, + fetch : !project.gradle.startParameter.offline, + stacktrace: true + ] dependencies = [ - '@antora/atlas-extension': '1.0.0-alpha.1', - '@antora/collector-extension': '1.0.0-alpha.3', - '@asciidoctor/tabs': '1.0.0-beta.3', - '@springio/antora-extensions': '1.4.2', + '@antora/atlas-extension' : '1.0.0-alpha.1', + '@antora/collector-extension' : '1.0.0-alpha.3', + '@asciidoctor/tabs' : '1.0.0-beta.3', + '@springio/antora-extensions' : '1.4.2', '@springio/asciidoctor-extensions': '1.0.0-alpha.8', ] } -tasks.named("generateAntoraYml") { - asciidocAttributes = project.provider( { - return ['project-version' : project.version, - 'revnumber': project.version, - 'spring-version': project.version, +tasks.named('generateAntoraYml') { + asciidocAttributes = project.provider({ + return ['project-version': project.version, + 'revnumber' : project.version, + 'spring-version' : project.version, ] - } ) + }) baseAntoraYmlFile = file('src/main/antora/antora.yml') } -tasks.create(name: 'createAntoraPartials', type: Sync) { +tasks.register('createAntoraPartials', Sync) { from { 
project.rootProject.tasks.filterMetricsDocsContent.outputs } into layout.buildDirectory.dir('generated-antora-resources/modules/ROOT/partials') } -tasks.create('generateAntoraResources') { +tasks.register('generateAntoraResources') { dependsOn 'createAntoraPartials' dependsOn 'generateAntoraYml' } diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index d64cd49177..2c3521197d 100644 Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index db8c3baafe..68e8816d71 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,7 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionSha256Sum=9d926787066a081739e8200858338b4a69e837c3a821a33aca9db09dd4a41026 -distributionUrl=https\://services.gradle.org/distributions/gradle-8.5-bin.zip +distributionSha256Sum=d725d707bfabd4dfdc958c624003b3c80accc03f7037b5122c4b1d0ef15cecab +distributionUrl=https\://services.gradle.org/distributions/gradle-8.9-bin.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME diff --git a/gradlew b/gradlew index 1aa94a4269..f5feea6d6b 100755 --- a/gradlew +++ b/gradlew @@ -15,6 +15,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +# SPDX-License-Identifier: Apache-2.0 +# ############################################################################## # @@ -55,7 +57,7 @@ # Darwin, MinGW, and NonStop. # # (3) This script is generated from the Groovy template -# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# https://github.com/gradle/gradle/blob/HEAD/platforms/jvm/plugins-application/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt # within the Gradle project. # # You can find Gradle at https://github.com/gradle/gradle/. @@ -84,7 +86,8 @@ done # shellcheck disable=SC2034 APP_BASE_NAME=${0##*/} # Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) -APP_HOME=$( cd "${APP_HOME:-./}" > /dev/null && pwd -P ) || exit +APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s +' "$PWD" ) || exit # Use the maximum available, or set MAX_FD != -1 to use that value. MAX_FD=maximum diff --git a/gradlew.bat b/gradlew.bat index 6689b85bee..9b42019c79 100644 --- a/gradlew.bat +++ b/gradlew.bat @@ -13,6 +13,8 @@ @rem See the License for the specific language governing permissions and @rem limitations under the License. @rem +@rem SPDX-License-Identifier: Apache-2.0 +@rem @if "%DEBUG%"=="" @echo off @rem ########################################################################## @@ -43,11 +45,11 @@ set JAVA_EXE=java.exe %JAVA_EXE% -version >NUL 2>&1 if %ERRORLEVEL% equ 0 goto execute -echo. -echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. +echo. 1>&2 +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2 +echo. 1>&2 +echo Please set the JAVA_HOME variable in your environment to match the 1>&2 +echo location of your Java installation. 1>&2 goto fail @@ -57,11 +59,11 @@ set JAVA_EXE=%JAVA_HOME%/bin/java.exe if exist "%JAVA_EXE%" goto execute -echo. 
-echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. +echo. 1>&2 +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2 +echo. 1>&2 +echo Please set the JAVA_HOME variable in your environment to match the 1>&2 +echo location of your Java installation. 1>&2 goto fail diff --git a/samples/README.adoc b/samples/README.adoc index fff00cbed6..dadada8f7b 100644 --- a/samples/README.adoc +++ b/samples/README.adoc @@ -1,7 +1,9 @@ == Samples -* sample-01 - simple producer/consumer with dead-letter topic -* sample-02 - multi-method listener -* sample-03 - transactions -* sample-04 - topic based (non-blocking) retry -* sample-05 - global embedded Kafka testing +* sample-01 - Simple producer/consumer with dead-letter topic +* sample-02 - Multi-method listener +* sample-03 - Transactions +* sample-04 - Topic based (non-blocking) retry +* sample-05 - Global embedded Kafka testing +* sample-06 - Kafka Streams tests with TopologyTestDriver +* sample-07 - The New consumer rebalance protocol in spring-kafka diff --git a/samples/sample-01/pom.xml b/samples/sample-01/pom.xml index 4df914835b..ab08e189cc 100644 --- a/samples/sample-01/pom.xml +++ b/samples/sample-01/pom.xml @@ -5,7 +5,7 @@ com.example kafka-sample-01 - 3.0.2-SNAPSHOT + 3.2.0-SNAPSHOT jar kafka-sample-01 @@ -14,7 +14,7 @@ org.springframework.boot spring-boot-starter-parent - 3.2.1 + 3.2.5 diff --git a/samples/sample-02/pom.xml b/samples/sample-02/pom.xml index 700557b103..32dcc5c9ec 100644 --- a/samples/sample-02/pom.xml +++ b/samples/sample-02/pom.xml @@ -5,7 +5,7 @@ com.example kafka-sample-02 - 3.0.2-SNAPSHOT + 3.2.0-SNAPSHOT jar kafka-sample-02 @@ -14,7 +14,7 @@ org.springframework.boot spring-boot-starter-parent - 3.2.1 + 3.2.5 diff --git a/samples/sample-03/pom.xml b/samples/sample-03/pom.xml index dd75f45268..d522310aa8 100644 --- a/samples/sample-03/pom.xml +++ b/samples/sample-03/pom.xml @@ -5,7 +5,7 @@ com.example kafka-sample-03 - 3.0.2-SNAPSHOT + 3.2.0-SNAPSHOT jar kafka-sample-03 @@ -14,7 +14,7 @@ org.springframework.boot spring-boot-starter-parent - 3.2.1 + 3.2.5 diff --git a/samples/sample-04/pom.xml b/samples/sample-04/pom.xml index 5e2a6bd237..9c2d91151c 100644 --- a/samples/sample-04/pom.xml +++ b/samples/sample-04/pom.xml @@ -5,7 +5,7 @@ com.example kafka-sample-04 - 3.0.2-SNAPSHOT + 3.2.0-SNAPSHOT jar kafka-sample-04 @@ -14,7 +14,7 @@ org.springframework.boot spring-boot-starter-parent - 3.2.1 + 3.2.5 diff --git a/samples/sample-05/pom.xml b/samples/sample-05/pom.xml index 01e48ba9cb..66ce23ee95 100644 --- a/samples/sample-05/pom.xml +++ b/samples/sample-05/pom.xml @@ -5,13 +5,13 @@ org.springframework.boot spring-boot-starter-parent - 3.2.1 + 3.2.5 com.example kafka-sample-05 - 3.0.2-SNAPSHOT + 3.2.0-SNAPSHOT kafka-sample-05 Kafka Sample 5 diff --git a/samples/sample-05/src/test/java/com/example/Sample05Application1Tests.java b/samples/sample-05/src/test/java/com/example/Sample05Application1Tests.java index 8c61d12f43..537aa7997c 100644 --- a/samples/sample-05/src/test/java/com/example/Sample05Application1Tests.java +++ b/samples/sample-05/src/test/java/com/example/Sample05Application1Tests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,8 +16,6 @@ package com.example; -import static org.awaitility.Awaitility.await; - import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -28,6 +26,8 @@ import org.springframework.kafka.core.KafkaTemplate; import org.springframework.test.annotation.DirtiesContext; +import static org.awaitility.Awaitility.await; + /** * This test is going to fail from IDE since there is no exposed {@code spring.kafka.bootstrap-servers} system property. * Use Maven to run tests which enables global embedded Kafka broker via properties provided to Surefire plugin. diff --git a/samples/sample-05/src/test/java/com/example/Sample05Application2Tests.java b/samples/sample-05/src/test/java/com/example/Sample05Application2Tests.java index 6a567a66eb..8943ca9075 100644 --- a/samples/sample-05/src/test/java/com/example/Sample05Application2Tests.java +++ b/samples/sample-05/src/test/java/com/example/Sample05Application2Tests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,8 +16,6 @@ package com.example; -import static org.assertj.core.api.Assertions.assertThatExceptionOfType; - import java.util.concurrent.TimeUnit; import org.apache.kafka.common.errors.TimeoutException; @@ -29,6 +27,8 @@ import org.springframework.kafka.core.KafkaTemplate; import org.springframework.test.annotation.DirtiesContext; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; + /** * This test is going to fail from IDE since there is no exposed {@code spring.kafka.bootstrap-servers} system property. * This test demonstrates that global embedded Kafka broker config for {@code auto.create.topics.enable=false} diff --git a/samples/sample-06/README.adoc b/samples/sample-06/README.adoc new file mode 100644 index 0000000000..0bfaa0fda2 --- /dev/null +++ b/samples/sample-06/README.adoc @@ -0,0 +1,36 @@ +== Sample 6 + +This sample demonstrates a simple Kafka Streams topology tested with TopologyTestDriver. + +The application contains a simple Kafka Streams topology that counts the keys seen so far in a stateful manner. +The corresponding `TopologyTestDriver` based JUnit test verifies the behavior of the business logic in the Kafka Streams topology. + + +Console output describes the topology as shown below: + + . 
____ _ __ _ _ + /\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \ +( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \ + \\/ ___)| |_)| | | | | || (_| | ) ) ) ) + ' |____| .__|_| |_|_| |_\__, | / / / / + =========|_|==============|___/=/_/_/_/ + :: Spring Boot :: (v2.5.2) + +2021-06-30 17:38:33.637 INFO 92063 --- [ main] com.example.ApplicationTests : Starting ApplicationTests using Java 11.0.10 on C02FL1KSMD6T with PID 92063 (started by igomez in /Users/igomez/Projects/spring-kafka/samples/sample-05) +2021-06-30 17:38:33.638 INFO 92063 --- [ main] com.example.ApplicationTests : The following profiles are active: test +2021-06-30 17:38:35.027 INFO 92063 --- [ main] com.example.ApplicationTests : Started ApplicationTests in 1.73 seconds (JVM running for 2.833) +2021-06-30 17:38:35.695 INFO 92063 --- [ main] com.example.ApplicationTests : Topologies: + Sub-topology: 0 + Source: KSTREAM-SOURCE-0000000000 (topics: [input]) + --> KSTREAM-AGGREGATE-0000000002 + Processor: KSTREAM-AGGREGATE-0000000002 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000001]) + --> KTABLE-SUPPRESS-0000000003 + <-- KSTREAM-SOURCE-0000000000 + Processor: KTABLE-SUPPRESS-0000000003 (stores: [KTABLE-SUPPRESS-STATE-STORE-0000000004]) + --> KTABLE-TOSTREAM-0000000005 + <-- KSTREAM-AGGREGATE-0000000002 + Processor: KTABLE-TOSTREAM-0000000005 (stores: []) + --> KSTREAM-SINK-0000000006 + <-- KTABLE-SUPPRESS-0000000003 + Sink: KSTREAM-SINK-0000000006 (topic: output) + <-- KTABLE-TOSTREAM-0000000005 diff --git a/samples/sample-06/pom.xml b/samples/sample-06/pom.xml new file mode 100644 index 0000000000..117beeb954 --- /dev/null +++ b/samples/sample-06/pom.xml @@ -0,0 +1,128 @@ + + + 4.0.0 + + com.example + kafka-sample-06 + 3.2.0-SNAPSHOT + jar + + kafka-sample-06 + Kafka Sample 6 + + org.springframework.boot + spring-boot-starter-parent + 3.2.5 + + + + + UTF-8 + UTF-8 + 17 + + + + + + org.springframework.kafka + spring-kafka + + + + org.apache.kafka + kafka-streams + + + + org.springframework.boot + spring-boot-starter-test + test + + + + org.springframework.boot + spring-boot-starter-web + + + + org.apache.kafka + kafka-streams-test-utils + test + + + + org.apache.kafka + kafka-clients + test + test + + + + org.awaitility + awaitility + test + + + + + + + org.springframework.boot + spring-boot-maven-plugin + + + + + + + spring-snapshots + Spring Snapshots + https://repo.spring.io/libs-snapshot-local + + + spring-milestones + Spring milestones + https://repo.spring.io/libs-milestone-local + + + rsocket-snapshots + RSocket Snapshots + https://oss.jfrog.org/oss-snapshot-local + + true + + + + spring-releases + Spring Releases + https://repo.spring.io/release + + + + + spring-snapshots + Spring Snapshots + https://repo.spring.io/snapshot + + true + + + + spring-milestones + Spring Milestones + https://repo.spring.io/milestone + + false + + + + spring-releases + Spring Releases + https://repo.spring.io/release + + + + + diff --git a/samples/sample-06/src/main/java/com/example/Application.java b/samples/sample-06/src/main/java/com/example/Application.java new file mode 100644 index 0000000000..3e21fa45da --- /dev/null +++ b/samples/sample-06/src/main/java/com/example/Application.java @@ -0,0 +1,41 @@ +/* + * Copyright 2024 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.kafka.annotation.EnableKafkaStreams; + +/** + * + * @author Nacho Munoz + * @since 3.2.0 + */ +@EnableKafkaStreams +@SpringBootApplication +public class Application { + + private final Logger logger = LoggerFactory.getLogger(Application.class); + + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } + +} diff --git a/samples/sample-06/src/main/java/com/example/Topology.java b/samples/sample-06/src/main/java/com/example/Topology.java new file mode 100644 index 0000000000..a58741e824 --- /dev/null +++ b/samples/sample-06/src/main/java/com/example/Topology.java @@ -0,0 +1,64 @@ +/* + * Copyright 2024 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example; + +import java.time.Duration; + +import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.streams.StreamsBuilder; +import org.apache.kafka.streams.kstream.Consumed; +import org.apache.kafka.streams.kstream.Suppressed; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.context.annotation.Configuration; +import org.springframework.stereotype.Component; + + +/** + * A basic topology that counts records by key and materialises the output into a new topic + * + * @author Nacho Munoz + * @author Soby Chacko + * @since 3.2.0 + */ +@Configuration +@Component +public class Topology { + private final String inputTopic; + + private final String outputTopic; + + @Autowired + public Topology(@Value("${input-topic.name}") final String inputTopic, + @Value("${output-topic.name}") final String outputTopic) { + this.inputTopic = inputTopic; + this.outputTopic = outputTopic; + } + + @Autowired + public void defaultTopology(final StreamsBuilder builder) { + builder.stream(inputTopic, Consumed.with(Serdes.Integer(), Serdes.String())) + .groupByKey() + .count() + .suppress(Suppressed.untilTimeLimit(Duration.ofMillis(5), Suppressed.BufferConfig.unbounded())) + .toStream() + .to(outputTopic); + + } + +} diff --git a/samples/sample-06/src/main/resources/application-test.properties b/samples/sample-06/src/main/resources/application-test.properties new file mode 100644 index 0000000000..af04773f38 --- /dev/null +++ b/samples/sample-06/src/main/resources/application-test.properties @@ -0,0 +1,6 @@ +logging.level.root=off +logging.level.com.example=info + +bootstrap.servers= +spring.kafka.properties.bootstrap.servers=${bootstrap.servers} +spring.kafka.streams.application-id=Sample-06-Service-Test diff --git a/samples/sample-06/src/main/resources/application.properties b/samples/sample-06/src/main/resources/application.properties new file mode 100644 index 0000000000..c6fae65697 --- /dev/null +++ b/samples/sample-06/src/main/resources/application.properties @@ -0,0 +1,4 @@ +logging.level.root=off +logging.level.com.example=info + +spring.kafka.streams.application-id=Sample-06-Service diff --git a/samples/sample-06/src/main/resources/application.yml b/samples/sample-06/src/main/resources/application.yml new file mode 100644 index 0000000000..1f06f69c7f --- /dev/null +++ b/samples/sample-06/src/main/resources/application.yml @@ -0,0 +1,6 @@ +logging: + level.root: info +input-topic: + name: input +output-topic: + name: output diff --git a/samples/sample-06/src/test/java/com/example/ApplicationTests.java b/samples/sample-06/src/test/java/com/example/ApplicationTests.java new file mode 100644 index 0000000000..8493312d2d --- /dev/null +++ b/samples/sample-06/src/test/java/com/example/ApplicationTests.java @@ -0,0 +1,89 @@ +/* + * Copyright 2024 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example; + +import java.time.Duration; +import java.util.List; + +import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.streams.TestInputTopic; +import org.apache.kafka.streams.TestOutputTopic; +import org.apache.kafka.streams.TopologyTestDriver; +import org.awaitility.Awaitility; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.kafka.config.StreamsBuilderFactoryBean; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * @author Nacho Munoz + * @author Soby Chacko + * @since 3.2.0 + */ +@SpringBootTest +public class ApplicationTests { + private final Logger logger = LoggerFactory.getLogger(ApplicationTests.class); + + private TopologyTestDriver testDriver; + + @Value("${input-topic.name}") + private String inputTopicName; + + @Value("${output-topic.name}") + private String outputTopicName; + + private TestInputTopic inputTopic; + + private TestOutputTopic outputTopic; + + @Autowired + private StreamsBuilderFactoryBean streamsBuilder; + + @BeforeEach + public void setup() { + this.testDriver = new TopologyTestDriver(streamsBuilder.getTopology(), streamsBuilder.getStreamsConfiguration()); + logger.info(streamsBuilder.getTopology().describe().toString()); + this.inputTopic = testDriver.createInputTopic(inputTopicName, Serdes.Integer().serializer(), Serdes.String().serializer()); + this.outputTopic = testDriver.createOutputTopic(outputTopicName, Serdes.Integer().deserializer(), Serdes.Long().deserializer()); + } + + @AfterEach + public void after() { + if (testDriver != null) { + this.testDriver.close(); + } + } + + @Test + public void testTopologyLogic() { + inputTopic.pipeInput(1, "test", 1L); + inputTopic.pipeInput(1, "test", 10L); + inputTopic.pipeInput(2, "test", 2L); + + Awaitility.waitAtMost(Duration.ofSeconds(5)).until(() -> outputTopic.getQueueSize() == 2L); + assertThat(outputTopic.readValuesToList()).isEqualTo(List.of(2L, 1L)); + } + +} diff --git a/samples/sample-07/.gitignore b/samples/sample-07/.gitignore new file mode 100644 index 0000000000..c2065bc262 --- /dev/null +++ b/samples/sample-07/.gitignore @@ -0,0 +1,37 @@ +HELP.md +.gradle +build/ +!gradle/wrapper/gradle-wrapper.jar +!**/src/main/**/build/ +!**/src/test/**/build/ + +### STS ### +.apt_generated +.classpath +.factorypath +.project +.settings +.springBeans +.sts4-cache +bin/ +!**/src/main/**/bin/ +!**/src/test/**/bin/ + +### IntelliJ IDEA ### +.idea +*.iws +*.iml +*.ipr +out/ +!**/src/main/**/out/ +!**/src/test/**/out/ + +### NetBeans ### +/nbproject/private/ +/nbbuild/ +/dist/ +/nbdist/ +/.nb-gradle/ + +### VS Code ### +.vscode/ diff --git a/samples/sample-07/README.adoc b/samples/sample-07/README.adoc new file mode 100644 index 0000000000..11bfecb6aa --- /dev/null +++ b/samples/sample-07/README.adoc @@ -0,0 +1,50 @@ +== Sample 7 + +This sample demonstrates the application of the new consumer rebalance protocol in Spring for Apache Kafka. + +The new consumer rebalance protocol refers to the Server Side rebalance protocol proposed in link:https://cwiki.apache.org/confluence/display/KAFKA/KIP-848%3A+The+Next+Generation+of+the+Consumer+Rebalance+Protocol[KIP-848]. 
+ +`Spring Boot` starts the `Kafka Broker` container defined in the `compose.yaml` file upon startup. + +```yaml +version: '3' +services: + broker: + image: bitnami/kafka:3.7.0 + ... + # KIP-848 + KAFKA_CFG_GROUP_COORDINATOR_REBALANCE_PROTOCOLS: "classic,consumer" + KAFKA_CFG_TRANSACTION_PARTITION_VERIFICATION_ENABLE: "false" +``` + +The `group.protocol = consumer` setting must be added to the `Consumer` configuration to apply the new consumer rebalance protocol. + +The `group.protocol` can be configured in the `resources/application.yaml` as follows: + +```yaml +spring: + kafka: + consumer: + properties: + group.protocol: consumer +``` + +Next, the `Consumer` created by `@KafkaListener` will request a subscription to the `test-topic` from the `Broker`. + +The `Broker` will then send the topic partition assignment information to the `Consumer`. This means that the `Consumer` rebalancing has finished, and the `Consumer` has started to poll messages. + +```java +@Component +public class Sample07KafkaListener { + + @KafkaListener(topics = "test-topic", groupId = "sample07-1") + public void listenWithGroup1(String message) { + System.out.println("Received message at group sample07-1: " + message); + } + + @KafkaListener(topics = "test-topic", groupId = "sample07-2") + public void listenWithGroup2(String message) { + System.out.println("Received message at group sample07-2: " + message); + } +} +``` diff --git a/samples/sample-07/build.gradle b/samples/sample-07/build.gradle new file mode 100644 index 0000000000..923f395209 --- /dev/null +++ b/samples/sample-07/build.gradle @@ -0,0 +1,28 @@ +plugins { + id 'java' + id 'org.springframework.boot' version '3.3.0-SNAPSHOT' + id 'io.spring.dependency-management' version '1.1.5' +} + +group = 'com.example' +version = '3.2.0-SNAPSHOT' + +java { + sourceCompatibility = '17' +} + +repositories { + mavenCentral() + maven { url 'https://repo.spring.io/milestone' } + maven { url 'https://repo.spring.io/snapshot' } +} + +dependencies { + implementation 'org.springframework.boot:spring-boot-starter' + implementation 'org.springframework.kafka:spring-kafka' + developmentOnly 'org.springframework.boot:spring-boot-docker-compose' +} + +tasks.named('test') { + useJUnitPlatform() +} diff --git a/samples/sample-07/compose.yaml b/samples/sample-07/compose.yaml new file mode 100644 index 0000000000..6f92091d48 --- /dev/null +++ b/samples/sample-07/compose.yaml @@ -0,0 +1,34 @@ +version: '3' +services: + broker: + image: bitnami/kafka:3.7.0 + hostname: broker + container_name: broker + ports: + - "9092:9092" + - "10000:9094" + environment: + # Kraft Settings + KAFKA_CFG_NODE_ID: 0 + KAFKA_KRAFT_CLUSTER_ID: HsDBs9l6UUmQq7Y5E6bNlw + KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: 0@localhost:9093 + KAFKA_CFG_PROCESS_ROLES: controller,broker + + # Listeners + KAFKA_CFG_LISTENERS: INTERNAL://broker:29092, PLAINTEXT://0.0.0.0:9092, EXTERNAL://:9094, CONTROLLER://:9093 + KAFKA_CFG_ADVERTISED_LISTENERS: INTERNAL://broker:29092, PLAINTEXT://broker:9092, EXTERNAL://127.0.0.1:10000 + KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,PLAINTEXT:PLAINTEXT,INTERNAL:PLAINTEXT + KAFKA_CFG_CONTROLLER_LISTENER_NAMES: CONTROLLER + KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: "true" + KAFKA_CFG_INTER_BROKER_LISTENER_NAME: INTERNAL + KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 + + # Clustering + KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR: 1 + KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 + KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: 1 
+ + # KIP-848 + KAFKA_CFG_GROUP_COORDINATOR_REBALANCE_PROTOCOLS: "classic,consumer" + KAFKA_CFG_TRANSACTION_PARTITION_VERIFICATION_ENABLE: "false" diff --git a/samples/sample-07/gradle/wrapper/gradle-wrapper.jar b/samples/sample-07/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 0000000000..e6441136f3 Binary files /dev/null and b/samples/sample-07/gradle/wrapper/gradle-wrapper.jar differ diff --git a/samples/sample-07/gradle/wrapper/gradle-wrapper.properties b/samples/sample-07/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 0000000000..b82aa23a4f --- /dev/null +++ b/samples/sample-07/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,7 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-8.7-bin.zip +networkTimeout=10000 +validateDistributionUrl=true +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/samples/sample-07/gradlew b/samples/sample-07/gradlew new file mode 100755 index 0000000000..1aa94a4269 --- /dev/null +++ b/samples/sample-07/gradlew @@ -0,0 +1,249 @@ +#!/bin/sh + +# +# Copyright © 2015-2021 the original authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################## +# +# Gradle start up script for POSIX generated by Gradle. +# +# Important for running: +# +# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is +# noncompliant, but you have some other compliant shell such as ksh or +# bash, then to run this script, type that shell name before the whole +# command line, like: +# +# ksh Gradle +# +# Busybox and similar reduced shells will NOT work, because this script +# requires all of these POSIX shell features: +# * functions; +# * expansions «$var», «${var}», «${var:-default}», «${var+SET}», +# «${var#prefix}», «${var%suffix}», and «$( cmd )»; +# * compound commands having a testable exit status, especially «case»; +# * various built-in commands including «command», «set», and «ulimit». +# +# Important for patching: +# +# (2) This script targets any POSIX shell, so it avoids extensions provided +# by Bash, Ksh, etc; in particular arrays are avoided. +# +# The "traditional" practice of packing multiple parameters into a +# space-separated string is a well documented source of bugs and security +# problems, so this is (mostly) avoided, by progressively accumulating +# options in "$@", and eventually passing that to Java. +# +# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, +# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; +# see the in-line comments for details. +# +# There are tweaks for specific operating systems such as AIX, CygWin, +# Darwin, MinGW, and NonStop. +# +# (3) This script is generated from the Groovy template +# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# within the Gradle project. 
+# +# You can find Gradle at https://github.com/gradle/gradle/. +# +############################################################################## + +# Attempt to set APP_HOME + +# Resolve links: $0 may be a link +app_path=$0 + +# Need this for daisy-chained symlinks. +while + APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path + [ -h "$app_path" ] +do + ls=$( ls -ld "$app_path" ) + link=${ls#*' -> '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac +done + +# This is normally unused +# shellcheck disable=SC2034 +APP_BASE_NAME=${0##*/} +# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) +APP_HOME=$( cd "${APP_HOME:-./}" > /dev/null && pwd -P ) || exit + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD=maximum + +warn () { + echo "$*" +} >&2 + +die () { + echo + echo "$*" + echo + exit 1 +} >&2 + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD=$JAVA_HOME/jre/sh/java + else + JAVACMD=$JAVA_HOME/bin/java + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD=java + if ! command -v java >/dev/null 2>&1 + then + die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +fi + +# Increase the maximum file descriptors if we can. +if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then + case $MAX_FD in #( + max*) + # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac +fi + +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. 
+ +# For Cygwin or MSYS, switch paths to Windows format before running java +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) + + # Now convert the arguments - kludge to limit ourselves to /bin/sh + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) + fi + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. + shift # remove old arg + set -- "$@" "$arg" # push replacement arg + done +fi + + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Collect all arguments for the java command: +# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, +# and any embedded shellness will be escaped. +# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be +# treated as '${Hostname}' itself on the command line. + +set -- \ + "-Dorg.gradle.appname=$APP_BASE_NAME" \ + -classpath "$CLASSPATH" \ + org.gradle.wrapper.GradleWrapperMain \ + "$@" + +# Stop when "xargs" is not available. +if ! command -v xargs >/dev/null 2>&1 +then + die "xargs is not available" +fi + +# Use "xargs" to parse quoted args. +# +# With -n1 it outputs one arg per line, with the quotes and backslashes removed. +# +# In Bash we could simply go: +# +# readarray ARGS < <( xargs -n1 <<<"$var" ) && +# set -- "${ARGS[@]}" "$@" +# +# but POSIX shell has neither arrays nor command substitution, so instead we +# post-process each arg (as a line of input to sed) to backslash-escape any +# character that might be a shell metacharacter, then use eval to reverse +# that process (while maintaining the separation between arguments), and wrap +# the whole thing up as a single "set" statement. +# +# This will of course break if any of these variables contains a newline or +# an unmatched quote. +# + +eval "set -- $( + printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | + xargs -n1 | + sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | + tr '\n' ' ' + )" '"$@"' + +exec "$JAVACMD" "$@" diff --git a/samples/sample-07/gradlew.bat b/samples/sample-07/gradlew.bat new file mode 100644 index 0000000000..7101f8e467 --- /dev/null +++ b/samples/sample-07/gradlew.bat @@ -0,0 +1,92 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. 
+@rem + +@if "%DEBUG%"=="" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%"=="" set DIRNAME=. +@rem This is normally unused +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if %ERRORLEVEL% equ 0 goto execute + +echo. 1>&2 +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2 +echo. 1>&2 +echo Please set the JAVA_HOME variable in your environment to match the 1>&2 +echo location of your Java installation. 1>&2 + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. 1>&2 +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2 +echo. 1>&2 +echo Please set the JAVA_HOME variable in your environment to match the 1>&2 +echo location of your Java installation. 1>&2 + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if %ERRORLEVEL% equ 0 goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +set EXIT_CODE=%ERRORLEVEL% +if %EXIT_CODE% equ 0 set EXIT_CODE=1 +if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% +exit /b %EXIT_CODE% + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/samples/sample-07/settings.gradle b/samples/sample-07/settings.gradle new file mode 100644 index 0000000000..1a7026fb88 --- /dev/null +++ b/samples/sample-07/settings.gradle @@ -0,0 +1,9 @@ +pluginManagement { + repositories { + maven { url 'https://repo.spring.io/milestone' } + maven { url 'https://repo.spring.io/snapshot' } + gradlePluginPortal() + } +} + +rootProject.name = 'sample-07' diff --git a/samples/sample-07/src/main/java/com/example/sample07/Sample07Application.java b/samples/sample-07/src/main/java/com/example/sample07/Sample07Application.java new file mode 100644 index 0000000000..020e1a00aa --- /dev/null +++ b/samples/sample-07/src/main/java/com/example/sample07/Sample07Application.java @@ -0,0 +1,38 @@ +/* + * Copyright 2022-2024 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.sample07;
+
+import org.springframework.boot.SpringApplication;
+import org.springframework.boot.autoconfigure.SpringBootApplication;
+
+/**
+ * New consumer rebalance protocol sample whose only purpose is to demonstrate the application
+ * of the new Consumer Rebalance Protocol in Spring Kafka.
+ *
+ * @author Sanghyeok An
+ *
+ * @since 3.2.0
+ */
+
+@SpringBootApplication
+public class Sample07Application {
+
+    public static void main(String[] args) {
+        SpringApplication.run(Sample07Application.class, args);
+    }
+
+}
diff --git a/samples/sample-07/src/main/java/com/example/sample07/Sample07KafkaListener.java b/samples/sample-07/src/main/java/com/example/sample07/Sample07KafkaListener.java
new file mode 100644
index 0000000000..3c4195f059
--- /dev/null
+++ b/samples/sample-07/src/main/java/com/example/sample07/Sample07KafkaListener.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2022-2024 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.sample07;
+
+import org.springframework.kafka.annotation.KafkaListener;
+import org.springframework.stereotype.Component;
+
+/**
+ * New consumer rebalance protocol sample whose only purpose is to demonstrate the application
+ * of the new Consumer Rebalance Protocol in Spring Kafka.
+ * Each consumer subscribes to test-topic with a different group id.
+ * Then, the new consumer rebalance protocol completes successfully.
+ *
+ * @author Sanghyeok An
+ *
+ * @since 3.2.0
+ */
+
+@Component
+public class Sample07KafkaListener {
+
+    @KafkaListener(topics = "test-topic", groupId = "sample07-1")
+    public void listenWithGroup1(String message) {
+        System.out.println("Received message at group sample07-1: " + message);
+    }
+
+    @KafkaListener(topics = "test-topic", groupId = "sample07-2")
+    public void listenWithGroup2(String message) {
+        System.out.println("Received message at group sample07-2: " + message);
+    }
+}
diff --git a/samples/sample-07/src/main/resources/application.yaml b/samples/sample-07/src/main/resources/application.yaml
new file mode 100644
index 0000000000..557545e087
--- /dev/null
+++ b/samples/sample-07/src/main/resources/application.yaml
@@ -0,0 +1,16 @@
+spring:
+  docker:
+    compose:
+      lifecycle-management: start_and_stop
+      start:
+        command: up
+      stop:
+        command: down
+        timeout: 10s
+  kafka:
+    consumer:
+      bootstrap-servers: localhost:10000
+      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
+      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
+      properties:
+        group.protocol: consumer
diff --git a/settings.gradle b/settings.gradle
index 272037ac85..a81eefa98d 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -1,14 +1,12 @@
 pluginManagement {
 	repositories {
-		mavenCentral()
 		gradlePluginPortal()
-		maven { url 'https://repo.spring.io/release' }
+		mavenCentral()
 	}
 }
 
 plugins {
-	id 'com.gradle.enterprise' version '3.15.1'
-	id 'io.spring.ge.conventions' version '0.0.15'
+	id 'io.spring.develocity.conventions' version '0.0.22'
 }
 
 rootProject.name = 'spring-kafka-dist'
diff --git a/spring-kafka-bom/spring-amqp-bom.txt b/spring-kafka-bom/spring-kafka-bom.txt
similarity index 100%
rename from spring-kafka-bom/spring-amqp-bom.txt
rename to spring-kafka-bom/spring-kafka-bom.txt
diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/appendix/change-history.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/appendix/change-history.adoc
index 26aeb4ec86..7842d96056 100644
--- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/appendix/change-history.adoc
+++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/appendix/change-history.adoc
@@ -1,6 +1,131 @@
 [[history]]
 = Change History
 
+[[what-s-new-in-3-2-since-3-1]]
+== What's New in 3.2 Since 3.1
+:page-section-summary-toc: 1
+
+This section covers the changes made from version 3.1 to version 3.2.
+For changes in earlier versions, see xref:appendix/change-history.adoc[Change History].
+
+[[x32-kafka-client-version]]
+=== Kafka Client Version
+
+This version requires the 3.7.0 `kafka-clients`.
+The 3.7.0 version of the Kafka client introduces the new consumer group protocol.
+For more details and its limitations, see https://cwiki.apache.org/confluence/display/KAFKA/The+Next+Generation+of+the+Consumer+Rebalance+Protocol+%28KIP-848%29+-+Early+Access+Release+Notes[KIP-848].
+The new consumer group protocol is an early access release and is not meant to be used in production.
+In this version, it is recommended for testing purposes only.
+Therefore, Spring for Apache Kafka supports this new consumer group protocol only to the extent of the testing-level support available in `kafka-clients` itself.
+By default, Spring for Apache Kafka uses the classic consumer group protocol; when testing the new consumer group protocol, it must be opted into via the `group.protocol` property on the consumer.
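+
+For example, a consumer can opt into the new protocol like this (a minimal sketch, not from the original docs; the bootstrap address and group id are arbitrary):
+
+[source, java]
+----
+// A minimal sketch: opting a consumer into the new (KIP-848) group protocol.
+// group.protocol=consumer is the kafka-clients 3.7 opt-in; testing only.
+Map<String, Object> props = new HashMap<>();
+props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
+props.put(ConsumerConfig.GROUP_ID_CONFIG, "my-test-group");
+props.put("group.protocol", "consumer");
+DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(props,
+        new StringDeserializer(), new StringDeserializer());
+----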
+
+[[x32-testing-support-changes]]
+=== Testing Support Changes
+
+The `kraft` mode is disabled in `EmbeddedKafka` by default and users wanting to use the `kraft` mode must enable it.
+This is due to certain instabilities observed while using `EmbeddedKafka` in `kraft` mode, especially when testing the new consumer group protocol.
+The new consumer group protocol is only supported in `kraft` mode; because of this, testing the new protocol needs to be done against a real Kafka cluster and not against `EmbeddedKafka`, which is based on the `KafkaClusterTestKit`.
+In addition, some other race conditions were observed while running multiple `KafkaListener` methods with `EmbeddedKafka` in `kraft` mode.
+Until these issues are resolved, the `kraft` default on `EmbeddedKafka` will remain `false`.
+
+[[x32-kafka-streams-iqs-support]]
+=== Kafka Streams Interactive Query Support
+
+A new API, `KafkaStreamsInteractiveQuerySupport`, provides access to queryable stores used in Kafka Streams interactive queries.
+See xref:streams.adoc#kafka-streams-iq-support[Kafka Streams Interactive Support] for more details.
+
+[[x32-tiss]]
+=== TransactionIdSuffixStrategy
+
+A new `TransactionIdSuffixStrategy` interface was introduced to manage the `transactional.id` suffix.
+The default implementation is `DefaultTransactionIdSuffixStrategy`; when `maxCache` is set greater than zero, it can reuse the `transactional.id` within a specific range; otherwise, suffixes are generated on the fly by incrementing a counter.
+See xref:kafka/transactions.adoc#transaction-id-suffix-fixed[Fixed TransactionIdSuffix] for more information.
+
+[[x32-async-return]]
+=== Async @KafkaListener Return
+
+`@KafkaListener` (and `@KafkaHandler`) methods can now have asynchronous return types, including `CompletableFuture`, `Mono`, and Kotlin `suspend` functions.
+See xref:kafka/receiving-messages/async-returns.adoc[Async Returns] for more information.
+
+[[x32-customizable-dlt-routing]]
+=== Routing of messages to custom DLTs based on thrown exceptions
+
+It is now possible to redirect messages to custom DLTs based on the type of exception thrown during message processing.
+Rules for the redirection are set either via the `RetryableTopic.exceptionBasedDltRouting` or the `RetryTopicConfigurationBuilder.dltRoutingRules`.
+Custom DLTs are created automatically, as are the other retry and dead-letter topics.
+See xref:retrytopic/features.adoc#exc-based-custom-dlt-routing[Routing of messages to custom DLTs based on thrown exceptions] for more information.
+
+[[x32-cp-ptm]]
+=== Deprecating ContainerProperties transactionManager property
+
+The `transactionManager` property in `ContainerProperties` is deprecated in favor of `KafkaAwareTransactionManager`, a narrower type compared to the general `PlatformTransactionManager`.
+See xref:kafka/container-props.adoc#kafkaAwareTransactionManager[ContainerProperties] and xref:kafka/transactions.adoc#transaction-synchronization[Transaction Synchronization].
+
+[[x32-after-rollback-processing]]
+=== After Rollback Processing
+
+A new `AfterRollbackProcessor` API, `processBatch`, is provided.
+See xref:kafka/annotation-error-handling.adoc#after-rollback[After-rollback Processor] for more information.
+
+[[x32-retry-topic]]
+=== Change @RetryableTopic SameIntervalTopicReuseStrategy default value
+The default value of the `@RetryableTopic` property `SameIntervalTopicReuseStrategy` changed to `SINGLE_TOPIC`.
+See xref:retrytopic/topic-naming.adoc#single-topic-maxinterval-delay[Single Topic for maxInterval Exponential Delay].
+
+=== Non-blocking retries support class level @KafkaListener
+Non-blocking retries now support xref:kafka/receiving-messages/class-level-kafkalistener.adoc[@KafkaListener on a Class].
+See xref:retrytopic.adoc[Non-Blocking Retries].
+
+=== Support for processing @RetryableTopic on a class in RetryTopicConfigurationProvider
+A new public API is provided to find `RetryTopicConfiguration`.
+See xref:retrytopic/retry-config.adoc#find-retry-topic-config[Find RetryTopicConfiguration].
+
+=== RetryTopicConfigurer supports processing MultiMethodKafkaListenerEndpoint
+The `RetryTopicConfigurer` now supports processing and registering `MultiMethodKafkaListenerEndpoint`.
+The `MultiMethodKafkaListenerEndpoint` provides getters and setters for the `defaultMethod` and `methods` properties.
+The `EndpointCustomizer` was modified so that it is no longer strictly for `MethodKafkaListenerEndpoint` types.
+`EndpointHandlerMethod` adds new constructors to construct an instance for the provided bean.
+A new class, `EndpointHandlerMultiMethod`, handles multiple methods for retrying endpoints.
+
+[[x32-seek-offset-compute-fn]]
+=== New API method to seek to an offset based on a user provided function
+`ConsumerCallback` provides a new API to seek to an offset based on a user-defined function, which takes the current offset in the consumer as an argument.
+See xref:kafka/seek.adoc#seek[Seek API Docs] for more details.
+
+[[x32-annotation-partition-offset-seek-position]]
+=== @PartitionOffset support for SeekPosition
+A new `seekPosition` property on `@PartitionOffset` adds support for `TopicPartitionOffset.SeekPosition`.
+See xref:kafka/receiving-messages/listener-annotation.adoc#manual-assignment[manual-assignment] for more details.
+
+[[x32-topic-partition-offset-constructor]]
+=== New constructor in TopicPartitionOffset that accepts a function to compute the offset to seek to
+`TopicPartitionOffset` has a new constructor that takes a user-provided function to compute the offset to seek to.
+When this constructor is used, the framework calls the function with the input argument of the current consumer offset position.
+See xref:kafka/seek.adoc#seek[Seek API Docs] for more details.
+
+[[x32-default-clientid-prefix]]
+=== Spring Boot application name as default client ID prefix
+
+For Spring Boot applications which define an application name, this name is now used
+as a default prefix for auto-generated client IDs for certain client types.
+See xref:kafka/connecting.adoc#default-client-id-prefixes[Default client ID prefixes] for more details.
+
+[[get-listener-containers-matching]]
+=== Enhanced Retrieval of MessageListenerContainers
+
+`ListenerContainerRegistry` provides two new APIs to dynamically find and filter `MessageListenerContainer` instances.
+One is `getListenerContainersMatching(Predicate<String> idMatcher)`, to filter by ID, and the other is
+`getListenerContainersMatching(BiPredicate<String, MessageListenerContainer> matcher)`, to filter by ID and container properties.
+
+See xref:kafka/receiving-messages/kafkalistener-lifecycle.adoc#retrieving-message-listener-containers[`@KafkaListener` Lifecycle Management's API Docs] for more information.
+
+[[x32-observation]]
+=== Enhanced observation by providing more tracing tags
+
+`KafkaTemplateObservation` provides more tracing tags (low cardinality).
+`KafkaListenerObservation` provides a new API to find high cardinality key names and more tracing tags (high or low cardinality).
+See xref:kafka/micrometer.adoc#observation[Micrometer Observation] + + [[what-s-new-in-3-1-since-3-0]] == What's New in 3.1 Since 3.0 :page-section-summary-toc: 1 @@ -261,7 +386,7 @@ The `@KafkaListener` annotation now has the `filter` attribute, to override the The `@KafkaListener` annotation now has the `info` attribute; this is used to populate the new listener container property `listenerInfo`. This is then used to populate a `KafkaHeaders.LISTENER_INFO` header in each record which can be used in `RecordInterceptor`, `RecordFilterStrategy`, or the listener itself. -See xref:kafka/annotation-error-handling.adoc#li-header[Listener Info Header] and xref:kafka/container-props.adoc#alc-props[Abstract Listener Container Properties] for more information. +See xref:kafka/annotation-error-handling.adoc#li-header[Listener Info Header] and xref:kafka/container-props.adoc#amlc-props[AbstractMessageListenerContainer Properties] for more information. [[x28-template]] === `KafkaTemplate` Changes @@ -781,7 +906,7 @@ Also, a `StringOrBytesSerializer` is now available; it can serialize `byte[]`, ` See xref:kafka/serdes.adoc#messaging-message-conversion[Spring Messaging Message Conversion] for more information. The `JsonSerializer`, `JsonDeserializer` and `JsonSerde` now have fluent APIs to make programmatic configuration simpler. -See the javadocs, xref:kafka/serdes.adoc[Serialization, Deserialization, and Message Conversion], and xref:streams.adoc#serde[Streams JSON Serialization and Deserialization] for more informaion. +See the javadocs, xref:kafka/serdes.adoc[Serialization, Deserialization, and Message Conversion], and xref:streams.adoc#serde[Streams JSON Serialization and Deserialization] for more information. [[cb-2-2-and-2-3-replyingkafkatemplate]] === ReplyingKafkaTemplate diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/appendix/override-boot-dependencies.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/appendix/override-boot-dependencies.adoc index 1956c97a81..918f3a08e3 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/appendix/override-boot-dependencies.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/appendix/override-boot-dependencies.adoc @@ -4,7 +4,7 @@ When using Spring for Apache Kafka in a Spring Boot application, the Apache Kafka dependency versions are determined by Spring Boot's dependency management. If you wish to use a different version of `kafka-clients` or `kafka-streams`, and use the embedded kafka broker for testing, you need to override their version used by Spring Boot dependency management; set the `kafka.version` property. -NOTE: Default `kafka-clients` dependencies for Spring Boot 3.0.x and 3.1.x are 3.3.2 and 3.4.1 respectively. +NOTE: Default `kafka-clients` dependencies for Spring Boot 3.1.x and 3.2.x are 3.4.1 and 3.6.2 respectively. Or, to use a different Spring for Apache Kafka version with a supported Spring Boot version, set the `spring-kafka.version` property. diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/index.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/index.adoc index 55e65693c1..c06fdb35a8 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/index.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/index.adoc @@ -10,8 +10,3 @@ Gary Russell; Artem Bilan; Biju Kunjummen; Jay Bryant; Soby Chacko; Tomaz Fernan The Spring for Apache Kafka project applies core Spring concepts to the development of Kafka-based messaging solutions. 
We provide a "`template`" as a high-level abstraction for sending messages.
We also provide support for Message-driven POJOs.
-
-(C) 2016 - 2023 VMware, Inc.
-
-Copies of this document may be made for your own use and for distribution to others, provided that you do not charge any fee for such copies and further provided that each copy contains this Copyright Notice, whether distributed in print or electronically.
-
diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/annotation-error-handling.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/annotation-error-handling.adoc
index 52f9ec06cc..777f46589a 100644
--- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/annotation-error-handling.adoc
+++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/annotation-error-handling.adoc
@@ -65,8 +65,7 @@ In either case, you should NOT perform any seeks on the consumer because the con
 Starting with version 2.8, the legacy `ErrorHandler` and `BatchErrorHandler` interfaces have been superseded by a new `CommonErrorHandler`.
 These error handlers can handle errors for both record and batch listeners, allowing a single listener container factory to create containers for both types of listener.
-`CommonErrorHandler` implementations to replace most legacy framework error handler implementations are provided and the legacy error handlers deprecated.
-The legacy interfaces are still supported by listener containers and listener container factories; they will be deprecated in a future release.
+`CommonErrorHandler` implementations to replace most legacy framework error handler implementations are provided.
 See xref:kafka/annotation-error-handling.adoc#migrating-legacy-eh[Migrating Custom Legacy Error Handler Implementations to `CommonErrorHandler`] for information to migrate custom error handlers to `CommonErrorHandler`.
 
@@ -198,7 +197,7 @@ When using a POJO batch listener (e.g. `List<Thing>`), and you don't have the fu
 ----
 @KafkaListener(id = "recovering", topics = "someTopic")
 public void listen(List<Thing> things) {
-    for (int i = 0; i < records.size(); i++) {
+    for (int i = 0; i < things.size(); i++) {
         try {
             process(things.get(i));
         }
@@ -351,6 +350,8 @@ This is to cause the transaction to roll back (if transactions are enabled).
 The `CommonDelegatingErrorHandler` can delegate to different error handlers, depending on the exception type.
 For example, you may wish to invoke a `DefaultErrorHandler` for most exceptions, or a `CommonContainerStoppingErrorHandler` for others.
 
+All delegates must share the same compatible properties (`ackAfterHandle`, `seekAfterError` ...).
+
 [[log-eh]]
 == Logging Error Handler
 
@@ -425,7 +426,7 @@ To replace any `BatchErrorHandler` implementation, you should implement `handleB
 You should also implement `handleOtherException()` - to handle exceptions that occur outside the scope of record processing (e.g. consumer errors).
 
 [[after-rollback]]
-== After-rollback Processor
+== After Rollback Processor
 
 When using transactions, if the listener throws an exception (and an error handler, if present, throws an exception), the transaction is rolled back.
 By default, any unprocessed records (including the failed record) are re-fetched on the next poll.
@@ -561,6 +562,27 @@ It is disabled by default to avoid the (small) overhead of looking up the state
 The `DefaultErrorHandler` and `DefaultAfterRollbackProcessor` support this feature.
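+
+For example, opting into the header can look like this (a minimal sketch, not from the original docs; `factory` is assumed to be your listener container factory bean):
+
+[source, java]
+----
+// A minimal sketch: populate KafkaHeaders.DELIVERY_ATTEMPT for each record.
+// deliveryAttemptHeader is the ContainerProperties switch described above.
+factory.getContainerProperties().setDeliveryAttemptHeader(true);
+----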
+[[delivery-attempts-header-for-batch-listener]]
+== Delivery Attempts Header for Batch Listener
+
+When processing `ConsumerRecord` s with the `BatchListener`, the `KafkaHeaders.DELIVERY_ATTEMPT` header can be present in a different way compared to the `SingleRecordListener`.
+
+Starting with version 3.3, if you want to inject the `KafkaHeaders.DELIVERY_ATTEMPT` header into the `ConsumerRecord` when using the `BatchListener`, set the `DeliveryAttemptAwareRetryListener` as the `RetryListener` in the `ErrorHandler`.
+
+Please refer to the code below.
+[source, java]
+----
+final FixedBackOff fixedBackOff = new FixedBackOff(1, 10);
+final DefaultErrorHandler errorHandler = new DefaultErrorHandler(fixedBackOff);
+errorHandler.setRetryListeners(new DeliveryAttemptAwareRetryListener());
+
+ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
+factory.setConsumerFactory(consumerFactory);
+factory.setCommonErrorHandler(errorHandler);
+----
+
+Then, whenever a batch fails to complete, the `DeliveryAttemptAwareRetryListener` will inject a `KafkaHeaders.DELIVERY_ATTEMPT` header into the `ConsumerRecord`.
+
 [[li-header]]
 == Listener Info Header
 
@@ -613,7 +635,7 @@ The framework provides the `DeadLetterPublishingRecoverer`, which publishes the
 The recoverer requires a `KafkaTemplate`, which is used to send the record.
 You can also, optionally, configure it with a `BiFunction<ConsumerRecord<?, ?>, Exception, TopicPartition>`, which is called to resolve the destination topic and partition.
 
-IMPORTANT: By default, the dead-letter record is sent to a topic named `.DLT` (the original topic name suffixed with `.DLT`) and to the same partition as the original record.
+IMPORTANT: By default, the dead-letter record is sent to a topic named `-dlt` (the original topic name suffixed with `-dlt`) and to the same partition as the original record.
 Therefore, when you use the default resolver, the dead-letter topic **must have at least as many partitions as the original topic.**
 
 If the returned `TopicPartition` has a negative partition, the partition is not set in the `ProducerRecord`, so the partition is selected by Kafka.
@@ -795,4 +817,3 @@ DefaultErrorHandler handler() {
 ----
 
 This will retry after `1, 2, 4, 8, 10, 10` seconds, before calling the recoverer.
-
diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/connecting.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/connecting.adoc
index 754d9b6ed8..678c5a1f00 100644
--- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/connecting.adoc
+++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/connecting.adoc
@@ -58,3 +58,37 @@ These listeners can be used, for example, to create and bind a Micrometer `Kafka
 The framework provides listeners that do exactly that; see xref:kafka/micrometer.adoc#micrometer-native[Micrometer Native Metrics].
 
+[[default-client-id-prefixes]]
+== Default client ID prefixes
+
+Starting with version 3.2, for Spring Boot applications which define an application name using the `spring.application.name` property, this name is now used
+as a default prefix for auto-generated client IDs for these client types:
+
+- consumer clients which don't use a consumer group
+- producer clients
+- admin clients
+
+This makes it easier to identify these clients on the server side for troubleshooting or applying quotas.
+
+.Example client ids resulting for a Spring Boot application with `spring.application.name=myapp`
+[%autowidth]
+|===
+|Client Type |Without application name |With application name
+
+|consumer without consumer group
+|consumer-null-1
+|myapp-consumer-1
+
+|consumer with consumer group "mygroup"
+|consumer-mygroup-1
+|consumer-mygroup-1
+
+|producer
+|producer-1
+|myapp-producer-1
+
+|admin
+|adminclient-1
+|myapp-admin-1
+|===
+
diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/container-props.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/container-props.adoc
index b126974ebe..7daab75789 100644
--- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/container-props.adoc
+++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/container-props.adoc
@@ -30,7 +30,7 @@ See the JavaDocs for `ContainerProperties.AssignmentCommitOption` for more information about the available options.
 
 |[[asyncAcks]]<>
-|false
+|`false`
 |Enable out-of-order commits (see xref:kafka/receiving-messages/ooo-commits.adoc[Manually Committing Offsets]); the consumer is paused and commits are deferred until gaps are filled.
 
 |[[authExceptionRetryInterval]]<>
@@ -38,6 +38,10 @@ See the JavaDocs for `ContainerProperties.AssignmentCommitOption` for more infor
 |When not null, a `Duration` to sleep between polls when an `AuthenticationException` or `AuthorizationException` is thrown by the Kafka client.
 When null, such exceptions are considered fatal and the container will stop.
 
+|[[batchRecoverAfterRollback]]<>
+|`false`
+|Set to `true` to enable batch recovery. See xref:kafka/annotation-error-handling.adoc#after-rollback[After Rollback Processor].
+
 |[[clientId]]<>
 |(empty string)
 |A prefix for the `client.id` consumer property.
@@ -57,10 +61,6 @@ Useful when the consumer code cannot determine that an `ErrorHandlingDeserialize
 |`null`
 |When present and `syncCommits` is `false` a callback invoked after the commit completes.
 
-|[[offsetAndMetadataProvider]]<>
-|`null`
-|A provider for `OffsetAndMetadata`; by default, the provider creates an offset and metadata with empty metadata. The provider gives a way to customize the metadata.
-
 |[[commitLogLevel]]<>
 |DEBUG
 |The logging level for logs pertaining to committing offsets.
@@ -69,15 +69,15 @@ Useful when the consumer code cannot determine that an `ErrorHandlingDeserialize
 |`null`
 |A rebalance listener; see xref:kafka/receiving-messages/rebalance-listeners.adoc[Rebalancing Listeners].
 
-|[[consumerStartTimout]]<>
+|[[commitRetries]]<>
+|3
+|Set the number of retries for `RetriableCommitFailedException` when `syncCommits` is set to `true`.
+Default 3 (4 attempts in total).
+
+|[[consumerStartTimeout]]<>
 |30s
 |The time to wait for the consumer to start before logging an error; this might happen if, say, you use a task executor with insufficient threads.
 
-|[[consumerTaskExecutor]]<>
-|`SimpleAsyncTaskExecutor`
-|A task executor to run the consumer threads.
-The default executor creates threads named `-C-n`; with the `KafkaMessageListenerContainer`, the name is the bean name; with the `ConcurrentMessageListenerContainer` the name is the bean name suffixed with `-n` where n is incremented for each child container.
-
 |[[deliveryAttemptHeader]]<>
 |`false`
 |See xref:kafka/annotation-error-handling.adoc#delivery-header[Delivery Attempts Header].
@@ -123,9 +123,18 @@ Also see `idleBeforeDataMultiplier`.
 |None
 |Used to override any arbitrary consumer properties configured on the consumer factory.
+|[[kafkaAwareTransactionManager]]<> +|`null` +|See xref:kafka/transactions.adoc[Transactions]. + +|[[listenerTaskExecutor]]<> +|`SimpleAsyncTaskExecutor` +|A task executor to run the consumer threads. +The default executor creates threads named `-C-n`; with the `KafkaMessageListenerContainer`, the name is the bean name; with the `ConcurrentMessageListenerContainer` the name is the bean name suffixed with `-m` where `m` is incremented for each child container. See xref:kafka/receiving-messages/container-thread-naming.adoc#container-thread-naming[Container Thread Naming]. + |[[logContainerConfig]]<> |`false` -|Set to true to log at INFO level all container properties. +|Set to `true` to log at INFO level all container properties. |[[messageListener]]<> |`null` @@ -145,7 +154,7 @@ Also see `idleBeforeDataMultiplier`. |[[missingTopicsFatal]]<> |`false` -|When true prevents the container from starting if the confifgured topic(s) are not present on the broker. +|When true prevents the container from starting if the configured topic(s) are not present on the broker. |[[monitorInterval]]<> |30s @@ -157,9 +166,21 @@ See `noPollThreshold` and `pollTimeout`. |Multiplied by `pollTimeOut` to determine whether to publish a `NonResponsiveConsumerEvent`. See `monitorInterval`. +|[[observationConvention]]<> +|`null` +|When set, add dynamic tags to the timers and traces, based on information in the consumer records. + +|[[observationEnabled]]<> +|`false` +|Set to `true` to enable observation via Micrometer. + +|[[offsetAndMetadataProvider]]<> +|`null` +|A provider for `OffsetAndMetadata`; by default, the provider creates an offset and metadata with empty metadata. The provider gives a way to customize the metadata. + |[[onlyLogRecordMetadata]]<> |`false` -|Set to false to log the complete consumer record (in error, debug logs etc) instead of just `topic-partition@offset`. +|Set to `false` to log the complete consumer record (in error, debug logs etc.) instead of just `topic-partition@offset`. |[[pauseImmediate]]<> |`false` @@ -215,11 +236,11 @@ Mutually exclusive; at least one must be provided; enforced by `ContainerPropert |[[transactionManager]]<> |`null` -|See xref:kafka/transactions.adoc[Transactions]. +|Deprecated since 3.2, see <>, xref:kafka/transactions.adoc#transaction-synchronization[Other transaction managers]. |=== -[[alc-props]] -.`AbstractListenerContainer` Properties +[[amlc-props]] +.`AbstractMessageListenerContainer` Properties [cols="9,10,16", options="header"] |=== | Property @@ -256,14 +277,6 @@ See xref:kafka/annotation-error-handling.adoc#error-handlers[Container Error Han |`ContainerProperties` |The container properties instance. -|[[errorHandler]]<> -|See desc. -|Deprecated - see `commonErrorHandler`. - -|[[genericErrorHandler]]<> -|See desc. -|Deprecated - see `commonErrorHandler`. - |[[groupId2]]<> |See desc. |The `containerProperties.groupId`, if present, otherwise the `group.id` property from the consumer factory. @@ -307,10 +320,6 @@ Also see `interceptBeforeTx`. |(read only) |The partitions currently assigned to this container (explicitly or not). -|[[assignedPartitionsByClientId]]<> -|(read only) -|The partitions currently assigned to this container (explicitly or not). - |[[clientIdSuffix]]<> |`null` |Used by the concurrent container to give each child container's consumer a unique `client.id`. @@ -335,10 +344,6 @@ Also see `interceptBeforeTx`. 
|(read only) |The aggregate of partitions currently assigned to this container's child `KafkaMessageListenerContainer`+++s+++ (explicitly or not). -|[[assignedPartitionsByClientId2]]<> -|(read only) -|The partitions currently assigned to this container's child `KafkaMessageListenerContainer`+++s+++ (explicitly or not), keyed by the child container's consumer's `client.id` property. - |[[concurrency]]<> |1 |The number of child `KafkaMessageListenerContainer`+++s+++ to manage. diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/events.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/events.adoc index b5b49e5a80..a6249286bd 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/events.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/events.adoc @@ -8,7 +8,7 @@ The following Spring application events are published by listener containers and * `ConsumerFailedToStartEvent`: published if no `ConsumerStartingEvent` is published within the `consumerStartTimeout` container property. This event might signal that the configured task executor has insufficient threads to support the containers it is used in and their concurrency. An error message is also logged when this condition occurs. -* `ListenerContainerIdleEvent`: published when no messages have been received in `idleInterval` (if configured). +* `ListenerContainerIdleEvent`: published when no messages have been received in `idleEventInterval` (if configured). * `ListenerContainerNoLongerIdleEvent`: published when a record is consumed after previously publishing a `ListenerContainerIdleEvent`. * `ListenerContainerPartitionIdleEvent`: published when no messages have been received from that partition in `idlePartitionEventInterval` (if configured). * `ListenerContainerPartitionNoLongerIdleEvent`: published when a record is consumed from a partition that has previously published a `ListenerContainerPartitionIdleEvent`. @@ -23,6 +23,7 @@ See xref:kafka/thread-safety.adoc[Thread Safety]. * `ConsumerRetryAuthEvent`: published when authentication or authorization of a consumer fails and is being retried. * `ConsumerRetryAuthSuccessfulEvent`: published when authentication or authorization has been retried successfully. Can only occur when there has been a `ConsumerRetryAuthEvent` before. * `ContainerStoppedEvent`: published when all consumers have stopped. +* `ConcurrentContainerStoppedEvent`: published when the `ConcurrentMessageListenerContainer` has stopped. IMPORTANT: By default, the application context's event multicaster invokes event listeners on the calling thread. If you change the multicaster to use an async executor, you must not invoke any `Consumer` methods when the event contains a reference to the consumer. @@ -89,7 +90,7 @@ The `ConsumerRetryAuthEvent` event has the following properties: ** `AUTHENTICATION` - the event was published because of an authentication exception. ** `AUTHORIZATION` - the event was published because of an authorization exception. -The `ConsumerStartingEvent`, `ConsumerStartingEvent`, `ConsumerFailedToStartEvent`, `ConsumerStoppedEvent`, `ConsumerRetryAuthSuccessfulEvent` and `ContainerStoppedEvent` events have the following properties: +The `ConsumerStartingEvent`, `ConsumerStartedEvent`, `ConsumerFailedToStartEvent`, `ConsumerStoppedEvent`, `ConsumerRetryAuthSuccessfulEvent` and `ContainerStoppedEvent` events have the following properties: * `source`: The listener container instance that published the event. 
* `container`: The listener container or the parent listener container, if the source container is a child. @@ -110,7 +111,7 @@ You can use this event to restart the container after such a condition: [source, java] ---- -if (event.getReason.equals(Reason.FENCED)) { +if (event.getReason().equals(Reason.FENCED)) { event.getSource(MessageListenerContainer.class).start(); } ---- @@ -204,7 +205,7 @@ Consequently, in the preceding example, we narrow the events received based on t Since containers created for the `@KafkaListener` support concurrency, the actual containers are named `id-n` where the `n` is a unique value for each instance to support the concurrency. That is why we use `startsWith` in the condition. -CAUTION: If you wish to use the idle event to stop the lister container, you should not call `container.stop()` on the thread that calls the listener. +CAUTION: If you wish to use the idle event to stop the listener container, you should not call `container.stop()` on the thread that calls the listener. Doing so causes delays and unnecessary log messages. Instead, you should hand off the event to a different thread that can then stop the container. Also, you should not `stop()` the container instance if it is a child container. diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/interceptors.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/interceptors.adoc index 2d96b135db..b2a3ae1728 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/interceptors.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/interceptors.adoc @@ -30,11 +30,9 @@ public class Application { Map producerProperties = new HashMap<>(); // producerProperties.put(..., ...) // ... - Map producerProperties = properties.buildProducerProperties(); producerProperties.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, MyProducerInterceptor.class.getName()); producerProperties.put("some.bean", someBean); - DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(producerProperties); - return factory; + return new DefaultKafkaProducerFactory<>(producerProperties); } @Bean diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/micrometer.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/micrometer.adoc index cdfd498422..07f0d200bd 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/micrometer.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/micrometer.adoc @@ -24,7 +24,7 @@ NOTE: With the concurrent container, timers are created for each thread and the [[monitoring-kafkatemplate-performance]] == Monitoring KafkaTemplate Performance -Starting with version 2.5, the template will automatically create and update Micrometer `Timer`+++s for send operations, if `Micrometer` is detected on the classpath, and a single `MeterRegistry` is present in the application context. +Starting with version 2.5, the template will automatically create and update Micrometer `Timer`+++s+++ for send operations, if `Micrometer` is detected on the classpath, and a single `MeterRegistry` is present in the application context. The timers can be disabled by setting the template's `micrometerEnabled` property to `false`. Two timers are maintained - one for successful calls to the listener and one for failures. 
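+
+For example (a minimal sketch, not from the original docs; `producerFactory` is an assumed bean and the tag is arbitrary):
+
+[source, java]
+----
+// A minimal sketch: tune the template's Micrometer timers.
+// setMicrometerEnabled(false) would disable them entirely;
+// setMicrometerTags adds static tags to both timers.
+KafkaTemplate<String, String> template = new KafkaTemplate<>(producerFactory);
+template.setMicrometerTags(Map.of("app", "myapp"));
+----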
@@ -88,6 +88,11 @@ double count = this.meterRegistry.get("kafka.producer.node.incoming.byte.total")
 
 A similar listener is provided for the `StreamsBuilderFactoryBean` - see xref:streams.adoc#streams-micrometer[KafkaStreams Micrometer Support].
 
+Starting with version 3.3, a `KafkaMetricsSupport` abstract class is introduced to manage `io.micrometer.core.instrument.binder.kafka.KafkaMetrics` binding into a `MeterRegistry` for the provided Kafka client.
+This class is the superclass for the above-mentioned `MicrometerConsumerListener`, `MicrometerProducerListener`, and `KafkaStreamsMicrometerListener`.
+However, it can be used for any Kafka client use-case.
+The class needs to be extended, and its `bindClient()` and `unbindClient()` APIs have to be called to connect Kafka client metrics with a Micrometer collector.
+
 [[observation]]
 == Micrometer Observation
 
@@ -95,6 +100,8 @@ Using Micrometer for observation is now supported, since version 3.0, for the `K
 
 Set `observationEnabled` to `true` on the `KafkaTemplate` and `ContainerProperties` to enable observation; this will disable xref:kafka/micrometer.adoc[Micrometer Timers] because the timers will now be managed with each observation.
 
+IMPORTANT: Micrometer Observation does not support batch listeners; for batch listeners, Micrometer Timers are applied instead.
+
 Refer to https://micrometer.io/docs/tracing[Micrometer Tracing] for more information.
 
 To add tags to timers/traces, configure a custom `KafkaTemplateObservationConvention` or `KafkaListenerObservationConvention` to the template or listener container, respectively.
@@ -109,6 +116,6 @@ Starting with version 3.0.6, you can add dynamic tags to the timers and traces,
 To do so, add a custom `KafkaListenerObservationConvention` and/or `KafkaTemplateObservationConvention` to the listener container properties or `KafkaTemplate` respectively.
 The `record` property in both observation contexts contains the `ConsumerRecord` or `ProducerRecord` respectively.
 
-The sender and receiver contexts' `remoteServiceName` properties are set to the Kafka `clusterId` property; this is retrieved by a `KafkaAdmin`.
+The sender and receiver contexts `remoteServiceName` properties are set to the Kafka `clusterId` property; this is retrieved by a `KafkaAdmin`.
 If, for some reason - perhaps lack of admin permissions, you cannot retrieve the cluster id, starting with version 3.1, you can set a manual `clusterId` on the `KafkaAdmin` and inject it into `KafkaTemplate` s and listener containers.
 When it is `null` (default), the admin will invoke the `describeCluster` admin operation to retrieve it from the broker.
diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/pause-resume.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/pause-resume.adoc
index c06436ba35..58f55aa34d 100644
--- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/pause-resume.adoc
+++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/pause-resume.adoc
@@ -13,11 +13,11 @@ Starting with version 2.1.5, you can call `isPauseRequested()` to see if `pause(
 However, the consumers might not have actually paused yet.
 `isConsumerPaused()` returns true if all `Consumer` instances have actually paused.
 
-In addition (also since 2.1.5), `ConsumerPausedEvent` and `ConsumerResumedEvent` instances are published with the container as the `source` property and the `TopicPartition` instances involved in the `partitions` property.
+In addition (also since 2.1.5), `ConsumerPausedEvent` and `ConsumerResumedEvent` instances are published with the container as the `source` property and the `TopicPartition` instances involved in the `partitions` property.
 
 Starting with version 2.9, a new container property `pauseImmediate`, when set to true, causes the pause to take effect after the current record is processed.
-By default, the pause takes effect when all of the records from the previous poll have been processed.
-See <>.
+By default, the pause takes effect when all the records from the previous poll have been processed.
+See xref:kafka/container-props.adoc#pauseImmediate[pauseImmediate].
 
 The following simple Spring Boot application demonstrates by using the container registry to get a reference to a `@KafkaListener` method's container and pausing or resuming its consumers as well as receiving the corresponding events:
diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/annotation-send-to.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/annotation-send-to.adoc
index b5102fd02e..6a5b3d23d2 100644
--- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/annotation-send-to.adoc
+++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/annotation-send-to.adoc
@@ -117,7 +117,7 @@ The following example shows how to do so:
 ----
 @Bean
 public KafkaTemplate<String, String> myReplyingTemplate() {
-    return new KafkaTemplate(producerFactory()) {
+    return new KafkaTemplate<String, String>(producerFactory()) {
 
         @Override
         public CompletableFuture<SendResult<String, String>> send(String topic, String data) {
diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/enforced-rebalance.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/enforced-rebalance.adoc
index 145550ef6f..4bf3118009 100644
--- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/enforced-rebalance.adoc
+++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/enforced-rebalance.adoc
@@ -6,7 +6,7 @@ Starting with version `3.1.2`, Spring for Apache Kafka provides an option to inv
 When calling this API, it is simply alerting the Kafka consumer to trigger an enforced rebalance; the actual rebalance will only occur as part of the next `poll()` operation.
 If there is already a rebalance in progress, calling an enforced rebalance is a NO-OP.
 The caller must wait for the current rebalance to complete before invoking another one.
-See the javadocs for `enfroceRebalance` for more details.
+See the javadocs for `enforceRebalance` for more details.
 
 The following code snippet shows the essence of enforcing a rebalance using the message listener container.
 
@@ -29,6 +29,6 @@ public ApplicationRunner runner(KafkaTemplate template, KafkaLis
 }
 ----
 
-As the code above shows, the application uses the `KafkaListenerEndpointRegistry` to gain access to the message listener container and then calling the `enforceRebalnce` API on it.
+As the code above shows, the application uses the `KafkaListenerEndpointRegistry` to gain access to the message listener container and then calls the `enforceRebalance` API on it.
 When calling the `enforceRebalance` on the listener container, it delegates the call to the underlying Kafka consumer.
-The Kafka consumer will trigger a rebalance as part of the next `poll()` operation.
\ No newline at end of file
+The Kafka consumer will trigger a rebalance as part of the next `poll()` operation.
diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/filtering.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/filtering.adoc
index dd65d477a1..86d6ba069a 100644
--- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/filtering.adoc
+++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/filtering.adoc
@@ -4,7 +4,7 @@ In certain scenarios, such as rebalancing, a message that has already been processed may be redelivered.
 The framework cannot know whether such a message has been processed or not.
 That is an application-level function.
-This is known as the https://www.enterpriseintegrationpatterns.com/patterns/messaging/IdempotentReceiver.html[Idempotent Receiver] pattern and Spring Integration provides an https://docs.spring.io/spring-integration/reference/html/#idempotent-receiver[implementation] of it.
+This is known as the https://www.enterpriseintegrationpatterns.com/patterns/messaging/IdempotentReceiver.html[Idempotent Receiver] pattern and Spring Integration provides an https://docs.spring.io/spring-integration/reference/handler-advice/idempotent-receiver.html[implementation] of it.
 
 The Spring for Apache Kafka project also provides some assistance by means of the `FilteringMessageListenerAdapter` class, which can wrap your `MessageListener`.
 This class takes an implementation of `RecordFilterStrategy` in which you implement the `filter` method to signal that a message is a duplicate and should be discarded.
@@ -27,3 +27,63 @@ public void listen(Thing thing) {
 }
 ----
 
+Starting with version 3.3, ignoring empty batches that result from filtering by `RecordFilterStrategy` is supported.
+When implementing `RecordFilterStrategy`, this can be configured through `ignoreEmptyBatch()`.
+The default setting is `false`, indicating that the `KafkaListener` will be invoked even if all `ConsumerRecord` s are filtered out.
+
+If `true` is returned, the `KafkaListener` [underline]#will not be invoked# when all `ConsumerRecord` s are filtered out.
+However, the commit to the broker will still be executed.
+
+If `false` is returned, the `KafkaListener` [underline]#will be invoked# when all `ConsumerRecord` s are filtered out.
+
+Here are some examples.
+
+[source,java]
+----
+public class IgnoreEmptyBatchRecordFilterStrategy implements RecordFilterStrategy<String, String> {
+    ...
+    @Override
+    public List<ConsumerRecord<String, String>> filterBatch(
+            List<ConsumerRecord<String, String>> consumerRecords) {
+        return List.of();
+    }
+
+    @Override
+    public boolean ignoreEmptyBatch() {
+        return true;
+    }
+};
+
+// NOTE: ignoreEmptyBatchRecordFilterStrategy is the bean name of the IgnoreEmptyBatchRecordFilterStrategy instance.
+@KafkaListener(id = "filtered", topics = "topic", filter = "ignoreEmptyBatchRecordFilterStrategy")
+public void listen(List<Thing> things) {
+    ...
+}
+----
+In this case, `IgnoreEmptyBatchRecordFilterStrategy` always returns an empty list and returns `true` as the result of `ignoreEmptyBatch()`.
+Thus `KafkaListener#listen(...)` will never be invoked.
+
+[source,java]
+----
+public class NotIgnoreEmptyBatchRecordFilterStrategy implements RecordFilterStrategy<String, String> {
+    ...
+    @Override
+    public List<ConsumerRecord<String, String>> filterBatch(
+            List<ConsumerRecord<String, String>> consumerRecords) {
+        return List.of();
+    }
+
+    @Override
+    public boolean ignoreEmptyBatch() {
+        return false;
+    }
+};
+
+// NOTE: notIgnoreEmptyBatchRecordFilterStrategy is the bean name of the NotIgnoreEmptyBatchRecordFilterStrategy instance.
+@KafkaListener(id = "filtered", topics = "topic", filter = "notIgnoreEmptyBatchRecordFilterStrategy")
+public void listen(List<Thing> things) {
+    ...
+}
+----
+However, in this case, `NotIgnoreEmptyBatchRecordFilterStrategy` always returns an empty list and returns `false` as the result of `ignoreEmptyBatch()`.
+Thus `KafkaListener#listen(...)` will always be invoked.
diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/kafkalistener-lifecycle.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/kafkalistener-lifecycle.adoc
index ccb0704f29..7d61688ab5 100644
--- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/kafkalistener-lifecycle.adoc
+++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/kafkalistener-lifecycle.adoc
@@ -41,3 +41,49 @@ IMPORTANT: Endpoints registered after the application context has been refreshed
 An example of late registration is a bean with a `@KafkaListener` in prototype scope where an instance is created after the context is initialized.
 Starting with version 2.8.7, you can set the registry's `alwaysStartAfterRefresh` property to `false` and then the container's `autoStartup` property will define whether or not the container is started.
 
+[[retrieving-message-listener-containers]]
+== Retrieving MessageListenerContainers from KafkaListenerEndpointRegistry
+
+The `KafkaListenerEndpointRegistry` provides methods for retrieving `MessageListenerContainer` instances to accommodate a range of management scenarios:
+
+**All Containers**: For operations that cover all listener containers, use `getListenerContainers()` to retrieve a comprehensive collection.
+
+[source, java]
+----
+Collection<MessageListenerContainer> allContainers = registry.getListenerContainers();
+----
+
+**Specific Container by ID**: To manage an individual container, `getListenerContainer(String id)` enables retrieval by its id.
+
+[source, java]
+----
+MessageListenerContainer specificContainer = registry.getListenerContainer("myContainerId");
+----
+
+**Dynamic Container Filtering**: Introduced in version 3.2, two overloaded `getListenerContainersMatching` methods enable refined selection of containers.
+One method takes a `Predicate<String>` for ID-based filtering as a parameter, while the other takes a `BiPredicate<String, MessageListenerContainer>`
+for more advanced criteria that may include container properties or state as a parameter.
+
+[source, java]
+----
+// Prefix matching (Predicate<String>)
+Collection<MessageListenerContainer> filteredContainers =
+        registry.getListenerContainersMatching(id -> id.startsWith("productListener-retry-"));
+
+// Regex matching (Predicate<String>)
+Collection<MessageListenerContainer> regexFilteredContainers =
+        registry.getListenerContainersMatching(myPattern::matches);
+
+// Pre-built Set of IDs (Predicate<String>)
+Collection<MessageListenerContainer> setFilteredContainers =
+        registry.getListenerContainersMatching(myIdSet::contains);
+
+// Advanced Filtering: ID prefix and running state (BiPredicate<String, MessageListenerContainer>)
+Collection<MessageListenerContainer> advancedFilteredContainers =
+        registry.getListenerContainersMatching(
+                (id, container) -> id.startsWith("specificPrefix-") && container.isRunning()
+        );
+----
+
+Utilize these methods to efficiently manage and query `MessageListenerContainer` instances within your application.
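+
+For instance, a management routine might stop every matching container in one pass (a minimal sketch, not from the original docs; the id prefix is arbitrary):
+
+[source, java]
+----
+// A minimal sketch: stop all containers whose id carries a given prefix.
+registry.getListenerContainersMatching(id -> id.startsWith("productListener-retry-"))
+        .forEach(MessageListenerContainer::stop);
+----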
diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/listener-annotation.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/listener-annotation.adoc
index 9503a2862a..2de479ae11 100644
--- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/listener-annotation.adoc
+++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/listener-annotation.adoc
@@ -54,7 +54,7 @@ public class KafkaConfig {
 
     @Bean
     public Map<String, Object> consumerConfigs() {
         Map<String, Object> props = new HashMap<>();
-        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, embeddedKafka.getBrokersAsString());
+        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
         ...
         return props;
     }
@@ -150,11 +150,34 @@ public void listen(ConsumerRecord record) {
 
 The initial offset will be applied to all 6 partitions.
 
+Since 3.2, `@PartitionOffset` supports `SeekPosition.END`, `SeekPosition.BEGINNING`, and `SeekPosition.TIMESTAMP`; the `seekPosition` property matches a `SeekPosition` enum name:
+
+[source, java]
+----
+@KafkaListener(id = "seekPositionTime", topicPartitions = {
+        @TopicPartition(topic = TOPIC_SEEK_POSITION, partitionOffsets = {
+                @PartitionOffset(partition = "0", initialOffset = "723916800000", seekPosition = "TIMESTAMP"),
+                @PartitionOffset(partition = "1", initialOffset = "0", seekPosition = "BEGINNING"),
+                @PartitionOffset(partition = "2", initialOffset = "0", seekPosition = "END")
+        })
+})
+public void listen(ConsumerRecord<?, ?> record) {
+    ...
+}
+----
+
+If `seekPosition` is set to `END` or `BEGINNING`, `initialOffset` and `relativeToCurrent` are ignored.
+If `seekPosition` is set to `TIMESTAMP`, `initialOffset` means the timestamp.
+
 [[manual-acknowledgment]]
 == Manual Acknowledgment
 
 When using manual `AckMode`, you can also provide the listener with the `Acknowledgment`.
+To activate the manual `AckMode`, you need to set the ack-mode in `ContainerProperties` to the appropriate manual mode.
 The following example also shows how to use a different container factory.
+This custom container factory must set the `AckMode` to a manual type by calling `getContainerProperties()` and then calling `setAckMode()` on it.
+Otherwise, the `Acknowledgment` object will be `null`.
+
 [source, java]
 ----
@@ -233,7 +256,7 @@ public KafkaListenerContainerFactory batchFactory() {
 }
 ----
 
-NOTE: Starting with version 2.8, you can override the factory's `batchListener` propery using the `batch` property on the `@KafkaListener` annotation.
+NOTE: Starting with version 2.8, you can override the factory's `batchListener` property using the `batch` property on the `@KafkaListener` annotation.
 This, together with the changes to xref:kafka/annotation-error-handling.adoc#error-handlers[Container Error Handlers] allows the same factory to be used for both record and batch listeners.
 
 NOTE: Starting with version 2.9.6, the container factory has separate setters for the `recordMessageConverter` and `batchMessageConverter` properties.
@@ -381,7 +404,7 @@ public class Listener {
 }
 ----
 
-If, in the unlikely event that you have an actual bean called `__listener`, you can change the expression token byusing the `beanRef` attribute.
+If, in the unlikely event that you have an actual bean called `__listener`, you can change the expression token by using the `beanRef` attribute.
The following example shows how to do so: [source, java] diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/message-listener-container.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/message-listener-container.adoc index 910bf9e056..39da98852e 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/message-listener-container.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/message-listener-container.adoc @@ -6,7 +6,7 @@ Two `MessageListenerContainer` implementations are provided: * `KafkaMessageListenerContainer` * `ConcurrentMessageListenerContainer` -The `KafkaMessageListenerContainer` receives all message from all topics or partitions on a single thread. +The `KafkaMessageListenerContainer` receives all messages from all topics or partitions on a single thread. The `ConcurrentMessageListenerContainer` delegates to one or more `KafkaMessageListenerContainer` instances to provide multi-threaded consumption. Starting with version 2.2.7, you can add a `RecordInterceptor` to the listener container; it will be invoked before calling the listener allowing inspection or modification of the record. @@ -139,10 +139,10 @@ For the first constructor, Kafka distributes the partitions across the consumers ==== When listening to multiple topics, the default partition distribution may not be what you expect. For example, if you have three topics with five partitions each and you want to use `concurrency=15`, you see only five active consumers, each assigned one partition from each topic, with the other 10 consumers being idle. -This is because the default Kafka `PartitionAssignor` is the `RangeAssignor` (see its Javadoc). +This is because the default Kafka `ConsumerPartitionAssignor` is the `RangeAssignor` (see its Javadoc). For this scenario, you may want to consider using the `RoundRobinAssignor` instead, which distributes the partitions across all of the consumers. Then, each consumer is assigned one topic or partition. -To change the `PartitionAssignor`, you can set the `partition.assignment.strategy` consumer property (`ConsumerConfigs.PARTITION_ASSIGNMENT_STRATEGY_CONFIG`) in the properties provided to the `DefaultKafkaConsumerFactory`. +To change the `ConsumerPartitionAssignor`, you can set the `partition.assignment.strategy` consumer property (`ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG`) in the properties provided to the `DefaultKafkaConsumerFactory`. When using Spring Boot, you can assign set the strategy as follows: diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/rebalance-listeners.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/rebalance-listeners.adoc index 3b42474b6d..d1db7d38f1 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/rebalance-listeners.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/receiving-messages/rebalance-listeners.adoc @@ -52,7 +52,7 @@ containerProperties.setConsumerRebalanceListener(new ConsumerAwareRebalanceListe ---- IMPORTANT: Starting with version 2.4, a new method `onPartitionsLost()` has been added (similar to a method with the same name in `ConsumerRebalanceLister`). -The default implementation on `ConsumerRebalanceLister` simply calls `onPartionsRevoked`. +The default implementation on `ConsumerRebalanceLister` simply calls `onPartitionsRevoked`. 
The default implementation on `ConsumerAwareRebalanceListener` does nothing. When supplying the listener container with a custom listener (of either type), it is important that your implementation does not call `onPartitionsRevoked` from `onPartitionsLost`. If you implement `ConsumerRebalanceListener` you should override the default method. diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/seek.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/seek.adoc index 243521445d..a4803d864d 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/seek.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/seek.adoc @@ -9,7 +9,7 @@ void registerSeekCallback(ConsumerSeekCallback callback); void onPartitionsAssigned(Map assignments, ConsumerSeekCallback callback); -void onPartitionsRevoked(Collection partitions) +void onPartitionsRevoked(Collection partitions); void onIdleContainer(Map assignments, ConsumerSeekCallback callback); ---- @@ -34,6 +34,8 @@ The callback has the following methods: ---- void seek(String topic, int partition, long offset); +void seek(String topic, int partition, Function offsetComputeFunction); + void seekToBeginning(String topic, int partition); void seekToBeginning(Collection partitions); @@ -47,8 +49,15 @@ void seekRelative(String topic, int partition, long offset, boolean toCurrent); void seekToTimestamp(String topic, int partition, long timestamp); void seekToTimestamp(Collection topicPartitions, long timestamp); + +String getGroupId(); ---- +The two different variants of the `seek` methods provide a way to seek to an arbitrary offset. +The method that takes a `Function` as an argument to compute the offset was added in version 3.2 of the framework. +This function provides access to the current offset (the current position returned by the consumer, which is the next offset to be fetched). +The user can decide what offset to seek to based on the current offset in the consumer as part of the function definition. + `seekRelative` was added in version 2.3, to perform relative seeks. * `offset` negative and `toCurrent` `false` - seek relative to the end of the partition. @@ -177,17 +186,18 @@ public class SeekToLastOnIdleListener extends AbstractConsumerSeekAware { * Rewind all partitions one record. */ public void rewindAllOneRecord() { - getSeekCallbacks() - .forEach((tp, callback) -> - callback.seekRelative(tp.topic(), tp.partition(), -1, true)); + getTopicsAndCallbacks() + .forEach((tp, callbacks) -> + callbacks.forEach(callback -> callback.seekRelative(tp.topic(), tp.partition(), -1, true)) + ); } /** * Rewind one partition one record. */ public void rewindOnePartitionOneRecord(String topic, int partition) { - getSeekCallbackFor(new TopicPartition(topic, partition)) - .seekRelative(topic, partition, -1, true); + getSeekCallbacksFor(new TopicPartition(topic, partition)) + .forEach(callback -> callback.seekRelative(topic, partition, -1, true)); } } @@ -206,7 +216,7 @@ Example: public class MyListener extends AbstractConsumerSeekAware { @KafkaListener(...) - void listn(...) { + void listen(...) { ... } } @@ -225,4 +235,11 @@ public class SomeOtherBean { ---- +As of version 3.3, a new method `getGroupId()` was introduced in the `ConsumerSeekAware.ConsumerSeekCallback` interface. +This method is particularly useful when you need to identify the consumer group associated with a specific seek callback. 
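+For example (a sketch only; the listener ids, group ids, and topic name are illustrative, not from the reference), when two listeners in the same `AbstractConsumerSeekAware` subclass consume the same topic with different groups, `getGroupId()` lets you seek only one of them:
+
+[source, java]
+----
+public class MultiGroupSeekListener extends AbstractConsumerSeekAware {
+
+    @KafkaListener(id = "one", groupId = "groupOne", topics = "shared.topic")
+    public void listenOne(String in) {
+        ...
+    }
+
+    @KafkaListener(id = "two", groupId = "groupTwo", topics = "shared.topic")
+    public void listenTwo(String in) {
+        ...
+    }
+
+    /**
+     * Rewind all partitions to the beginning, but only for groupOne.
+     */
+    public void seekGroupOneToBeginning() {
+        getTopicsAndCallbacks().forEach((tp, callbacks) ->
+                callbacks.stream()
+                        .filter(callback -> "groupOne".equals(callback.getGroupId()))
+                        .forEach(callback -> callback.seekToBeginning(tp.topic(), tp.partition())));
+    }
+
+}
+----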
+ +NOTE: When using a class that extends `AbstractConsumerSeekAware`, a seek operation performed in one listener may impact all listeners in the same class. +This might not always be the desired behavior. +To address this, you can use the `getGroupId()` method provided by the callback. +This allows you to perform seek operations selectively, targeting only the consumer group of interest. diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/sending-messages.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/sending-messages.adoc index 19a33384bc..8d59b41fe9 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/sending-messages.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/sending-messages.adoc @@ -62,15 +62,12 @@ interface OperationsCallback { See the https://docs.spring.io/spring-kafka/api/org/springframework/kafka/core/KafkaTemplate.html[Javadoc] for more detail. -IMPORTANT: In version 3.0, the methods that previously returned `ListenableFuture` have been changed to return `CompletableFuture`. -To facilitate the migration, the 2.9 version added a method `usingCompletableFuture()` which provided the same methods with `CompletableFuture` return types; this method is no longer available. The `sendDefault` API requires that a default topic has been provided to the template. The API takes in a `timestamp` as a parameter and stores this timestamp in the record. How the user-provided timestamp is stored depends on the timestamp type configured on the Kafka topic. If the topic is configured to use `CREATE_TIME`, the user-specified timestamp is recorded (or generated if not specified). If the topic is configured to use `LOG_APPEND_TIME`, the user-specified timestamp is ignored and the broker adds in the local broker time. The `metrics` and `partitionsFor` methods delegate to the same methods on the underlying https://kafka.apache.org/20/javadoc/org/apache/kafka/clients/producer/Producer.html[`Producer`]. The `execute` method provides direct access to the underlying https://kafka.apache.org/20/javadoc/org/apache/kafka/clients/producer/Producer.html[`Producer`]. @@ -139,10 +136,11 @@ The following listing shows the definition of the `ProducerListener` interface: ---- public interface ProducerListener { - void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata); + default void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) { + } - void onError(ProducerRecord producerRecord, RecordMetadata recordMetadata, - Exception exception); + default void onError(ProducerRecord producerRecord, RecordMetadata recordMetadata, Exception exception) { + } } ---- @@ -166,7 +164,7 @@ future.whenComplete((result, ex) -> { `SendResult` has two properties, a `ProducerRecord` and `RecordMetadata`. See the Kafka API documentation for information about those objects. -The `Throwable` can be cast to a `KafkaProducerException`; its `failedProducerRecord` property contains the failed record. +The `Throwable` can be cast to a `KafkaProducerException`; its `producerRecord` property contains the failed record. If you wish to block the sending thread to await the result, you can invoke the future's `get()` method; using the method with a timeout is recommended.
If you have set a `linger.ms`, you may wish to invoke `flush()` before waiting or, for convenience, the template has a constructor with an `autoFlush` parameter that causes the template to `flush()` on each send. @@ -184,7 +182,7 @@ This section shows examples of sending messages to Kafka: public void sendToKafka(final MyOutputData data) { final ProducerRecord record = createRecord(data); - CompletableFuture> future = template.send(record); + CompletableFuture> future = template.send(record); future.whenComplete((result, ex) -> { if (ex == null) { handleSuccess(data); @@ -216,7 +214,7 @@ public void sendToKafka(final MyOutputData data) { ---- ==== -Note that the cause of the `ExecutionException` is `KafkaProducerException` with the `failedProducerRecord` property. +Note that the cause of the `ExecutionException` is `KafkaProducerException` with the `producerRecord` property. [[routing-template]] == Using `RoutingKafkaTemplate` @@ -307,6 +305,7 @@ public KafkaTemplate kafkaTemplate() { Starting with version 2.5.10, you can now update the producer properties after the factory is created. This might be useful, for example, if you have to update SSL key/trust store locations after a credentials change. The changes will not affect existing producer instances; call `reset()` to close any existing producers so that new producers will be created using the new properties. + NOTE: You cannot change a transactional producer factory to non-transactional, and vice-versa. Two new methods are now provided: @@ -340,8 +339,6 @@ The result is a `CompletableFuture` that is asynchronously populated with the re The result also has a `sendFuture` property, which is the result of calling `KafkaTemplate.send()`. You can use this future to determine the result of the send operation. -IMPORTANT: In version 3.0, the futures returned by these methods (and their `sendFuture` properties) have been changed to `CompletableFuture`+++s+++ instead of `ListenableFuture`+++s+++. - If the first method is used, or the `replyTimeout` argument is `null`, the template's `defaultReplyTimeout` property is used (5 seconds by default). Starting with version 2.8.8, the template has a new method `waitForAssignment`. @@ -447,7 +444,7 @@ catch (InterruptedException e) { ... } catch (ExecutionException e) { - if (e.getCause instanceof MyException) { + if (e.getCause() instanceof MyException) { ... } } @@ -571,8 +568,6 @@ RequestReplyMessageFuture sendAndReceive(Message message); These will use the template's default `replyTimeout`; there are also overloaded versions that can take a timeout in the method call. -IMPORTANT: In version 3.0, the futures returned by these methods (and their `sendFuture` properties) have been changed to `CompletableFuture`+++s+++ instead of `ListenableFuture`+++s+++. - Use the first method if the consumer's `Deserializer` or the template's `MessageConverter` can convert the payload without any additional information, either via configuration or type metadata in the reply message. Use the second method if you need to provide type information for the return type, to assist the message converter. @@ -650,6 +645,12 @@ public Message messageReturn(String in) { } ---- +=== Original Record Key in Reply + +Starting with version 3.3, the Kafka record key from the incoming request (if it exists) will be preserved in the reply record. +This is only applicable for the single-record request/reply scenario.
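+For example, the following is a minimal sketch of the single-record case (the topic name and types are illustrative, not from the reference); the server-side listener needs no extra code, and the request record's key is carried over to the reply record:
+
+[source, java]
+----
+@KafkaListener(id = "server", topics = "kRequests")
+@SendTo  // use the default replyTo expression
+public String handle(String in) {
+    // the reply record published by the framework reuses the incoming record's key
+    return in.toUpperCase();
+}
+----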
+When the listener is a batch listener, or when the return type is a collection, it is up to the application to specify which keys to use by wrapping the reply record in a `Message` type. + [[aggregating-request-reply]] == Aggregating Multiple Replies diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/serdes.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/serdes.adoc index a2616389fd..3128120512 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/serdes.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/serdes.adoc @@ -138,10 +138,10 @@ The following example creates a set of mappings: [source, java] ---- senderProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class); -senderProps.put(JsonSerializer.TYPE_MAPPINGS, "cat:com.mycat.Cat, hat:com.myhat.hat"); +senderProps.put(JsonSerializer.TYPE_MAPPINGS, "cat:com.mycat.Cat, hat:com.myhat.Hat"); ... consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, JsonDeserializer.class); -consumerProps.put(JsonDeSerializer.TYPE_MAPPINGS, "cat:com.yourcat.Cat, hat:com.yourhat.hat"); +consumerProps.put(JsonDeserializer.TYPE_MAPPINGS, "cat:com.yourcat.Cat, hat:com.yourhat.Hat"); ---- IMPORTANT: The corresponding objects must be compatible. @@ -166,7 +166,7 @@ The following Spring Boot example overrides the default factories: [source, java] ---- @Bean -public ConsumerFactory kafkaConsumerFactory(JsonDeserializer customValueDeserializer) { +public ConsumerFactory kafkaConsumerFactory(JsonDeserializer customValueDeserializer) { Map properties = new HashMap<>(); // properties.put(..., ...) // ... @@ -175,7 +175,7 @@ public ConsumerFactory kafkaConsumerFactory(JsonDeserializer cust } @Bean -public ProducerFactory kafkaProducerFactory(JsonSerializer customValueSerializer) { +public ProducerFactory kafkaProducerFactory(JsonSerializer customValueSerializer) { return new DefaultKafkaProducerFactory<>(properties.buildProducerProperties(), new StringSerializer(), customValueSerializer); } @@ -185,6 +185,9 @@ public ProducerFactory kafkaProducerFactory(JsonSerializer custom Setters are also provided, as an alternative to using these constructors. ==== +NOTE: When using Spring Boot and overriding the `ConsumerFactory` and `ProducerFactory` as shown above, wildcard generic types need to be used with the bean method return type. +If concrete generic types are provided instead, then Spring Boot will ignore these beans and still use the default ones. + Starting with version 2.2, you can explicitly configure the deserializer to use the supplied target type and ignore type information in headers by using one of the overloaded constructors that have a boolean `useHeadersIfPresent` argument (which is `true` by default). The following example shows how to do so: @@ -346,7 +349,7 @@ public ProducerFactory producerFactory(Map conf ---- Starting with version 2.8.3, you can configure the serializer to check if the map key is assignable from the target object, useful when a delegate serializer can serialize sub classes. -In this case, if there are amiguous matches, an ordered `Map`, such as a `LinkedHashMap` should be provided. +In this case, if there are ambiguous matches, an ordered `Map`, such as a `LinkedHashMap`, should be provided. [[by-topic]] === By Topic @@ -361,7 +364,7 @@ producerConfigs.put(DelegatingByTopicSerializer.VALUE_SERIALIZATION_TOPIC_CONFIG "topic[0-4]:" + ByteArraySerializer.class.getName() + ", topic[5-9]:" + StringSerializer.class.getName()); ...
-ConsumerConfigs.put(DelegatingByTopicDeserializer.VALUE_SERIALIZATION_TOPIC_CONFIG, +consumerConfigs.put(DelegatingByTopicDeserializer.VALUE_SERIALIZATION_TOPIC_CONFIG, "topic[0-4]:" + ByteArrayDeserializer.class.getName() + ", topic[5-9]:" + StringDeserializer.class.getName()); ---- @@ -463,7 +466,7 @@ The `SmartMessageConverter.toMessage()` method is called to create a new outboun Similarly, in the `KafkaMessageConverter.toMessage()` method, after the converter has created a new `Message` from the `ConsumerRecord`, the `SmartMessageConverter.fromMessage()` method is called and then the final inbound message is created with the newly converted payload. In either case, if the `SmartMessageConverter` returns `null`, the original message is used. -When the default converter is used in the `KafkaTemplate` and listener container factory, you configure the `SmartMessageConverter` by calling `setMessagingConverter()` on the template and via the `contentMessageConverter` property on `@KafkaListener` methods. +When the default converter is used in the `KafkaTemplate` and listener container factory, you configure the `SmartMessageConverter` by calling `setMessagingConverter()` on the template and via the `contentTypeConverter` property on `@KafkaListener` methods. Examples: @@ -664,7 +667,7 @@ Consider using a `DelegatingByTypeSerializer` configured to use a `ByteArraySeri Starting with version 3.1, you can add a `Validator` to the `ErrorHandlingDeserializer`. If the delegate `Deserializer` successfully deserializes the object, but that object fails validation, an exception is thrown similar to a deserialization exception occurring. This allows the original raw data to be passed to the error handler. -WHen creating the deserializer yourself, simply call `setValidator`; if you configure the serializer using properties, set the consumer configuration property `ErrorHandlingDeserializer.VALIDATOR_CLASS` to the class or fully qualified class name for your `Validator`. +When creating the deserializer yourself, simply call `setValidator`; if you configure the serializer using properties, set the consumer configuration property `ErrorHandlingDeserializer.VALIDATOR_CLASS` to the class or fully qualified class name for your `Validator`. When using Spring Boot, this property name is `spring.kafka.consumer.properties.spring.deserializer.validator.class`. [[payload-conversion-with-batch]] @@ -721,6 +724,24 @@ public void listen(List> fooMessages) { } ---- +If a record in the batch cannot be converted, its payload is set to `null` in the target `payloads` list. +The conversion exception is logged as a warning for this record and is also stored in the `KafkaHeaders.CONVERSION_FAILURES` header as an item of the `List`.
+The target `@KafkaListener` method may use the Java `Stream` API to filter out those `null` values from the payload list, or do something with the conversion exceptions header: + +[source, java] +---- +@KafkaListener(id = "foo", topics = "foo", autoStartup = "false") +public void listen(List list, + @Header(KafkaHeaders.CONVERSION_FAILURES) List conversionFailures) { + + for (int i = 0; i < list.size(); i++) { + if (conversionFailures.get(i) != null) { + throw new BatchListenerFailedException("Conversion Failed", conversionFailures.get(i), i); + } + } +} +---- + [[conversionservice-customization]] == `ConversionService` Customization diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/thread-safety.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/thread-safety.adoc index b5d49b09ed..9925424613 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/thread-safety.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/thread-safety.adoc @@ -17,3 +17,10 @@ Note that `SimpleThreadScope` does not destroy beans that have a destruction int IMPORTANT: By default, the application context's event multicaster invokes event listeners on the calling thread. If you change the multicaster to use an async executor, thread cleanup is not effective. +== Special Note on Virtual Threads and Concurrent Message Listener Containers + +Because of certain limitations in the underlying library classes still using `synchronized` blocks for thread coordination, applications need to be cautious when using virtual threads with concurrent message listener containers. +When virtual threads are enabled and the concurrency exceeds the available number of platform threads, the virtual threads are likely to be pinned to platform threads, and race conditions become possible. +Therefore, until the third-party libraries that Spring for Apache Kafka uses evolve to fully support virtual threads, it is recommended to keep the concurrency on the message listener container equal to or less than the number of platform threads. +This way, applications avoid race conditions between the threads and virtual threads being pinned to platform threads. + diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/transactions.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/transactions.adoc index 9bf7b81a96..aae4b186c4 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/transactions.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/transactions.adoc @@ -79,6 +79,9 @@ Instead, use a `KafkaTransactionManager` in the container to start the Kafka tra See xref:tips.adoc#ex-jdbc-sync[Examples of Kafka Transactions with Other Transaction Managers] for an example application that chains JDBC and Kafka transactions. +IMPORTANT: xref:retrytopic.adoc[Non-Blocking Retries] cannot be combined with xref:kafka/transactions.adoc#container-transaction-manager[Container Transactions]. +When the listener code throws an exception, the container transaction commit succeeds, and the record is sent to the retryable topic.
+ +[[kafkatemplate-local-transactions]] == `KafkaTemplate` Local Transactions diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/quick-tour.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/quick-tour.adoc index 17f5d37d85..a322840a02 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/quick-tour.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/quick-tour.adoc @@ -58,7 +58,7 @@ However, the quickest way to get started is to use https://start.spring.io[start This quick tour works with the following versions: -* Apache Kafka Clients 3.6.x +* Apache Kafka Clients 3.7.x * Spring Framework 6.1.x * Minimum Java version: 17 diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic.adoc index 08d02f3f2f..ab585a4352 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic.adoc @@ -7,5 +7,8 @@ Version 2.9 changed the mechanism to bootstrap infrastructure beans; see xref:re Achieving non-blocking retry / dlt functionality with Kafka usually requires setting up extra topics and creating and configuring the corresponding listeners. Since 2.7 Spring for Apache Kafka offers support for that via the `@RetryableTopic` annotation and `RetryTopicConfiguration` class to simplify that bootstrapping. +Since 3.2, Spring for Apache Kafka supports non-blocking retries with xref:kafka/receiving-messages/class-level-kafkalistener.adoc[@KafkaListener on a Class]. + IMPORTANT: Non-blocking retries are not supported with xref:kafka/receiving-messages/listener-annotation.adoc#batch-listeners[Batch Listeners]. +IMPORTANT: Non-Blocking Retries cannot be combined with xref:kafka/transactions.adoc#container-transaction-manager[Container Transactions]. diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/dlt-strategies.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/dlt-strategies.adoc index 8741ed560e..cd2c3432f8 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/dlt-strategies.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/dlt-strategies.adoc @@ -22,7 +22,7 @@ public void processMessage(MyPojo message) { } @DltHandler -public void processMessage(MyPojo message) { +public void processDltMessage(MyPojo message) { // ...
message processing, persistence, etc } ---- diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/features.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/features.adoc index 30a301ebfa..ff97cb57bd 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/features.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/features.adoc @@ -33,7 +33,7 @@ public void processMessage(MyPojo message) { public RetryTopicConfiguration myRetryTopic(KafkaTemplate template) { return RetryTopicConfigurationBuilder .newInstance() - .fixedBackoff(3_000) + .fixedBackOff(3_000) .maxAttempts(4) .create(template); } @@ -47,7 +47,7 @@ You can also provide a custom implementation of Spring Retry's `SleepingBackOffP public RetryTopicConfiguration myRetryTopic(KafkaTemplate template) { return RetryTopicConfigurationBuilder .newInstance() - .customBackOff(new MyCustomBackOffPolicy()) + .customBackoff(new MyCustomBackOffPolicy()) .maxAttempts(5) .create(template); } @@ -81,7 +81,7 @@ public void processMessage(MyPojo message) { public RetryTopicConfiguration myRetryTopic(KafkaTemplate template) { return RetryTopicConfigurationBuilder .newInstance() - .fixedBackoff(2_000) + .fixedBackOff(2_000) .timeoutAfter(5_000) .create(template); } @@ -212,7 +212,7 @@ If your broker version is earlier than 2.4, you will need to set an explicit val [[retry-headers]] == Failure Header Management -When considering how to manage failure headers (original headers and exception headers), the framework delegates to the `DeadLetterPublishingRecover` to decide whether to append or replace the headers. +When considering how to manage failure headers (original headers and exception headers), the framework delegates to the `DeadLetterPublishingRecoverer` to decide whether to append or replace the headers. By default, it explicitly sets `appendOriginalHeaders` to `false` and leaves `stripPreviousExceptionHeaders` to the default used by the `DeadLetterPublishingRecoverer`. @@ -291,11 +291,10 @@ public RetryTopicConfiguration myRetryTopic(KafkaTemplate templa return RetryTopicConfigurationBuilder .newInstance() .dltRoutingRules(Map.of("-deserialization", Set.of(DeserializationException.class))) - .create(kafkaOperations) .create(template); } ---- `suffix` takes place before the general `dltTopicSuffix` in the custom DLT name. Considering presented examples, the message, which caused the `DeserializationException` will be routed to the `my-annotated-topic-deserialization-dlt` instead of the `my-annotated-topic-dlt`. -Custom DLTs will be created following the same rules as stated in the xref:retrytopic/features.adoc#topics-autocreation[Topics AutoCreation]. \ No newline at end of file +Custom DLTs will be created following the same rules as stated in the xref:retrytopic/features.adoc#topics-autocreation[Topics AutoCreation]. diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/how-the-pattern-works.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/how-the-pattern-works.adoc index 7db1f32f64..6b39be1ac8 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/how-the-pattern-works.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/how-the-pattern-works.adoc @@ -13,11 +13,9 @@ IMPORTANT: By using this strategy you lose Kafka's ordering guarantees for that IMPORTANT: You can set the `AckMode` you prefer, but `RECORD` is suggested.
-IMPORTANT: At this time this functionality doesn't support class level `@KafkaListener` annotations. - When using a manual `AckMode` with `asyncAcks` set to true, the `DefaultErrorHandler` must be configured with `seekAfterError` set to `false`. -Starting with versions 2.9.10, 3.0.8, this will be set to true unconditionally for such configurations. -With earlier versions, it was necessary to override the `RetryConfigurationSupport.configureCustomizers()` method to set the property to `true`. +Starting with versions 2.9.10, 3.0.8, this will be set to `false` unconditionally for such configurations. +With earlier versions, it was necessary to override the `RetryConfigurationSupport.configureCustomizers()` method to set the property to `false`. [source, java] ---- @@ -28,4 +26,3 @@ protected void configureCustomizers(CustomizersConfigurer customizersConfigurer) ---- In addition, before those versions, using the default (logging) DLT handler was not compatible with any kind of manual `AckMode`, regardless of the `asyncAcks` property. - diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/retry-config.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/retry-config.adoc index 6394863aa5..dae6fe1eae 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/retry-config.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/retry-config.adoc @@ -28,6 +28,21 @@ public void processMessage(MyPojo message) { } ---- +Since 3.2, `@RetryableTopic` can be used with a class-level `@KafkaListener`: +[source,java] +---- +@RetryableTopic(listenerContainerFactory = "my-retry-topic-factory") +@KafkaListener(topics = "my-annotated-topic") +public class ClassLevelRetryListener { + + @KafkaHandler + public void processMessage(MyPojo message) { + // ... message processing + } + +} +---- + You can specify a method in the same class to process the dlt messages by annotating it with the `@DltHandler` annotation. If no DltHandler method is provided a default consumer is created which only logs the consumption. @@ -62,7 +77,7 @@ static @interface MetaAnnotatedRetryableTopic { You can also configure the non-blocking retry support by creating `RetryTopicConfiguration` beans in a `@Configuration` annotated class. -[source, java] +[source, java] ---- @Bean public RetryTopicConfiguration myRetryTopic(KafkaTemplate template) { @@ -197,3 +212,26 @@ You can return a specific partition number, or `null` to indicate that the `Kafk By default, all values of retry headers (number of attempts, timestamps) are retained when a record transitions through the retry topics. Starting with version 2.9.6, if you want to retain just the last value of these headers, use the `configureDeadLetterPublishingContainerFactory()` method shown above to set the factory's `retainAllRetryHeaderValues` property to `false`. +[[find-retry-topic-config]] +== Find RetryTopicConfiguration +The `RetryTopicConfigurationProvider` attempts to provide an instance of `RetryTopicConfiguration` by either creating one from a `@RetryableTopic` annotation, or from the bean container if no annotation is available. + +If beans are found in the container, there's a check to determine whether the provided topics should be handled by any of those instances. + +If the `@RetryableTopic` annotation is provided, a `@DltHandler` annotated method is looked up.
+ +Since 3.2, a new API is provided to create a `RetryTopicConfiguration` when `@RetryableTopic` is annotated on a class: + +[source, java] +---- +@Bean +public RetryTopicConfiguration myRetryTopic() { + RetryTopicConfigurationProvider provider = new RetryTopicConfigurationProvider(beanFactory); + return provider.findRetryConfigurationFor(topics, null, AnnotatedClass.class, bean); +} + +@RetryableTopic +public static class AnnotatedClass { + // NoOps +} +---- \ No newline at end of file diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/retry-topic-lcf.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/retry-topic-lcf.adoc index 75ef37e208..72e31bf304 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/retry-topic-lcf.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/retry-topic-lcf.adoc @@ -35,13 +35,3 @@ public RetryTopicConfiguration myOtherRetryTopic(KafkaTemplate IMPORTANT: Since 2.8.3 you can use the same factory for retryable and non-retryable topics. -If you need to revert the factory configuration behavior to prior 2.8.3, you can override the `configureRetryTopicConfigurer` method of a `@Configuration` class that extends `RetryTopicConfigurationSupport` as explained in xref:retrytopic/retry-config.adoc#retry-topic-global-settings[Configuring Global Settings and Features] and set `useLegacyFactoryConfigurer` to `true`, such as: - -[source, java] ----- -@Override -protected Consumer configureRetryTopicConfigurer() { - return rtc -> rtc.useLegacyFactoryConfigurer(true); -} ----- - diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/topic-naming.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/topic-naming.adoc index 2754d28d8d..1d97f7a26c 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/topic-naming.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/retrytopic/topic-naming.adoc @@ -92,7 +92,7 @@ public void processMessage(MyPojo message) { public RetryTopicConfiguration myRetryTopic(KafkaTemplate template) { return RetryTopicConfigurationBuilder .newInstance() - .fixedBackoff(3_000) + .fixedBackOff(3_000) .maxAttempts(5) .useSingleTopicForFixedDelays() .create(template); @@ -111,7 +111,7 @@ This "final" retry topic will be suffixed with the provided or default suffix, a NOTE: By opting to use a single topic for the retries with the `maxInterval` delay, it may become more viable to configure an exponential retry policy that keeps retrying for a long time, because in this approach you do not need a large amount of topics. -The default behavior is to work with the number of retry topics equal to the configured `maxAttempts` minus 1 and, when using exponential backoff, the retry topics are suffixed with the delay values, with the last retry topic (corresponding to the `maxInterval` delay) being suffixed with an additional index. +Starting with 3.2, the default behavior is to reuse the retry topic for the same intervals; when using exponential backoff, the retry topics are suffixed with the delay values, with the last retry topic (corresponding to the `maxInterval` delay) reused for all subsequent attempts at that interval.
For instance, when configuring the exponential backoff with `initialInterval=1_000`, `multiplier=2`, and `maxInterval=16_000`, in order to keep trying for one hour, one would need to configure `maxAttempts` as 229, and by default the needed retry topics would be: @@ -119,27 +119,27 @@ For instance, when configuring the exponential backoff with `initialInterval=1_0 * -retry-1000 * -retry-2000 * -retry-4000 * -retry-8000 -* -retry-16000-0 -* -retry-16000-1 -* -retry-16000-2 -* ... -* -retry-16000-224 +* -retry-16000 -When using the strategy that reuses the retry topic for the same intervals, in the same configuration above the needed retry topics would be: +When using the strategy that works with a number of retry topics equal to the configured `maxAttempts` minus 1, with the last retry topic (corresponding to the `maxInterval` delay) suffixed with an additional index, the needed retry topics would be: * -retry-1000 * -retry-2000 * -retry-4000 * -retry-8000 -* -retry-16000 +* -retry-16000-0 +* -retry-16000-1 +* -retry-16000-2 +* ... +* -retry-16000-224 -This will be the default in a future release. +If multiple topics are required, they can be enabled using the following configuration. [source, java] ---- @RetryableTopic(attempts = 230, backoff = @Backoff(delay = 1_000, multiplier = 2, maxDelay = 16_000), - sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.SINGLE_TOPIC) + sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.MULTIPLE_TOPICS) @KafkaListener(topics = "my-annotated-topic") public void processMessage(MyPojo message) { // ... message processing diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/streams.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/streams.adoc index fd68c121d8..7675c3ab24 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/streams.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/streams.adoc @@ -82,9 +82,16 @@ A new `KafkaStreams` is created on each `start()`. You might also consider using different `StreamsBuilderFactoryBean` instances, if you would like to control the lifecycles for `KStream` instances separately. You also can specify `KafkaStreams.StateListener`, `Thread.UncaughtExceptionHandler`, and `StateRestoreListener` options on the `StreamsBuilderFactoryBean`, which are delegated to the internal `KafkaStreams` instance. -Also, apart from setting those options indirectly on `StreamsBuilderFactoryBean`, starting with _version 2.1.5_, you can use a `KafkaStreamsCustomizer` callback interface to configure an inner `KafkaStreams` instance. + +Also, apart from setting those options indirectly on `StreamsBuilderFactoryBean`, you can use a `KafkaStreamsCustomizer` callback interface to: + +1. (from _version 2.1.5_) configure an inner `KafkaStreams` instance using `customize(KafkaStreams)` +2. (from _version 3.3.0_) instantiate a custom implementation of `KafkaStreams` using `initKafkaStreams(Topology, Properties, KafkaClientSupplier)` (see the sketch below) + Note that `KafkaStreamsCustomizer` overrides the options provided by `StreamsBuilderFactoryBean`. + If you need to perform some `KafkaStreams` operations directly, you can access that internal `KafkaStreams` instance by using `StreamsBuilderFactoryBean.getKafkaStreams()`.
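+The following is a minimal sketch of both `KafkaStreamsCustomizer` hooks listed above (`MyKafkaStreams` is a hypothetical `KafkaStreams` subclass, not part of the framework):
+
+[source, java]
+----
+streamsBuilderFactoryBean.setKafkaStreamsCustomizer(new KafkaStreamsCustomizer() {
+
+    @Override
+    public KafkaStreams initKafkaStreams(Topology topology, Properties properties,
+            KafkaClientSupplier clientSupplier) {
+        // since 3.3.0: instantiate a custom KafkaStreams implementation
+        return new MyKafkaStreams(topology, properties, clientSupplier);
+    }
+
+    @Override
+    public void customize(KafkaStreams kafkaStreams) {
+        // since 2.1.5: configure the (possibly custom) instance before it is started
+        kafkaStreams.setStateListener((newState, oldState) -> { });
+    }
+
+});
+----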
+ +You can autowire `StreamsBuilderFactoryBean` bean by type, but you should be sure to use the full type in the bean definition, as the following example shows: [source,java] @@ -213,6 +220,14 @@ You can declare and use any additional `StreamsBuilderFactoryBean` beans as well You can perform additional customization of that bean, by providing a bean that implements `StreamsBuilderFactoryBeanConfigurer`. If there are multiple such beans, they will be applied according to their `Ordered.order` property. + +=== Cleanup & Stop configuration + +When the factory is stopped, `KafkaStreams.close()` is called with two parameters: + +* `closeTimeout`: how long to wait for the threads to shut down (defaults to `DEFAULT_CLOSE_TIMEOUT`, 10 seconds). Can be configured using `StreamsBuilderFactoryBean.setCloseTimeout()`. +* `leaveGroupOnClose`: whether to trigger the consumer leave-group call (defaults to `false`). Can be configured using `StreamsBuilderFactoryBean.setLeaveGroupOnClose()`. + By default, when the factory bean is stopped, the `KafkaStreams.cleanUp()` method is called. Starting with version 2.1.2, the factory bean has additional constructors, taking a `CleanupConfig` object that has properties to let you control whether the `cleanUp()` method is called during `start()` or `stop()` or neither. Starting with version 2.7, the default is to never clean up local state. @@ -321,7 +336,7 @@ Of course, the `recoverer()` bean can be your own implementation of `ConsumerRec Starting with version 3.2, Spring for Apache Kafka provides basic facilities required for interactive queries in Kafka Streams. Interactive queries are useful in stateful Kafka Streams applications since they provide a way to constantly query the stateful stores in the application. Thus, if an application wants to materialize the current view of the system under consideration, interactive queries provide a way to do that. -To learn more about interacive queries, see this https://kafka.apache.org/36/documentation/streams/developer-guide/interactive-queries.html[article]. +To learn more about interactive queries, see this https://kafka.apache.org/36/documentation/streams/developer-guide/interactive-queries.html[article]. The support in Spring for Apache Kafka is centered around an API called `KafkaStreamsInteractiveQueryService` which is a facade around interactive queries APIs in Kafka Streams library. An application can create an instance of this service as a bean and then later on use it to retrieve the state store by its name. @@ -361,17 +376,15 @@ Here is the type signature from the API. public T retrieveQueryableStore(String storeName, QueryableStoreType storeType) ---- -When calling this method, the user can specifially ask for the proper state store type, as we have done in the above example. - -NOTE: `KafkaStreamsInteractiveQueryService` API in Spring for Apache Kafka only supports providing access to local key-value stores at the moment. +When calling this method, the user can specifically ask for the proper state store type, as we have done in the above example. === Retrying State Store Retrieval When trying to retrieve the state store using the `KafkaStreamsInteractiveQueryService`, there is a chance that the state store might not be found for various reasons. If those reasons are transitory, `KafkaStreamsInteractiveQueryService` provides an option to retry the retrieval of the state store by allowing the injection of a custom `RetryTemplate`.
-By default, the `RetryTemmplate` that is used in `KafkaStreamsInteractiveQueryService` uses a maximum attempts of three with a fixed backoff of one second. +By default, the `RetryTemplate` that is used in `KafkaStreamsInteractiveQueryService` uses a maximum of three attempts with a fixed backoff of one second. -Here is how you can inject a custom `RetryTemmplate` into `KafkaStreamsInteractiveQueryService` with the maximum attempts of ten. +Here is how you can inject a custom `RetryTemplate` into `KafkaStreamsInteractiveQueryService` with a maximum of ten attempts. [source, java] ---- @@ -388,6 +401,49 @@ public KafkaStreamsInteractiveQueryService kafkaStreamsInteractiveQueryService(S } ---- +=== Querying Remote State Stores + +The API shown above for retrieving the state store, `retrieveQueryableStore`, is intended for locally available key-value state stores. +In production settings, Kafka Streams applications are most likely distributed based on the number of partitions. +If a topic has four partitions and there are four instances of the same Kafka Streams processor running, then each instance may be responsible for processing a single partition from the topic. +In this scenario, calling `retrieveQueryableStore` may not give the correct result that an instance is looking for, although it might return a valid store. +Let's assume that the topic with four partitions has data about various keys and a single partition is always responsible for a specific key. +If the instance that is calling `retrieveQueryableStore` is looking for information about a key that this instance does not host, then it will not receive any data. +This is because the current Kafka Streams instance does not know anything about this key. +To fix this, the calling instance first needs to make sure that it has the host information for the Kafka Streams processor instance where the particular key is hosted. +This can be retrieved from any Kafka Streams instance under the same `application.id` as below. + +[source, java] +---- +@Autowired +private KafkaStreamsInteractiveQueryService interactiveQueryService; + +HostInfo kafkaStreamsApplicationHostInfo = this.interactiveQueryService.getKafkaStreamsApplicationHostInfo("app-store", 12345, new IntegerSerializer()); +---- + +In the example code above, the calling instance is querying for a particular key `12345` from the state-store named `app-store`. +The API also needs a corresponding key serializer, which in this case is the `IntegerSerializer`. +Kafka Streams looks through all its instances under the same `application.id` and tries to find which instance hosts this particular key. +Once found, it returns that host information as a `HostInfo` object. + +This is what the API looks like: + +[source, java] +---- +public HostInfo getKafkaStreamsApplicationHostInfo(String store, K key, Serializer serializer) +---- + +When using multiple instances of the Kafka Streams processors of the same `application.id` in a distributed way like this, the application is supposed to provide an RPC layer where the state stores can be queried over an RPC endpoint such as a REST one. +See this https://kafka.apache.org/36/documentation/streams/developer-guide/interactive-queries.html#querying-remote-state-stores-for-the-entire-app[article] for more details on this. +When using Spring for Apache Kafka, it is very easy to add a Spring-based REST endpoint by using the spring-web technologies.
+Once there is a REST endpoint, it can be used to query the state stores from any Kafka Streams instance, given that the `HostInfo` where the key is hosted is known to the instance. + +If the instance hosting the key is the current instance, the application does not need to call the RPC mechanism, but can make an in-JVM call instead. +However, the trouble is that an application may not know whether the instance making the call is where the key is hosted, because a particular server may lose a partition due to a consumer rebalance. +To fix this issue, `KafkaStreamsInteractiveQueryService` provides a convenient API for querying the current host information via an API method `getCurrentKafkaStreamsApplicationHostInfo()` that returns the current `HostInfo`. +The idea is that the application can first acquire information about where the key is held, and then compare the `HostInfo` with that of the current instance. +If the `HostInfo` data matches, it can proceed with a simple in-JVM call via `retrieveQueryableStore`; otherwise, it can go with the RPC option. + [[kafka-streams-example]] == Kafka Streams Example @@ -424,7 +480,7 @@ public class KafkaStreamsConfig { stream .mapValues((ValueMapper) String::toUpperCase) .groupByKey() - .windowedBy(TimeWindows.of(Duration.ofMillis(1_000))) + .windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofMillis(1_000))) .reduce((String value1, String value2) -> value1 + value2, Named.as("windowStore")) .toStream() diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/testing.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/testing.adoc index 0b387a9a74..873c3ec312 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/testing.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/testing.adoc @@ -8,8 +8,8 @@ The `spring-kafka-test` jar contains some useful utilities to assist with testin Two implementations are provided: -* `EmbeddedKafkaZKBroker` - legacy implementation which starts an embedded `Zookeeper` instance. -* `EmbeddedKafkaKraftBroker` - (default) uses `Kraft` instead of `Zookeeper` in combined controller and broker modes (since 3.1). +* `EmbeddedKafkaZKBroker` - legacy implementation which starts an embedded `Zookeeper` instance (which is still the default when using `EmbeddedKafka`). +* `EmbeddedKafkaKraftBroker` - uses `Kraft` instead of `Zookeeper` in combined controller and broker modes (since 3.1). There are several techniques to configure the broker as discussed in the following sections. @@ -228,7 +228,7 @@ In addition, these properties can be provided: - `spring.kafka.embedded.topics` - topics (comma-separated value) to create in the started Kafka cluster; - `spring.kafka.embedded.partitions` - number of partitions to provision for the created topics; - `spring.kafka.embedded.broker.properties.location` - the location of the file for additional Kafka broker configuration properties; the value of this property must follow the Spring resource abstraction pattern; -- `spring.kafka.embedded.kraft` - when false, use an `EmbeddedKafkaZKBroker` instead of an `EmbeddedKafkaKraftBroker`. +- `spring.kafka.embedded.kraft` - `false` by default; when `true`, use an `EmbeddedKafkaKraftBroker` instead of an `EmbeddedKafkaZKBroker`. Essentially these properties mimic some of the `@EmbeddedKafka` attributes. @@ -243,7 +243,7 @@ NOTE: `spring-kafka-test` has transitive dependencies on `junit-jupiter-api` and If you wish to use the embedded broker and are NOT using JUnit, you may wish to exclude these dependencies.
[[embedded-kafka-annotation]] -== @EmbeddedKafka Annotation +== `@EmbeddedKafka` Annotation We generally recommend that you use the rule as a `@ClassRule` to avoid starting and stopping the broker between tests (and use a different topic for each test). Starting with version 2.0, if you use Spring's test application context caching, you can also declare an `EmbeddedKafkaBroker` bean, so a single broker can be used across multiple test classes. For convenience, we provide a test class-level annotation called `@EmbeddedKafka` to register the `EmbeddedKafkaBroker` bean. @@ -295,7 +295,7 @@ public class KafkaStreamsTests { Starting with version 2.2.4, you can also use the `@EmbeddedKafka` annotation to specify the Kafka ports property. -Starting with version 3.1, set the `kraft` property to `false` to use an `EmbeddedKafkaZKBroker` instead of an `EmbeddedKafkaKraftBroker`. +Starting with version 3.2, set the `kraft` property to `true` to use an `EmbeddedKafkaKraftBroker` instead of an `EmbeddedKafkaZKBroker`. The following example sets the `topics`, `brokerProperties`, and `brokerPropertiesLocation` attributes of `@EmbeddedKafka`; these attributes support property placeholder resolution: @@ -317,7 +317,7 @@ Properties defined by `brokerProperties` override properties found in `brokerPro You can use the `@EmbeddedKafka` annotation with JUnit 4 or JUnit 5. [[embedded-kafka-junit5]] -== @EmbeddedKafka Annotation with JUnit5 +== `@EmbeddedKafka` Annotation with JUnit5 Starting with version 2.3, there are two ways to use the `@EmbeddedKafka` annotation with JUnit5. When used with the `@SpringJunitConfig` annotation, the embedded broker is added to the test application context. @@ -404,6 +404,16 @@ public class MyApplicationTests { Notice that, since this is a Spring Boot application, we override the broker list property to set Spring Boot's property. +[[embedded-broker-with-springjunitconfig-annotations]] +== `@EmbeddedKafka` with `@SpringJUnitConfig` + +When using `@EmbeddedKafka` with `@SpringJUnitConfig`, it is recommended to use `@DirtiesContext` on the test class. +This is to prevent potential race conditions occurring during the JVM shutdown after running multiple tests in a test suite. +For example, without using `@DirtiesContext`, the `EmbeddedKafkaBroker` may shut down earlier, while the application context still needs resources from it. +Since every `EmbeddedKafka` test run creates its own temporary directory, when this race condition occurs, it produces error log messages indicating that the files it is trying to delete or clean up are no longer available. +Adding `@DirtiesContext` will ensure that the application context is cleaned up after each test and not cached, making it less vulnerable to potential resource race conditions like these.
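+The following is a minimal sketch of this arrangement (the topic name is illustrative):
+
+[source, java]
+----
+@SpringJUnitConfig
+@EmbeddedKafka(topics = "someTopic")
+@DirtiesContext
+public class MySpringJUnitConfigTests {
+
+    @Test
+    void test(@Autowired EmbeddedKafkaBroker broker) {
+        // interact with the embedded broker; with @DirtiesContext the context
+        // (and the broker) is closed after this test class, avoiding the
+        // temporary directory cleanup race described above
+    }
+
+    @Configuration
+    static class Config {
+    }
+
+}
+----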
+ + [[kafka-testing-embeddedkafka-annotation]] === `@EmbeddedKafka` Annotation or `EmbeddedKafkaBroker` Bean diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/tips.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/tips.adoc index 0dbb13b4bf..9f003d0c4e 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/tips.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/tips.adoc @@ -12,9 +12,9 @@ The following is an example of how to use the power of a SpEL expression to crea [source, java] ---- @KafkaListener(topicPartitions = @TopicPartition(topic = "compacted", - partitions = "#{@finder.partitions('compacted')}"), + partitions = "#{@finder.partitions('compacted')}", partitionOffsets = @PartitionOffset(partition = "*", initialOffset = "0"))) -public void listen(@Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) String key, String payload) { +public void listen(@Header(KafkaHeaders.RECEIVED_KEY) String key, String payload) { ... } @@ -166,7 +166,7 @@ public void sendToKafka(String in) { [[tip-json]] == Customizing the JsonSerializer and JsonDeserializer -The serializer and deserializer support a number of cusomizations using properties, see xref:kafka/serdes.adoc#json-serde[JSON] for more information. +The serializer and deserializer support a number of customizations using properties, see xref:kafka/serdes.adoc#json-serde[JSON] for more information. The `kafka-clients` code, not Spring, instantiates these objects, unless you inject them directly into the consumer and producer factories. If you wish to configure the (de)serializer using properties, but wish to use, say, a custom `ObjectMapper`, simply create a subclass and pass the custom mapper into the `super` constructor. For example: diff --git a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/whats-new.adoc b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/whats-new.adoc index 5c856bb4c0..c80ac18c77 100644 --- a/spring-kafka-docs/src/main/antora/modules/ROOT/pages/whats-new.adoc +++ b/spring-kafka-docs/src/main/antora/modules/ROOT/pages/whats-new.adoc @@ -1,43 +1,70 @@ = What's new? -[[what-s-new-in-3-2-since-3-1]] -== What's New in 3.2 Since 3.1 +[[what-s-new-in-3-3-since-3-2]] +== What's New in 3.3 Since 3.2 :page-section-summary-toc: 1 -This section covers the changes made from version 3.1 to version 3.2. +This section covers the changes made from version 3.2 to version 3.3. For changes in earlier versions, see xref:appendix/change-history.adoc[Change History]. -[[x32-kafka-streams-iqs-support]] -=== Kafka Streams Interactive Query Support -A new API `KafkaStreamsInteractiveQuerySupport` for accessing queryable stores used in Kafka Streams interactive queries. -See xref:streams.adoc#kafka-streams-iq-support[Kafka Streams Interactive Support] for more details. +[[x33-dlt-topic-naming]] +=== DLT Topic Naming Convention +The naming convention for DLT topics has been standardized to use the "-dlt" suffix consistently. This change ensures compatibility and avoids conflicts when transitioning between different retry solutions. Users who wish to retain the ".DLT" suffix behavior need to opt-in explicitly by setting the appropriate DLT name property. +[[x33-seek-with-group-id]] +=== Enhanced Seek Operations for Consumer Groups +A new method, `getGroupId()`, has been added to the `ConsumerSeekCallback` interface. +This method allows for more selective seek operations by targeting only the desired consumer group.
+The `AbstractConsumerSeekAware` can now also register, retrieve, and remove all callbacks for each topic partition in a multi-group listener scenario, without missing any of them. +See the new APIs (`getSeekCallbacksFor(TopicPartition topicPartition)` and `getTopicsAndCallbacks()`) and xref:kafka/seek.adoc#seek[Seek API Docs] for more details. -[[x32-tiss]] -=== TransactionIdSuffixStrategy -A new `TransactionIdSuffixStrategy` interface was introduced to manage `transactional.id` suffix. -The default implementation is `DefaultTransactionIdSuffixStrategy` when setting `maxCache` greater than zero can reuse `transactional.id` within a specific range, otherwise suffixes will be generated on the fly by incrementing a counter. -See xref:kafka/transactions.adoc#transaction-id-suffix-fixed[Fixed TransactionIdSuffix] for more information. +[[x33-new-option-ignore-empty-batch]] +=== Configurable Handling of Empty Batches in Kafka Listener with RecordFilterStrategy -[[x32-async-return]] -=== Async @KafkaListener Return -`@KafkaListener` (and `@KafkaHandler`) methods can now return asynchronous return types include `CompletableFuture`, `Mono` and Kotlin `suspend` functions. -See xref:kafka/receiving-messages/async-returns.adoc[Async Returns] for more information. +`RecordFilterStrategy` now supports ignoring empty batches that result from filtering. +This can be configured by overriding the default method `ignoreEmptyBatch()`, which defaults to `false`, ensuring the `KafkaListener` is invoked even if all `ConsumerRecords` are filtered out. +For more details, see xref:kafka/receiving-messages/filtering.adoc[Message receive filtering Docs]. -[[x32-customizable-dlt-routing]] -=== Routing of messages to custom DLTs based on thrown exceptions +[[x33-concurrent-container-stopped-event]] +=== ConcurrentContainerStoppedEvent -It's now possible to redirect messages to the custom DLTs based on the type of the exception, which has been thrown during the message processing. -Rules for the redirection are set either via the `RetryableTopic.exceptionBasedDltRouting` or the `RetryTopicConfigurationBuilder.dltRoutingRules`. -Custom DLTs are created automatically as well as other retry and dead-letter topics. -See xref:retrytopic/features.adoc#exc-based-custom-dlt-routing[Routing of messages to custom DLTs based on thrown exceptions] for more information. +The `ConcurrentMessageListenerContainer` now emits a `ConcurrentContainerStoppedEvent` when all of its child containers are stopped. +For more details, see xref:kafka/events.adoc[Application Events] and `ConcurrentContainerStoppedEvent` Javadocs. -[[x32-after-rollback-processing]] -=== After Rollback Processing +[[x33-original-record-key-in-reply]] +=== Original Record Key in Reply -A new `AfterRollbackProcessor` API `processBatch` is provided. -See xref:kafka/annotation-error-handling.adoc#after-rollback[After-rollback Processor] for more information. \ No newline at end of file +When using `ReplyingKafkaTemplate`, if the original record from the request contains a key, then that same key will be part of the reply as well. +For more details, see the xref:kafka/sending-messages.adoc[Sending Messages] section of the reference docs. +[[x33-customize-logging-in-DeadLetterPublishingRecovererFactory]] +=== Customizing Logging in DeadLetterPublishingRecovererFactory + +When using `DeadLetterPublishingRecovererFactory`, user applications can override the `maybeLogListenerException` method to customize the logging behavior.
+ +[[x33-customize-admin-client-in-KafkaAdmin]] +=== Customize Admin client in KafkaAdmin + +When extending `KafkaAdmin`, user applications may override the `createAdmin` method to customize Admin client creation. + +[[x33-customize-kafka-streams-implementation]] +=== Customizing The Implementation of Kafka Streams + +When using `KafkaStreamsCustomizer`, it is now possible to return a custom implementation of the `KafkaStreams` object by overriding the `initKafkaStreams` method. + +[[x33-kafka-headers-for-batch-listeners]] +=== KafkaHeaders.DELIVERY_ATTEMPT for batch listeners + +When using a `BatchListener`, the `ConsumerRecord` can have the `KafkaHeaders.DELIVERY_ATTEMPT` header in its headers field. +If the `DeliveryAttemptAwareRetryListener` is set on the error handler as a retry listener, each `ConsumerRecord` has the delivery attempt header. +For more details, see xref:kafka/annotation-error-handling.adoc#delivery-attempts-header-for-batch-listener[Kafka Headers for Batch Listener]. + +[[x33-task-scheduler-for-kafka-metrics]] +=== Kafka Metrics Listeners and `TaskScheduler` + +The `MicrometerProducerListener`, `MicrometerConsumerListener` and `KafkaStreamsMicrometerListener` can now be configured with a `TaskScheduler`. +See `KafkaMetricsSupport` JavaDocs and xref:kafka/micrometer.adoc[Micrometer Support] for more information. diff --git a/spring-kafka-docs/src/main/kotlin/org/springframework/kafka/kdocs/dynamic/Application.kt b/spring-kafka-docs/src/main/kotlin/org/springframework/kafka/kdocs/dynamic/Application.kt index fba0d96111..51960cbc30 100644 --- a/spring-kafka-docs/src/main/kotlin/org/springframework/kafka/kdocs/dynamic/Application.kt +++ b/spring-kafka-docs/src/main/kotlin/org/springframework/kafka/kdocs/dynamic/Application.kt @@ -1,5 +1,5 @@ /* - * Copyright 2022-2023 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -33,6 +33,7 @@ import org.springframework.kafka.listener.MessageListener /** * @author Gary Russell + * @author Soby Chacko * @since 2.8.9 */ @SpringBootApplication @@ -47,8 +48,8 @@ class Application { return ApplicationRunner { _: ApplicationArguments? -> // tag::getBeans[] -applicationContext.getBean(MyPojo::class.java, "one", arrayOf("topic2")) -applicationContext.getBean(MyPojo::class.java, "two", arrayOf("topic3")) +applicationContext.getBean(MyPojo::class.java, "one", "topic2") +applicationContext.getBean(MyPojo::class.java, "two", "topic3") // end::getBeans[] } } @@ -88,7 +89,7 @@ private fun createContainer( @Bean @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) -fun pojo(id: String?, topic: String?): MyPojo { +fun pojo(id: String, topic: String): MyPojo { return MyPojo(id, topic) } //end::pojoBean[] @@ -114,9 +115,9 @@ class MyListener : MessageListener { // tag::pojo[] -class MyPojo(id: String?, topic: String?) { +class MyPojo(val id: String, val topic: String) { - @KafkaListener(id = "#{__listener.id}", topics = ["#{__listener.topics}"]) + @KafkaListener(id = "#{__listener.id}", topics = ["#{__listener.topic}"]) fun listen(`in`: String?)
{ println(`in`) } diff --git a/spring-kafka-docs/src/main/kotlin/org/springframework/kafka/kdocs/requestreply/Application.kt b/spring-kafka-docs/src/main/kotlin/org/springframework/kafka/kdocs/requestreply/Application.kt index b7781dd58b..902ed27d04 100644 --- a/spring-kafka-docs/src/main/kotlin/org/springframework/kafka/kdocs/requestreply/Application.kt +++ b/spring-kafka-docs/src/main/kotlin/org/springframework/kafka/kdocs/requestreply/Application.kt @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,27 +15,25 @@ */ package org.springframework.kafka.kdocs.requestreply -import org.springframework.boot.autoconfigure.SpringBootApplication -import org.springframework.kafka.core.KafkaAdmin.NewTopics -import org.springframework.kafka.config.TopicBuilder -import org.springframework.kafka.core.KafkaTemplate -import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory -import org.springframework.kafka.requestreply.ReplyingKafkaTemplate -import org.springframework.kafka.support.converter.ByteArrayJsonMessageConverter -import org.springframework.boot.ApplicationRunner -import org.springframework.boot.ApplicationArguments -import org.springframework.kafka.requestreply.RequestReplyTypedMessageFuture -import org.springframework.messaging.support.MessageBuilder -import org.springframework.core.ParameterizedTypeReference -import org.springframework.kafka.annotation.KafkaListener -import org.springframework.messaging.handler.annotation.SendTo -import kotlin.jvm.JvmStatic import org.apache.kafka.common.serialization.ByteArrayDeserializer import org.apache.kafka.common.serialization.ByteArraySerializer import org.slf4j.LoggerFactory +import org.springframework.boot.ApplicationRunner import org.springframework.boot.SpringApplication +import org.springframework.boot.autoconfigure.SpringBootApplication import org.springframework.context.annotation.Bean +import org.springframework.core.ParameterizedTypeReference +import org.springframework.kafka.annotation.KafkaListener +import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory +import org.springframework.kafka.config.TopicBuilder +import org.springframework.kafka.core.KafkaAdmin.NewTopics +import org.springframework.kafka.core.KafkaTemplate import org.springframework.kafka.core.ProducerFactory +import org.springframework.kafka.requestreply.ReplyingKafkaTemplate +import org.springframework.kafka.requestreply.RequestReplyTypedMessageFuture +import org.springframework.kafka.support.converter.ByteArrayJsonMessageConverter +import org.springframework.messaging.handler.annotation.SendTo +import org.springframework.messaging.support.MessageBuilder import java.util.concurrent.TimeUnit import java.util.function.Consumer diff --git a/spring-kafka-docs/src/main/kotlin/org/springframework/kafka/kdocs/started/consumer/Application.kt b/spring-kafka-docs/src/main/kotlin/org/springframework/kafka/kdocs/started/consumer/Application.kt index a7fecb0aee..f547d9dbdc 100644 --- a/spring-kafka-docs/src/main/kotlin/org/springframework/kafka/kdocs/started/consumer/Application.kt +++ b/spring-kafka-docs/src/main/kotlin/org/springframework/kafka/kdocs/started/consumer/Application.kt @@ -1,5 +1,5 @@ /* - * Copyright 2021 the original author or authors. + * Copyright 2021-2024 the original author or authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,12 +15,11 @@ */ package org.springframework.kafka.kdocs.started.consumer -import org.springframework.boot.autoconfigure.SpringBootApplication import org.apache.kafka.clients.admin.NewTopic -import org.springframework.kafka.annotation.KafkaListener -import kotlin.jvm.JvmStatic +import org.springframework.boot.autoconfigure.SpringBootApplication import org.springframework.boot.runApplication import org.springframework.context.annotation.Bean +import org.springframework.kafka.annotation.KafkaListener import org.springframework.kafka.kdocs.started.producer.Application /** diff --git a/spring-kafka-docs/src/main/kotlin/org/springframework/kafka/kdocs/started/noboot/Sender.kt b/spring-kafka-docs/src/main/kotlin/org/springframework/kafka/kdocs/started/noboot/Sender.kt index aca85c641e..848ad36e8a 100644 --- a/spring-kafka-docs/src/main/kotlin/org/springframework/kafka/kdocs/started/noboot/Sender.kt +++ b/spring-kafka-docs/src/main/kotlin/org/springframework/kafka/kdocs/started/noboot/Sender.kt @@ -1,5 +1,5 @@ /* - * Copyright 2021 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,11 +15,7 @@ */ package org.springframework.kafka.kdocs.started.noboot -import org.springframework.boot.runApplication import org.springframework.kafka.core.KafkaTemplate -import kotlin.jvm.JvmStatic -import org.springframework.context.annotation.AnnotationConfigApplicationContext -import org.springframework.kafka.kdocs.started.producer.Application /** * Code snippet for quick start. diff --git a/spring-kafka-test/src/main/java/org/springframework/kafka/test/EmbeddedKafkaBroker.java b/spring-kafka-test/src/main/java/org/springframework/kafka/test/EmbeddedKafkaBroker.java index 9c5b74d868..7d700a4390 100644 --- a/spring-kafka-test/src/main/java/org/springframework/kafka/test/EmbeddedKafkaBroker.java +++ b/spring-kafka-test/src/main/java/org/springframework/kafka/test/EmbeddedKafkaBroker.java @@ -1,5 +1,5 @@ /* - * Copyright 2023 the original author or authors. + * Copyright 2023-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,14 +19,13 @@ import java.util.Map; import java.util.Set; +import kafka.server.KafkaConfig; import org.apache.kafka.clients.admin.NewTopic; import org.apache.kafka.clients.consumer.Consumer; import org.springframework.beans.factory.DisposableBean; import org.springframework.beans.factory.InitializingBean; -import kafka.server.KafkaConfig; - /** * @author Gary Russell * @since 3.1 @@ -90,6 +89,14 @@ default void afterPropertiesSet() { */ EmbeddedKafkaBroker brokerListProperty(String brokerListProperty); + /** + * Set the timeout in seconds for admin operations (e.g. topic creation, close). + * @param adminTimeout the timeout. + * @return the {@link EmbeddedKafkaBroker} + * @since 2.8.5 + */ + EmbeddedKafkaBroker adminTimeout(int adminTimeout); + /** * Get the bootstrap server addresses as a String. * @return the bootstrap servers. 
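Because `adminTimeout` is promoted here to the `EmbeddedKafkaBroker` interface itself, a caller can now configure it without referencing a concrete broker class. A minimal sketch (topic name hypothetical):

[source, java]
----
import org.springframework.kafka.test.EmbeddedKafkaBroker;
import org.springframework.kafka.test.EmbeddedKafkaKraftBroker;

class BrokerSetup {

	// Sketch: one KRaft broker, one partition per topic, with a longer admin timeout.
	EmbeddedKafkaBroker startBroker() {
		EmbeddedKafkaBroker broker = new EmbeddedKafkaKraftBroker(1, 1, "someTopic")
				.adminTimeout(30); // seconds
		broker.afterPropertiesSet(); // starts the broker and creates the topic
		return broker;
	}

}
----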
diff --git a/spring-kafka-test/src/main/java/org/springframework/kafka/test/EmbeddedKafkaBrokerFactory.java b/spring-kafka-test/src/main/java/org/springframework/kafka/test/EmbeddedKafkaBrokerFactory.java
new file mode 100644
index 0000000000..d04c6dd1f1
--- /dev/null
+++ b/spring-kafka-test/src/main/java/org/springframework/kafka/test/EmbeddedKafkaBrokerFactory.java
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2024 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.springframework.kafka.test;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.StringReader;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.Properties;
+import java.util.function.Function;
+
+import org.springframework.core.io.Resource;
+import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
+import org.springframework.kafka.test.context.EmbeddedKafka;
+import org.springframework.util.StringUtils;
+
+/**
+ * The factory encapsulating the {@link EmbeddedKafkaBroker} creation logic.
+ *
+ * @author Artem Bilan
+ *
+ * @since 3.2.6
+ */
+public final class EmbeddedKafkaBrokerFactory {
+
+	private static final String TRANSACTION_STATE_LOG_REPLICATION_FACTOR = "transaction.state.log.replication.factor";
+
+	/**
+	 * Create an {@link EmbeddedKafkaBroker} based on the {@code EmbeddedKafka} annotation.
+	 * @param embeddedKafka the {@code EmbeddedKafka} annotation.
+	 * @return a new {@link EmbeddedKafkaBroker} instance.
+	 */
+	public static EmbeddedKafkaBroker create(EmbeddedKafka embeddedKafka) {
+		return create(embeddedKafka, Function.identity());
+	}
+
+	/**
+	 * Create an {@link EmbeddedKafkaBroker} based on the {@code EmbeddedKafka} annotation.
+	 * @param embeddedKafka the {@code EmbeddedKafka} annotation.
+	 * @param propertyResolver the {@link Function} to resolve placeholders in the annotation attributes.
+	 * @return a new {@link EmbeddedKafkaBroker} instance.
+	 */
+	@SuppressWarnings("unchecked")
+	public static EmbeddedKafkaBroker create(EmbeddedKafka embeddedKafka, Function<String, String> propertyResolver) {
+		String[] topics =
+				Arrays.stream(embeddedKafka.topics())
+						.map(propertyResolver)
+						.toArray(String[]::new);
+
+		EmbeddedKafkaBroker embeddedKafkaBroker;
+		if (embeddedKafka.kraft()) {
+			embeddedKafkaBroker = kraftBroker(embeddedKafka, topics);
+		}
+		else {
+			embeddedKafkaBroker = zkBroker(embeddedKafka, topics);
+		}
+		int[] ports = setupPorts(embeddedKafka);
+
+		embeddedKafkaBroker.kafkaPorts(ports)
+				.adminTimeout(embeddedKafka.adminTimeout());
+
+		Properties properties = new Properties();
+
+		for (String pair : embeddedKafka.brokerProperties()) {
+			if (!StringUtils.hasText(pair)) {
+				continue;
+			}
+			try {
+				properties.load(new StringReader(propertyResolver.apply(pair)));
+			}
+			catch (Exception ex) {
+				throw new IllegalStateException("Failed to load broker property from [" + pair + "]", ex);
+			}
+		}
+
+		String brokerPropertiesLocation = embeddedKafka.brokerPropertiesLocation();
+		if (StringUtils.hasText(brokerPropertiesLocation)) {
+			String propertiesLocation = propertyResolver.apply(brokerPropertiesLocation);
+			Resource propertiesResource = new PathMatchingResourcePatternResolver().getResource(propertiesLocation);
+			if (!propertiesResource.exists()) {
+				throw new IllegalStateException(
+						"Failed to load broker properties from [" + propertiesResource + "]: resource does not exist.");
+			}
+			try (InputStream in = propertiesResource.getInputStream()) {
+				Properties p = new Properties();
+				p.load(in);
+				p.forEach((key, value) -> properties.putIfAbsent(key, propertyResolver.apply((String) value)));
+			}
+			catch (IOException ex) {
+				throw new IllegalStateException("Failed to load broker properties from [" + propertiesResource + "]", ex);
+			}
+		}
+
+		properties.putIfAbsent(TRANSACTION_STATE_LOG_REPLICATION_FACTOR,
+				String.valueOf(Math.min(3, embeddedKafka.count())));
+
+		embeddedKafkaBroker.brokerProperties((Map<String, String>) (Map<?, ?>) properties);
+		String bootstrapServersProperty = embeddedKafka.bootstrapServersProperty();
+		if (StringUtils.hasText(bootstrapServersProperty)) {
+			embeddedKafkaBroker.brokerListProperty(bootstrapServersProperty);
+		}
+
+		// Safe to start an embedded broker eagerly before context refresh
+		embeddedKafkaBroker.afterPropertiesSet();
+
+		return embeddedKafkaBroker;
+	}
+
+	private static int[] setupPorts(EmbeddedKafka embedded) {
+		int[] ports = embedded.ports();
+		if (embedded.count() > 1 && ports.length == 1 && ports[0] == 0) {
+			ports = new int[embedded.count()];
+		}
+		return ports;
+	}
+
+	private static EmbeddedKafkaBroker kraftBroker(EmbeddedKafka embedded, String[] topics) {
+		return new EmbeddedKafkaKraftBroker(embedded.count(), embedded.partitions(), topics);
+	}
+
+	private static EmbeddedKafkaBroker zkBroker(EmbeddedKafka embedded, String[] topics) {
+		return new EmbeddedKafkaZKBroker(embedded.count(), embedded.controlledShutdown(), embedded.partitions(), topics)
+				.zkPort(embedded.zookeeperPort())
+				.zkConnectionTimeout(embedded.zkConnectionTimeout())
+				.zkSessionTimeout(embedded.zkSessionTimeout());
+	}
+
+	private EmbeddedKafkaBrokerFactory() {
+	}
+
+}
diff --git a/spring-kafka-test/src/main/java/org/springframework/kafka/test/EmbeddedKafkaKraftBroker.java b/spring-kafka-test/src/main/java/org/springframework/kafka/test/EmbeddedKafkaKraftBroker.java
index c414aa2e5f..9566c3f1a2 100644
--- a/spring-kafka-test/src/main/java/org/springframework/kafka/test/EmbeddedKafkaKraftBroker.java
+++
b/spring-kafka-test/src/main/java/org/springframework/kafka/test/EmbeddedKafkaKraftBroker.java @@ -18,6 +18,7 @@ import java.io.IOException; import java.io.UncheckedIOException; +import java.lang.reflect.Method; import java.nio.file.Files; import java.time.Duration; import java.util.AbstractMap.SimpleEntry; @@ -38,6 +39,9 @@ import java.util.function.Function; import java.util.stream.Collectors; +import kafka.server.KafkaConfig; +import kafka.testkit.KafkaClusterTestKit; +import kafka.testkit.TestKitNodes; import org.apache.commons.logging.LogFactory; import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.admin.AdminClient; @@ -53,10 +57,8 @@ import org.springframework.core.log.LogAccessor; import org.springframework.util.Assert; - -import kafka.server.KafkaConfig; -import kafka.testkit.KafkaClusterTestKit; -import kafka.testkit.TestKitNodes; +import org.springframework.util.ClassUtils; +import org.springframework.util.ReflectionUtils; /** * An embedded Kafka Broker(s) using KRaft. @@ -70,6 +72,9 @@ * @author Nakul Mishra * @author Pawel Lozinski * @author Adrian Chlebosz + * @author Soby Chacko + * @author Sanghyeok An + * @author Wouter Coekaerts * * @since 3.1 */ @@ -85,6 +90,23 @@ public class EmbeddedKafkaKraftBroker implements EmbeddedKafkaBroker { public static final int DEFAULT_ADMIN_TIMEOUT = 10; + private static final boolean IS_KAFKA_39_OR_LATER = ClassUtils.isPresent( + "org.apache.kafka.server.config.AbstractKafkaConfig", EmbeddedKafkaKraftBroker.class.getClassLoader()); + + private static final Method SET_CONFIG_METHOD; + + static { + if (IS_KAFKA_39_OR_LATER) { + SET_CONFIG_METHOD = ReflectionUtils.findMethod( + KafkaClusterTestKit.Builder.class, + "setConfigProp", + String.class, Object.class); + } + else { + SET_CONFIG_METHOD = null; + } + } + private final int count; private final Set topics; @@ -174,12 +196,7 @@ public EmbeddedKafkaBroker brokerListProperty(String brokerListProperty) { return this; } - /** - * Set the timeout in seconds for admin operations (e.g. topic creation, close). - * @param adminTimeout the timeout. 
- * @return the {@link EmbeddedKafkaKraftBroker} - * @since 2.8.5 - */ + @Override public EmbeddedKafkaBroker adminTimeout(int adminTimeout) { this.adminTimeout = Duration.ofSeconds(adminTimeout); return this; @@ -204,7 +221,6 @@ public void afterPropertiesSet() { } } - private void start() { if (this.cluster != null) { return; @@ -216,7 +232,7 @@ private void start() { .setNumBrokerNodes(this.count) .setNumControllerNodes(this.count) .build()); - this.brokerProperties.forEach((k, v) -> clusterBuilder.setConfigProp((String) k, (String) v)); + this.brokerProperties.forEach((k, v) -> setConfigProperty(clusterBuilder, (String) k, v)); this.cluster = clusterBuilder.build(); } catch (Exception ex) { @@ -242,6 +258,17 @@ private void start() { System.setProperty(SPRING_EMBEDDED_KAFKA_BROKERS, getBrokersAsString()); } + private static void setConfigProperty(KafkaClusterTestKit.Builder clusterBuilder, String key, Object value) { + if (IS_KAFKA_39_OR_LATER) { + // For Kafka 3.9.0+: use reflection + ReflectionUtils.invokeMethod(SET_CONFIG_METHOD, clusterBuilder, key, value); + } + else { + // For Kafka 3.8.0: direct call + clusterBuilder.setConfigProp(key, (String) value); + } + } + @Override public void destroy() { AtomicReference shutdownFailure = new AtomicReference<>(); @@ -253,15 +280,15 @@ public void destroy() { } private void addDefaultBrokerPropsIfAbsent() { - this.brokerProperties.putIfAbsent(KafkaConfig.DeleteTopicEnableProp(), "true"); - this.brokerProperties.putIfAbsent(KafkaConfig.GroupInitialRebalanceDelayMsProp(), "0"); - this.brokerProperties.putIfAbsent(KafkaConfig.OffsetsTopicReplicationFactorProp(), "" + this.count); - this.brokerProperties.putIfAbsent(KafkaConfig.NumPartitionsProp(), "" + this.partitionsPerTopic); + this.brokerProperties.putIfAbsent("delete.topic.enable", "true"); + this.brokerProperties.putIfAbsent("group.initial.rebalance.delay.ms", "0"); + this.brokerProperties.putIfAbsent("offsets.topic.replication.factor", "" + this.count); + this.brokerProperties.putIfAbsent("num.partitions", "" + this.partitionsPerTopic); } private void logDir(Properties brokerConfigProperties) { try { - brokerConfigProperties.put(KafkaConfig.LogDirProp(), + brokerConfigProperties.put("log.dir", Files.createTempDirectory("spring.kafka." + UUID.randomUUID()).toString()); } catch (IOException e) { @@ -532,7 +559,7 @@ public void consumeFromEmbeddedTopics(Consumer consumer, boolean seekToEnd List notEmbedded = Arrays.stream(topicsToConsume) .filter(topic -> !this.topics.contains(topic)) .collect(Collectors.toList()); - if (notEmbedded.size() > 0) { + if (!notEmbedded.isEmpty()) { throw new IllegalStateException("topic(s):'" + notEmbedded + "' are not in embedded topic list"); } final AtomicReference> assigned = new AtomicReference<>(); @@ -560,6 +587,8 @@ public void onPartitionsAssigned(Collection partitions) { + (seekToEnd ? "end; " : "beginning")); if (seekToEnd) { consumer.seekToEnd(assigned.get()); + // seekToEnd is asynchronous. query the position to force the seek to happen now. 
+ assigned.get().forEach(consumer::position); } else { consumer.seekToBeginning(assigned.get()); diff --git a/spring-kafka-test/src/main/java/org/springframework/kafka/test/EmbeddedKafkaZKBroker.java b/spring-kafka-test/src/main/java/org/springframework/kafka/test/EmbeddedKafkaZKBroker.java index cd64cefb09..a12eac978b 100644 --- a/spring-kafka-test/src/main/java/org/springframework/kafka/test/EmbeddedKafkaZKBroker.java +++ b/spring-kafka-test/src/main/java/org/springframework/kafka/test/EmbeddedKafkaZKBroker.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2023 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -42,6 +42,13 @@ import java.util.function.Function; import java.util.stream.Collectors; +import kafka.cluster.EndPoint; +import kafka.server.KafkaConfig; +import kafka.server.KafkaServer; +import kafka.utils.CoreUtils; +import kafka.utils.TestUtils; +import kafka.zk.ZkFourLetterWords; +import kafka.zookeeper.ZooKeeperClient; import org.apache.commons.logging.LogFactory; import org.apache.kafka.clients.admin.AdminClient; import org.apache.kafka.clients.admin.AdminClientConfig; @@ -67,14 +74,6 @@ import org.springframework.retry.support.RetryTemplate; import org.springframework.util.Assert; -import kafka.cluster.EndPoint; -import kafka.server.KafkaConfig; -import kafka.server.KafkaServer; -import kafka.utils.CoreUtils; -import kafka.utils.TestUtils; -import kafka.zk.ZkFourLetterWords; -import kafka.zookeeper.ZooKeeperClient; - /** * An embedded Kafka Broker(s) and Zookeeper manager. * This class is intended to be used in the unit tests. @@ -87,12 +86,16 @@ * @author Nakul Mishra * @author Pawel Lozinski * @author Adrian Chlebosz + * @author Soby Chacko + * @author Sanghyeok An + * @author Borahm Lee + * @author Wouter Coekaerts * * @since 2.2 */ public class EmbeddedKafkaZKBroker implements EmbeddedKafkaBroker { - private static final LogAccessor logger = new LogAccessor(LogFactory.getLog(EmbeddedKafkaBroker.class)); // NOSONAR + private static final LogAccessor logger = new LogAccessor(LogFactory.getLog(EmbeddedKafkaZKBroker.class)); // NOSONAR public static final String SPRING_EMBEDDED_ZOOKEEPER_CONNECT = "spring.embedded.zookeeper.connect"; @@ -247,12 +250,7 @@ public void setZkPort(int zkPort) { this.zkPort = zkPort; } - /** - * Set the timeout in seconds for admin operations (e.g. topic creation, close). - * @param adminTimeout the timeout. 
- * @return the {@link EmbeddedKafkaBroker} - * @since 2.8.5 - */ + @Override public EmbeddedKafkaBroker adminTimeout(int adminTimeout) { this.adminTimeout = Duration.ofSeconds(adminTimeout); return this; @@ -302,17 +300,17 @@ public void afterPropertiesSet() { } this.zkConnect = LOOPBACK + ":" + this.zookeeper.getPort(); this.kafkaServers.clear(); - boolean userLogDir = this.brokerProperties.get(KafkaConfig.LogDirProp()) != null && this.count == 1; + boolean userLogDir = this.brokerProperties.get("log.dir") != null && this.count == 1; for (int i = 0; i < this.count; i++) { Properties brokerConfigProperties = createBrokerProperties(i); - brokerConfigProperties.setProperty(KafkaConfig.ReplicaSocketTimeoutMsProp(), "1000"); - brokerConfigProperties.setProperty(KafkaConfig.ControllerSocketTimeoutMsProp(), "1000"); - brokerConfigProperties.setProperty(KafkaConfig.OffsetsTopicReplicationFactorProp(), "1"); - brokerConfigProperties.setProperty(KafkaConfig.ReplicaHighWatermarkCheckpointIntervalMsProp(), + brokerConfigProperties.setProperty("replica.socket.timeout.ms", "1000"); + brokerConfigProperties.setProperty("controller.socket.timeout.ms", "1000"); + brokerConfigProperties.setProperty("offsets.topic.replication.factor", "1"); + brokerConfigProperties.setProperty("replica.high.watermark.checkpoint.interval.ms", String.valueOf(Long.MAX_VALUE)); this.brokerProperties.forEach(brokerConfigProperties::put); - if (!this.brokerProperties.containsKey(KafkaConfig.NumPartitionsProp())) { - brokerConfigProperties.setProperty(KafkaConfig.NumPartitionsProp(), "" + this.partitionsPerTopic); + if (!this.brokerProperties.containsKey("num.partitions")) { + brokerConfigProperties.setProperty("num.partitions", "" + this.partitionsPerTopic); } if (!userLogDir) { logDir(brokerConfigProperties); @@ -337,7 +335,7 @@ public void afterPropertiesSet() { private void logDir(Properties brokerConfigProperties) { try { - brokerConfigProperties.put(KafkaConfig.LogDirProp(), + brokerConfigProperties.put("log.dir", Files.createTempDirectory("spring.kafka." + UUID.randomUUID()).toString()); } catch (IOException e) { @@ -733,7 +731,7 @@ public void consumeFromEmbeddedTopics(Consumer consumer, boolean seekToEnd List notEmbedded = Arrays.stream(topicsToConsume) .filter(topic -> !this.topics.contains(topic)) .collect(Collectors.toList()); - if (notEmbedded.size() > 0) { + if (!notEmbedded.isEmpty()) { throw new IllegalStateException("topic(s):'" + notEmbedded + "' are not in embedded topic list"); } final AtomicReference> assigned = new AtomicReference<>(); @@ -761,6 +759,8 @@ public void onPartitionsAssigned(Collection partitions) { + (seekToEnd ? "end; " : "beginning")); if (seekToEnd) { consumer.seekToEnd(assigned.get()); + // seekToEnd is asynchronous. query the position to force the seek to happen now. + assigned.get().forEach(consumer::position); } else { consumer.seekToBeginning(assigned.get()); diff --git a/spring-kafka-test/src/main/java/org/springframework/kafka/test/assertj/KafkaConditions.java b/spring-kafka-test/src/main/java/org/springframework/kafka/test/assertj/KafkaConditions.java index cb1bbf1f55..b995f2043e 100644 --- a/spring-kafka-test/src/main/java/org/springframework/kafka/test/assertj/KafkaConditions.java +++ b/spring-kafka-test/src/main/java/org/springframework/kafka/test/assertj/KafkaConditions.java @@ -1,5 +1,5 @@ /* - * Copyright 2016-2019 the original author or authors. + * Copyright 2016-2024 the original author or authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -90,7 +90,6 @@ public static Condition> keyValue(K key, V value) { return new ConsumerRecordPartitionCondition(partition); } - public static class ConsumerRecordKeyCondition extends Condition> { private final K key; diff --git a/spring-kafka-test/src/main/java/org/springframework/kafka/test/condition/EmbeddedKafkaCondition.java b/spring-kafka-test/src/main/java/org/springframework/kafka/test/condition/EmbeddedKafkaCondition.java index 103f03668d..f436c9b7b0 100644 --- a/spring-kafka-test/src/main/java/org/springframework/kafka/test/condition/EmbeddedKafkaCondition.java +++ b/spring-kafka-test/src/main/java/org/springframework/kafka/test/condition/EmbeddedKafkaCondition.java @@ -1,5 +1,5 @@ /* - * Copyright 2019-2023 the original author or authors. + * Copyright 2019-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,14 +16,9 @@ package org.springframework.kafka.test.condition; -import java.io.IOException; -import java.io.InputStream; -import java.io.StringReader; import java.lang.reflect.AnnotatedElement; import java.util.Arrays; -import java.util.Map; import java.util.Optional; -import java.util.Properties; import org.junit.jupiter.api.extension.AfterAllCallback; import org.junit.jupiter.api.extension.ConditionEvaluationResult; @@ -37,15 +32,11 @@ import org.junit.jupiter.api.extension.ParameterResolver; import org.springframework.core.annotation.AnnotatedElementUtils; -import org.springframework.core.io.Resource; -import org.springframework.core.io.support.PathMatchingResourcePatternResolver; import org.springframework.kafka.test.EmbeddedKafkaBroker; -import org.springframework.kafka.test.EmbeddedKafkaKraftBroker; -import org.springframework.kafka.test.EmbeddedKafkaZKBroker; +import org.springframework.kafka.test.EmbeddedKafkaBrokerFactory; import org.springframework.kafka.test.context.EmbeddedKafka; import org.springframework.test.context.junit.jupiter.SpringExtension; import org.springframework.util.Assert; -import org.springframework.util.StringUtils; /** * JUnit5 condition for an embedded broker. 
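The hunk that follows replaces this condition's hand-rolled broker construction with a delegation to the new `EmbeddedKafkaBrokerFactory`. For orientation, a sketch of how such a caller can use the factory (test class name hypothetical):

[source, java]
----
import org.springframework.core.annotation.AnnotatedElementUtils;
import org.springframework.kafka.test.EmbeddedKafkaBroker;
import org.springframework.kafka.test.EmbeddedKafkaBrokerFactory;
import org.springframework.kafka.test.context.EmbeddedKafka;

class FactoryUsage {

	// Sketch: resolve @EmbeddedKafka from a test class and delegate broker creation.
	EmbeddedKafkaBroker brokerFor(Class<?> testClass) {
		EmbeddedKafka annotation =
				AnnotatedElementUtils.findMergedAnnotation(testClass, EmbeddedKafka.class);
		return EmbeddedKafkaBrokerFactory.create(annotation); // also starts the broker eagerly
	}

}
----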
@@ -117,89 +108,22 @@ public ConditionEvaluationResult evaluateExecutionCondition(ExtensionContext con private boolean springTestContext(AnnotatedElement annotatedElement) { return AnnotatedElementUtils.findAllMergedAnnotations(annotatedElement, ExtendWith.class) .stream() - .filter(extended -> Arrays.asList(extended.value()).contains(SpringExtension.class)) - .findFirst() - .isPresent(); + .map(ExtendWith::value) + .flatMap(Arrays::stream) + .anyMatch(SpringExtension.class::isAssignableFrom); } - @SuppressWarnings("unchecked") private EmbeddedKafkaBroker createBroker(EmbeddedKafka embedded) { - int[] ports = setupPorts(embedded); - EmbeddedKafkaBroker broker; - if (embedded.kraft()) { - broker = kraftBroker(embedded, ports); - } - else { - broker = zkBroker(embedded, ports); - } - Properties properties = new Properties(); - - for (String pair : embedded.brokerProperties()) { - if (!StringUtils.hasText(pair)) { - continue; - } - try { - properties.load(new StringReader(pair)); - } - catch (Exception ex) { - throw new IllegalStateException("Failed to load broker property from [" + pair + "]", - ex); - } - } - if (StringUtils.hasText(embedded.brokerPropertiesLocation())) { - Resource propertiesResource = new PathMatchingResourcePatternResolver() - .getResource(embedded.brokerPropertiesLocation()); - if (!propertiesResource.exists()) { - throw new IllegalStateException( - "Failed to load broker properties from [" + propertiesResource - + "]: resource does not exist."); - } - try (InputStream in = propertiesResource.getInputStream()) { - Properties p = new Properties(); - p.load(in); - p.forEach(properties::putIfAbsent); - } - catch (IOException ex) { - throw new IllegalStateException( - "Failed to load broker properties from [" + propertiesResource + "]", ex); - } - } - broker.brokerProperties((Map) (Map) properties); - if (StringUtils.hasText(embedded.bootstrapServersProperty())) { - broker.brokerListProperty(embedded.bootstrapServersProperty()); - } - broker.afterPropertiesSet(); - return broker; - } - - private EmbeddedKafkaBroker kraftBroker(EmbeddedKafka embedded, int[] ports) { - return new EmbeddedKafkaKraftBroker(embedded.count(), embedded.partitions(), embedded.topics()) - .kafkaPorts(ports) - .adminTimeout(embedded.adminTimeout()); - } - - private EmbeddedKafkaBroker zkBroker(EmbeddedKafka embedded, int[] ports) { - return new EmbeddedKafkaZKBroker(embedded.count(), embedded.controlledShutdown(), - embedded.partitions(), embedded.topics()) - .zkPort(embedded.zookeeperPort()) - .kafkaPorts(ports) - .zkConnectionTimeout(embedded.zkConnectionTimeout()) - .zkSessionTimeout(embedded.zkSessionTimeout()) - .adminTimeout(embedded.adminTimeout()); - } - - private int[] setupPorts(EmbeddedKafka embedded) { - int[] ports = embedded.ports(); - if (embedded.count() > 1 && ports.length == 1 && ports[0] == 0) { - ports = new int[embedded.count()]; - } - return ports; + return EmbeddedKafkaBrokerFactory.create(embedded); } private EmbeddedKafkaBroker getBrokerFromStore(ExtensionContext context) { - return getParentStore(context).get(EMBEDDED_BROKER, EmbeddedKafkaBroker.class) == null + EmbeddedKafkaBroker embeddedKafkaBrokerFromParentStore = + getParentStore(context) + .get(EMBEDDED_BROKER, EmbeddedKafkaBroker.class); + return embeddedKafkaBrokerFromParentStore == null ? 
getStore(context).get(EMBEDDED_BROKER, EmbeddedKafkaBroker.class) - : getParentStore(context).get(EMBEDDED_BROKER, EmbeddedKafkaBroker.class); + : embeddedKafkaBrokerFromParentStore; } private Store getStore(ExtensionContext context) { @@ -211,7 +135,6 @@ private Store getParentStore(ExtensionContext context) { return parent.getStore(Namespace.create(getClass(), parent)); } - public static EmbeddedKafkaBroker getBroker() { return BROKERS.get(); } diff --git a/spring-kafka-test/src/main/java/org/springframework/kafka/test/context/EmbeddedKafka.java b/spring-kafka-test/src/main/java/org/springframework/kafka/test/context/EmbeddedKafka.java index c3ee14a2fc..87b121f7b1 100644 --- a/spring-kafka-test/src/main/java/org/springframework/kafka/test/context/EmbeddedKafka.java +++ b/spring-kafka-test/src/main/java/org/springframework/kafka/test/context/EmbeddedKafka.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2023 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -56,6 +56,10 @@ * } * * + * When using EmbeddedKafka with {@link org.springframework.test.context.junit.jupiter.SpringJUnitConfig}, + * it is recommended to use {@link org.springframework.test.annotation.DirtiesContext} on the test class, + * in order to prevent certain race conditions on JVM shutdown when running multiple tests. + * * @author Artem Bilan * @author Elliot Metsger * @author Zach Olauson @@ -63,6 +67,7 @@ * @author Sergio Lourenco * @author Pawel Lozinski * @author Adrian Chlebosz + * @author Soby Chacko * * @since 1.3 * @@ -189,11 +194,11 @@ int adminTimeout() default EmbeddedKafkaBroker.DEFAULT_ADMIN_TIMEOUT; /** - * Use KRaft instead of Zookeeper; default true. + * Use KRaft instead of Zookeeper; default false. * @return whether to use KRaft. * @since 3.6 */ - boolean kraft() default true; + boolean kraft() default false; } diff --git a/spring-kafka-test/src/main/java/org/springframework/kafka/test/context/EmbeddedKafkaContextCustomizer.java b/spring-kafka-test/src/main/java/org/springframework/kafka/test/context/EmbeddedKafkaContextCustomizer.java index 8a20abd414..26b7521ca1 100644 --- a/spring-kafka-test/src/main/java/org/springframework/kafka/test/context/EmbeddedKafkaContextCustomizer.java +++ b/spring-kafka-test/src/main/java/org/springframework/kafka/test/context/EmbeddedKafkaContextCustomizer.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2023 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
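Taken together, the two `EmbeddedKafka` changes above (the `@DirtiesContext` recommendation and the `kraft` default flipping to `false`) suggest a test skeleton along these lines (class and configuration names hypothetical):

[source, java]
----
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.annotation.DirtiesContext;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;

// Sketch: opt back in to KRaft explicitly now that the default is false, and
// dirty the context to avoid race conditions on JVM shutdown across tests.
@EmbeddedKafka(kraft = true)
@SpringJUnitConfig(MyEmbeddedKafkaTests.Config.class)
@DirtiesContext
class MyEmbeddedKafkaTests {

	@Configuration
	static class Config {

	}

}
----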
@@ -16,27 +16,17 @@ package org.springframework.kafka.test.context; -import java.io.IOException; -import java.io.InputStream; -import java.io.StringReader; -import java.util.Arrays; -import java.util.Map; -import java.util.Properties; - import org.springframework.beans.factory.config.ConfigurableListableBeanFactory; import org.springframework.beans.factory.support.BeanDefinitionRegistry; import org.springframework.beans.factory.support.DefaultSingletonBeanRegistry; import org.springframework.beans.factory.support.RootBeanDefinition; import org.springframework.context.ConfigurableApplicationContext; import org.springframework.core.env.ConfigurableEnvironment; -import org.springframework.core.io.Resource; import org.springframework.kafka.test.EmbeddedKafkaBroker; -import org.springframework.kafka.test.EmbeddedKafkaKraftBroker; -import org.springframework.kafka.test.EmbeddedKafkaZKBroker; +import org.springframework.kafka.test.EmbeddedKafkaBrokerFactory; import org.springframework.test.context.ContextCustomizer; import org.springframework.test.context.MergedContextConfiguration; import org.springframework.util.Assert; -import org.springframework.util.StringUtils; /** * The {@link ContextCustomizer} implementation for the {@link EmbeddedKafkaBroker} bean registration. @@ -47,6 +37,7 @@ * @author Oleg Artyomov * @author Sergio Lourenco * @author Pawel Lozinski + * @author Seonghwan Lee * * @since 1.3 */ @@ -59,88 +50,19 @@ class EmbeddedKafkaContextCustomizer implements ContextCustomizer { } @Override - @SuppressWarnings("unchecked") public void customizeContext(ConfigurableApplicationContext context, MergedContextConfiguration mergedConfig) { ConfigurableListableBeanFactory beanFactory = context.getBeanFactory(); Assert.isInstanceOf(DefaultSingletonBeanRegistry.class, beanFactory); ConfigurableEnvironment environment = context.getEnvironment(); - String[] topics = - Arrays.stream(this.embeddedKafka.topics()) - .map(environment::resolvePlaceholders) - .toArray(String[]::new); - - int[] ports = setupPorts(); - EmbeddedKafkaBroker embeddedKafkaBroker; - if (this.embeddedKafka.kraft()) { - embeddedKafkaBroker = new EmbeddedKafkaKraftBroker(this.embeddedKafka.count(), - this.embeddedKafka.partitions(), - topics) - .kafkaPorts(ports); - } - else { - embeddedKafkaBroker = new EmbeddedKafkaZKBroker(this.embeddedKafka.count(), - this.embeddedKafka.controlledShutdown(), - this.embeddedKafka.partitions(), - topics) - .kafkaPorts(ports) - .zkPort(this.embeddedKafka.zookeeperPort()) - .zkConnectionTimeout(this.embeddedKafka.zkConnectionTimeout()) - .zkSessionTimeout(this.embeddedKafka.zkSessionTimeout()); - } - - Properties properties = new Properties(); - - for (String pair : this.embeddedKafka.brokerProperties()) { - if (!StringUtils.hasText(pair)) { - continue; - } - try { - properties.load(new StringReader(environment.resolvePlaceholders(pair))); - } - catch (Exception ex) { - throw new IllegalStateException("Failed to load broker property from [" + pair + "]", ex); - } - } - - if (StringUtils.hasText(this.embeddedKafka.brokerPropertiesLocation())) { - String propertiesLocation = environment.resolvePlaceholders(this.embeddedKafka.brokerPropertiesLocation()); - Resource propertiesResource = context.getResource(propertiesLocation); - if (!propertiesResource.exists()) { - throw new IllegalStateException( - "Failed to load broker properties from [" + propertiesResource + "]: resource does not exist."); - } - try (InputStream in = propertiesResource.getInputStream()) { - Properties p = new Properties(); - 
p.load(in); - p.forEach((key, value) -> properties.putIfAbsent(key, environment.resolvePlaceholders((String) value))); - } - catch (IOException ex) { - throw new IllegalStateException("Failed to load broker properties from [" + propertiesResource + "]", ex); - } - } - - embeddedKafkaBroker.brokerProperties((Map) (Map) properties); - if (StringUtils.hasText(this.embeddedKafka.bootstrapServersProperty())) { - embeddedKafkaBroker.brokerListProperty(this.embeddedKafka.bootstrapServersProperty()); - } - - // Safe to start an embedded broker eagerly before context refresh - embeddedKafkaBroker.afterPropertiesSet(); + EmbeddedKafkaBroker embeddedKafkaBroker = + EmbeddedKafkaBrokerFactory.create(this.embeddedKafka, environment::resolvePlaceholders); ((BeanDefinitionRegistry) beanFactory).registerBeanDefinition(EmbeddedKafkaBroker.BEAN_NAME, new RootBeanDefinition(EmbeddedKafkaBroker.class, () -> embeddedKafkaBroker)); } - private int[] setupPorts() { - int[] ports = this.embeddedKafka.ports(); - if (this.embeddedKafka.count() > 1 && ports.length == 1 && ports[0] == 0) { - ports = new int[this.embeddedKafka.count()]; - } - return ports; - } - @Override public int hashCode() { return this.embeddedKafka.hashCode(); diff --git a/spring-kafka-test/src/main/java/org/springframework/kafka/test/core/BrokerAddress.java b/spring-kafka-test/src/main/java/org/springframework/kafka/test/core/BrokerAddress.java index 1e25258c02..3560552d9a 100644 --- a/spring-kafka-test/src/main/java/org/springframework/kafka/test/core/BrokerAddress.java +++ b/spring-kafka-test/src/main/java/org/springframework/kafka/test/core/BrokerAddress.java @@ -1,5 +1,5 @@ /* - * Copyright 2015-2019 the original author or authors. + * Copyright 2015-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,10 +16,10 @@ package org.springframework.kafka.test.core; -import org.springframework.util.Assert; - import kafka.cluster.BrokerEndPoint; +import org.springframework.util.Assert; + /** * Encapsulates the address of a Kafka broker. * diff --git a/spring-kafka-test/src/main/java/org/springframework/kafka/test/utils/KafkaTestUtils.java b/spring-kafka-test/src/main/java/org/springframework/kafka/test/utils/KafkaTestUtils.java index b5b5d2c7c8..c4902dce2c 100644 --- a/spring-kafka-test/src/main/java/org/springframework/kafka/test/utils/KafkaTestUtils.java +++ b/spring-kafka-test/src/main/java/org/springframework/kafka/test/utils/KafkaTestUtils.java @@ -1,5 +1,5 @@ /* - * Copyright 2016-2022 the original author or authors. + * Copyright 2016-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -44,6 +44,7 @@
 import org.apache.kafka.common.serialization.IntegerSerializer;
 import org.apache.kafka.common.serialization.StringDeserializer;
 import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.streams.StreamsConfig;
 import org.springframework.beans.DirectFieldAccessor;
 import org.springframework.core.log.LogAccessor;
@@ -57,6 +58,7 @@
 * @author Gary Russell
 * @author Hugo Wood
 * @author Artem Bilan
+ * @author Sanghyeok An
 */
public final class KafkaTestUtils {
@@ -82,6 +84,17 @@ public static Map<String, Object> consumerProps(String group, String autoCommit,
 		return consumerProps(embeddedKafka.getBrokersAsString(), group, autoCommit);
 	}
+	/**
+	 * Set up test properties for an {@code <Integer, String>} consumer.
+	 * @param brokers the bootstrapServers property.
+	 * @param group the group id.
+	 * @return the properties.
+	 * @since 3.3
+	 */
+	public static Map<String, Object> consumerProps(String brokers, String group) {
+		return consumerProps(brokers, group, "false");
+	}
+
 	/**
 	 * Set up test properties for an {@code <Integer, String>} producer.
 	 * @param embeddedKafka a {@link EmbeddedKafkaBroker} instance.
@@ -128,6 +141,20 @@ public static Map<String, Object> producerProps(String brokers) {
 		return props;
 	}
+	/**
+	 * Set up test properties for Kafka Streams.
+	 * @param applicationId the applicationId for Kafka Streams.
+	 * @param brokers the bootstrapServers property.
+	 * @return the properties.
+	 * @since 3.3
+	 */
+	public static Map<String, Object> streamsProps(String applicationId, String brokers) {
+		Map<String, Object> props = new HashMap<>();
+		props.put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
+		props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
+		return props;
+	}
+
 	/**
 	 * Poll the consumer, expecting a single record for the specified topic.
 	 * @param consumer the consumer.
diff --git a/spring-kafka-test/src/test/java/org/springframework/kafka/test/EmbeddedKafkaKraftBrokerTests.java b/spring-kafka-test/src/test/java/org/springframework/kafka/test/EmbeddedKafkaKraftBrokerTests.java
index 86766caa2e..cc1b5e9412 100644
--- a/spring-kafka-test/src/test/java/org/springframework/kafka/test/EmbeddedKafkaKraftBrokerTests.java
+++ b/spring-kafka-test/src/test/java/org/springframework/kafka/test/EmbeddedKafkaKraftBrokerTests.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2023 the original author or authors.
+ * Copyright 2023-2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
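A usage sketch for the two `KafkaTestUtils` additions above (broker address, group id, and application id hypothetical):

[source, java]
----
import java.util.Map;

import org.springframework.kafka.test.utils.KafkaTestUtils;

class NewTestUtilsProps {

	// Sketch: the 3.3 convenience overloads.
	void buildProps() {
		// Consumer properties; auto-commit defaults to "false" with this overload.
		Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("localhost:9092", "myGroup");
		// Kafka Streams properties: application id plus bootstrap servers.
		Map<String, Object> streamsProps = KafkaTestUtils.streamsProps("myStreamsApp", "localhost:9092");
	}

}
----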
@@ -16,14 +16,21 @@
 package org.springframework.kafka.test;
-import static org.assertj.core.api.Assertions.assertThat;
+import java.util.Map;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerRecord;
 import org.junit.jupiter.api.Test;
+import org.springframework.kafka.test.utils.KafkaTestUtils;
 import org.springframework.util.StringUtils;
+import static org.assertj.core.api.Assertions.assertThat;
+
 /**
 * @author Gary Russell
+ * @author Wouter Coekaerts
 * @since 3.1
 *
 */
@@ -37,4 +44,21 @@ void testUpDown() {
 		kafka.destroy();
 	}
+	@Test
+	void testConsumeFromEmbeddedWithSeekToEnd() {
+		EmbeddedKafkaKraftBroker kafka = new EmbeddedKafkaKraftBroker(1, 1, "seekTestTopic");
+		kafka.afterPropertiesSet();
+		Map<String, Object> producerProps = KafkaTestUtils.producerProps(kafka);
+		KafkaProducer<Integer, String> producer = new KafkaProducer<>(producerProps);
+		producer.send(new ProducerRecord<>("seekTestTopic", 0, 1, "beforeSeekToEnd"));
+		Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("seekTest", "false", kafka);
+		KafkaConsumer<Integer, String> consumer = new KafkaConsumer<>(consumerProps);
+		kafka.consumeFromAnEmbeddedTopic(consumer, true /* seekToEnd */, "seekTestTopic");
+		producer.send(new ProducerRecord<>("seekTestTopic", 0, 1, "afterSeekToEnd"));
+		producer.close();
+		assertThat(KafkaTestUtils.getSingleRecord(consumer, "seekTestTopic").value())
+				.isEqualTo("afterSeekToEnd");
+		consumer.close();
+	}
+
 }
diff --git a/spring-kafka-test/src/test/java/org/springframework/kafka/test/EmbeddedKafkaZKBrokerTests.java b/spring-kafka-test/src/test/java/org/springframework/kafka/test/EmbeddedKafkaZKBrokerTests.java
index eaf16d9a1c..c01891556f 100644
--- a/spring-kafka-test/src/test/java/org/springframework/kafka/test/EmbeddedKafkaZKBrokerTests.java
+++ b/spring-kafka-test/src/test/java/org/springframework/kafka/test/EmbeddedKafkaZKBrokerTests.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2019-2023 the original author or authors.
+ * Copyright 2019-2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -16,12 +16,20 @@
 package org.springframework.kafka.test;
-import static org.assertj.core.api.Assertions.assertThat;
+import java.util.Map;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerRecord;
 import org.junit.jupiter.api.Test;
+import org.springframework.kafka.test.utils.KafkaTestUtils;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
 /**
 * @author Gary Russell
+ * @author Wouter Coekaerts
 * @since 2.3
 *
 */
@@ -42,4 +50,22 @@ void testUpDown() {
 		assertThat(System.getProperty(EmbeddedKafkaBroker.SPRING_EMBEDDED_KAFKA_BROKERS)).isNull();
 	}
+	@Test
+	void testConsumeFromEmbeddedWithSeekToEnd() {
+		EmbeddedKafkaZKBroker kafka = new EmbeddedKafkaZKBroker(1);
+		kafka.afterPropertiesSet();
+		kafka.addTopics("seekTestTopic");
+		Map<String, Object> producerProps = KafkaTestUtils.producerProps(kafka);
+		KafkaProducer<Integer, String> producer = new KafkaProducer<>(producerProps);
+		producer.send(new ProducerRecord<>("seekTestTopic", 0, 1, "beforeSeekToEnd"));
+		Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("seekTest", "false", kafka);
+		KafkaConsumer<Integer, String> consumer = new KafkaConsumer<>(consumerProps);
+		kafka.consumeFromAnEmbeddedTopic(consumer, true /* seekToEnd */, "seekTestTopic");
+		producer.send(new ProducerRecord<>("seekTestTopic", 0, 1, "afterSeekToEnd"));
+		producer.close();
+		assertThat(KafkaTestUtils.getSingleRecord(consumer, "seekTestTopic").value())
+				.isEqualTo("afterSeekToEnd");
+		consumer.close();
+	}
+
 }
diff --git a/spring-kafka-test/src/test/java/org/springframework/kafka/test/assertj/KafkaConditionsTests.java b/spring-kafka-test/src/test/java/org/springframework/kafka/test/assertj/KafkaConditionsTests.java
index dc521f22f7..e4aca5f806 100644
--- a/spring-kafka-test/src/test/java/org/springframework/kafka/test/assertj/KafkaConditionsTests.java
+++ b/spring-kafka-test/src/test/java/org/springframework/kafka/test/assertj/KafkaConditionsTests.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2019 the original author or authors.
+ * Copyright 2019-2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -16,14 +16,14 @@
 package org.springframework.kafka.test.assertj;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.junit.jupiter.api.Test;
+
 import static org.assertj.core.api.Assertions.allOf;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.springframework.kafka.test.assertj.KafkaConditions.keyValue;
 import static org.springframework.kafka.test.assertj.KafkaConditions.partition;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.junit.jupiter.api.Test;
-
 /**
 * @author Gary Russell
 * @since 2.2.12
diff --git a/spring-kafka-test/src/test/java/org/springframework/kafka/test/condition/EmbeddedKafkaConditionTests.java b/spring-kafka-test/src/test/java/org/springframework/kafka/test/condition/EmbeddedKafkaConditionTests.java
index 012d4222ba..5647227e57 100644
--- a/spring-kafka-test/src/test/java/org/springframework/kafka/test/condition/EmbeddedKafkaConditionTests.java
+++ b/spring-kafka-test/src/test/java/org/springframework/kafka/test/condition/EmbeddedKafkaConditionTests.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2019-2023 the original author or authors.
+ * Copyright 2019-2024 the original author or authors.
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ package org.springframework.kafka.test.condition; -import static org.assertj.core.api.Assertions.assertThat; +import java.time.Duration; import org.junit.jupiter.api.Test; @@ -25,15 +25,18 @@ import org.springframework.kafka.test.context.EmbeddedKafka; import org.springframework.kafka.test.utils.KafkaTestUtils; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Gary Russell * @author Michał Padula + * @author Artem Bilan * * @since 2.3 * */ @EmbeddedKafka(bootstrapServersProperty = "my.bss.property", count = 2, controlledShutdown = true, partitions = 3, - kraft = false) + adminTimeout = 67) public class EmbeddedKafkaConditionTests { @Test @@ -41,6 +44,7 @@ public void test(EmbeddedKafkaBroker broker) { assertThat(broker.getBrokersAsString()).isNotNull(); assertThat(KafkaTestUtils.getPropertyValue(broker, "brokerListProperty")).isEqualTo("my.bss.property"); assertThat(KafkaTestUtils.getPropertyValue(broker, "controlledShutdown")).isEqualTo(Boolean.TRUE); + assertThat(KafkaTestUtils.getPropertyValue(broker, "adminTimeout")).isEqualTo(Duration.ofSeconds(67)); assertThat(broker.getPartitionsPerTopic()).isEqualTo(3); } diff --git a/spring-kafka-test/src/test/java/org/springframework/kafka/test/condition/WithNestedClassContextTests.java b/spring-kafka-test/src/test/java/org/springframework/kafka/test/condition/WithNestedClassContextTests.java index 7edc485f2c..f95eb99264 100644 --- a/spring-kafka-test/src/test/java/org/springframework/kafka/test/condition/WithNestedClassContextTests.java +++ b/spring-kafka-test/src/test/java/org/springframework/kafka/test/condition/WithNestedClassContextTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,8 +16,6 @@ package org.springframework.kafka.test.condition; -import static org.assertj.core.api.Assertions.assertThat; - import java.util.List; import java.util.concurrent.atomic.AtomicInteger; @@ -28,10 +26,18 @@ import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.kafka.test.context.EmbeddedKafka; +import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; + +/** + * @author Lee Jaeheon + * @author Soby Chacko + */ @EmbeddedKafka @SpringJUnitConfig(WithNestedClassContextTests.Config.class) +@DirtiesContext class WithNestedClassContextTests { private static final AtomicInteger counter = new AtomicInteger(); @@ -56,17 +62,20 @@ void equalsSize(@Autowired List classes) { void equalsCount() { assertThat(counter.get()).isEqualTo(1); } - } - public static class TestClass { } + public static class TestClass { } + @Configuration static class Config { + @Bean public TestClass testClass() { counter.incrementAndGet(); return new TestClass(); } + } + } diff --git a/spring-kafka-test/src/test/java/org/springframework/kafka/test/condition/WithSpringTestContextTests.java b/spring-kafka-test/src/test/java/org/springframework/kafka/test/condition/WithSpringTestContextTests.java index 337af55f0b..9e19000b9c 100644 --- a/spring-kafka-test/src/test/java/org/springframework/kafka/test/condition/WithSpringTestContextTests.java +++ b/spring-kafka-test/src/test/java/org/springframework/kafka/test/condition/WithSpringTestContextTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2021-2022 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,8 +16,6 @@ package org.springframework.kafka.test.condition; -import static org.assertj.core.api.Assertions.assertThat; - import org.junit.jupiter.api.Test; import org.springframework.beans.factory.annotation.Autowired; @@ -27,6 +25,8 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Gary Russell * @since 2.7.2 diff --git a/spring-kafka-test/src/test/java/org/springframework/kafka/test/context/EmbeddedKafkaContextCustomizerTests.java b/spring-kafka-test/src/test/java/org/springframework/kafka/test/context/EmbeddedKafkaContextCustomizerTests.java index e79cdaa987..536e23f8f1 100644 --- a/spring-kafka-test/src/test/java/org/springframework/kafka/test/context/EmbeddedKafkaContextCustomizerTests.java +++ b/spring-kafka-test/src/test/java/org/springframework/kafka/test/context/EmbeddedKafkaContextCustomizerTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2023 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,7 +16,8 @@ package org.springframework.kafka.test.context; -import static org.assertj.core.api.Assertions.assertThat; +import java.time.Duration; +import java.util.Map; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -27,11 +28,15 @@ import org.springframework.kafka.test.EmbeddedKafkaBroker; import org.springframework.kafka.test.utils.KafkaTestUtils; + +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Oleg Artyomov * @author Sergio Lourenco * @author Artem Bilan * @author Gary Russell + * @author Seonghwan Lee * * @since 1.3 */ @@ -48,7 +53,6 @@ void beforeEachTest() { AnnotationUtils.findAnnotation(SecondTestWithEmbeddedKafka.class, EmbeddedKafka.class); } - @Test void testHashCode() { assertThat(new EmbeddedKafkaContextCustomizer(annotationFromFirstClass).hashCode()).isNotEqualTo(0); @@ -56,7 +60,6 @@ void testHashCode() { .isEqualTo(new EmbeddedKafkaContextCustomizer(annotationFromSecondClass).hashCode()); } - @Test void testEquals() { assertThat(new EmbeddedKafkaContextCustomizer(annotationFromFirstClass)) @@ -78,6 +81,8 @@ void testPorts() { .isEqualTo("127.0.0.1:" + annotationWithPorts.ports()[0]); assertThat(KafkaTestUtils.getPropertyValue(embeddedKafkaBroker, "brokerListProperty")) .isEqualTo("my.bss.prop"); + assertThat(KafkaTestUtils.getPropertyValue(embeddedKafkaBroker, "adminTimeout")) + .isEqualTo(Duration.ofSeconds(33)); } @Test @@ -93,6 +98,21 @@ void testMulti() { .matches("127.0.0.1:[0-9]+,127.0.0.1:[0-9]+"); } + @Test + @SuppressWarnings("unchecked") + void testTransactionReplicationFactor() { + EmbeddedKafka annotationWithPorts = + AnnotationUtils.findAnnotation(TestWithEmbeddedKafkaTransactionFactor.class, EmbeddedKafka.class); + EmbeddedKafkaContextCustomizer customizer = new EmbeddedKafkaContextCustomizer(annotationWithPorts); + ConfigurableApplicationContext context = new GenericApplicationContext(); + customizer.customizeContext(context, null); + context.refresh(); + + EmbeddedKafkaBroker embeddedKafkaBroker = context.getBean(EmbeddedKafkaBroker.class); + Map properties = (Map) KafkaTestUtils.getPropertyValue(embeddedKafkaBroker, "brokerProperties"); + + assertThat(properties.get("transaction.state.log.replication.factor")).isEqualTo("2"); + } @EmbeddedKafka(kraft = false) private static final class TestWithEmbeddedKafka { @@ -104,7 +124,7 @@ private static final class SecondTestWithEmbeddedKafka { } - @EmbeddedKafka(kraft = false, ports = 8085, bootstrapServersProperty = "my.bss.prop") + @EmbeddedKafka(kraft = false, ports = 8085, bootstrapServersProperty = "my.bss.prop", adminTimeout = 33) private static final class TestWithEmbeddedKafkaPorts { } @@ -114,4 +134,9 @@ private static final class TestWithEmbeddedKafkaMulti { } + @EmbeddedKafka(kraft = false, count = 2) + private static final class TestWithEmbeddedKafkaTransactionFactor { + + } + } diff --git a/spring-kafka-test/src/test/java/org/springframework/kafka/test/hamcrest/KafkaMatchersTests.java b/spring-kafka-test/src/test/java/org/springframework/kafka/test/hamcrest/KafkaMatchersTests.java index 37c07fd32a..d7a3a129b2 100644 --- a/spring-kafka-test/src/test/java/org/springframework/kafka/test/hamcrest/KafkaMatchersTests.java +++ b/spring-kafka-test/src/test/java/org/springframework/kafka/test/hamcrest/KafkaMatchersTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2020 the original author or authors. + * Copyright 2017-2024 the original author or authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,13 +16,6 @@ package org.springframework.kafka.test.hamcrest; -import static org.assertj.core.api.Assertions.assertThatExceptionOfType; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.springframework.kafka.test.hamcrest.KafkaMatchers.hasKey; -import static org.springframework.kafka.test.hamcrest.KafkaMatchers.hasPartition; -import static org.springframework.kafka.test.hamcrest.KafkaMatchers.hasTimestamp; -import static org.springframework.kafka.test.hamcrest.KafkaMatchers.hasValue; - import java.util.Optional; import org.apache.kafka.clients.consumer.ConsumerRecord; @@ -30,6 +23,13 @@ import org.apache.kafka.common.record.TimestampType; import org.junit.jupiter.api.Test; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.springframework.kafka.test.hamcrest.KafkaMatchers.hasKey; +import static org.springframework.kafka.test.hamcrest.KafkaMatchers.hasPartition; +import static org.springframework.kafka.test.hamcrest.KafkaMatchers.hasTimestamp; +import static org.springframework.kafka.test.hamcrest.KafkaMatchers.hasValue; + /** * @author Biju Kunjummen * diff --git a/spring-kafka-test/src/test/java/org/springframework/kafka/test/junit/GlobalEmbeddedKafkaTestExecutionListenerTests.java b/spring-kafka-test/src/test/java/org/springframework/kafka/test/junit/GlobalEmbeddedKafkaTestExecutionListenerTests.java index 4137dcbd1f..664ba13aaf 100644 --- a/spring-kafka-test/src/test/java/org/springframework/kafka/test/junit/GlobalEmbeddedKafkaTestExecutionListenerTests.java +++ b/spring-kafka-test/src/test/java/org/springframework/kafka/test/junit/GlobalEmbeddedKafkaTestExecutionListenerTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,8 +16,6 @@ package org.springframework.kafka.test.junit; -import static org.assertj.core.api.Assertions.assertThat; - import java.io.BufferedOutputStream; import java.io.File; import java.io.FileOutputStream; @@ -49,6 +47,8 @@ import org.springframework.util.DefaultPropertiesPersister; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Artem Bilan * diff --git a/spring-kafka-test/src/test/java/org/springframework/kafka/test/rule/AddressableEmbeddedBrokerTests.java b/spring-kafka-test/src/test/java/org/springframework/kafka/test/rule/AddressableEmbeddedBrokerTests.java index bb75e0715d..3842a27ab7 100644 --- a/spring-kafka-test/src/test/java/org/springframework/kafka/test/rule/AddressableEmbeddedBrokerTests.java +++ b/spring-kafka-test/src/test/java/org/springframework/kafka/test/rule/AddressableEmbeddedBrokerTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2023 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,8 +16,6 @@ package org.springframework.kafka.test.rule; -import static org.assertj.core.api.Assertions.assertThat; - import java.io.IOException; import java.net.ServerSocket; import java.util.Map; @@ -39,6 +37,8 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Gary Russell * @author Kamill Sokol diff --git a/spring-kafka-test/src/test/java/org/springframework/kafka/test/utils/KafkaTestUtilsTests.java b/spring-kafka-test/src/test/java/org/springframework/kafka/test/utils/KafkaTestUtilsTests.java index b8f12fde84..e5be343800 100644 --- a/spring-kafka-test/src/test/java/org/springframework/kafka/test/utils/KafkaTestUtilsTests.java +++ b/spring-kafka-test/src/test/java/org/springframework/kafka/test/utils/KafkaTestUtilsTests.java @@ -16,9 +16,6 @@ package org.springframework.kafka.test.utils; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatExceptionOfType; - import java.time.Duration; import java.util.List; import java.util.Map; @@ -38,6 +35,9 @@ import org.springframework.kafka.test.EmbeddedKafkaBroker; import org.springframework.kafka.test.context.EmbeddedKafka; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; + /** * @author Gary Russell * @author Artem Bilan diff --git a/spring-kafka/src/main/java/org/springframework/kafka/annotation/EnableKafka.java b/spring-kafka/src/main/java/org/springframework/kafka/annotation/EnableKafka.java index a163bc25a0..437284b638 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/annotation/EnableKafka.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/annotation/EnableKafka.java @@ -1,5 +1,5 @@ /* - * Copyright 2016-2019 the original author or authors. + * Copyright 2016-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -27,7 +27,7 @@ /** * Enable Kafka listener annotated endpoints that are created under the covers by a * {@link org.springframework.kafka.config.AbstractKafkaListenerContainerFactory - * AbstractListenerContainerFactory}. To be used on + * AbstractKafkaListenerContainerFactory}. To be used on * {@link org.springframework.context.annotation.Configuration Configuration} classes as * follows: * @@ -117,7 +117,7 @@ * *
  * @KafkaListener(containerFactory = "myKafkaListenerContainerFactory", topics = "myTopic")
- * public void process(String msg, @Header("kafka_partition") int partition) {
+ * public void process(String msg, @Header(KafkaHeaders.RECEIVED_PARTITION) int partition) {
  * 	// process incoming message
  * }
  * 
@@ -174,7 +174,7 @@ * @Override * public void configureKafkaListeners(KafkaListenerEndpointRegistrar registrar) { * registrar.setEndpointRegistry(myKafkaListenerEndpointRegistry()); - * registrar.setMessageHandlerMethodFactory(myMessageHandlerMethodFactory); + * registrar.setMessageHandlerMethodFactory(myMessageHandlerMethodFactory()); * registrar.setValidator(new MyValidator()); * } * @@ -233,6 +233,7 @@ * @author Stephane Nicoll * @author Gary Russell * @author Artem Bilan + * @author Borahm Lee * * @see KafkaListener * @see KafkaListenerAnnotationBeanPostProcessor diff --git a/spring-kafka/src/main/java/org/springframework/kafka/annotation/KafkaListenerAnnotationBeanPostProcessor.java b/spring-kafka/src/main/java/org/springframework/kafka/annotation/KafkaListenerAnnotationBeanPostProcessor.java index fcab2fb01f..dcaafc4b81 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/annotation/KafkaListenerAnnotationBeanPostProcessor.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/annotation/KafkaListenerAnnotationBeanPostProcessor.java @@ -30,11 +30,15 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; import java.util.function.BiFunction; import java.util.regex.Pattern; import java.util.stream.Stream; @@ -46,7 +50,6 @@ import org.springframework.beans.BeansException; import org.springframework.beans.factory.BeanFactory; import org.springframework.beans.factory.BeanInitializationException; -import org.springframework.beans.factory.InitializingBean; import org.springframework.beans.factory.ListableBeanFactory; import org.springframework.beans.factory.NoSuchBeanDefinitionException; import org.springframework.beans.factory.ObjectFactory; @@ -97,6 +100,7 @@ import org.springframework.kafka.retrytopic.RetryTopicConfigurer; import org.springframework.kafka.retrytopic.RetryTopicSchedulerWrapper; import org.springframework.kafka.support.TopicPartitionOffset; +import org.springframework.kafka.support.TopicPartitionOffset.SeekPosition; import org.springframework.lang.Nullable; import org.springframework.messaging.converter.GenericMessageConverter; import org.springframework.messaging.converter.SmartMessageConverter; @@ -140,6 +144,9 @@ * @author Filip Halemba * @author Tomaz Fernandes * @author Wang Zhiyang + * @author Sanghyeok An + * @author Soby Chacko + * @author Omer Celik * * @see KafkaListener * @see KafkaListenerErrorHandler @@ -151,7 +158,7 @@ * @see MethodKafkaListenerEndpoint */ public class KafkaListenerAnnotationBeanPostProcessor - implements BeanPostProcessor, Ordered, ApplicationContextAware, InitializingBean, SmartInitializingSingleton { + implements BeanPostProcessor, Ordered, ApplicationContextAware, SmartInitializingSingleton { private static final String UNCHECKED = "unchecked"; @@ -181,10 +188,13 @@ public class KafkaListenerAnnotationBeanPostProcessor private final AtomicInteger counter = new AtomicInteger(); + private final AtomicBoolean enhancerIsBuilt = new AtomicBoolean(); + private KafkaListenerEndpointRegistry endpointRegistry; private String defaultContainerFactoryBeanName = DEFAULT_KAFKA_LISTENER_CONTAINER_FACTORY_BEAN_NAME; + @Nullable private ApplicationContext applicationContext; 
private BeanFactory beanFactory; @@ -197,8 +207,11 @@ public class KafkaListenerAnnotationBeanPostProcessor private AnnotationEnhancer enhancer; + @Nullable private RetryTopicConfigurer retryTopicConfigurer; + private final Lock globalLock = new ReentrantLock(); + @Override public int getOrder() { return LOWEST_PRECEDENCE; @@ -270,12 +283,20 @@ public void setApplicationContext(ApplicationContext applicationContext) throws * {@link #setEndpointRegistry endpoint registry} has to be explicitly configured. * @param beanFactory the {@link BeanFactory} to be used. */ - public synchronized void setBeanFactory(BeanFactory beanFactory) { - this.beanFactory = beanFactory; - if (beanFactory instanceof ConfigurableListableBeanFactory clbf) { - this.resolver = clbf.getBeanExpressionResolver(); - this.expressionContext = new BeanExpressionContext((ConfigurableListableBeanFactory) beanFactory, - this.listenerScope); + public void setBeanFactory(BeanFactory beanFactory) { + try { + this.globalLock.lock(); + this.beanFactory = beanFactory; + if (beanFactory instanceof ConfigurableListableBeanFactory clbf) { + BeanExpressionResolver beanExpressionResolver = clbf.getBeanExpressionResolver(); + if (beanExpressionResolver != null) { + this.resolver = beanExpressionResolver; + } + this.expressionContext = new BeanExpressionContext(clbf, this.listenerScope); + } + } + finally { + this.globalLock.unlock(); } } @@ -290,11 +311,6 @@ public void setCharset(Charset charset) { this.charset = charset; } - @Override - public void afterPropertiesSet() throws Exception { - buildEnhancer(); - } - @Override public void afterSingletonsInstantiated() { this.registrar.setBeanFactory(this.beanFactory); @@ -333,16 +349,18 @@ public void afterSingletonsInstantiated() { // Actually register all listeners this.registrar.afterPropertiesSet(); - Map sequencers = - this.applicationContext.getBeansOfType(ContainerGroupSequencer.class, false, false); - sequencers.values().forEach(ContainerGroupSequencer::initialize); + if (this.applicationContext != null) { + Map sequencers = + this.applicationContext.getBeansOfType(ContainerGroupSequencer.class, false, false); + sequencers.values().forEach(ContainerGroupSequencer::initialize); + } } private void buildEnhancer() { - if (this.applicationContext != null) { + if (this.applicationContext != null && this.enhancerIsBuilt.compareAndSet(false, true)) { Map enhancersMap = this.applicationContext.getBeansOfType(AnnotationEnhancer.class, false, false); - if (enhancersMap.size() > 0) { + if (!enhancersMap.isEmpty()) { List enhancers = enhancersMap.values() .stream() .sorted(new OrderComparator()) @@ -352,7 +370,7 @@ private void buildEnhancer() { for (AnnotationEnhancer enh : enhancers) { newAttrs = enh.apply(newAttrs, element); } - return attrs; + return newAttrs; }; } } @@ -365,39 +383,40 @@ public Object postProcessBeforeInitialization(Object bean, String beanName) thro @Override public Object postProcessAfterInitialization(final Object bean, final String beanName) throws BeansException { + buildEnhancer(); if (!this.nonAnnotatedClasses.contains(bean.getClass())) { Class targetClass = AopUtils.getTargetClass(bean); Collection classLevelListeners = findListenerAnnotations(targetClass); - final boolean hasClassLevelListeners = !classLevelListeners.isEmpty(); - final List multiMethods = new ArrayList<>(); Map> annotatedMethods = MethodIntrospector.selectMethods(targetClass, (MethodIntrospector.MetadataLookup>) method -> { Set listenerMethods = findListenerAnnotations(method); return 
(!listenerMethods.isEmpty() ? listenerMethods : null); }); - if (hasClassLevelListeners) { - Set methodsWithHandler = MethodIntrospector.selectMethods(targetClass, - (ReflectionUtils.MethodFilter) method -> - AnnotationUtils.findAnnotation(method, KafkaHandler.class) != null); - multiMethods.addAll(methodsWithHandler); - } - if (annotatedMethods.isEmpty() && !hasClassLevelListeners) { + boolean hasClassLevelListeners = !classLevelListeners.isEmpty(); + boolean hasMethodLevelListeners = !annotatedMethods.isEmpty(); + if (!hasMethodLevelListeners && !hasClassLevelListeners) { this.nonAnnotatedClasses.add(bean.getClass()); this.logger.trace(() -> "No @KafkaListener annotations found on bean type: " + bean.getClass()); } else { - // Non-empty set of methods - for (Map.Entry> entry : annotatedMethods.entrySet()) { - Method method = entry.getKey(); - for (KafkaListener listener : entry.getValue()) { - processKafkaListener(listener, method, bean, beanName); + if (hasMethodLevelListeners) { + // Non-empty set of methods + for (Map.Entry> entry : annotatedMethods.entrySet()) { + Method method = entry.getKey(); + for (KafkaListener listener : entry.getValue()) { + processKafkaListener(listener, method, bean, beanName); + } } + this.logger.debug(() -> annotatedMethods.size() + " @KafkaListener methods processed on bean '" + + beanName + "': " + annotatedMethods); + } + if (hasClassLevelListeners) { + Set methodsWithHandler = MethodIntrospector.selectMethods(targetClass, + (ReflectionUtils.MethodFilter) method -> + AnnotationUtils.findAnnotation(method, KafkaHandler.class) != null); + List multiMethods = new ArrayList<>(methodsWithHandler); + processMultiMethodListeners(classLevelListeners, multiMethods, targetClass, bean, beanName); } - this.logger.debug(() -> annotatedMethods.size() + " @KafkaListener methods processed on bean '" - + beanName + "': " + annotatedMethods); - } - if (hasClassLevelListeners) { - processMultiMethodListeners(classLevelListeners, multiMethods, bean, beanName); } } return bean; @@ -443,73 +462,75 @@ private KafkaListener enhance(AnnotatedElement element, KafkaListener ann) { } } - private synchronized void processMultiMethodListeners(Collection classLevelListeners, - List multiMethods, Object bean, String beanName) { - - List checkedMethods = new ArrayList<>(); - Method defaultMethod = null; - for (Method method : multiMethods) { - Method checked = checkProxy(method, bean); - KafkaHandler annotation = AnnotationUtils.findAnnotation(method, KafkaHandler.class); - if (annotation != null && annotation.isDefault()) { - final Method toAssert = defaultMethod; - Assert.state(toAssert == null, () -> "Only one @KafkaHandler can be marked 'isDefault', found: " - + toAssert.toString() + " and " + method.toString()); - defaultMethod = checked; + private void processMultiMethodListeners(Collection classLevelListeners, + List multiMethods, Class clazz, Object bean, String beanName) { + + try { + this.globalLock.lock(); + List checkedMethods = new ArrayList<>(); + Method defaultMethod = null; + for (Method method : multiMethods) { + Method checked = checkProxy(method, bean); + KafkaHandler annotation = AnnotationUtils.findAnnotation(method, KafkaHandler.class); + if (annotation != null && annotation.isDefault()) { + Method toAssert = defaultMethod; + Assert.state(toAssert == null, () -> "Only one @KafkaHandler can be marked 'isDefault', found: " + + toAssert.toString() + " and " + method); + defaultMethod = checked; + } + checkedMethods.add(checked); + } + for (KafkaListener 
classLevelListener : classLevelListeners) { + MultiMethodKafkaListenerEndpoint endpoint = + new MultiMethodKafkaListenerEndpoint<>(checkedMethods, defaultMethod, bean); + processMainAndRetryListeners(classLevelListener, bean, beanName, endpoint, null, clazz); } - checkedMethods.add(checked); - } - for (KafkaListener classLevelListener : classLevelListeners) { - MultiMethodKafkaListenerEndpoint endpoint = - new MultiMethodKafkaListenerEndpoint<>(checkedMethods, defaultMethod, bean); - String beanRef = classLevelListener.beanRef(); - this.listenerScope.addListener(beanRef, bean); - endpoint.setId(getEndpointId(classLevelListener)); - processListener(endpoint, classLevelListener, bean, beanName, resolveTopics(classLevelListener), - resolveTopicPartitions(classLevelListener)); - this.listenerScope.removeListener(beanRef); + } + finally { + this.globalLock.unlock(); } } - protected synchronized void processKafkaListener(KafkaListener kafkaListener, Method method, Object bean, + protected void processKafkaListener(KafkaListener kafkaListener, Method method, Object bean, String beanName) { - Method methodToUse = checkProxy(method, bean); - MethodKafkaListenerEndpoint endpoint = new MethodKafkaListenerEndpoint<>(); - endpoint.setMethod(methodToUse); + try { + this.globalLock.lock(); + Method methodToUse = checkProxy(method, bean); + MethodKafkaListenerEndpoint endpoint = new MethodKafkaListenerEndpoint<>(); + endpoint.setMethod(methodToUse); + processMainAndRetryListeners(kafkaListener, bean, beanName, endpoint, methodToUse, null); + } + finally { + this.globalLock.unlock(); + } + } + + private void processMainAndRetryListeners(KafkaListener kafkaListener, Object bean, String beanName, + MethodKafkaListenerEndpoint endpoint, @Nullable Method methodToUse, @Nullable Class clazz) { String beanRef = kafkaListener.beanRef(); this.listenerScope.addListener(beanRef, bean); endpoint.setId(getEndpointId(kafkaListener)); String[] topics = resolveTopics(kafkaListener); TopicPartitionOffset[] tps = resolveTopicPartitions(kafkaListener); - if (!processMainAndRetryListeners(kafkaListener, bean, beanName, methodToUse, endpoint, topics, tps)) { + if (!processMainAndRetryListeners(kafkaListener, bean, beanName, endpoint, topics, tps, methodToUse, clazz)) { processListener(endpoint, kafkaListener, bean, beanName, topics, tps); } this.listenerScope.removeListener(beanRef); } private boolean processMainAndRetryListeners(KafkaListener kafkaListener, Object bean, String beanName, - Method methodToUse, MethodKafkaListenerEndpoint endpoint, String[] topics, - TopicPartitionOffset[] tps) { - - String[] retryableCandidates = topics; - if (retryableCandidates.length == 0 && tps.length > 0) { - retryableCandidates = Arrays.stream(tps) - .map(tp -> tp.getTopic()) - .distinct() - .toList() - .toArray(new String[0]); - } + MethodKafkaListenerEndpoint endpoint, String[] topics, TopicPartitionOffset[] tps, + @Nullable Method methodToUse, @Nullable Class clazz) { + String[] retryableCandidates = getTopicsFromTopicPartitionOffset(topics, tps); RetryTopicConfiguration retryTopicConfiguration = new RetryTopicConfigurationProvider(this.beanFactory, this.resolver, this.expressionContext) - .findRetryConfigurationFor(retryableCandidates, methodToUse, bean); - + .findRetryConfigurationFor(retryableCandidates, methodToUse, clazz, bean); if (retryTopicConfiguration == null) { - String[] candidates = retryableCandidates; this.logger.debug(() -> - "No retry topic configuration found for topics " + Arrays.toString(candidates)); + "No retry 
topic configuration found for topics " + Arrays.toString(retryableCandidates)); return false; } @@ -525,6 +546,18 @@ private boolean processMainAndRetryListeners(KafkaListener kafkaListener, Object return true; } + private String[] getTopicsFromTopicPartitionOffset(String[] topics, TopicPartitionOffset[] tps) { + String[] retryableCandidates = topics; + if (retryableCandidates.length == 0 && tps.length > 0) { + retryableCandidates = Arrays.stream(tps) + .map(TopicPartitionOffset::getTopic) + .distinct() + .toList() + .toArray(new String[0]); + } + return retryableCandidates; + } + private RetryTopicConfigurer getRetryTopicConfigurer() { if (this.retryTopicConfigurer == null) { try { @@ -736,11 +769,16 @@ private KafkaListenerContainerFactory resolveContainerFactory(KafkaListener k private void resolveContainerPostProcessor(MethodKafkaListenerEndpoint endpoint, KafkaListener kafkaListener) { - - final String containerPostProcessor = kafkaListener.containerPostProcessor(); - if (StringUtils.hasText(containerPostProcessor)) { - endpoint.setContainerPostProcessor(this.beanFactory.getBean(containerPostProcessor, - ContainerPostProcessor.class)); + Object containerPostProcessor = resolveExpression(kafkaListener.containerPostProcessor()); + if (containerPostProcessor instanceof ContainerPostProcessor cpp) { + endpoint.setContainerPostProcessor(cpp); + } + else { + String containerPostProcessorBeanName = resolveExpressionAsString(kafkaListener.containerPostProcessor(), "containerPostProcessor"); + if (StringUtils.hasText(containerPostProcessorBeanName)) { + endpoint.setContainerPostProcessor( + this.beanFactory.getBean(containerPostProcessorBeanName, ContainerPostProcessor.class)); + } } } @@ -804,7 +842,8 @@ private String getEndpointId(KafkaListener kafkaListener) { } } - private String getEndpointGroupId(KafkaListener kafkaListener, String id) { + @Nullable + private String getEndpointGroupId(KafkaListener kafkaListener, @Nullable String id) { String groupId = null; if (StringUtils.hasText(kafkaListener.groupId())) { groupId = resolveExpressionAsString(kafkaListener.groupId(), "groupId"); @@ -818,10 +857,8 @@ private String getEndpointGroupId(KafkaListener kafkaListener, String id) { private TopicPartitionOffset[] resolveTopicPartitions(KafkaListener kafkaListener) { TopicPartition[] topicPartitions = kafkaListener.topicPartitions(); List result = new ArrayList<>(); - if (topicPartitions.length > 0) { - for (TopicPartition topicPartition : topicPartitions) { - result.addAll(resolveTopicPartitionsList(topicPartition)); - } + for (TopicPartition topicPartition : topicPartitions) { + result.addAll(resolveTopicPartitionsList(topicPartition)); } return result.toArray(new TopicPartitionOffset[0]); } @@ -868,7 +905,7 @@ private List resolveTopicPartitionsList(TopicPartition top () -> "At least one 'partition' or 'partitionOffset' required in @TopicPartition for topic '" + topic + "'"); List result = new ArrayList<>(); for (String partition : partitions) { - resolvePartitionAsInteger((String) topic, resolveExpression(partition), result, null, false, false); + resolvePartitionAsInteger((String) topic, resolveExpression(partition), result); } if (partitionOffsets.length == 1 && resolveExpression(partitionOffsets[0].partition()).equals("*")) { result.forEach(tpo -> { @@ -881,7 +918,8 @@ private List resolveTopicPartitionsList(TopicPartition top Assert.isTrue(!partitionOffset.partition().equals("*"), () -> "Partition wildcard '*' is only allowed in a single @PartitionOffset in " + result); 
resolvePartitionAsInteger((String) topic, resolveExpression(partitionOffset.partition()), result, - resolveInitialOffset(topic, partitionOffset), isRelative(topic, partitionOffset), true); + resolveInitialOffset(topic, partitionOffset), isRelative(topic, partitionOffset), true, + resolveExpression(partitionOffset.seekPosition())); } } Assert.isTrue(!result.isEmpty(), () -> "At least one partition required for " + topic); @@ -890,11 +928,11 @@ private List resolveTopicPartitionsList(TopicPartition top private Long resolveInitialOffset(Object topic, PartitionOffset partitionOffset) { Object initialOffsetValue = resolveExpression(partitionOffset.initialOffset()); - Long initialOffset; + long initialOffset; if (initialOffsetValue instanceof String str) { Assert.state(StringUtils.hasText(str), () -> "'initialOffset' in @PartitionOffset for topic '" + topic + "' cannot be empty"); - initialOffset = Long.valueOf(str); + initialOffset = Long.parseLong(str); } else if (initialOffsetValue instanceof Long lng) { initialOffset = lng; @@ -945,20 +983,33 @@ else if (resolvedValue instanceof Iterable) { } } + private void resolvePartitionAsInteger(String topic, Object resolvedValue, List result) { + resolvePartitionAsInteger(topic, resolvedValue, result, null, false, false, null); + } + @SuppressWarnings(UNCHECKED) - private void resolvePartitionAsInteger(String topic, Object resolvedValue, - List result, @Nullable Long offset, boolean isRelative, boolean checkDups) { + private void resolvePartitionAsInteger(String topic, Object resolvedValue, List result, + @Nullable Long offset, boolean isRelative, boolean checkDups, @Nullable Object seekPosition) { if (resolvedValue instanceof String[] strArr) { for (Object object : strArr) { - resolvePartitionAsInteger(topic, object, result, offset, isRelative, checkDups); + resolvePartitionAsInteger(topic, object, result, offset, isRelative, checkDups, seekPosition); } + return; } - else if (resolvedValue instanceof String str) { + else if (resolvedValue instanceof Iterable) { + for (Object object : (Iterable) resolvedValue) { + resolvePartitionAsInteger(topic, object, result, offset, isRelative, checkDups, seekPosition); + } + return; + } + + TopicPartitionOffset.SeekPosition tpoSp = resolveTopicPartitionOffsetSeekPosition(seekPosition); + if (resolvedValue instanceof String str) { Assert.state(StringUtils.hasText(str), () -> "partition in @TopicPartition for topic '" + topic + "' cannot be empty"); List collected = parsePartitions(str) - .map(part -> new TopicPartitionOffset(topic, part, offset, isRelative)) + .map(part -> createTopicPartitionOffset(topic, part, offset, isRelative, tpoSp)) .toList(); if (checkDups) { collected.forEach(tpo -> { @@ -971,16 +1022,11 @@ else if (resolvedValue instanceof String str) { } else if (resolvedValue instanceof Integer[] intArr) { for (Integer partition : intArr) { - result.add(new TopicPartitionOffset(topic, partition)); + result.add(createTopicPartitionOffset(topic, partition, offset, isRelative, tpoSp)); } } else if (resolvedValue instanceof Integer intgr) { - result.add(new TopicPartitionOffset(topic, intgr)); - } - else if (resolvedValue instanceof Iterable) { - for (Object object : (Iterable) resolvedValue) { - resolvePartitionAsInteger(topic, object, result, offset, isRelative, checkDups); - } + result.add(createTopicPartitionOffset(topic, intgr, offset, isRelative, tpoSp)); } else { throw new IllegalArgumentException(String.format( @@ -988,6 +1034,35 @@ else if (resolvedValue instanceof Iterable) { } } + @Nullable + 
private TopicPartitionOffset.SeekPosition resolveTopicPartitionOffsetSeekPosition(@Nullable Object seekPosition) { + TopicPartitionOffset.SeekPosition resolvedTpoSp = null; + if (seekPosition instanceof String seekPositionName) { + String capitalLetterSeekPositionName = seekPositionName.trim().toUpperCase(Locale.ROOT); + if (SeekPosition.BEGINNING.name().equals(capitalLetterSeekPositionName)) { + resolvedTpoSp = SeekPosition.BEGINNING; + } + else if (SeekPosition.END.name().equals(capitalLetterSeekPositionName)) { + resolvedTpoSp = SeekPosition.END; + } + else if (SeekPosition.TIMESTAMP.name().equals(capitalLetterSeekPositionName)) { + resolvedTpoSp = SeekPosition.TIMESTAMP; + } + } + return resolvedTpoSp; + } + + private TopicPartitionOffset createTopicPartitionOffset(String topic, int partition, @Nullable Long offset, + boolean isRelative, @Nullable SeekPosition seekPosition) { + + if (seekPosition != null) { + return new TopicPartitionOffset(topic, partition, offset, seekPosition); + } + else { + return new TopicPartitionOffset(topic, partition, offset, isRelative); + } + } + private String resolveExpressionAsString(String value, String attribute) { Object resolved = resolveExpression(value); if (resolved instanceof String str) { @@ -1086,8 +1161,7 @@ private void addFormatters(FormatterRegistry registry) { private Collection getBeansOfType(Class type) { if (KafkaListenerAnnotationBeanPostProcessor.this.beanFactory instanceof ListableBeanFactory lbf) { - return lbf.getBeansOfType(type) - .values(); + return lbf.getBeansOfType(type).values(); } else { return Collections.emptySet(); @@ -1182,7 +1256,6 @@ private MessageHandlerMethodFactory createDefaultMessageHandlerMethodFactory() { private record BytesToStringConverter(Charset charset) implements Converter { - @Override public String convert(byte[] source) { return new String(source, this.charset); @@ -1241,7 +1314,7 @@ public interface AnnotationEnhancer extends BiFunction, Anno } - private final class BytesToNumberConverter implements ConditionalGenericConverter { + private static final class BytesToNumberConverter implements ConditionalGenericConverter { BytesToNumberConverter() { } @@ -1265,6 +1338,9 @@ public Set getConvertibleTypes() { @Nullable public Object convert(@Nullable Object source, TypeDescriptor sourceType, TypeDescriptor targetType) { byte[] bytes = (byte[]) source; + if (bytes == null) { + return null; + } if (targetType.getType().equals(long.class) || targetType.getType().equals(Long.class)) { Assert.state(bytes.length >= 8, "At least 8 bytes needed to convert a byte[] to a long"); // NOSONAR return ByteBuffer.wrap(bytes).getLong(); diff --git a/spring-kafka/src/main/java/org/springframework/kafka/annotation/PartitionOffset.java b/spring-kafka/src/main/java/org/springframework/kafka/annotation/PartitionOffset.java index f779f9bb28..9071d2408c 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/annotation/PartitionOffset.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/annotation/PartitionOffset.java @@ -1,5 +1,5 @@ /* - * Copyright 2016-2020 the original author or authors. + * Copyright 2016-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
@@ -20,11 +20,14 @@ import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; +import org.springframework.kafka.support.TopicPartitionOffset.SeekPosition; + /** * Used to add partition/initial offset information to a {@code KafkaListener}. * * @author Artem Bilan * @author Gary Russell + * @author Wang Zhiyang */ @Target({}) @Retention(RetentionPolicy.RUNTIME) @@ -60,4 +63,17 @@ */ String relativeToCurrent() default "false"; + /** + * Position to seek to on partition assignment. By default, seek by offset. + * Set to a {@link SeekPosition} enum name (in any capitalization) to perform a + * "special" seek. If seekPosition is 'BEGINNING' or 'END', {@code relativeToCurrent} + * and {@code initialOffset} are ignored. If seekPosition is 'TIMESTAMP', + * {@code initialOffset} is treated as a timestamp and {@code relativeToCurrent} + * is ignored. + * @return the special seek position name. + * @since 3.2 + * @see SeekPosition + */ + String seekPosition() default ""; + } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/annotation/RetryTopicConfigurationProvider.java b/spring-kafka/src/main/java/org/springframework/kafka/annotation/RetryTopicConfigurationProvider.java index 230c41c61d..1b85aacb77 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/annotation/RetryTopicConfigurationProvider.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/annotation/RetryTopicConfigurationProvider.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2023 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,8 +16,10 @@ package org.springframework.kafka.annotation; +import java.lang.reflect.AnnotatedElement; import java.lang.reflect.Method; import java.util.Map; +import java.util.Objects; import org.apache.commons.logging.LogFactory; @@ -35,7 +37,6 @@ import org.springframework.kafka.retrytopic.RetryTopicConfiguration; import org.springframework.lang.Nullable; - /** * * Attempts to provide an instance of @@ -53,6 +54,8 @@ * * @author Tomaz Fernandes * @author Gary Russell + * @author Wang Zhiyang + * * @since 2.7 * @see org.springframework.kafka.retrytopic.RetryTopicConfigurer * @see RetryableTopic @@ -96,19 +99,42 @@ public RetryTopicConfigurationProvider(@Nullable BeanFactory beanFactory, @Nulla this.resolver = resolver; this.expressionContext = expressionContext; } + @Nullable public RetryTopicConfiguration findRetryConfigurationFor(String[] topics, Method method, Object bean) { - RetryableTopic annotation = MergedAnnotations.from(method, SearchStrategy.TYPE_HIERARCHY, - RepeatableContainers.none()) - .get(RetryableTopic.class) - .synthesize(MergedAnnotation::isPresent) - .orElse(null); + return findRetryConfigurationFor(topics, method, null, bean); + } + + /** + * Find retry topic configuration. + * @param topics the retryable topic list. + * @param method the method annotated with {@code @RetryableTopic}. + * @param clazz the class annotated with {@code @RetryableTopic}. + * @param bean the bean. + * @return the retry topic configuration. + */ + @Nullable + public RetryTopicConfiguration findRetryConfigurationFor(String[] topics, @Nullable Method method, + @Nullable Class clazz, Object bean) { + + RetryableTopic annotation = getRetryableTopicAnnotationFromAnnotatedElement( + Objects.requireNonNullElse(method, clazz)); + Class declaringClass = method != null ?
method.getDeclaringClass() : clazz; return annotation != null ? new RetryableTopicAnnotationProcessor(this.beanFactory, this.resolver, this.expressionContext) .processAnnotation(topics, declaringClass, annotation, bean) : maybeGetFromContext(topics); } + @Nullable + private RetryableTopic getRetryableTopicAnnotationFromAnnotatedElement(AnnotatedElement element) { + return MergedAnnotations.from(element, SearchStrategy.TYPE_HIERARCHY, + RepeatableContainers.none()) + .get(RetryableTopic.class) + .synthesize(MergedAnnotation::isPresent) + .orElse(null); + } + @Nullable private RetryTopicConfiguration maybeGetFromContext(String[] topics) { if (this.beanFactory == null || !ListableBeanFactory.class.isAssignableFrom(this.beanFactory.getClass())) { diff --git a/spring-kafka/src/main/java/org/springframework/kafka/annotation/RetryableTopic.java b/spring-kafka/src/main/java/org/springframework/kafka/annotation/RetryableTopic.java index be371ece64..af9a6a6051 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/annotation/RetryableTopic.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/annotation/RetryableTopic.java @@ -41,11 +41,13 @@ * @author Fabio da Silva Jr. * @author João Lima * @author Adrian Chlebosz + * @author Wang Zhiyang + * * @since 2.7 * * @see org.springframework.kafka.retrytopic.RetryTopicConfigurer */ -@Target({ ElementType.METHOD, ElementType.ANNOTATION_TYPE }) +@Target({ ElementType.METHOD, ElementType.ANNOTATION_TYPE, ElementType.TYPE }) @Retention(RetentionPolicy.RUNTIME) @Documented public @interface RetryableTopic { @@ -79,7 +81,7 @@ * * The bean name of the {@link org.springframework.kafka.core.KafkaTemplate} bean that * will be used to forward the message to the retry and Dlt topics. If not specified, - * a bean with name {@code retryTopicDefaultKafkaTemplate} or {@code kafkaTemplate} + * a bean with name {@code defaultRetryTopicKafkaTemplate} or {@code kafkaTemplate} * will be looked up. * * @return the kafkaTemplate bean name. @@ -191,10 +193,12 @@ /** * Topic reuse strategy for sequential attempts made with the same backoff interval. + * Starting with 3.2, the default behavior is {@code SameIntervalTopicReuseStrategy.SINGLE_TOPIC}. + * * @return the strategy. * @since 3.0.4 */ - SameIntervalTopicReuseStrategy sameIntervalTopicReuseStrategy() default SameIntervalTopicReuseStrategy.MULTIPLE_TOPICS; + SameIntervalTopicReuseStrategy sameIntervalTopicReuseStrategy() default SameIntervalTopicReuseStrategy.SINGLE_TOPIC; /** * Whether or not to create a DLT, and to redeliver to the DLT if delivery fails, or just give up.
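With ElementType.TYPE added to the @Target above, @RetryableTopic can now be declared at the class level next to a class-level @KafkaListener, so all @KafkaHandler methods share one retry configuration and the @DltHandler is discovered on the class. A minimal sketch (topic name and attempt count are illustrative, assuming the default topic suffixes):

import org.springframework.kafka.annotation.DltHandler;
import org.springframework.kafka.annotation.KafkaHandler;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.RetryableTopic;
import org.springframework.stereotype.Component;

@Component
@RetryableTopic(attempts = "4")
@KafkaListener(topics = "orders")
public class OrderListener {

	@KafkaHandler
	public void onOrder(String order) {
		// a thrown exception here routes the record through the generated retry topics
	}

	@DltHandler
	public void onDlt(String failed) {
		// invoked once the retries are exhausted
	}

}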
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/annotation/RetryableTopicAnnotationProcessor.java b/spring-kafka/src/main/java/org/springframework/kafka/annotation/RetryableTopicAnnotationProcessor.java index b19ed2962c..8304db9809 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/annotation/RetryableTopicAnnotationProcessor.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/annotation/RetryableTopicAnnotationProcessor.java @@ -34,6 +34,7 @@ import org.springframework.beans.factory.config.ConfigurableBeanFactory; import org.springframework.context.expression.BeanFactoryResolver; import org.springframework.context.expression.StandardBeanExpressionResolver; +import org.springframework.core.MethodIntrospector; import org.springframework.core.annotation.AnnotationUtils; import org.springframework.expression.spel.support.StandardEvaluationContext; import org.springframework.kafka.core.KafkaOperations; @@ -63,6 +64,9 @@ * @author Tomaz Fernandes * @author Gary Russell * @author Adrian Chlebosz + * @author Wang Zhiyang + * @author Artem Bilan + * * @since 2.7 * */ @@ -115,6 +119,13 @@ public RetryableTopicAnnotationProcessor(@Nullable BeanFactory beanFactory, @Nul public RetryTopicConfiguration processAnnotation(String[] topics, Method method, RetryableTopic annotation, Object bean) { + Class clazz = method.getDeclaringClass(); + return processAnnotation(topics, clazz, annotation, bean); + } + + public RetryTopicConfiguration processAnnotation(String[] topics, Class clazz, RetryableTopic annotation, + Object bean) { + Long resolvedTimeout = resolveExpressionAsLong(annotation.timeout(), "timeout", false); long timeout = RetryTopicConstants.NOT_SET; if (resolvedTimeout != null) { @@ -140,7 +151,7 @@ public RetryTopicConfiguration processAnnotation(String[] topics, Method method, .customBackoff(createBackoffFromAnnotation(annotation.backoff(), this.beanFactory)) .retryTopicSuffix(resolveExpressionAsString(annotation.retryTopicSuffix(), "retryTopicSuffix")) .dltSuffix(resolveExpressionAsString(annotation.dltTopicSuffix(), "dltTopicSuffix")) - .dltHandlerMethod(getDltProcessor(method, bean)) + .dltHandlerMethod(getDltProcessor(clazz, bean)) .includeTopics(Arrays.asList(topics)) .listenerFactory(resolveExpressionAsString(annotation.listenerContainerFactory(), "listenerContainerFactory")) .autoCreateTopics(resolveExpressionAsBoolean(annotation.autoCreateTopics(), "autoCreateTopics"), @@ -218,10 +229,11 @@ private Map>> createDltRoutingSpecFromAnn .collect(Collectors.toMap(ExceptionBasedDltDestination::suffix, excBasedDestDlt -> Set.of(excBasedDestDlt.exceptions()))); } - private EndpointHandlerMethod getDltProcessor(Method listenerMethod, Object bean) { - Class declaringClass = listenerMethod.getDeclaringClass(); - return Arrays.stream(ReflectionUtils.getDeclaredMethods(declaringClass)) - .filter(method -> AnnotationUtils.findAnnotation(method, DltHandler.class) != null) + private EndpointHandlerMethod getDltProcessor(Class clazz, Object bean) { + ReflectionUtils.MethodFilter selector = + (method) -> AnnotationUtils.findAnnotation(method, DltHandler.class) != null; + return MethodIntrospector.selectMethods(clazz, selector) + .stream() .map(method -> RetryTopicConfigurer.createHandlerMethodWith(bean, method)) .findFirst() .orElse(RetryTopicConfigurer.DEFAULT_DLT_HANDLER); diff --git a/spring-kafka/src/main/java/org/springframework/kafka/aot/KafkaAvroBeanRegistrationAotProcessor.java 
b/spring-kafka/src/main/java/org/springframework/kafka/aot/KafkaAvroBeanRegistrationAotProcessor.java index 7d816a1c64..fb4b727e94 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/aot/KafkaAvroBeanRegistrationAotProcessor.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/aot/KafkaAvroBeanRegistrationAotProcessor.java @@ -41,6 +41,7 @@ * Detect and register Avro types for Apache Kafka listeners. * * @author Gary Russell + * @author Sanghyeok An * @since 3.0 * */ @@ -80,7 +81,7 @@ public BeanRegistrationAotContribution processAheadOfTime(RegisteredBean registe } }, method -> method.getName().equals("onMessage")); } - if (avroTypes.size() > 0) { + if (!avroTypes.isEmpty()) { return (generationContext, beanRegistrationCode) -> { ReflectionHints reflectionHints = generationContext.getRuntimeHints().reflection(); avroTypes.forEach(type -> reflectionHints.registerType(type, diff --git a/spring-kafka/src/main/java/org/springframework/kafka/aot/KafkaRuntimeHints.java b/spring-kafka/src/main/java/org/springframework/kafka/aot/KafkaRuntimeHints.java index abde73ff19..666a2eef76 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/aot/KafkaRuntimeHints.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/aot/KafkaRuntimeHints.java @@ -1,5 +1,5 @@ /* - * Copyright 2022-2023 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -21,33 +21,10 @@ import org.apache.kafka.clients.admin.NewTopic; import org.apache.kafka.clients.consumer.Consumer; -import org.apache.kafka.clients.consumer.CooperativeStickyAssignor; -import org.apache.kafka.clients.consumer.RangeAssignor; -import org.apache.kafka.clients.consumer.RoundRobinAssignor; -import org.apache.kafka.clients.consumer.StickyAssignor; import org.apache.kafka.clients.producer.Producer; -import org.apache.kafka.clients.producer.RoundRobinPartitioner; import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic; import org.apache.kafka.common.protocol.Message; -import org.apache.kafka.common.serialization.ByteArrayDeserializer; -import org.apache.kafka.common.serialization.ByteArraySerializer; -import org.apache.kafka.common.serialization.ByteBufferDeserializer; -import org.apache.kafka.common.serialization.ByteBufferSerializer; -import org.apache.kafka.common.serialization.BytesDeserializer; -import org.apache.kafka.common.serialization.BytesSerializer; -import org.apache.kafka.common.serialization.DoubleDeserializer; -import org.apache.kafka.common.serialization.DoubleSerializer; -import org.apache.kafka.common.serialization.FloatDeserializer; -import org.apache.kafka.common.serialization.FloatSerializer; -import org.apache.kafka.common.serialization.IntegerDeserializer; -import org.apache.kafka.common.serialization.IntegerSerializer; -import org.apache.kafka.common.serialization.ListDeserializer; -import org.apache.kafka.common.serialization.ListSerializer; -import org.apache.kafka.common.serialization.LongDeserializer; -import org.apache.kafka.common.serialization.LongSerializer; import org.apache.kafka.common.serialization.Serdes; -import org.apache.kafka.common.serialization.StringDeserializer; -import org.apache.kafka.common.serialization.StringSerializer; import org.apache.kafka.common.utils.AppInfoParser.AppInfo; import org.apache.kafka.common.utils.ImplicitLinkedHashCollection; @@ -91,6 +68,7 @@ * {@link
RuntimeHintsRegistrar} for Spring for Apache Kafka. * * @author Gary Russell + * @author Soby Chacko * @since 3.0 * */ @@ -139,34 +117,10 @@ public void registerHints(RuntimeHints hints, @Nullable ClassLoader classLoader) Stream.of( AppInfo.class, - // standard assignors - CooperativeStickyAssignor.class, - RangeAssignor.class, - RoundRobinAssignor.class, - StickyAssignor.class, // standard partitioners org.apache.kafka.clients.producer.internals.DefaultPartitioner.class, - RoundRobinPartitioner.class, org.apache.kafka.clients.producer.UniformStickyPartitioner.class, // standard serialization - ByteArrayDeserializer.class, - ByteArraySerializer.class, - ByteBufferDeserializer.class, - ByteBufferSerializer.class, - BytesDeserializer.class, - BytesSerializer.class, - DoubleSerializer.class, - DoubleDeserializer.class, - FloatSerializer.class, - FloatDeserializer.class, - IntegerSerializer.class, - IntegerDeserializer.class, - ListDeserializer.class, - ListSerializer.class, - LongSerializer.class, - LongDeserializer.class, - StringDeserializer.class, - StringSerializer.class, // Spring serialization DelegatingByTopicDeserializer.class, DelegatingByTypeSerializer.class, @@ -179,17 +133,6 @@ public void registerHints(RuntimeHints hints, @Nullable ClassLoader classLoader) StringOrBytesSerializer.class, ToStringSerializer.class, Serdes.class, - Serdes.ByteArraySerde.class, - Serdes.BytesSerde.class, - Serdes.ByteBufferSerde.class, - Serdes.DoubleSerde.class, - Serdes.FloatSerde.class, - Serdes.IntegerSerde.class, - Serdes.LongSerde.class, - Serdes.ShortSerde.class, - Serdes.StringSerde.class, - Serdes.UUIDSerde.class, - Serdes.VoidSerde.class, CRC32C.class) .forEach(type -> reflectionHints.registerType(type, builder -> builder.withMembers(MemberCategory.INVOKE_PUBLIC_CONSTRUCTORS))); @@ -199,13 +142,10 @@ public void registerHints(RuntimeHints hints, @Nullable ClassLoader classLoader) Stream.of( "sun.security.provider.ConfigFile", - "org.apache.kafka.streams.processor.internals.StreamsPartitionAssignor", - "org.apache.kafka.streams.errors.DefaultProductionExceptionHandler", - "org.apache.kafka.streams.processor.FailOnInvalidTimestamp", - "org.apache.kafka.streams.processor.internals.assignment.HighAvailabilityTaskAssignor", "org.apache.kafka.streams.processor.internals.assignment.StickyTaskAssignor", "org.apache.kafka.streams.processor.internals.assignment.FallbackPriorTaskAssignor", - "org.apache.kafka.streams.errors.LogAndFailExceptionHandler") + "org.apache.kafka.streams.state.BuiltInDslStoreSuppliers$RocksDBDslStoreSuppliers", + "org.apache.kafka.streams.state.BuiltInDslStoreSuppliers$InMemoryDslStoreSuppliers") .forEach(type -> reflectionHints.registerTypeIfPresent(classLoader, type, builder -> builder.withMembers(MemberCategory.INVOKE_PUBLIC_CONSTRUCTORS))); } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/config/AbstractKafkaListenerContainerFactory.java b/spring-kafka/src/main/java/org/springframework/kafka/config/AbstractKafkaListenerContainerFactory.java index 95f10a0fda..1b9ba2bed5 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/config/AbstractKafkaListenerContainerFactory.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/config/AbstractKafkaListenerContainerFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2014-2023 the original author or authors. + * Copyright 2014-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
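If an application still needs reflective access to one of the types dropped from KafkaRuntimeHints above (for example in a native image), it can register the hint on its own side. A sketch following the same registerTypeIfPresent pattern used in that file; the registrar class and the assignor chosen here are illustrative, not part of the framework:

import org.springframework.aot.hint.MemberCategory;
import org.springframework.aot.hint.RuntimeHints;
import org.springframework.aot.hint.RuntimeHintsRegistrar;
import org.springframework.lang.Nullable;

public class MyKafkaRuntimeHints implements RuntimeHintsRegistrar {

	@Override
	public void registerHints(RuntimeHints hints, @Nullable ClassLoader classLoader) {
		// Restore a hint the framework no longer ships, but only if the class is on the classpath.
		hints.reflection().registerTypeIfPresent(classLoader,
				"org.apache.kafka.clients.consumer.RoundRobinAssignor",
				builder -> builder.withMembers(MemberCategory.INVOKE_PUBLIC_CONSTRUCTORS));
	}

}

Such a registrar is activated with @ImportRuntimeHints(MyKafkaRuntimeHints.class) on any configuration class.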
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/config/AbstractKafkaListenerEndpoint.java b/spring-kafka/src/main/java/org/springframework/kafka/config/AbstractKafkaListenerEndpoint.java index cdc8ca944a..6cc3d0e780 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/config/AbstractKafkaListenerEndpoint.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/config/AbstractKafkaListenerEndpoint.java @@ -1,5 +1,5 @@ /* - * Copyright 2014-2023 the original author or authors. + * Copyright 2014-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -61,6 +61,9 @@ * @author Stephane Nicoll * @author Gary Russell * @author Artem Bilan + * @author Wang Zhiyang + * @author Sanghyeok An + * @author Borahm Lee * * @see MethodKafkaListenerEndpoint */ @@ -176,7 +179,7 @@ public String getId() { * @param groupId the group id. * @since 1.3 */ - public void setGroupId(String groupId) { + public void setGroupId(@Nullable String groupId) { this.groupId = groupId; } @@ -332,7 +335,7 @@ protected boolean isAckDiscarded() { } /** - * Set to true if the {@link #setRecordFilterStrategy(RecordFilterStrategy)} is in use. + * Set to true if the {@link #setRecordFilterStrategy(RecordFilterStrategy)} should ack discarded messages. * @param ackDiscarded the ackDiscarded. */ public void setAckDiscarded(boolean ackDiscarded) { @@ -535,8 +538,8 @@ private void setupMessageListener(MessageListenerContainer container, if (this.recordFilterStrategy != null) { if (isBatchListener()) { if (((MessagingMessageListenerAdapter) messageListener).isConsumerRecords()) { - this.logger.warn(() -> "Filter strategy ignored when consuming 'ConsumerRecords' instead of a List" - + (this.id != null ? " id: " + this.id : "")); + this.logger.warn(() -> "Filter strategy is ignored when consuming 'ConsumerRecords' directly instead of a List of records." + + (this.id != null ? " listenerId: " + this.id : "")); } else { messageListener = new FilteringBatchMessageListenerAdapter<>( diff --git a/spring-kafka/src/main/java/org/springframework/kafka/config/KafkaListenerEndpointRegistrar.java b/spring-kafka/src/main/java/org/springframework/kafka/config/KafkaListenerEndpointRegistrar.java index a0ac942b08..af3f6a6bbc 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/config/KafkaListenerEndpointRegistrar.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/config/KafkaListenerEndpointRegistrar.java @@ -1,5 +1,5 @@ /* - * Copyright 2014-2023 the original author or authors. + * Copyright 2014-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -20,6 +20,8 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; import org.springframework.beans.factory.BeanFactory; import org.springframework.beans.factory.BeanFactoryAware; @@ -39,6 +41,8 @@ * @author Artem Bilan * @author Gary Russell * @author Filip Halemba + * @author Wang Zhiyang + * @author Omer Celik * * @see org.springframework.kafka.annotation.KafkaListenerConfigurer */ @@ -48,6 +52,8 @@ public class KafkaListenerEndpointRegistrar implements BeanFactoryAware, Initial private List customMethodArgumentResolvers = new ArrayList<>(); + private final Lock endpointsLock = new ReentrantLock(); + private KafkaListenerEndpointRegistry endpointRegistry; private MessageHandlerMethodFactory messageHandlerMethodFactory; @@ -187,7 +193,8 @@ public void afterPropertiesSet() { } protected void registerAllEndpoints() { - synchronized (this.endpointDescriptors) { + try { + this.endpointsLock.lock(); for (KafkaListenerEndpointDescriptor descriptor : this.endpointDescriptors) { if (descriptor.endpoint instanceof MultiMethodKafkaListenerEndpoint mmkle && this.validator != null) { @@ -198,6 +205,9 @@ protected void registerAllEndpoints() { } this.startImmediately = true; // trigger immediate startup } + finally { + this.endpointsLock.unlock(); + } } private KafkaListenerContainerFactory resolveContainerFactory(KafkaListenerEndpointDescriptor descriptor) { @@ -233,7 +243,8 @@ public void registerEndpoint(KafkaListenerEndpoint endpoint, @Nullable KafkaList Assert.hasText(endpoint.getId(), "Endpoint id must be set"); // Factory may be null, we defer the resolution right before actually creating the container KafkaListenerEndpointDescriptor descriptor = new KafkaListenerEndpointDescriptor(endpoint, factory); - synchronized (this.endpointDescriptors) { + try { + this.endpointsLock.lock(); if (this.startImmediately) { // Register and start immediately this.endpointRegistry.registerListenerContainer(descriptor.endpoint, resolveContainerFactory(descriptor), true); @@ -242,6 +253,9 @@ public void registerEndpoint(KafkaListenerEndpoint endpoint, @Nullable KafkaList this.endpointDescriptors.add(descriptor); } } + finally { + this.endpointsLock.unlock(); + } } /** @@ -256,19 +270,16 @@ public void registerEndpoint(KafkaListenerEndpoint endpoint) { } - private static final class KafkaListenerEndpointDescriptor { - - private final KafkaListenerEndpoint endpoint; + private record KafkaListenerEndpointDescriptor(KafkaListenerEndpoint endpoint, + KafkaListenerContainerFactory containerFactory) { - private final KafkaListenerContainerFactory containerFactory; + private KafkaListenerEndpointDescriptor(KafkaListenerEndpoint endpoint, + @Nullable KafkaListenerContainerFactory containerFactory) { - private KafkaListenerEndpointDescriptor(KafkaListenerEndpoint endpoint, - @Nullable KafkaListenerContainerFactory containerFactory) { + this.endpoint = endpoint; + this.containerFactory = containerFactory; + } - this.endpoint = endpoint; - this.containerFactory = containerFactory; } - } - } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/config/KafkaListenerEndpointRegistry.java b/spring-kafka/src/main/java/org/springframework/kafka/config/KafkaListenerEndpointRegistry.java index 10841688cc..46072b7277 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/config/KafkaListenerEndpointRegistry.java +++ 
b/spring-kafka/src/main/java/org/springframework/kafka/config/KafkaListenerEndpointRegistry.java @@ -1,5 +1,5 @@ /* - * Copyright 2014-2023 the original author or authors. + * Copyright 2014-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,6 +25,8 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.ReentrantLock; +import java.util.function.BiPredicate; +import java.util.function.Predicate; import org.apache.commons.logging.LogFactory; @@ -44,6 +46,7 @@ import org.springframework.kafka.listener.ListenerContainerRegistry; import org.springframework.kafka.listener.MessageListenerContainer; import org.springframework.kafka.support.EndpointHandlerMethod; +import org.springframework.kafka.support.EndpointHandlerMultiMethod; import org.springframework.lang.Nullable; import org.springframework.util.Assert; import org.springframework.util.StringUtils; @@ -66,6 +69,8 @@ * @author Artem Bilan * @author Gary Russell * @author Asi Bross + * @author Wang Zhiyang + * @author Joo Hyuk Kim * * @see KafkaListenerEndpoint * @see MessageListenerContainer @@ -94,8 +99,8 @@ public class KafkaListenerEndpointRegistry implements ListenerContainerRegistry, @Override public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { - if (applicationContext instanceof ConfigurableApplicationContext) { - this.applicationContext = (ConfigurableApplicationContext) applicationContext; + if (applicationContext instanceof ConfigurableApplicationContext cac) { + this.applicationContext = cac; } } @@ -114,6 +119,47 @@ public MessageListenerContainer getListenerContainer(String id) { return this.listenerContainers.get(id); } + /** + * Return all {@link MessageListenerContainer} instances with id matching the predicate or + * empty {@link Collection} if no such container exists. + * @param idMatcher the predicate to match container id with + * @return the containers or empty {@link Collection} if no container with that id exists + * @since 3.2 + * @see #getListenerContainerIds() + * @see #getListenerContainer(String) + */ + @Override + public Collection getListenerContainersMatching(Predicate idMatcher) { + Assert.notNull(idMatcher, "'idMatcher' cannot be null"); + return this.listenerContainers.entrySet() + .stream() + .filter(entry -> idMatcher.test(entry.getKey())) + .map(Map.Entry::getValue) + .toList(); + } + + /** + * Return all {@link MessageListenerContainer} instances that satisfy the given bi-predicate. + * The {@code BiPredicate} takes the container id and the container itself as arguments. + * This allows for more sophisticated filtering, including properties or state of the container itself. 
+ * @param idAndContainerMatcher the bi-predicate to match the container id and the container + * @return the containers that match the bi-predicate criteria or an empty {@link Collection} if no matching containers exist + * @since 3.2 + * @see #getListenerContainerIds() + * @see #getListenerContainersMatching(Predicate) + */ + @Override + public Collection getListenerContainersMatching( + BiPredicate idAndContainerMatcher + ) { + Assert.notNull(idAndContainerMatcher, "'idAndContainerMatcher' cannot be null"); + return this.listenerContainers.entrySet() + .stream() + .filter(entry -> idAndContainerMatcher.test(entry.getKey(), entry.getValue())) + .map(Map.Entry::getValue) + .toList(); + } + @Override @Nullable public MessageListenerContainer getUnregisteredListenerContainer(String id) { @@ -122,7 +168,7 @@ public MessageListenerContainer getUnregisteredListenerContainer(String id) { refreshContextContainers(); return this.unregisteredContainers.get(id); } - return null; + return container; } /** @@ -170,8 +216,7 @@ public Collection getListenerContainers() { */ @Override public Collection getAllListenerContainers() { - List containers = new ArrayList<>(); - containers.addAll(getListenerContainers()); + List containers = new ArrayList<>(getListenerContainers()); refreshContextContainers(); containers.addAll(this.unregisteredContainers.values()); return containers; @@ -232,7 +277,7 @@ public void registerListenerContainer(KafkaListenerEndpoint endpoint, KafkaListe group = appContext.getBean(groupName + ".group", ContainerGroup.class); } else { - containerGroup = new ArrayList(); + containerGroup = new ArrayList<>(); appContext.getBeanFactory().registerSingleton(groupName, containerGroup); // NOSONAR - hasText group = new ContainerGroup(groupName); appContext.getBeanFactory().registerSingleton(groupName + ".group", group); @@ -274,11 +319,19 @@ public MessageListenerContainer unregisterListenerContainer(String id) { protected MessageListenerContainer createListenerContainer(KafkaListenerEndpoint endpoint, KafkaListenerContainerFactory factory) { - if (endpoint instanceof MethodKafkaListenerEndpoint) { - MethodKafkaListenerEndpoint mkle = (MethodKafkaListenerEndpoint) endpoint; + if (endpoint instanceof MultiMethodKafkaListenerEndpoint mmkle) { + Object bean = mmkle.getBean(); + if (bean instanceof EndpointHandlerMultiMethod ehmm) { + ehmm = new EndpointHandlerMultiMethod(ehmm.resolveBean(this.applicationContext), + ehmm.getDefaultMethod(), ehmm.getMethods()); + mmkle.setBean(ehmm.resolveBean(this.applicationContext)); + mmkle.setDefaultMethod(ehmm.getDefaultMethod()); + mmkle.setMethods(ehmm.getMethods()); + } + } + else if (endpoint instanceof MethodKafkaListenerEndpoint mkle) { Object bean = mkle.getBean(); - if (bean instanceof EndpointHandlerMethod) { - EndpointHandlerMethod ehm = (EndpointHandlerMethod) bean; + if (bean instanceof EndpointHandlerMethod ehm) { ehm = new EndpointHandlerMethod(ehm.resolveBean(this.applicationContext), ehm.getMethodName()); mkle.setBean(ehm.resolveBean(this.applicationContext)); mkle.setMethod(ehm.getMethod()); @@ -286,9 +339,9 @@ protected MessageListenerContainer createListenerContainer(KafkaListenerEndpoint } MessageListenerContainer listenerContainer = factory.createListenerContainer(endpoint); - if (listenerContainer instanceof InitializingBean) { + if (listenerContainer instanceof InitializingBean initializingBean) { try { - ((InitializingBean) listenerContainer).afterPropertiesSet(); + initializingBean.afterPropertiesSet(); } catch (Exception ex) { 
throw new BeanInitializationException("Failed to initialize message listener container", ex); @@ -308,7 +361,6 @@ protected MessageListenerContainer createListenerContainer(KafkaListenerEndpoint return listenerContainer; } - @Override public void destroy() { for (MessageListenerContainer listenerContainer : getListenerContainers()) { @@ -324,11 +376,6 @@ public int getPhase() { return this.phase; } - @Override - public boolean isAutoStartup() { - return true; - } - @Override public void start() { for (MessageListenerContainer listenerContainer : getListenerContainers()) { @@ -349,7 +396,7 @@ public void stop() { public void stop(Runnable callback) { this.running = false; Collection listenerContainersToStop = getListenerContainers(); - if (listenerContainersToStop.size() > 0) { + if (!listenerContainersToStop.isEmpty()) { AggregatingCallback aggregatingCallback = new AggregatingCallback(listenerContainersToStop.size(), callback); for (MessageListenerContainer listenerContainer : listenerContainersToStop) { @@ -371,7 +418,6 @@ public boolean isRunning() { return this.running; } - @Override public void onApplicationEvent(ContextRefreshedEvent event) { if (event.getApplicationContext().equals(this.applicationContext)) { @@ -391,7 +437,6 @@ private void startIfNecessary(MessageListenerContainer listenerContainer) { } } - private static final class AggregatingCallback implements Runnable { private final AtomicInteger count; diff --git a/spring-kafka/src/main/java/org/springframework/kafka/config/KafkaStreamsCustomizer.java b/spring-kafka/src/main/java/org/springframework/kafka/config/KafkaStreamsCustomizer.java index 8155883ab3..28b43c6452 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/config/KafkaStreamsCustomizer.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/config/KafkaStreamsCustomizer.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2019 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,12 +16,17 @@ package org.springframework.kafka.config; +import java.util.Properties; + +import org.apache.kafka.streams.KafkaClientSupplier; import org.apache.kafka.streams.KafkaStreams; +import org.apache.kafka.streams.Topology; /** * Callback interface that can be used to configure {@link KafkaStreams} directly. * * @author Nurettin Yilmaz + * @author Almog Gavra * * @since 2.1.5 * @@ -30,6 +35,32 @@ @FunctionalInterface public interface KafkaStreamsCustomizer { + /** + * Customize the instantiation of the {@code KafkaStreams} instance. This + * happens before the modifications made by {@link StreamsBuilderFactoryBean}. + * + * @param topology the full topology + * @param properties the configuration properties + * @param clientSupplier the client supplier + * + * @return a new instance of {@link KafkaStreams} + * + * @since 3.3.0 + */ + default KafkaStreams initKafkaStreams( + Topology topology, + Properties properties, + KafkaClientSupplier clientSupplier + ) { + return new KafkaStreams(topology, properties, clientSupplier); + } + + /** + * Customize the instance of {@code KafkaStreams} after {@link StreamsBuilderFactoryBean} + * has applied its default configurations. 
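// Editorial sketch, not part of the patch: a customizer overriding the new 3.3.0
// initKafkaStreams() hook described above; the body mirrors the default and is
// the natural place to substitute a KafkaStreams subclass or a decorated
// KafkaClientSupplier.
KafkaStreamsCustomizer customizer = new KafkaStreamsCustomizer() {

    @Override
    public KafkaStreams initKafkaStreams(Topology topology, Properties properties,
            KafkaClientSupplier clientSupplier) {
        return new KafkaStreams(topology, properties, clientSupplier);
    }

    @Override
    public void customize(KafkaStreams kafkaStreams) {
        // runs after StreamsBuilderFactoryBean has applied its defaults
    }

};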
+ * + * @param kafkaStreams the instantiated Kafka Streams instance + */ void customize(KafkaStreams kafkaStreams); } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/config/MultiMethodKafkaListenerEndpoint.java b/spring-kafka/src/main/java/org/springframework/kafka/config/MultiMethodKafkaListenerEndpoint.java index 577a90eb49..9cb90d44b4 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/config/MultiMethodKafkaListenerEndpoint.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/config/MultiMethodKafkaListenerEndpoint.java @@ -1,5 +1,5 @@ /* - * Copyright 2016-2023 the original author or authors. + * Copyright 2016-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -35,15 +35,16 @@ * @param the value type. * * @author Gary Russell + * @author Wang Zhiyang * * @see org.springframework.kafka.annotation.KafkaHandler * @see DelegatingInvocableHandler */ public class MultiMethodKafkaListenerEndpoint extends MethodKafkaListenerEndpoint { - private final List methods; + private List methods; - private final Method defaultMethod; + private Method defaultMethod; private Validator validator; @@ -60,6 +61,42 @@ public MultiMethodKafkaListenerEndpoint(List methods, @Nullable Method d setBean(bean); } + /** + * Get a method list. + * @return the method list. + * @since 3.2 + */ + public List getMethods() { + return this.methods; + } + + /** + * Set a method list. + * @param methods the methods. + * @since 3.2 + */ + public void setMethods(List methods) { + this.methods = methods; + } + + /** + * Get a default method. + * @return the default method. + * @since 3.2 + */ + public Method getDefaultMethod() { + return this.defaultMethod; + } + + /** + * Set a default method. + * @param defaultMethod the default method. + * @since 3.2 + */ + public void setDefaultMethod(Method defaultMethod) { + this.defaultMethod = defaultMethod; + } + /** * Set a payload validator. * @param validator the validator. diff --git a/spring-kafka/src/main/java/org/springframework/kafka/config/StreamsBuilderFactoryBean.java b/spring-kafka/src/main/java/org/springframework/kafka/config/StreamsBuilderFactoryBean.java index 8af071268a..19ce9e0abf 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/config/StreamsBuilderFactoryBean.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/config/StreamsBuilderFactoryBean.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2023 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -27,12 +27,15 @@ import org.apache.kafka.streams.KafkaClientSupplier; import org.apache.kafka.streams.KafkaStreams; import org.apache.kafka.streams.StreamsBuilder; +import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.Topology; +import org.apache.kafka.streams.TopologyConfig; import org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler; import org.apache.kafka.streams.processor.StateRestoreListener; import org.apache.kafka.streams.processor.internals.DefaultKafkaClientSupplier; import org.springframework.beans.factory.BeanNameAware; +import org.springframework.beans.factory.SmartInitializingSingleton; import org.springframework.beans.factory.config.AbstractFactoryBean; import org.springframework.context.SmartLifecycle; import org.springframework.core.log.LogAccessor; @@ -55,11 +58,15 @@ * @author Nurettin Yilmaz * @author Denis Washington * @author Gary Russell + * @author Julien Wittouck + * @author Sanghyeok An + * @author Cédric Schaller + * @author Almog Gavra * * @since 1.1.4 */ public class StreamsBuilderFactoryBean extends AbstractFactoryBean - implements SmartLifecycle, BeanNameAware { + implements SmartLifecycle, BeanNameAware, SmartInitializingSingleton { /** * The default {@link Duration} of {@code 10 seconds} for close timeout. @@ -86,7 +93,7 @@ public class StreamsBuilderFactoryBean extends AbstractFactoryBean { }; private KafkaStreams.StateListener stateListener; @@ -100,6 +107,8 @@ public class StreamsBuilderFactoryBean extends AbstractFactoryBean topol.describe().toString()); - this.kafkaStreams = new KafkaStreams(topol, this.properties, this.clientSupplier); + this.kafkaStreams = this.kafkaStreamsCustomizer.initKafkaStreams( + this.topology, this.properties, this.clientSupplier + ); this.kafkaStreams.setStateListener(this.stateListener); this.kafkaStreams.setGlobalStateRestoreListener(this.stateRestoreListener); if (this.streamsUncaughtExceptionHandler != null) { this.kafkaStreams.setUncaughtExceptionHandler(this.streamsUncaughtExceptionHandler); } - if (this.kafkaStreamsCustomizer != null) { - this.kafkaStreamsCustomizer.customize(this.kafkaStreams); - } + this.kafkaStreamsCustomizer.customize(this.kafkaStreams); if (this.cleanupConfig.cleanupOnStart()) { this.kafkaStreams.cleanUp(); } @@ -383,7 +397,10 @@ public void stop() { if (this.running) { try { if (this.kafkaStreams != null) { - this.kafkaStreams.close(this.closeTimeout); + this.kafkaStreams.close(new KafkaStreams.CloseOptions() + .timeout(this.closeTimeout) + .leaveGroup(this.leaveGroupOnClose) + ); if (this.cleanupConfig.cleanupOnStop()) { this.kafkaStreams.cleanUp(); } @@ -417,6 +434,29 @@ public boolean isRunning() { } } + @Override + public void afterSingletonsInstantiated() { + try { + this.topology = getObject().build(this.properties); + this.infrastructureCustomizer.configureTopology(this.topology); + LOGGER.debug(() -> this.topology.describe().toString()); + } + catch (Exception e) { + throw new RuntimeException(e); + } + } + + private StreamsBuilder createStreamBuilder() { + if (this.properties == null) { + return new StreamsBuilder(); + } + else { + StreamsConfig streamsConfig = new StreamsConfig(this.properties); + TopologyConfig topologyConfig = new TopologyConfig(streamsConfig); + return new StreamsBuilder(topologyConfig); + } + } + /** * Called whenever a {@link KafkaStreams} is added or removed. 
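// Editorial sketch, not part of the patch: stop() above now closes with
// KafkaStreams.CloseOptions, so the factory bean can leave the consumer group on
// close; the setLeaveGroupOnClose(boolean) setter is assumed from the
// leaveGroupOnClose field referenced in the hunk.
StreamsBuilderFactoryBean streamsFactoryBean = new StreamsBuilderFactoryBean(
        new KafkaStreamsConfiguration(Map.<String, Object>of(
                StreamsConfig.APPLICATION_ID_CONFIG, "streams-app",
                StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")));
streamsFactoryBean.setLeaveGroupOnClose(true);
streamsFactoryBean.setCloseTimeout(10); // seconds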
* diff --git a/spring-kafka/src/main/java/org/springframework/kafka/config/TopicBuilder.java b/spring-kafka/src/main/java/org/springframework/kafka/config/TopicBuilder.java index af6c967d22..a979d01cfc 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/config/TopicBuilder.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/config/TopicBuilder.java @@ -30,6 +30,7 @@ * {@link Optional#empty()} indicating the broker defaults will be applied. * * @author Gary Russell + * @author Sanghyeok An * @since 2.3 * */ @@ -132,7 +133,7 @@ public NewTopic build() { NewTopic topic = this.replicasAssignments == null ? new NewTopic(this.name, this.partitions, this.replicas) : new NewTopic(this.name, this.replicasAssignments); - if (this.configs.size() > 0) { + if (!this.configs.isEmpty()) { topic.configs(this.configs); } return topic; diff --git a/spring-kafka/src/main/java/org/springframework/kafka/core/DefaultKafkaConsumerFactory.java b/spring-kafka/src/main/java/org/springframework/kafka/core/DefaultKafkaConsumerFactory.java index 873e0fdb91..dd95b3ef63 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/core/DefaultKafkaConsumerFactory.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/core/DefaultKafkaConsumerFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2016-2023 the original author or authors. + * Copyright 2016-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,6 +24,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Properties; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; @@ -36,7 +37,11 @@ import org.apache.kafka.common.MetricName; import org.apache.kafka.common.serialization.Deserializer; +import org.springframework.beans.BeansException; import org.springframework.beans.factory.BeanNameAware; +import org.springframework.context.ApplicationContext; +import org.springframework.context.ApplicationContextAware; +import org.springframework.core.env.EnvironmentCapable; import org.springframework.core.log.LogAccessor; import org.springframework.lang.Nullable; import org.springframework.util.Assert; @@ -66,9 +71,13 @@ * @author Murali Reddy * @author Artem Bilan * @author Chris Gilbert + * @author Adrian Gygax + * @author Yaniv Nahoum + * @author Sanghyeok An + * @author Borahm Lee */ public class DefaultKafkaConsumerFactory extends KafkaResourceFactory - implements ConsumerFactory, BeanNameAware { + implements ConsumerFactory, BeanNameAware, ApplicationContextAware { private static final LogAccessor LOGGER = new LogAccessor(LogFactory.getLog(DefaultKafkaConsumerFactory.class)); @@ -86,6 +95,8 @@ public class DefaultKafkaConsumerFactory extends KafkaResourceFactory private boolean configureDeserializers = true; + private ApplicationContext applicationContext; + /** * Construct a factory with the provided configuration. * @param configs the configuration. 
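// Editorial sketch, not part of the patch: a typical TopicBuilder declaration;
// the build() tweak above simply skips attaching configs when none were set.
NewTopic orders = TopicBuilder.name("orders")
        .partitions(3)
        .replicas(1)
        .config(TopicConfig.RETENTION_MS_CONFIG, "604800000") // 7 days
        .build();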
@@ -159,42 +170,8 @@ public DefaultKafkaConsumerFactory(Map configs, this.configs = new ConcurrentHashMap<>(configs); this.configureDeserializers = configureDeserializers; - this.keyDeserializerSupplier = keyDeserializerSupplier(keyDeserializerSupplier); - this.valueDeserializerSupplier = valueDeserializerSupplier(valueDeserializerSupplier); - } - - private Supplier> keyDeserializerSupplier( - @Nullable Supplier> keyDeserializerSupplier) { - - if (!this.configureDeserializers) { - return keyDeserializerSupplier; - } - return keyDeserializerSupplier == null - ? () -> null - : () -> { - Deserializer deserializer = keyDeserializerSupplier.get(); - if (deserializer != null) { - deserializer.configure(this.configs, true); - } - return deserializer; - }; - } - - private Supplier> valueDeserializerSupplier( - @Nullable Supplier> valueDeserializerSupplier) { - - if (!this.configureDeserializers) { - return valueDeserializerSupplier; - } - return valueDeserializerSupplier == null - ? () -> null - : () -> { - Deserializer deserializer = valueDeserializerSupplier.get(); - if (deserializer != null) { - deserializer.configure(this.configs, false); - } - return deserializer; - }; + this.keyDeserializerSupplier = keyDeserializerSupplier; + this.valueDeserializerSupplier = valueDeserializerSupplier; } @Override @@ -209,7 +186,7 @@ public void setBeanName(String name) { * @param keyDeserializer the deserializer. */ public void setKeyDeserializer(@Nullable Deserializer keyDeserializer) { - this.keyDeserializerSupplier = keyDeserializerSupplier(() -> keyDeserializer); + this.keyDeserializerSupplier = () -> keyDeserializer; } /** @@ -219,7 +196,7 @@ public void setKeyDeserializer(@Nullable Deserializer keyDeserializer) { * @param valueDeserializer the value deserializer. */ public void setValueDeserializer(@Nullable Deserializer valueDeserializer) { - this.valueDeserializerSupplier = valueDeserializerSupplier(() -> valueDeserializer); + this.valueDeserializerSupplier = () -> valueDeserializer; } /** @@ -230,7 +207,7 @@ public void setValueDeserializer(@Nullable Deserializer valueDeserializer) { * @since 2.8 */ public void setKeyDeserializerSupplier(Supplier> keyDeserializerSupplier) { - this.keyDeserializerSupplier = keyDeserializerSupplier(keyDeserializerSupplier); + this.keyDeserializerSupplier = keyDeserializerSupplier; } /** @@ -241,13 +218,12 @@ public void setKeyDeserializerSupplier(Supplier> keyDeserializer * @since 2.8 */ public void setValueDeserializerSupplier(Supplier> valueDeserializerSupplier) { - this.valueDeserializerSupplier = valueDeserializerSupplier(valueDeserializerSupplier); + this.valueDeserializerSupplier = valueDeserializerSupplier; } - /** * Set to false (default true) to prevent programmatically provided deserializers (via - * constructor or setters) from being configured using the producer configuration, + * constructor or setters) from being configured using the consumer configuration, * e.g. if the deserializers are already fully configured. * @param configureDeserializers false to not configure. 
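// Editorial sketch, not part of the patch: with deserializer configuration now
// deferred to consumer creation (see keyDeserializer()/valueDeserializer() later
// in this file), a fully pre-configured deserializer can be handed over as-is;
// "Order" and "consumerConfigs" are hypothetical.
JsonDeserializer<Order> orderDeserializer = new JsonDeserializer<>(Order.class);
DefaultKafkaConsumerFactory<String, Order> consumerFactory =
        new DefaultKafkaConsumerFactory<>(consumerConfigs,
                new StringDeserializer(), orderDeserializer);
consumerFactory.setConfigureDeserializers(false); // already fully configured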
* @since 2.8.7 @@ -371,10 +347,26 @@ protected Consumer createKafkaConsumer(@Nullable String groupId, @Nullable if (clientIdSuffix == null) { clientIdSuffix = ""; } + + final boolean hasGroupIdOrClientIdInProperties = properties != null + && (properties.containsKey(ConsumerConfig.CLIENT_ID_CONFIG) || properties.containsKey(ConsumerConfig.GROUP_ID_CONFIG)); + final boolean hasGroupIdOrClientIdInConfig = this.configs.containsKey(ConsumerConfig.CLIENT_ID_CONFIG) + || this.configs.containsKey(ConsumerConfig.GROUP_ID_CONFIG); + if (!overrideClientIdPrefix && groupId == null && !hasGroupIdOrClientIdInProperties && !hasGroupIdOrClientIdInConfig) { + final String applicationName = Optional.ofNullable(this.applicationContext) + .map(EnvironmentCapable::getEnvironment) + .map(environment -> environment.getProperty("spring.application.name")) + .orElse(null); + if (applicationName != null) { + clientIdPrefix = applicationName + "-consumer"; + overrideClientIdPrefix = true; + } + } + boolean shouldModifyClientId = (this.configs.containsKey(ConsumerConfig.CLIENT_ID_CONFIG) && StringUtils.hasText(clientIdSuffix)) || overrideClientIdPrefix; if (groupId == null - && (properties == null || properties.stringPropertyNames().size() == 0) + && (properties == null || properties.stringPropertyNames().isEmpty()) && !shouldModifyClientId) { return createKafkaConsumer(new HashMap<>(this.configs)); } @@ -469,14 +461,41 @@ public boolean isAutoCommit() { : !(auto instanceof String) || Boolean.parseBoolean((String) auto); } + @Override + public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { + this.applicationContext = applicationContext; + } + + @Nullable + private Deserializer keyDeserializer(Map configs) { + Deserializer deserializer = + this.keyDeserializerSupplier != null + ? this.keyDeserializerSupplier.get() + : null; + if (deserializer != null && this.configureDeserializers) { + deserializer.configure(configs, true); + } + return deserializer; + } + + @Nullable + private Deserializer valueDeserializer(Map configs) { + Deserializer deserializer = + this.valueDeserializerSupplier != null + ? 
this.valueDeserializerSupplier.get() + : null; + if (deserializer != null && this.configureDeserializers) { + deserializer.configure(configs, false); + } + return deserializer; + } + protected class ExtendedKafkaConsumer extends KafkaConsumer { private String idForListeners; protected ExtendedKafkaConsumer(Map configProps) { - super(configProps, - DefaultKafkaConsumerFactory.this.keyDeserializerSupplier.get(), - DefaultKafkaConsumerFactory.this.valueDeserializerSupplier.get()); + super(configProps, keyDeserializer(configProps), valueDeserializer(configProps)); if (!DefaultKafkaConsumerFactory.this.listeners.isEmpty()) { Iterator metricIterator = metrics().keySet().iterator(); @@ -491,10 +510,19 @@ protected ExtendedKafkaConsumer(Map configProps) { } } + @Override + public void close() { + super.close(); + notifyConsumerRemoved(); + } + @Override public void close(Duration timeout) { super.close(timeout); + notifyConsumerRemoved(); + } + private void notifyConsumerRemoved() { for (Listener listener : DefaultKafkaConsumerFactory.this.listeners) { listener.consumerRemoved(this.idForListeners, this); } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/core/DefaultKafkaProducerFactory.java b/spring-kafka/src/main/java/org/springframework/kafka/core/DefaultKafkaProducerFactory.java index 86759b2bc8..17a91bc916 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/core/DefaultKafkaProducerFactory.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/core/DefaultKafkaProducerFactory.java @@ -23,6 +23,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Future; @@ -46,6 +47,7 @@ import org.apache.kafka.common.MetricName; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.OutOfOrderSequenceException; import org.apache.kafka.common.errors.ProducerFencedException; import org.apache.kafka.common.errors.TimeoutException; @@ -59,6 +61,7 @@ import org.springframework.context.ApplicationListener; import org.springframework.context.SmartLifecycle; import org.springframework.context.event.ContextStoppedEvent; +import org.springframework.core.env.EnvironmentCapable; import org.springframework.core.log.LogAccessor; import org.springframework.kafka.KafkaException; import org.springframework.lang.Nullable; @@ -110,6 +113,8 @@ * @author Artem Bilan * @author Chris Gilbert * @author Thomas Strauß + * @author Adrian Gygax + * @author Soby Chacko */ public class DefaultKafkaProducerFactory extends KafkaResourceFactory implements ProducerFactory, ApplicationContextAware, @@ -560,7 +565,6 @@ public ProducerFactory copyWithConfigurationOverride(Map o return newFactory; } - /** * Ensures that the returned properties map contains a transaction id prefix. 
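// Editorial sketch, not part of the patch: the fallback added to
// createKafkaConsumer() above (and mirrored for producers in the
// getProducerConfigs() hunk below) derives the client id from
// spring.application.name when no client.id or group.id is configured anywhere.
// Assuming spring.application.name=orders-service:
DefaultKafkaConsumerFactory<String, String> factory =
        new DefaultKafkaConsumerFactory<>(consumerConfigs); // consumerConfigs is hypothetical
factory.setApplicationContext(applicationContext); // invoked by Spring for managed beans
// factory.createConsumer() now resolves a client.id of the form "orders-service-consumer..."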
* The {@link org.springframework.kafka.core.DefaultKafkaProducerFactory} @@ -981,9 +985,22 @@ public void closeThreadBoundProducer() { protected Map getProducerConfigs() { final Map newProducerConfigs = new HashMap<>(this.configs); checkBootstrap(newProducerConfigs); + + final String prefix; if (this.clientIdPrefix != null) { + prefix = this.clientIdPrefix; + } + else { + prefix = Optional.ofNullable(this.applicationContext) + .map(EnvironmentCapable::getEnvironment) + .map(environment -> environment.getProperty("spring.application.name")) + .map(applicationName -> applicationName + "-producer") + .orElse(null); + } + + if (prefix != null) { newProducerConfigs.put(ProducerConfig.CLIENT_ID_CONFIG, - this.clientIdPrefix + "-" + this.clientIdCounter.incrementAndGet()); + prefix + "-" + this.clientIdCounter.incrementAndGet()); } return newProducerConfigs; } @@ -1112,6 +1129,11 @@ public List partitionsFor(String topic) { return this.delegate.metrics(); } + @Override + public Uuid clientInstanceId(Duration timeout) { + return this.delegate.clientInstanceId(timeout); + } + @Override public void initTransactions() { this.delegate.initTransactions(); diff --git a/spring-kafka/src/main/java/org/springframework/kafka/core/KafkaAdmin.java b/spring-kafka/src/main/java/org/springframework/kafka/core/KafkaAdmin.java index 0ffe28735d..2c661fef87 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/core/KafkaAdmin.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/core/KafkaAdmin.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2023 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -32,11 +32,15 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; import java.util.function.Predicate; import java.util.stream.Collectors; import org.apache.commons.logging.LogFactory; +import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.clients.admin.AdminClient; +import org.apache.kafka.clients.admin.AdminClientConfig; import org.apache.kafka.clients.admin.AlterConfigOp; import org.apache.kafka.clients.admin.AlterConfigOp.OpType; import org.apache.kafka.clients.admin.AlterConfigsResult; @@ -59,6 +63,7 @@ import org.springframework.beans.factory.SmartInitializingSingleton; import org.springframework.context.ApplicationContext; import org.springframework.context.ApplicationContextAware; +import org.springframework.core.env.EnvironmentCapable; import org.springframework.core.log.LogAccessor; import org.springframework.kafka.KafkaException; import org.springframework.kafka.support.TopicForRetryable; @@ -66,11 +71,16 @@ import org.springframework.util.Assert; /** - * An admin that delegates to an {@link AdminClient} to create topics defined + * An admin that delegates to an {@link Admin} to create topics defined * in the application context. 
* * @author Gary Russell * @author Artem Bilan + * @author Adrian Gygax + * @author Sanghyeok An + * @author Valentina Armenise + * @author Anders Swanson + * @author Omer Celik * * @since 1.3 */ @@ -86,6 +96,10 @@ public class KafkaAdmin extends KafkaResourceFactory private static final LogAccessor LOGGER = new LogAccessor(LogFactory.getLog(KafkaAdmin.class)); + private static final AtomicInteger CLIENT_ID_COUNTER = new AtomicInteger(); + + private final Lock clusterIdLock = new ReentrantLock(); + private final Map configs; private ApplicationContext applicationContext; @@ -107,9 +121,9 @@ public class KafkaAdmin extends KafkaResourceFactory private String clusterId; /** - * Create an instance with an {@link AdminClient} based on the supplied + * Create an instance with an {@link Admin} based on the supplied * configuration. - * @param config the configuration for the {@link AdminClient}. + * @param config the configuration for the {@link Admin}. */ public KafkaAdmin(Map config) { this.configs = new HashMap<>(config); @@ -208,6 +222,15 @@ public void setClusterId(String clusterId) { this.clusterId = clusterId; } + /** + * Get the clusterId property. + * @return the cluster id. + * @since 3.1.8 + */ + public String getClusterId() { + return this.clusterId; + } + @Override public Map getConfigurationProperties() { Map configs2 = new HashMap<>(this.configs); @@ -234,8 +257,8 @@ public void afterSingletonsInstantiated() { */ public final boolean initialize() { Collection newTopics = newTopics(); - if (newTopics.size() > 0) { - AdminClient adminClient = null; + if (!newTopics.isEmpty()) { + Admin adminClient = null; try { adminClient = createAdmin(); } @@ -249,12 +272,7 @@ public final boolean initialize() { } if (adminClient != null) { try { - synchronized (this) { - if (this.clusterId != null) { - this.clusterId = adminClient.describeCluster().clusterId().get(this.operationTimeout, - TimeUnit.SECONDS); - } - } + updateClusterId(adminClient); addOrModifyTopicsIfNeeded(adminClient, newTopics); return true; } @@ -279,6 +297,19 @@ public final boolean initialize() { return false; } + private void updateClusterId(Admin adminClient) throws InterruptedException, ExecutionException, TimeoutException { + try { + this.clusterIdLock.lock(); + if (this.clusterId != null) { + this.clusterId = adminClient.describeCluster().clusterId().get(this.operationTimeout, + TimeUnit.SECONDS); + } + } + finally { + this.clusterIdLock.unlock(); + } + } + /** * Return a collection of {@link NewTopic}s to create or modify. The default * implementation retrieves all {@link NewTopic} beans in the application context and @@ -331,7 +362,7 @@ protected Collection newTopics() { @Nullable public String clusterId() { if (this.clusterId == null) { - try (AdminClient client = createAdmin()) { + try (Admin client = createAdmin()) { this.clusterId = client.describeCluster().clusterId().get(this.operationTimeout, TimeUnit.SECONDS); if (this.clusterId == null) { this.clusterId = "null"; @@ -349,14 +380,14 @@ public String clusterId() { @Override public void createOrModifyTopics(NewTopic... topics) { - try (AdminClient client = createAdmin()) { + try (Admin client = createAdmin()) { addOrModifyTopicsIfNeeded(client, Arrays.asList(topics)); } } @Override public Map describeTopics(String... 
topicNames) { - try (AdminClient admin = createAdmin()) { + try (Admin admin = createAdmin()) { Map results = new HashMap<>(); DescribeTopicsResult topics = admin.describeTopics(Arrays.asList(topicNames)); try { @@ -373,14 +404,34 @@ public Map describeTopics(String... topicNames) { } } - AdminClient createAdmin() { - Map configs2 = new HashMap<>(this.configs); + /** + * Creates a new {@link Admin} client instance using the {@link AdminClient} class. + * @return the new {@link Admin} client instance. + * @since 3.3.0 + * @see AdminClient#create(Map) + */ + protected Admin createAdmin() { + return AdminClient.create(getAdminConfig()); + } + + protected Map getAdminConfig() { + final Map configs2 = new HashMap<>(this.configs); checkBootstrap(configs2); - return AdminClient.create(configs2); + + if (!configs2.containsKey(AdminClientConfig.CLIENT_ID_CONFIG)) { + Optional.ofNullable(this.applicationContext) + .map(EnvironmentCapable::getEnvironment) + .map(environment -> environment.getProperty("spring.application.name")) + .ifPresent(applicationName -> configs2.put( + AdminClientConfig.CLIENT_ID_CONFIG, + applicationName + "-admin-" + CLIENT_ID_COUNTER.getAndIncrement()) + ); + } + return configs2; } - private void addOrModifyTopicsIfNeeded(AdminClient adminClient, Collection topics) { - if (topics.size() > 0) { + private void addOrModifyTopicsIfNeeded(Admin adminClient, Collection topics) { + if (!topics.isEmpty()) { Map topicNameToTopic = new HashMap<>(); topics.forEach(t -> topicNameToTopic.compute(t.name(), (k, v) -> t)); DescribeTopicsResult topicInfo = adminClient @@ -390,10 +441,10 @@ private void addOrModifyTopicsIfNeeded(AdminClient adminClient, Collection topicsToAdd = new ArrayList<>(); Map topicsWithPartitionMismatches = checkPartitions(topicNameToTopic, topicInfo, topicsToAdd); - if (topicsToAdd.size() > 0) { + if (!topicsToAdd.isEmpty()) { addTopics(adminClient, topicsToAdd); } - if (topicsWithPartitionMismatches.size() > 0) { + if (!topicsWithPartitionMismatches.isEmpty()) { createMissingPartitions(adminClient, topicsWithPartitionMismatches); } if (this.modifyTopicConfigs) { @@ -409,7 +460,7 @@ private void addOrModifyTopicsIfNeeded(AdminClient adminClient, Collection> checkTopicsForConfigMismatches( - AdminClient adminClient, Collection topics) { + Admin adminClient, Collection topics) { List configResources = topics.stream() .map(topic -> new ConfigResource(Type.TOPIC, topic.name())) @@ -438,7 +489,7 @@ private Map> checkTopicsForConfigMismatches( configMismatchesEntries.add(actualConfigParameter); } } - if (configMismatchesEntries.size() > 0) { + if (!configMismatchesEntries.isEmpty()) { configMismatches.put(topicConfig.getKey(), configMismatchesEntries); } } @@ -454,7 +505,7 @@ private Map> checkTopicsForConfigMismatches( } } - private void adjustConfigMismatches(AdminClient adminClient, Collection topics, + private void adjustConfigMismatches(Admin adminClient, Collection topics, Map> mismatchingConfigs) { for (Map.Entry> mismatchingConfigsOfTopic : mismatchingConfigs.entrySet()) { ConfigResource topicConfigResource = mismatchingConfigsOfTopic.getKey(); @@ -472,7 +523,7 @@ private void adjustConfigMismatches(AdminClient adminClient, Collection 0) { + if (!alterConfigOperations.isEmpty()) { try { AlterConfigsResult alterConfigsResult = adminClient .incrementalAlterConfigs(Map.of(topicConfigResource, alterConfigOperations)); @@ -526,7 +577,7 @@ else if (topic.numPartitions() > topicDescription.partitions().size()) { return topicsToModify; } - private void 
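// Editorial sketch, not part of the patch: createAdmin() is protected as of
// 3.3.0, so a subclass can decorate or replace the Admin client;
// AuditingAdmin is a hypothetical wrapper implementing Admin.
public class AuditingKafkaAdmin extends KafkaAdmin {

    public AuditingKafkaAdmin(Map<String, Object> config) {
        super(config);
    }

    @Override
    protected Admin createAdmin() {
        return new AuditingAdmin(AdminClient.create(getAdminConfig()));
    }

}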
addTopics(AdminClient adminClient, List topicsToAdd) { + private void addTopics(Admin adminClient, List topicsToAdd) { CreateTopicsResult topicResults = adminClient.createTopics(topicsToAdd); try { topicResults.all().get(this.operationTimeout, TimeUnit.SECONDS); @@ -549,7 +600,7 @@ private void addTopics(AdminClient adminClient, List topicsToAdd) { } } - private void createMissingPartitions(AdminClient adminClient, Map topicsToModify) { + private void createMissingPartitions(Admin adminClient, Map topicsToModify) { CreatePartitionsResult partitionsResult = adminClient.createPartitions(topicsToModify); try { partitionsResult.all().get(this.operationTimeout, TimeUnit.SECONDS); diff --git a/spring-kafka/src/main/java/org/springframework/kafka/core/KafkaMetricsSupport.java b/spring-kafka/src/main/java/org/springframework/kafka/core/KafkaMetricsSupport.java new file mode 100644 index 0000000000..ea01cee167 --- /dev/null +++ b/spring-kafka/src/main/java/org/springframework/kafka/core/KafkaMetricsSupport.java @@ -0,0 +1,213 @@ +/* + * Copyright 2024 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.kafka.core; + +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +import io.micrometer.core.instrument.ImmutableTag; +import io.micrometer.core.instrument.MeterRegistry; +import io.micrometer.core.instrument.Tag; +import io.micrometer.core.instrument.binder.MeterBinder; +import io.micrometer.core.instrument.binder.kafka.KafkaClientMetrics; +import org.apache.kafka.clients.admin.AdminClient; +import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.producer.Producer; + +import org.springframework.lang.Nullable; +import org.springframework.scheduling.TaskScheduler; +import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler; +import org.springframework.util.Assert; +import org.springframework.util.ReflectionUtils; + +/** + * An abstract class to manage {@link KafkaClientMetrics}. + * + * @param the Kafka Client type. + * + * @author Artem Bilan + * + * @since 3.3 + * + * @see KafkaClientMetrics + */ +public abstract class KafkaMetricsSupport { + + protected final MeterRegistry meterRegistry; + + protected final List tags; + + @Nullable + protected final ScheduledExecutorService scheduler; + + private final Map metrics = new HashMap<>(); + + /** + * Construct an instance with the provided registry. + * @param meterRegistry the registry. + */ + protected KafkaMetricsSupport(MeterRegistry meterRegistry) { + this(meterRegistry, Collections.emptyList()); + } + + /** + * Construct an instance with the provided {@link MeterRegistry} and {@link TaskScheduler}. 
+ * @param meterRegistry the registry. + * @param taskScheduler the task scheduler. + */ + protected KafkaMetricsSupport(MeterRegistry meterRegistry, TaskScheduler taskScheduler) { + this(meterRegistry, Collections.emptyList(), taskScheduler); + } + + /** + * Construct an instance with the provided {@link MeterRegistry} and tags. + * @param meterRegistry the registry. + * @param tags the tags. + */ + protected KafkaMetricsSupport(MeterRegistry meterRegistry, List tags) { + Assert.notNull(meterRegistry, "The 'meterRegistry' cannot be null"); + this.meterRegistry = meterRegistry; + this.tags = tags; + this.scheduler = null; + } + + /** + * Construct an instance with the provided {@link MeterRegistry}, tags and {@link TaskScheduler}. + * @param meterRegistry the registry. + * @param tags the tags. + * @param taskScheduler the task scheduler. + */ + protected KafkaMetricsSupport(MeterRegistry meterRegistry, List tags, TaskScheduler taskScheduler) { + Assert.notNull(meterRegistry, "The 'meterRegistry' cannot be null"); + Assert.notNull(taskScheduler, "The 'taskScheduler' cannot be null"); + this.meterRegistry = meterRegistry; + this.tags = tags; + this.scheduler = obtainScheduledExecutorService(taskScheduler); + } + + /** + * Bind metrics for the Apache Kafka client with provided id. + * @param id the unique identifier for the client to manage in store. + * @param client the Kafka client instance to bind. + */ + protected final void bindClient(String id, C client) { + if (!this.metrics.containsKey(id)) { + List clientTags = new ArrayList<>(this.tags); + clientTags.add(new ImmutableTag("spring.id", id)); + this.metrics.put(id, createClientMetrics(client, clientTags)); + this.metrics.get(id).bindTo(this.meterRegistry); + } + } + + /** + * Create a {@code io.micrometer.core.instrument.binder.kafka.KafkaMetrics} instance + * for the provided Kafka client and metric tags. + * By default, this factory is aware of {@link Consumer}, {@link Producer} and {@link AdminClient} types. + * For other use-case this method can be overridden. + * @param client the client to create a {@code io.micrometer.core.instrument.binder.kafka.KafkaMetrics} instance for. + * @param tags the tags for the {@code io.micrometer.core.instrument.binder.kafka.KafkaMetrics}. + * @return the {@code io.micrometer.core.instrument.binder.kafka.KafkaMetrics}. + */ + protected MeterBinder createClientMetrics(C client, List tags) { + if (client instanceof Consumer consumer) { + return createConsumerMetrics(consumer, tags); + } + else if (client instanceof Producer producer) { + return createProducerMetrics(producer, tags); + } + else if (client instanceof AdminClient admin) { + return createAdminMetrics(admin, tags); + } + + throw new IllegalArgumentException("Unsupported client type: " + client.getClass()); + } + + private KafkaClientMetrics createConsumerMetrics(Consumer consumer, List tags) { + return this.scheduler != null + ? new KafkaClientMetrics(consumer, tags, this.scheduler) + : new KafkaClientMetrics(consumer, tags); + } + + private KafkaClientMetrics createProducerMetrics(Producer producer, List tags) { + return this.scheduler != null + ? new KafkaClientMetrics(producer, tags, this.scheduler) + : new KafkaClientMetrics(producer, tags); + } + + private KafkaClientMetrics createAdminMetrics(AdminClient adminClient, List tags) { + return this.scheduler != null + ? 
new KafkaClientMetrics(adminClient, tags, this.scheduler) + : new KafkaClientMetrics(adminClient, tags); + } + + /** + * Unbind a {@code io.micrometer.core.instrument.binder.kafka.KafkaMetrics} for the provided Kafka client. + * @param id the unique identifier for the client to manage in store. + * @param client the Kafka client instance to unbind. + */ + protected final void unbindClient(String id, C client) { + AutoCloseable removed = (AutoCloseable) this.metrics.remove(id); + if (removed != null) { + try { + removed.close(); + } + catch (Exception ex) { + ReflectionUtils.rethrowRuntimeException(ex); + } + } + } + + private static ScheduledExecutorService obtainScheduledExecutorService(TaskScheduler taskScheduler) { + if (taskScheduler instanceof ThreadPoolTaskScheduler threadPoolTaskScheduler) { + return threadPoolTaskScheduler.getScheduledExecutor(); + } + + return new ScheduledExecutorServiceAdapter(taskScheduler); + } + + private static final class ScheduledExecutorServiceAdapter extends ScheduledThreadPoolExecutor { + + private final TaskScheduler delegate; + + private ScheduledExecutorServiceAdapter(TaskScheduler delegate) { + super(0); + this.delegate = delegate; + } + + @Override + public ScheduledFuture scheduleAtFixedRate(Runnable command, + long initialDelay, + long period, + TimeUnit unit) { + + return this.delegate.scheduleAtFixedRate(command, + Instant.now().plus(initialDelay, unit.toChronoUnit()), + Duration.of(period, unit.toChronoUnit())); + } + + } + +} diff --git a/spring-kafka/src/main/java/org/springframework/kafka/core/KafkaTemplate.java b/spring-kafka/src/main/java/org/springframework/kafka/core/KafkaTemplate.java index 9fe9e75574..14caa930db 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/core/KafkaTemplate.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/core/KafkaTemplate.java @@ -33,6 +33,8 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.function.Function; +import io.micrometer.observation.Observation; +import io.micrometer.observation.ObservationRegistry; import org.apache.commons.logging.LogFactory; import org.apache.kafka.clients.admin.AdminClientConfig; import org.apache.kafka.clients.consumer.Consumer; @@ -80,9 +82,7 @@ import org.springframework.transaction.support.TransactionSynchronizationManager; import org.springframework.util.Assert; import org.springframework.util.CollectionUtils; - -import io.micrometer.observation.Observation; -import io.micrometer.observation.ObservationRegistry; +import org.springframework.util.StringUtils; /** * A template for executing high-level operations. When used with a @@ -102,6 +102,8 @@ * @author Thomas Strauß * @author Soby Chacko * @author Gurps Bassi + * @author Valentina Armenise + * @author Christian Fredriksson */ public class KafkaTemplate implements KafkaOperations, ApplicationContextAware, BeanNameAware, ApplicationListener, DisposableBean, SmartInitializingSingleton { @@ -308,7 +310,6 @@ public void setMessagingConverter(SmartMessageConverter messageConverter) { ((MessagingMessageConverter) this.messageConverter).setMessagingConverter(messageConverter); } - @Override public boolean isTransactional() { return this.transactional; @@ -456,6 +457,16 @@ public void setObservationConvention(KafkaTemplateObservationConvention observat this.observationConvention = observationConvention; } + /** + * Configure the {@link ObservationRegistry} to use for recording observations. + * @param observationRegistry the observation registry to use. 
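// Editorial sketch, not part of the patch: supplying the registry directly via
// the new 3.3.1 setter instead of relying on the bean-provider lookup in
// afterSingletonsInstantiated(); "producerFactory" and "observationRegistry"
// are assumed to exist.
KafkaTemplate<String, String> template = new KafkaTemplate<>(producerFactory);
template.setObservationEnabled(true);
template.setObservationRegistry(observationRegistry);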
+ * @since 3.3.1 + */ + public void setObservationRegistry(ObservationRegistry observationRegistry) { + Assert.notNull(observationRegistry, "'observationRegistry' must not be null"); + this.observationRegistry = observationRegistry; + } + /** * Return the {@link KafkaAdmin}, used to find the cluster id for observation, if * present. @@ -479,20 +490,27 @@ public void setKafkaAdmin(KafkaAdmin kafkaAdmin) { @Override public void afterSingletonsInstantiated() { if (this.observationEnabled && this.applicationContext != null) { - this.observationRegistry = this.applicationContext.getBeanProvider(ObservationRegistry.class) - .getIfUnique(() -> this.observationRegistry); + if (this.observationRegistry.isNoop()) { + this.observationRegistry = this.applicationContext.getBeanProvider(ObservationRegistry.class) + .getIfUnique(() -> this.observationRegistry); + } if (this.kafkaAdmin == null) { this.kafkaAdmin = this.applicationContext.getBeanProvider(KafkaAdmin.class).getIfUnique(); if (this.kafkaAdmin != null) { - Object producerServers = this.producerFactory.getConfigurationProperties() - .get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG); - String adminServers = this.kafkaAdmin.getBootstrapServers(); + String producerServers = this.producerFactory.getConfigurationProperties() + .get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG).toString(); + producerServers = removeLeadingAndTrailingBrackets(producerServers); + String adminServers = getAdminBootstrapAddress(); if (!producerServers.equals(adminServers)) { Map props = new HashMap<>(this.kafkaAdmin.getConfigurationProperties()); props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, producerServers); int opTo = this.kafkaAdmin.getOperationTimeout(); + String clusterId = this.kafkaAdmin.getClusterId(); this.kafkaAdmin = new KafkaAdmin(props); this.kafkaAdmin.setOperationTimeout(opTo); + if (clusterId != null && !clusterId.isEmpty()) { + this.kafkaAdmin.setClusterId(clusterId); + } } } } @@ -502,6 +520,19 @@ else if (this.micrometerEnabled) { } } + private String getAdminBootstrapAddress() { + // Retrieve bootstrap servers from KafkaAdmin bootstrap supplier if available + String adminServers = this.kafkaAdmin.getBootstrapServers(); + // Fallback to configuration properties if bootstrap servers are not set + if (adminServers == null) { + adminServers = this.kafkaAdmin.getConfigurationProperties().getOrDefault( + AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, + "" + ).toString(); + } + return removeLeadingAndTrailingBrackets(adminServers); + } + @Nullable private String clusterId() { if (this.kafkaAdmin != null && this.clusterId == null) { @@ -590,7 +621,6 @@ public CompletableFuture> send(Message message) { return observeSend((ProducerRecord) producerRecord); } - @Override public List partitionsFor(String topic) { Producer producer = getTheProducer(); @@ -763,6 +793,7 @@ protected void closeProducer(Producer producer, boolean inTx) { } } + @SuppressWarnings("try") private CompletableFuture> observeSend(final ProducerRecord producerRecord) { Observation observation = KafkaTemplateObservation.TEMPLATE_OBSERVATION.observation( this.observationConvention, DefaultKafkaTemplateObservationConvention.INSTANCE, @@ -770,7 +801,9 @@ private CompletableFuture> observeSend(final ProducerRecord interceptorProducerRecord(ProducerRecord prod return producerRecord; } + @SuppressWarnings("try") private Callback buildCallback(final ProducerRecord producerRecord, final Producer producer, final CompletableFuture> future, @Nullable Object sample, Observation observation) { @@ -839,36 
+873,32 @@ private Callback buildCallback(final ProducerRecord producerRecord, final } } catch (Exception e) { - KafkaTemplate.this.logger.warn(e, () -> "Error executing interceptor onAcknowledgement callback"); + this.logger.warn(e, () -> "Error executing interceptor onAcknowledgement callback"); } - try { + try (Observation.Scope ignored = observation.openScope()) { if (exception == null) { successTimer(sample, producerRecord); - observation.stop(); future.complete(new SendResult<>(producerRecord, metadata)); - if (KafkaTemplate.this.producerListener != null) { - KafkaTemplate.this.producerListener.onSuccess(producerRecord, metadata); + if (this.producerListener != null) { + this.producerListener.onSuccess(producerRecord, metadata); } - KafkaTemplate.this.logger.trace(() -> "Sent ok: " + KafkaUtils.format(producerRecord) + this.logger.trace(() -> "Sent ok: " + KafkaUtils.format(producerRecord) + ", metadata: " + metadata); } else { failureTimer(sample, exception, producerRecord); observation.error(exception); - observation.stop(); future.completeExceptionally( new KafkaProducerException(producerRecord, "Failed to send", exception)); - if (KafkaTemplate.this.producerListener != null) { - KafkaTemplate.this.producerListener.onError(producerRecord, metadata, exception); + if (this.producerListener != null) { + this.producerListener.onError(producerRecord, metadata, exception); } - KafkaTemplate.this.logger.debug(exception, () -> "Failed to send: " - + KafkaUtils.format(producerRecord)); + this.logger.debug(exception, () -> "Failed to send: " + KafkaUtils.format(producerRecord)); } } finally { - if (!KafkaTemplate.this.transactional) { - closeProducer(producer, false); - } + observation.stop(); + closeProducer(producer, this.transactional); } }; } @@ -985,6 +1015,10 @@ public void destroy() { } } + private static String removeLeadingAndTrailingBrackets(String str) { + return StringUtils.trimTrailingCharacter(StringUtils.trimLeadingCharacter(str, '['), ']'); + } + @SuppressWarnings("serial") private static final class SkipAbortException extends RuntimeException { diff --git a/spring-kafka/src/main/java/org/springframework/kafka/core/MicrometerConsumerListener.java b/spring-kafka/src/main/java/org/springframework/kafka/core/MicrometerConsumerListener.java index bfbf4ea874..7a70eaa0c2 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/core/MicrometerConsumerListener.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/core/MicrometerConsumerListener.java @@ -1,5 +1,5 @@ /* - * Copyright 2020-2022 the original author or authors. + * Copyright 2020-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,18 +16,15 @@ package org.springframework.kafka.core; -import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import java.util.List; -import java.util.Map; -import org.apache.kafka.clients.consumer.Consumer; - -import io.micrometer.core.instrument.ImmutableTag; import io.micrometer.core.instrument.MeterRegistry; import io.micrometer.core.instrument.Tag; import io.micrometer.core.instrument.binder.kafka.KafkaClientMetrics; +import org.apache.kafka.clients.consumer.Consumer; + +import org.springframework.scheduling.TaskScheduler; /** * A consumer factory listener that manages {@link KafkaClientMetrics}. @@ -36,16 +33,12 @@ * @param the value type. 
* * @author Gary Russell - * @since 2.5 + * @author Artem Bilan * + * @since 2.5 */ -public class MicrometerConsumerListener implements ConsumerFactory.Listener { - - private final MeterRegistry meterRegistry; - - private final List tags; - - private final Map metrics = new HashMap<>(); +public class MicrometerConsumerListener extends KafkaMetricsSupport> + implements ConsumerFactory.Listener { /** * Construct an instance with the provided registry. @@ -55,32 +48,44 @@ public MicrometerConsumerListener(MeterRegistry meterRegistry) { this(meterRegistry, Collections.emptyList()); } + /** + * Construct an instance with the provided registry and task scheduler. + * @param meterRegistry the registry. + * @param taskScheduler the task scheduler. + * @since 3.3 + */ + public MicrometerConsumerListener(MeterRegistry meterRegistry, TaskScheduler taskScheduler) { + this(meterRegistry, Collections.emptyList(), taskScheduler); + } + /** * Construct an instance with the provided registry and tags. * @param meterRegistry the registry. * @param tags the tags. */ public MicrometerConsumerListener(MeterRegistry meterRegistry, List tags) { - this.meterRegistry = meterRegistry; - this.tags = tags; + super(meterRegistry, tags); + } + + /** + * Construct an instance with the provided registry, tags and task scheduler. + * @param meterRegistry the registry. + * @param tags the tags. + * @param taskScheduler the task scheduler. + * @since 3.3 + */ + public MicrometerConsumerListener(MeterRegistry meterRegistry, List tags, TaskScheduler taskScheduler) { + super(meterRegistry, tags, taskScheduler); } @Override public synchronized void consumerAdded(String id, Consumer consumer) { - if (!this.metrics.containsKey(id)) { - List consumerTags = new ArrayList<>(this.tags); - consumerTags.add(new ImmutableTag("spring.id", id)); - this.metrics.put(id, new KafkaClientMetrics(consumer, consumerTags)); - this.metrics.get(id).bindTo(this.meterRegistry); - } + bindClient(id, consumer); } @Override public synchronized void consumerRemoved(String id, Consumer consumer) { - KafkaClientMetrics removed = this.metrics.remove(id); - if (removed != null) { - removed.close(); - } + unbindClient(id, consumer); } } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/core/MicrometerProducerListener.java b/spring-kafka/src/main/java/org/springframework/kafka/core/MicrometerProducerListener.java index 9a45845753..85c54ffd30 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/core/MicrometerProducerListener.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/core/MicrometerProducerListener.java @@ -1,5 +1,5 @@ /* - * Copyright 2020-2022 the original author or authors. + * Copyright 2020-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,18 +16,15 @@ package org.springframework.kafka.core; -import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import java.util.List; -import java.util.Map; -import org.apache.kafka.clients.producer.Producer; - -import io.micrometer.core.instrument.ImmutableTag; import io.micrometer.core.instrument.MeterRegistry; import io.micrometer.core.instrument.Tag; import io.micrometer.core.instrument.binder.kafka.KafkaClientMetrics; +import org.apache.kafka.clients.producer.Producer; + +import org.springframework.scheduling.TaskScheduler; /** * A producer factory listener that manages {@link KafkaClientMetrics}. 
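// Editorial sketch, not part of the patch: the new 3.3 constructors accept a
// TaskScheduler, which KafkaMetricsSupport passes through to KafkaClientMetrics
// for scheduled metric refresh; "meterRegistry" and the factories are assumed.
ThreadPoolTaskScheduler metricsScheduler = new ThreadPoolTaskScheduler();
metricsScheduler.initialize();
consumerFactory.addListener(new MicrometerConsumerListener<>(meterRegistry, metricsScheduler));
producerFactory.addListener(new MicrometerProducerListener<>(meterRegistry, metricsScheduler));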
@@ -36,16 +33,12 @@ * @param the value type. * * @author Gary Russell - * @since 2.5 + * @author Artem Bilan * + * @since 2.5 */ -public class MicrometerProducerListener implements ProducerFactory.Listener { - - private final MeterRegistry meterRegistry; - - private final List tags; - - private final Map metrics = new HashMap<>(); +public class MicrometerProducerListener extends KafkaMetricsSupport> + implements ProducerFactory.Listener { /** * Construct an instance with the provided registry. @@ -55,33 +48,44 @@ public MicrometerProducerListener(MeterRegistry meterRegistry) { this(meterRegistry, Collections.emptyList()); } + /** + * Construct an instance with the provided registry and task scheduler. + * @param meterRegistry the registry. + * @param taskScheduler the task scheduler. + * @since 3.3 + */ + public MicrometerProducerListener(MeterRegistry meterRegistry, TaskScheduler taskScheduler) { + this(meterRegistry, Collections.emptyList(), taskScheduler); + } + /** * Construct an instance with the provided registry and tags. * @param meterRegistry the registry. * @param tags the tags. */ public MicrometerProducerListener(MeterRegistry meterRegistry, List tags) { - this.meterRegistry = meterRegistry; - this.tags = tags; + super(meterRegistry, tags); } + /** + * Construct an instance with the provided registry, tags and task scheduler. + * @param meterRegistry the registry. + * @param tags the tags. + * @param taskScheduler the task scheduler. + * @since 3.3 + */ + public MicrometerProducerListener(MeterRegistry meterRegistry, List tags, TaskScheduler taskScheduler) { + super(meterRegistry, tags, taskScheduler); + } @Override public synchronized void producerAdded(String id, Producer producer) { - if (!this.metrics.containsKey(id)) { - List producerTags = new ArrayList<>(this.tags); - producerTags.add(new ImmutableTag("spring.id", id)); - this.metrics.put(id, new KafkaClientMetrics(producer, producerTags)); - this.metrics.get(id).bindTo(this.meterRegistry); - } + bindClient(id, producer); } @Override public synchronized void producerRemoved(String id, Producer producer) { - KafkaClientMetrics removed = this.metrics.remove(id); - if (removed != null) { - removed.close(); - } + unbindClient(id, producer); } } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/core/ProducerFactory.java b/spring-kafka/src/main/java/org/springframework/kafka/core/ProducerFactory.java index 5da1d21a6b..8d574a9d04 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/core/ProducerFactory.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/core/ProducerFactory.java @@ -35,6 +35,7 @@ * * @author Gary Russell * @author Thomas Strauß + * @author Kwon YongHyun */ public interface ProducerFactory { @@ -43,6 +44,11 @@ public interface ProducerFactory { */ Duration DEFAULT_PHYSICAL_CLOSE_TIMEOUT = Duration.ofSeconds(30); + /** + * Error message for unsupported factory methods. + */ + String FACTORY_DOES_NOT_SUPPORT_METHOD = "This factory does not support this method"; + /** * Create a producer which will be transactional if the factory is so configured. * @return the producer. 
@@ -57,7 +63,7 @@ public interface ProducerFactory { * @since 2.3 */ default Producer createProducer(@Nullable @SuppressWarnings("unused") String txIdPrefix) { - throw new UnsupportedOperationException("This factory does not support this method"); + throw new UnsupportedOperationException(FACTORY_DOES_NOT_SUPPORT_METHOD); } /** @@ -67,7 +73,7 @@ default Producer createProducer(@Nullable @SuppressWarnings("unused") Stri * @see #transactionCapable() */ default Producer createNonTransactionalProducer() { - throw new UnsupportedOperationException("This factory does not support this method"); + throw new UnsupportedOperationException(FACTORY_DOES_NOT_SUPPORT_METHOD); } /** diff --git a/spring-kafka/src/main/java/org/springframework/kafka/core/reactive/ReactiveKafkaConsumerTemplate.java b/spring-kafka/src/main/java/org/springframework/kafka/core/reactive/ReactiveKafkaConsumerTemplate.java index 0dc15d7b60..cd5aaf3fdd 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/core/reactive/ReactiveKafkaConsumerTemplate.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/core/reactive/ReactiveKafkaConsumerTemplate.java @@ -1,5 +1,5 @@ /* - * Copyright 2019-2023 the original author or authors. + * Copyright 2019-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -30,9 +30,6 @@ import org.apache.kafka.common.MetricName; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; - -import org.springframework.util.Assert; - import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import reactor.kafka.receiver.KafkaReceiver; @@ -42,6 +39,8 @@ import reactor.util.function.Tuple2; import reactor.util.function.Tuples; +import org.springframework.util.Assert; + /** * Reactive kafka consumer operations implementation. * @@ -50,6 +49,7 @@ * * @author Mark Norkin * @author Adrian Chlebosz + * @author Marcus Voltolim * * @since 2.3.0 */ @@ -71,6 +71,10 @@ public Flux> receive() { return this.kafkaReceiver.receive(); } + public Flux>> receiveBatch() { + return this.kafkaReceiver.receiveBatch(); + } + public Flux> receiveAutoAck() { return this.kafkaReceiver.receiveAutoAck().concatMap(Function.identity()); } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/core/reactive/ReactiveKafkaProducerTemplate.java b/spring-kafka/src/main/java/org/springframework/kafka/core/reactive/ReactiveKafkaProducerTemplate.java index e2e9af4771..18b4d5aeeb 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/core/reactive/ReactiveKafkaProducerTemplate.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/core/reactive/ReactiveKafkaProducerTemplate.java @@ -1,5 +1,5 @@ /* - * Copyright 2019-2023 the original author or authors. + * Copyright 2019-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
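// Editorial sketch, not part of the patch: the new receiveBatch() pass-through
// above emits one inner Flux per polled batch; process() is hypothetical.
reactiveConsumerTemplate.receiveBatch()
        .concatMap(batch -> batch
                .doOnNext(record -> process(record))
                .doOnNext(record -> record.receiverOffset().acknowledge()))
        .subscribe();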
@@ -26,14 +26,6 @@ import org.apache.kafka.common.MetricName; import org.apache.kafka.common.PartitionInfo; import org.reactivestreams.Publisher; - -import org.springframework.beans.factory.DisposableBean; -import org.springframework.kafka.support.KafkaHeaders; -import org.springframework.kafka.support.converter.MessagingMessageConverter; -import org.springframework.kafka.support.converter.RecordMessageConverter; -import org.springframework.messaging.Message; -import org.springframework.util.Assert; - import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import reactor.kafka.sender.KafkaSender; @@ -44,6 +36,14 @@ import reactor.util.function.Tuple2; import reactor.util.function.Tuples; +import org.springframework.beans.factory.DisposableBean; +import org.springframework.kafka.support.KafkaHeaders; +import org.springframework.kafka.support.converter.MessagingMessageConverter; +import org.springframework.kafka.support.converter.RecordMessageConverter; +import org.springframework.lang.Nullable; +import org.springframework.messaging.Message; +import org.springframework.util.Assert; + /** * Reactive kafka producer operations implementation. * @@ -52,6 +52,7 @@ * * @author Mark Norkin * @author Adrian Chlebosz + * @author Juhyun Kim * * @since 2.3.0 */ @@ -93,19 +94,19 @@ public Mono> sendTransactionally(SenderRecord recor return sendTransactionally.single(); } - public Mono> send(String topic, V value) { + public Mono> send(String topic, @Nullable V value) { return send(new ProducerRecord<>(topic, value)); } - public Mono> send(String topic, K key, V value) { + public Mono> send(String topic, K key, @Nullable V value) { return send(new ProducerRecord<>(topic, key, value)); } - public Mono> send(String topic, int partition, K key, V value) { + public Mono> send(String topic, int partition, K key, @Nullable V value) { return send(new ProducerRecord<>(topic, partition, key, value)); } - public Mono> send(String topic, int partition, long timestamp, K key, V value) { + public Mono> send(String topic, int partition, long timestamp, K key, @Nullable V value) { return send(new ProducerRecord<>(topic, partition, timestamp, key, value)); } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/event/ConcurrentContainerStoppedEvent.java b/spring-kafka/src/main/java/org/springframework/kafka/event/ConcurrentContainerStoppedEvent.java new file mode 100644 index 0000000000..2bdbb1fc0d --- /dev/null +++ b/spring-kafka/src/main/java/org/springframework/kafka/event/ConcurrentContainerStoppedEvent.java @@ -0,0 +1,58 @@ +/* + * Copyright 2018-2024 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.kafka.event; + +import java.io.Serial; + +/** + * An event published when a concurrent container is stopped. 
+ * + * @author Lokesh Alamuri + * @since 3.3 + * + */ +public class ConcurrentContainerStoppedEvent extends KafkaEvent { + + @Serial + private static final long serialVersionUID = 1L; + + private final ConsumerStoppedEvent.Reason reason; + + /** + * Construct an instance with the provided source and container. + * @param source the container instance that generated the event. + * @param reason the reason. + */ + public ConcurrentContainerStoppedEvent(Object source, ConsumerStoppedEvent.Reason reason) { + super(source, source); + this.reason = reason; + } + + /** + * Return the reason why the container was stopped. + * @return the reason. + */ + public ConsumerStoppedEvent.Reason getReason() { + return this.reason; + } + + @Override + public String toString() { + return "ConcurrentContainerStoppedEvent [source=" + getSource() + ", reason=" + this.reason + "]"; + } + +} diff --git a/spring-kafka/src/main/java/org/springframework/kafka/event/ConsumerPartitionPausedEvent.java b/spring-kafka/src/main/java/org/springframework/kafka/event/ConsumerPartitionPausedEvent.java index c0bcc54f91..9f9fc4e75b 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/event/ConsumerPartitionPausedEvent.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/event/ConsumerPartitionPausedEvent.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2021 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -22,6 +22,7 @@ * An event published when a consumer partition is paused. * * @author Tomaz Fernandes + * @author Borahm Lee * @since 2.7 * */ @@ -46,14 +47,25 @@ public ConsumerPartitionPausedEvent(Object source, Object container, TopicPartit /** * Return the paused partition. * @return the partition. + * @deprecated replaced by {@link #getPartition()} */ + @Deprecated(since = "3.3", forRemoval = true) public TopicPartition getPartitions() { return this.partition; } + /** + * Return the paused partition. + * @return the partition. + * @since 3.3 + */ + public TopicPartition getPartition() { + return this.partition; + } + @Override public String toString() { - return "ConsumerPausedEvent [partitions=" + this.partition + "]"; + return "ConsumerPartitionPausedEvent [partition=" + this.partition + "]"; } } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/event/KafkaEvent.java b/spring-kafka/src/main/java/org/springframework/kafka/event/KafkaEvent.java index 144cf3effd..6f359b93f8 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/event/KafkaEvent.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/event/KafkaEvent.java @@ -1,5 +1,5 @@ /* - * Copyright 2015-2023 the original author or authors. + * Copyright 2015-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,7 +19,6 @@ import org.springframework.context.ApplicationEvent; import org.springframework.util.Assert; - /** * Base class for events. 
* diff --git a/spring-kafka/src/main/java/org/springframework/kafka/event/ListenerContainerPartitionIdleEvent.java b/spring-kafka/src/main/java/org/springframework/kafka/event/ListenerContainerPartitionIdleEvent.java index 0b0249f117..7f94b6d6a9 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/event/ListenerContainerPartitionIdleEvent.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/event/ListenerContainerPartitionIdleEvent.java @@ -1,5 +1,5 @@ /* - * Copyright 2016-2021 the original author or authors. + * Copyright 2016-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,6 +24,7 @@ * is configured to do so. * * @author Tomaz Fernandes + * @author Borahm Lee * @since 2.7 */ public class ListenerContainerPartitionIdleEvent extends KafkaEvent { @@ -108,7 +109,7 @@ public boolean isPaused() { @Override public String toString() { - return "ListenerContainerIdleEvent [idleTime=" + return "ListenerContainerPartitionIdleEvent [idleTime=" + ((float) this.idleTime / 1000) + "s, listenerId=" + this.listenerId // NOSONAR magic # + ", container=" + getSource() + ", paused=" + this.paused diff --git a/spring-kafka/src/main/java/org/springframework/kafka/event/ListenerContainerPartitionNoLongerIdleEvent.java b/spring-kafka/src/main/java/org/springframework/kafka/event/ListenerContainerPartitionNoLongerIdleEvent.java index 385b6a4c32..03332d166c 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/event/ListenerContainerPartitionNoLongerIdleEvent.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/event/ListenerContainerPartitionNoLongerIdleEvent.java @@ -1,5 +1,5 @@ /* - * Copyright 2020-2021 the original author or authors. + * Copyright 2020-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,6 +24,7 @@ * idle events. * * @author Gary Russell + * @author Borahm Lee * @since 2.6.2 */ public class ListenerContainerPartitionNoLongerIdleEvent extends KafkaEvent { @@ -92,9 +93,9 @@ public String getListenerId() { @Override public String toString() { - return "ListenerContainerNoLongerIdleEvent [idleTime=" + return "ListenerContainerPartitionNoLongerIdleEvent [idleTime=" + ((float) this.idleTime / 1000) + "s, listenerId=" + this.listenerId // NOSONAR magic # + ", container=" + getSource() - + ", topicPartitions=" + this.topicPartition + "]"; + + ", topicPartition=" + this.topicPartition + "]"; } } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/AbstractConsumerSeekAware.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/AbstractConsumerSeekAware.java index 093a1c3568..0af8a3d27a 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/AbstractConsumerSeekAware.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/AbstractConsumerSeekAware.java @@ -1,5 +1,5 @@ /* - * Copyright 2019-2023 the original author or authors. + * Copyright 2019-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
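
Reviewer note: with the ConcurrentContainerStoppedEvent introduced earlier in this diff, applications can observe when all child containers of a concurrent container have stopped. A minimal sketch of a listener bean, assuming standard application-event wiring; the class name is illustrative:

    import org.springframework.context.event.EventListener;
    import org.springframework.kafka.event.ConcurrentContainerStoppedEvent;
    import org.springframework.stereotype.Component;

    @Component
    class ContainerStopLogger {

        @EventListener
        void onStopped(ConcurrentContainerStoppedEvent event) {
            // Published once every child container has stopped; the reason
            // reuses ConsumerStoppedEvent.Reason
            System.out.println("Concurrent container stopped: " + event.getSource()
                    + ", reason: " + event.getReason());
        }

    }
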
@@ -16,16 +16,19 @@ package org.springframework.kafka.listener; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; import org.apache.kafka.common.TopicPartition; import org.springframework.lang.Nullable; +import org.springframework.util.CollectionUtils; /** * Manages the {@link ConsumerSeekAware.ConsumerSeekCallback} s for the listener. If the @@ -33,6 +36,7 @@ * having to keep track of the callbacks itself. * * @author Gary Russell + * @author Borahm Lee * @since 2.3 * */ @@ -40,9 +44,9 @@ public abstract class AbstractConsumerSeekAware implements ConsumerSeekAware { private final Map callbackForThread = new ConcurrentHashMap<>(); - private final Map callbacks = new ConcurrentHashMap<>(); + private final Map> topicToCallbacks = new ConcurrentHashMap<>(); - private final Map> callbacksToTopic = new ConcurrentHashMap<>(); + private final Map> callbackToTopics = new ConcurrentHashMap<>(); @Override public void registerSeekCallback(ConsumerSeekCallback callback) { @@ -54,8 +58,8 @@ public void onPartitionsAssigned(Map assignments, Consumer ConsumerSeekCallback threadCallback = this.callbackForThread.get(Thread.currentThread()); if (threadCallback != null) { assignments.keySet().forEach(tp -> { - this.callbacks.put(tp, threadCallback); - this.callbacksToTopic.computeIfAbsent(threadCallback, key -> new LinkedList<>()).add(tp); + this.topicToCallbacks.computeIfAbsent(tp, key -> new ArrayList<>()).add(threadCallback); + this.callbackToTopics.computeIfAbsent(threadCallback, key -> new LinkedList<>()).add(tp); }); } } @@ -63,15 +67,17 @@ public void onPartitionsAssigned(Map assignments, Consumer @Override public void onPartitionsRevoked(Collection partitions) { partitions.forEach(tp -> { - ConsumerSeekCallback removed = this.callbacks.remove(tp); - if (removed != null) { - List topics = this.callbacksToTopic.get(removed); - if (topics != null) { - topics.remove(tp); - if (topics.size() == 0) { - this.callbacksToTopic.remove(removed); + List removedCallbacks = this.topicToCallbacks.remove(tp); + if (removedCallbacks != null && !removedCallbacks.isEmpty()) { + removedCallbacks.forEach(cb -> { + List topics = this.callbackToTopics.get(cb); + if (topics != null) { + topics.remove(tp); + if (topics.isEmpty()) { + this.callbackToTopics.remove(cb); + } } - } + }); } }); } @@ -82,21 +88,55 @@ public void unregisterSeekCallback() { } /** - * Return the callback for the specified topic/partition. - * @param topicPartition the topic/partition. - * @return the callback (or null if there is no assignment). - */ + * Return the callback for the specified topic/partition. + * @param topicPartition the topic/partition. + * @return the callback (or null if there is no assignment). + * @deprecated Replaced by {@link #getSeekCallbacksFor(TopicPartition)} + */ + @Deprecated(since = "3.3", forRemoval = true) @Nullable protected ConsumerSeekCallback getSeekCallbackFor(TopicPartition topicPartition) { - return this.callbacks.get(topicPartition); + List callbacks = getSeekCallbacksFor(topicPartition); + if (CollectionUtils.isEmpty(callbacks)) { + return null; + } + return callbacks.get(0); + } + + /** + * Return the callbacks for the specified topic/partition. + * @param topicPartition the topic/partition. + * @return the callbacks (or null if there is no assignment). 
+ * @since 3.3 + */ + @Nullable + protected List getSeekCallbacksFor(TopicPartition topicPartition) { + return this.topicToCallbacks.get(topicPartition); } /** * The map of callbacks for all currently assigned partitions. * @return the map. + * @deprecated Replaced by {@link #getTopicsAndCallbacks()} */ + @Deprecated(since = "3.3", forRemoval = true) protected Map getSeekCallbacks() { - return Collections.unmodifiableMap(this.callbacks); + Map> topicsAndCallbacks = getTopicsAndCallbacks(); + return topicsAndCallbacks.entrySet().stream() + .filter(entry -> !entry.getValue().isEmpty()) + .collect(Collectors.toMap( + Map.Entry::getKey, + entry -> entry.getValue().get(0) + )); + } + + /** + * The map of callbacks for all currently assigned partitions. + * @return the map. + * @since 3.3 + */ + protected Map> getTopicsAndCallbacks() { + return Collections.unmodifiableMap(this.topicToCallbacks); } /** @@ -105,7 +145,7 @@ protected Map getSeekCallbacks() { * @since 2.6 */ protected Map> getCallbacksAndTopics() { - return Collections.unmodifiableMap(this.callbacksToTopic); + return Collections.unmodifiableMap(this.callbackToTopics); } /** @@ -113,7 +153,7 @@ protected Map> getCallbacksAndTopics( * @since 2.6 */ public void seekToBeginning() { - getCallbacksAndTopics().forEach((cb, topics) -> cb.seekToBeginning(topics)); + getCallbacksAndTopics().forEach(ConsumerSeekCallback::seekToBeginning); } /** @@ -121,7 +161,7 @@ public void seekToBeginning() { * @since 2.6 */ public void seekToEnd() { - getCallbacksAndTopics().forEach((cb, topics) -> cb.seekToEnd(topics)); + getCallbacksAndTopics().forEach(ConsumerSeekCallback::seekToEnd); } /** diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/AbstractMessageListenerContainer.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/AbstractMessageListenerContainer.java index 8667f34c54..80c732f46e 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/AbstractMessageListenerContainer.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/AbstractMessageListenerContainer.java @@ -70,10 +70,12 @@ * @author Tomaz Fernandes * @author Wang Zhiyang * @author Soby Chacko + * @author Sanghyeok An + * @author Lokesh Alamuri */ public abstract class AbstractMessageListenerContainer implements GenericMessageListenerContainer, BeanNameAware, ApplicationEventPublisherAware, - ApplicationContextAware { + ApplicationContextAware { /** * The default {@link org.springframework.context.SmartLifecycle} phase for listener @@ -124,6 +126,8 @@ public abstract class AbstractMessageListenerContainer private volatile boolean running = false; + private volatile boolean fenced = false; + private volatile boolean paused; private volatile boolean stoppedNormally = true; @@ -139,7 +143,6 @@ public abstract class AbstractMessageListenerContainer @Nullable private KafkaAdmin kafkaAdmin; - /** * Construct an instance with the provided factory and properties. * @param consumerFactory the factory. 
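
Reviewer note: following the AbstractConsumerSeekAware changes above, a TopicPartition can now map to several callbacks, so callers should migrate from the deprecated getSeekCallbackFor to the list-returning accessor. A sketch of a subclass doing an externally triggered seek; the class name and offset source are illustrative:

    import java.util.List;

    import org.apache.kafka.common.TopicPartition;

    import org.springframework.kafka.listener.AbstractConsumerSeekAware;

    class ReplayableListener extends AbstractConsumerSeekAware {

        void rewindTo(String topic, int partition, long offset) {
            // 3.3: a partition may be associated with more than one callback
            List<ConsumerSeekCallback> callbacks = getSeekCallbacksFor(new TopicPartition(topic, partition));
            if (callbacks != null) {
                callbacks.forEach(callback -> callback.seek(topic, partition, offset));
            }
        }

    }
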
@@ -275,6 +278,10 @@ public boolean isRunning() { return this.running; } + protected void setFenced(boolean fenced) { + this.fenced = fenced; + } + @Deprecated(since = "3.2", forRemoval = true) protected boolean isPaused() { return this.paused; @@ -509,6 +516,7 @@ public final void start() { if (!isRunning()) { Assert.state(this.containerProperties.getMessageListener() instanceof GenericMessageListener, () -> "A " + GenericMessageListener.class.getName() + " implementation must be provided"); + Assert.state(!this.fenced, "Container Fenced. It is not allowed to start."); doStart(); } } @@ -563,7 +571,7 @@ protected void checkTopics() { catch (Exception e) { this.logger.error(e, "Failed to check topic existence"); } - if (missing != null && missing.size() > 0) { + if (missing != null && !missing.isEmpty()) { throw new IllegalStateException( "Topic(s) " + missing.toString() + " is/are not present and missingTopicsFatal is true"); @@ -600,28 +608,36 @@ public final void stop() { * @since 2.3.8 */ public final void stop(boolean wait) { - this.lifecycleLock.lock(); - try { - if (isRunning()) { - if (wait) { - final CountDownLatch latch = new CountDownLatch(1); + if (isRunning()) { + if (wait) { + final CountDownLatch latch = new CountDownLatch(1); + this.lifecycleLock.lock(); + try { + doStop(latch::countDown); - try { - latch.await(this.containerProperties.getShutdownTimeout(), TimeUnit.MILLISECONDS); // NOSONAR - publishContainerStoppedEvent(); - } - catch (@SuppressWarnings("unused") InterruptedException e) { - Thread.currentThread().interrupt(); - } } - else { + finally { + this.lifecycleLock.unlock(); + } + try { + latch.await(this.containerProperties.getShutdownTimeout(), TimeUnit.MILLISECONDS); // NOSONAR + publishContainerStoppedEvent(); + } + catch (@SuppressWarnings("unused") InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + else { + this.lifecycleLock.lock(); + try { doStop(this::publishContainerStoppedEvent); } + finally { + this.lifecycleLock.unlock(); + } + } } - finally { - this.lifecycleLock.unlock(); - } } @Override @@ -652,8 +668,14 @@ public void stop(Runnable callback) { @Override public void stopAbnormally(Runnable callback) { - doStop(callback, false); - publishContainerStoppedEvent(); + this.lifecycleLock.lock(); + try { + doStop(callback, false); + publishContainerStoppedEvent(); + } + finally { + this.lifecycleLock.unlock(); + } } protected void doStop(Runnable callback) { @@ -691,7 +713,7 @@ public void onPartitionsAssigned(Collection partitions) { @Override public void onPartitionsLost(Collection partitions) { AbstractMessageListenerContainer.this.logger.info(() -> - getGroupId() + ": partitions lost: " + partitions); + getGroupId() + ": partitions lost: " + partitions); } }; diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/BatchMessageListener.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/BatchMessageListener.java index 19f5cac2bd..bc71bbb437 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/BatchMessageListener.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/BatchMessageListener.java @@ -1,5 +1,5 @@ /* - * Copyright 2015-2019 the original author or authors. + * Copyright 2015-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
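
Reviewer note: the reworked stop(boolean) above now awaits the shutdown latch outside the lifecycle lock; callers are unaffected. A typical graceful stop, assuming the listener id is registered; method and parameter names are illustrative:

    import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
    import org.springframework.kafka.listener.AbstractMessageListenerContainer;
    import org.springframework.kafka.listener.MessageListenerContainer;

    class StopSketch {

        void stopListener(KafkaListenerEndpointRegistry registry, String listenerId) {
            MessageListenerContainer container = registry.getListenerContainer(listenerId);
            if (container instanceof AbstractMessageListenerContainer<?, ?> abstractContainer) {
                // Blocks up to ContainerProperties.shutdownTimeout before the
                // container-stopped event is published
                abstractContainer.stop(true);
            }
        }

    }
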
@@ -40,7 +40,6 @@ @FunctionalInterface public interface BatchMessageListener extends GenericMessageListener>> { - /** * Listener receives the original {@link ConsumerRecords} object instead of a * list of {@link ConsumerRecord}. diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/CommonContainerStoppingErrorHandler.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/CommonContainerStoppingErrorHandler.java index 4b783e7c57..374bbc3ede 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/CommonContainerStoppingErrorHandler.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/CommonContainerStoppingErrorHandler.java @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -83,7 +83,6 @@ public void handleOtherException(Exception thrownException, Consumer consu stopContainer(container, thrownException); } - @Override public void handleRemaining(Exception thrownException, List> records, Consumer consumer, MessageListenerContainer container) { diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/CommonDelegatingErrorHandler.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/CommonDelegatingErrorHandler.java index d510701f35..5e4c1ddc10 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/CommonDelegatingErrorHandler.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/CommonDelegatingErrorHandler.java @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -38,6 +38,8 @@ * * @author Gary Russell * @author Adrian Chlebosz + * @author Antonin Arquey + * @author Dan Blackney * @since 2.8 * */ @@ -65,6 +67,7 @@ public CommonDelegatingErrorHandler(CommonErrorHandler defaultErrorHandler) { * Set the delegate error handlers; a {@link LinkedHashMap} argument is recommended so * that the delegates are searched in a known order. * @param delegates the delegates. + * @throws IllegalArgumentException if any of the delegates is not compatible with the default error handler. */ public void setErrorHandlers(Map, CommonErrorHandler> delegates) { Assert.notNull(delegates, "'delegates' cannot be null"); @@ -109,6 +112,7 @@ public void setAckAfterHandle(boolean ack) { * Add a delegate to the end of the current collection. * @param throwable the throwable for this handler. * @param handler the handler. + * @throws IllegalArgumentException if the handler is not compatible with the default error handler. 
*/ public void addDelegate(Class throwable, CommonErrorHandler handler) { Map, CommonErrorHandler> delegatesToCheck = new LinkedHashMap<>(this.delegates); @@ -118,13 +122,12 @@ public void addDelegate(Class throwable, CommonErrorHandler this.delegates.putAll(delegatesToCheck); } - @SuppressWarnings("deprecation") private void checkDelegatesAndUpdateClassifier(Map, CommonErrorHandler> delegatesToCheck) { boolean ackAfterHandle = this.defaultErrorHandler.isAckAfterHandle(); boolean seeksAfterHandling = this.defaultErrorHandler.seeksAfterHandling(); - this.delegates.values().forEach(handler -> { + delegatesToCheck.values().forEach(handler -> { Assert.isTrue(ackAfterHandle == handler.isAckAfterHandle(), "All delegates must return the same value when calling 'isAckAfterHandle()'"); Assert.isTrue(seeksAfterHandling == handler.seeksAfterHandling(), @@ -179,6 +182,19 @@ public void handleOtherException(Exception thrownException, Consumer consu } } + @Override + public boolean handleOne(Exception thrownException, ConsumerRecord record, Consumer consumer, + MessageListenerContainer container) { + + CommonErrorHandler handler = findDelegate(thrownException); + if (handler != null) { + return handler.handleOne(thrownException, record, consumer, container); + } + else { + return this.defaultErrorHandler.handleOne(thrownException, record, consumer, container); + } + } + @Nullable private CommonErrorHandler findDelegate(Throwable thrownException) { Throwable cause = findCause(thrownException); diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/CommonLoggingErrorHandler.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/CommonLoggingErrorHandler.java index 93d5174d43..9e731c05ae 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/CommonLoggingErrorHandler.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/CommonLoggingErrorHandler.java @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
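
Reviewer note: handleOne now routes through the delegate map like the other callbacks, and addDelegate validates the candidate delegate map rather than the already-registered one. A minimal configuration sketch; both delegates here are DefaultErrorHandlers, so the isAckAfterHandle()/seeksAfterHandling() compatibility check passes (back-off values and the exception type are illustrative):

    import org.springframework.kafka.listener.CommonDelegatingErrorHandler;
    import org.springframework.kafka.listener.DefaultErrorHandler;
    import org.springframework.util.backoff.FixedBackOff;

    class DelegatingHandlerSketch {

        CommonDelegatingErrorHandler delegatingErrorHandler() {
            DefaultErrorHandler retrying = new DefaultErrorHandler(new FixedBackOff(1000L, 2));
            CommonDelegatingErrorHandler handler = new CommonDelegatingErrorHandler(retrying);
            // No point retrying validation-style failures: zero retries for this type
            handler.addDelegate(IllegalArgumentException.class, new DefaultErrorHandler(new FixedBackOff(0L, 0)));
            return handler;
        }

    }
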
@@ -47,7 +47,6 @@ public void setAckAfterHandle(boolean ackAfterHandle) { this.ackAfterHandle = ackAfterHandle; } - @Override public boolean handleOne(Exception thrownException, ConsumerRecord record, Consumer consumer, MessageListenerContainer container) { diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/ConcurrentMessageListenerContainer.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/ConcurrentMessageListenerContainer.java index 45d9dd85a6..06b136f434 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/ConcurrentMessageListenerContainer.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/ConcurrentMessageListenerContainer.java @@ -25,7 +25,6 @@ import java.util.Map; import java.util.Objects; import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; import org.apache.kafka.common.Metric; import org.apache.kafka.common.MetricName; @@ -36,6 +35,7 @@ import org.springframework.core.task.AsyncTaskExecutor; import org.springframework.core.task.SimpleAsyncTaskExecutor; import org.springframework.kafka.core.ConsumerFactory; +import org.springframework.kafka.event.ConcurrentContainerStoppedEvent; import org.springframework.kafka.event.ConsumerStoppedEvent.Reason; import org.springframework.kafka.support.TopicPartitionOffset; import org.springframework.lang.Nullable; @@ -59,6 +59,7 @@ * @author Vladimir Tsanev * @author Tomaz Fernandes * @author Wang Zhiyang + * @author Lokesh Alamuri */ public class ConcurrentMessageListenerContainer extends AbstractMessageListenerContainer { @@ -66,7 +67,7 @@ public class ConcurrentMessageListenerContainer extends AbstractMessageLis private final List executors = new ArrayList<>(); - private final AtomicInteger stoppedContainers = new AtomicInteger(); + private final AtomicInteger startedContainers = new AtomicInteger(); private int concurrency = 1; @@ -121,12 +122,12 @@ public void setAlwaysClientIdSuffix(boolean alwaysClientIdSuffix) { public List> getContainers() { this.lifecycleLock.lock(); try { - return Collections.unmodifiableList(new ArrayList<>(this.containers)); + return List.copyOf(this.containers); } finally { this.lifecycleLock.unlock(); } -} + } @Override public MessageListenerContainer getContainerFor(String topic, int partition) { @@ -157,7 +158,7 @@ public Collection getAssignedPartitions() { .map(KafkaMessageListenerContainer::getAssignedPartitions) .filter(Objects::nonNull) .flatMap(Collection::stream) - .collect(Collectors.toList()); + .toList(); } finally { this.lifecycleLock.unlock(); @@ -203,14 +204,20 @@ public boolean isContainerPaused() { @Override public boolean isChildRunning() { - if (!isRunning()) { - return false; - } - for (MessageListenerContainer container : this.containers) { - if (container.isRunning()) { + this.lifecycleLock.lock(); + try { + for (MessageListenerContainer container : this.containers) { + if (container.isRunning()) { + return true; + } + } + if (this.startedContainers.get() > 0) { return true; } } + finally { + this.lifecycleLock.unlock(); + } return false; } @@ -244,6 +251,7 @@ protected void doStart() { + topicPartitions.length); this.concurrency = topicPartitions.length; } + clearState(); setRunning(true); for (int i = 0; i < this.concurrency; i++) { @@ -259,7 +267,6 @@ protected void doStart() { } } - @SuppressWarnings("deprecation") private void configureChildContainer(int index, KafkaMessageListenerContainer container) { String beanName = getBeanName(); beanName = (beanName == null ? 
"consumer" : beanName) + "-" + index; @@ -308,13 +315,17 @@ private KafkaMessageListenerContainer constructContainer(ContainerProperti return container; } + @Nullable private TopicPartitionOffset[] partitionSubset(ContainerProperties containerProperties, int index) { TopicPartitionOffset[] topicPartitions = containerProperties.getTopicPartitions(); + if (topicPartitions == null) { + return null; + } if (this.concurrency == 1) { - return topicPartitions; // NOSONAR + return topicPartitions; } else { - int numPartitions = topicPartitions.length; // NOSONAR + int numPartitions = topicPartitions.length; if (numPartitions == this.concurrency) { return new TopicPartitionOffset[] { topicPartitions[index] }; } @@ -350,6 +361,7 @@ protected void doStop(final Runnable callback, boolean normal) { } } for (KafkaMessageListenerContainer container : this.containers) { + container.setFenced(true); if (container.isRunning()) { if (normal) { container.stop(() -> { @@ -367,29 +379,65 @@ protected void doStop(final Runnable callback, boolean normal) { } } } - this.containers.clear(); setStoppedNormally(normal); + // All the containers are stopped before calling stop API + if (this.startedContainers.get() == 0) { + publishConcurrentContainerStoppedEvent(this.reason); + } } } @Override - public void childStopped(MessageListenerContainer child, Reason reason) { - if (this.reason == null || reason.equals(Reason.AUTH)) { - this.reason = reason; + public void childStarted(MessageListenerContainer child) { + this.lifecycleLock.lock(); + try { + if (this.containers.contains(child)) { + this.startedContainers.incrementAndGet(); + } } - if (Reason.AUTH.equals(this.reason) - && getContainerProperties().isRestartAfterAuthExceptions() - && this.concurrency == this.stoppedContainers.incrementAndGet()) { - - this.reason = null; - this.stoppedContainers.set(0); + finally { + this.lifecycleLock.unlock(); + } + } - // This has to run on another thread to avoid a deadlock on lifecycleMonitor - AsyncTaskExecutor exec = getContainerProperties().getListenerTaskExecutor(); - if (exec == null) { - exec = new SimpleAsyncTaskExecutor(getListenerId() + ".authRestart"); + @Override + public void childStopped(MessageListenerContainer child, Reason reason) { + this.lifecycleLock.lock(); + try { + if (!this.containers.contains(child)) { + return; + } + if (this.reason == null || reason.equals(Reason.AUTH)) { + this.reason = reason; + } + int startedContainersCount = this.startedContainers.decrementAndGet(); + if (startedContainersCount == 0) { + if (!isRunning()) { + this.containers.clear(); + publishConcurrentContainerStoppedEvent(this.reason); + } + boolean restartContainer = Reason.AUTH.equals(this.reason) + && getContainerProperties().isRestartAfterAuthExceptions(); + this.reason = null; + if (restartContainer) { + // This has to run on another thread to avoid a deadlock on lifecycleMonitor + AsyncTaskExecutor exec = getContainerProperties().getListenerTaskExecutor(); + if (exec == null) { + exec = new SimpleAsyncTaskExecutor(getListenerId() + ".authRestart"); + } + exec.execute(this::start); + } } - exec.execute(() -> start()); + } + finally { + this.lifecycleLock.unlock(); + } + } + + private void publishConcurrentContainerStoppedEvent(Reason reason) { + ApplicationEventPublisher eventPublisher = getApplicationEventPublisher(); + if (eventPublisher != null) { + eventPublisher.publishEvent(new ConcurrentContainerStoppedEvent(this, reason)); } } @@ -477,10 +525,15 @@ public boolean isPartitionPaused(TopicPartition topicPartition) { 
public boolean isInExpectedState() { this.lifecycleLock.lock(); try { - return (isRunning() || isStoppedNormally()) && this.containers - .stream() - .map(container -> container.isInExpectedState()) - .allMatch(bool -> Boolean.TRUE.equals(bool)); + boolean isInExpectedState = isRunning() || isStoppedNormally(); + if (isInExpectedState) { + for (KafkaMessageListenerContainer container : this.containers) { + if (!container.isInExpectedState()) { + return false; + } + } + } + return isInExpectedState; } finally { this.lifecycleLock.unlock(); @@ -492,6 +545,12 @@ private boolean containsPartition(TopicPartition topicPartition, KafkaMessageLis return assignedPartitions != null && assignedPartitions.contains(topicPartition); } + private void clearState() { + this.containers.clear(); + this.startedContainers.set(0); + this.reason = null; + } + @Override public String toString() { return "ConcurrentMessageListenerContainer [concurrency=" + this.concurrency + ", beanName=" diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/ConsumerAwareRebalanceListener.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/ConsumerAwareRebalanceListener.java index 4a1b1fffe9..12a12b1024 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/ConsumerAwareRebalanceListener.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/ConsumerAwareRebalanceListener.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2023 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -34,7 +34,6 @@ */ public interface ConsumerAwareRebalanceListener extends ConsumerRebalanceListener { - /** * The same as {@link #onPartitionsRevoked(Collection)} with the additional consumer * parameter. It is invoked by the container before any pending offsets are committed. diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/ConsumerAwareRecordRecoverer.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/ConsumerAwareRecordRecoverer.java index e44cdd5b79..bc9fae7cd8 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/ConsumerAwareRecordRecoverer.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/ConsumerAwareRecordRecoverer.java @@ -1,5 +1,5 @@ /* - * Copyright 2021 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -32,7 +32,6 @@ @FunctionalInterface public interface ConsumerAwareRecordRecoverer extends ConsumerRecordRecoverer { - @Override default void accept(ConsumerRecord record, Exception exception) { accept(record, null, exception); diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/ConsumerProperties.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/ConsumerProperties.java index 097842cf4a..624a6127e3 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/ConsumerProperties.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/ConsumerProperties.java @@ -35,6 +35,7 @@ * Common consumer properties. * * @author Gary Russell + * @author Sanghyeok An * @since 2.3 * */ @@ -520,7 +521,7 @@ protected final String renderProperties() { + (this.offsetAndMetadataProvider != null ?
"\n offsetAndMetadataProvider=" + this.offsetAndMetadataProvider : "") + "\n syncCommits=" + this.syncCommits + (this.syncCommitTimeout != null ? "\n syncCommitTimeout=" + this.syncCommitTimeout : "") - + (this.kafkaConsumerProperties.size() > 0 ? "\n properties=" + this.kafkaConsumerProperties : "") + + (!this.kafkaConsumerProperties.isEmpty() ? "\n properties=" + this.kafkaConsumerProperties : "") + "\n authExceptionRetryInterval=" + this.authExceptionRetryInterval + "\n commitRetries=" + this.commitRetries + "\n fixTxOffsets" + this.fixTxOffsets; diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/ConsumerSeekAware.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/ConsumerSeekAware.java index 2b48e09778..fdac0a661f 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/ConsumerSeekAware.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/ConsumerSeekAware.java @@ -1,5 +1,5 @@ /* - * Copyright 2016-2023 the original author or authors. + * Copyright 2016-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,17 +18,21 @@ import java.util.Collection; import java.util.Map; +import java.util.function.Function; import org.apache.kafka.common.TopicPartition; +import org.springframework.lang.Nullable; + /** * Listeners that implement this interface are provided with a * {@link ConsumerSeekCallback} which can be used to perform a * seek operation. * * @author Gary Russell + * @author Soby Chacko + * @author Borahm Lee * @since 1.1 - * */ public interface ConsumerSeekAware { @@ -105,6 +109,23 @@ interface ConsumerSeekCallback { */ void seek(String topic, int partition, long offset); + /** + * Perform a seek operation based on the given function to compute the offset to seek to. + * The function provides the user with access to the current offset in the consumer which + * is the current position, i.e, the next offset to be fetched. + * When called from {@link ConsumerSeekAware#onPartitionsAssigned(Map, ConsumerSeekCallback)} + * or from {@link ConsumerSeekAware#onIdleContainer(Map, ConsumerSeekCallback)} + * perform the seek immediately on the consumer. When called from elsewhere, + * queue the seek operation to the consumer. The queued seek will occur after any + * pending offset commits. The consumer must be currently assigned the specified + * partition. + * @param topic the topic. + * @param partition the partition. + * @param offsetComputeFunction function to compute the absolute offset to seek to. + * @since 3.2.0 + */ + void seek(String topic, int partition, Function offsetComputeFunction); + /** * Perform a seek to beginning operation. When called from * {@link ConsumerSeekAware#onPartitionsAssigned(Map, ConsumerSeekCallback)} or @@ -210,6 +231,17 @@ default void seekToEnd(Collection partitions) { */ void seekToTimestamp(Collection topicPartitions, long timestamp); + /** + * Retrieve the group ID associated with this consumer seek callback, if available. + * This method returns {@code null} by default, indicating that the group ID is not specified. + * Implementations may override this method to provide a specific group ID value. + * @return the consumer group ID. 
+ * @since 3.3 + */ + @Nullable + default String getGroupId() { + return null; + } } } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/ContainerGroupSequencer.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/ContainerGroupSequencer.java index fdb2ad1d75..1e02ebf844 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/ContainerGroupSequencer.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/ContainerGroupSequencer.java @@ -37,6 +37,7 @@ * idle. * * @author Gary Russell + * @author Sanghyeok An * @since 2.7.3 * */ @@ -186,7 +187,7 @@ public void initialize() { for (String group : this.groupNames) { this.groups.add(this.applicationContext.getBean(group + ".group", ContainerGroup.class)); } - if (this.groups.size() > 0) { + if (!this.groups.isEmpty()) { this.iterator = this.groups.iterator(); this.currentGroup = this.iterator.next(); this.groups.forEach(grp -> { diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/ContainerPartitionPausingBackOffManager.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/ContainerPartitionPausingBackOffManager.java index 4b9889afe6..8782290b30 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/ContainerPartitionPausingBackOffManager.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/ContainerPartitionPausingBackOffManager.java @@ -32,12 +32,13 @@ * * @author Tomaz Fernandes * @author Gary Russell + * @author Borahm Lee * @since 2.9 * @see DefaultErrorHandler */ public class ContainerPartitionPausingBackOffManager implements KafkaConsumerBackoffManager { - private static final LogAccessor LOGGER = new LogAccessor(LogFactory.getLog(KafkaConsumerBackoffManager.class)); + private static final LogAccessor LOGGER = new LogAccessor(LogFactory.getLog(ContainerPartitionPausingBackOffManager.class)); private final ListenerContainerRegistry listenerContainerRegistry; diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/ContainerProperties.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/ContainerProperties.java index 616db9bea3..221e70204a 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/ContainerProperties.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/ContainerProperties.java @@ -1,5 +1,5 @@ /* - * Copyright 2016-2023 the original author or authors. + * Copyright 2016-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
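
Reviewer note: the function-based seek above hands the callback the current position (the next offset to be fetched) and seeks to whatever the function returns. A sketch that rewinds each newly assigned partition by up to ten records; the step size and class name are illustrative:

    import java.util.Map;

    import org.apache.kafka.common.TopicPartition;

    import org.springframework.kafka.listener.ConsumerSeekAware;

    class RewindingListener implements ConsumerSeekAware {

        @Override
        public void onPartitionsAssigned(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) {
            // Called from here, the seek is performed immediately on the consumer
            assignments.forEach((tp, position) ->
                    callback.seek(tp.topic(), tp.partition(), current -> Math.max(0, current - 10)));
        }

    }
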
@@ -26,6 +26,7 @@ import java.util.function.Function; import java.util.regex.Pattern; +import io.micrometer.observation.ObservationRegistry; import org.aopalliance.aop.Advice; import org.apache.kafka.clients.consumer.ConsumerRecord; @@ -35,6 +36,7 @@ import org.springframework.core.task.AsyncTaskExecutor; import org.springframework.kafka.support.TopicPartitionOffset; import org.springframework.kafka.support.micrometer.KafkaListenerObservationConvention; +import org.springframework.kafka.transaction.KafkaAwareTransactionManager; import org.springframework.lang.Nullable; import org.springframework.scheduling.TaskScheduler; import org.springframework.transaction.PlatformTransactionManager; @@ -257,8 +259,11 @@ public enum EOSMode { private double idleBeforeDataMultiplier = DEFAULT_IDLE_BEFORE_DATA_MULTIPLIER; + @Deprecated(since = "3.2") private PlatformTransactionManager transactionManager; + private KafkaAwareTransactionManager kafkaAwareTransactionManager; + private boolean batchRecoverAfterRollback = false; private int monitorInterval = DEFAULT_MONITOR_INTERVAL; @@ -277,6 +282,8 @@ public enum EOSMode { private boolean observationEnabled; + private ObservationRegistry observationRegistry = ObservationRegistry.NOOP; + private Duration consumerStartTimeout = DEFAULT_CONSUMER_START_TIMEOUT; private Boolean subBatchPerPartition; @@ -371,7 +378,7 @@ public void setMessageListener(Object messageListener) { * calling thread and sometimes not. * * @param ackMode the {@link AckMode}; default BATCH. - * @see #setTransactionManager(PlatformTransactionManager) + * @see #setKafkaAwareTransactionManager(KafkaAwareTransactionManager) */ public void setAckMode(AckMode ackMode) { Assert.notNull(ackMode, "'ackMode' cannot be null"); @@ -525,6 +532,7 @@ public Long getIdlePartitionEventInterval() { return this.idlePartitionEventInterval; } + @Deprecated(since = "3.2", forRemoval = true) @Nullable public PlatformTransactionManager getTransactionManager() { return this.transactionManager; @@ -542,10 +550,25 @@ public PlatformTransactionManager getTransactionManager() { * @since 1.3 * @see #setAckMode(AckMode) */ + @Deprecated(since = "3.2", forRemoval = true) public void setTransactionManager(@Nullable PlatformTransactionManager transactionManager) { this.transactionManager = transactionManager; } + @Nullable + public KafkaAwareTransactionManager getKafkaAwareTransactionManager() { + return this.kafkaAwareTransactionManager; + } + + /** + * Set the transaction manager to start a transaction; replace {@link #setTransactionManager}. + * @param kafkaAwareTransactionManager the transaction manager. + * @since 3.2 + */ + public void setKafkaAwareTransactionManager(@Nullable KafkaAwareTransactionManager kafkaAwareTransactionManager) { + this.kafkaAwareTransactionManager = kafkaAwareTransactionManager; + } + /** * Recover batch records after rollback if true. * @return true to recover. @@ -696,6 +719,20 @@ public void setObservationEnabled(boolean observationEnabled) { this.observationEnabled = observationEnabled; } + public ObservationRegistry getObservationRegistry() { + return this.observationRegistry; + } + + /** + * Configure the {@link ObservationRegistry} to use for recording observations. + * @param observationRegistry the observation registry to use. 
+ * @since 3.3.1 + */ + public void setObservationRegistry(ObservationRegistry observationRegistry) { + Assert.notNull(observationRegistry, "'observationRegistry' must not be null"); + this.observationRegistry = observationRegistry; + } + /** * Set additional tags for the Micrometer listener timers. * @param tags the tags. @@ -763,7 +800,7 @@ public void setConsumerStartTimeout(Duration consumerStartTimeout) { * @since 2.3.2 */ public boolean isSubBatchPerPartition() { - return this.subBatchPerPartition == null ? false : this.subBatchPerPartition; + return this.subBatchPerPartition != null && this.subBatchPerPartition; } /** @@ -857,8 +894,8 @@ public TransactionDefinition getTransactionDefinition() { /** * Set a transaction definition with properties (e.g. timeout) that will be copied to * the container's transaction template. Note that this is only generally useful when - * used with a {@link #setTransactionManager(PlatformTransactionManager) - * PlatformTransactionManager} that supports a custom definition; this does NOT + * used with a {@link #setKafkaAwareTransactionManager(KafkaAwareTransactionManager) + * KafkaAwareTransactionManager} that supports a custom definition; this does NOT * include the {@link org.springframework.kafka.transaction.KafkaTransactionManager} * which has no concept of transaction timeout. It can be useful to start, for example * a database transaction, in the container, rather than using {@code @Transactional} @@ -866,7 +903,7 @@ public TransactionDefinition getTransactionDefinition() { * can participate in the transaction. * @param transactionDefinition the definition. * @since 2.5.4 - * @see #setTransactionManager(PlatformTransactionManager) + * @see #setKafkaAwareTransactionManager(KafkaAwareTransactionManager) */ public void setTransactionDefinition(@Nullable TransactionDefinition transactionDefinition) { this.transactionDefinition = transactionDefinition; @@ -1059,6 +1096,7 @@ public String toString() { + "\n ackMode=" + this.ackMode + "\n ackCount=" + this.ackCount + "\n ackTime=" + this.ackTime + + "\n consumerStartTimeout=" + this.consumerStartTimeout + "\n messageListener=" + this.messageListener + (this.listenerTaskExecutor != null ? "\n listenerTaskExecutor=" + this.listenerTaskExecutor @@ -1071,24 +1109,35 @@ public String toString() { + (this.transactionManager != null ? "\n transactionManager=" + this.transactionManager : "") + + (this.kafkaAwareTransactionManager != null + ? "\n kafkaAwareTransactionManager=" + this.kafkaAwareTransactionManager + : "") + "\n monitorInterval=" + this.monitorInterval + (this.scheduler != null ? 
"\n scheduler=" + this.scheduler : "") + "\n noPollThreshold=" + this.noPollThreshold + + "\n pauseImmediate=" + this.pauseImmediate + "\n pollTimeoutWhilePaused=" + this.pollTimeoutWhilePaused + "\n subBatchPerPartition=" + this.subBatchPerPartition + "\n assignmentCommitOption=" + this.assignmentCommitOption + "\n deliveryAttemptHeader=" + this.deliveryAttemptHeader + + "\n batchRecoverAfterRollback=" + this.batchRecoverAfterRollback + "\n eosMode=" + this.eosMode + "\n transactionDefinition=" + this.transactionDefinition + "\n stopContainerWhenFenced=" + this.stopContainerWhenFenced + "\n stopImmediate=" + this.stopImmediate + "\n asyncAcks=" + this.asyncAcks + + "\n logContainerConfig=" + this.logContainerConfig + + "\n missingTopicsFatal=" + this.missingTopicsFatal + "\n idleBeforeDataMultiplier=" + this.idleBeforeDataMultiplier + + "\n idleBetweenPolls=" + this.idleBetweenPolls + "\n micrometerEnabled=" + this.micrometerEnabled + "\n observationEnabled=" + this.observationEnabled + (this.observationConvention != null ? "\n observationConvention=" + this.observationConvention : "") + + (this.observationRegistry != null + ? "\n observationRegistry=" + this.observationRegistry + : "") + "\n restartAfterAuthExceptions=" + this.restartAfterAuthExceptions + "\n]"; } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/DeadLetterPublishingRecoverer.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/DeadLetterPublishingRecoverer.java index 54400edb70..924e4ebceb 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/DeadLetterPublishingRecoverer.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/DeadLetterPublishingRecoverer.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2023 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -64,6 +64,8 @@ * * @author Gary Russell * @author Tomaz Fernandes + * @author Watlas R + * @author Borahm Lee * @since 2.2 * */ @@ -75,7 +77,7 @@ public class DeadLetterPublishingRecoverer extends ExceptionClassifier implement protected final LogAccessor logger = new LogAccessor(LogFactory.getLog(getClass())); // NOSONAR private static final BiFunction, Exception, TopicPartition> - DEFAULT_DESTINATION_RESOLVER = (cr, e) -> new TopicPartition(cr.topic() + ".DLT", cr.partition()); + DEFAULT_DESTINATION_RESOLVER = (cr, e) -> new TopicPartition(cr.topic() + "-dlt", cr.partition()); private static final long FIVE = 5L; @@ -570,7 +572,7 @@ private void sendOrThrow(ProducerRecord outRecord, private void maybeThrow(ConsumerRecord record, Exception exception) { String message = String.format("No destination returned for record %s and exception %s. " + - "failIfNoDestinationReturned: %s", KafkaUtils.format(record), exception, + "throwIfNoDestinationReturned: %s", KafkaUtils.format(record), exception, this.throwIfNoDestinationReturned); this.logger.warn(message); if (this.throwIfNoDestinationReturned) { @@ -1364,6 +1366,7 @@ public ExceptionInfo exceptionCauseFqcn(String exceptionCauseFqcn) { this.exceptionCauseFqcn = exceptionCauseFqcn; return this; } + /** * Sets the name of the header that will be used to store the keyExceptionMessage * of the original record. 
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/DefaultAfterRollbackProcessor.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/DefaultAfterRollbackProcessor.java index f14114f0f8..7f422bde3e 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/DefaultAfterRollbackProcessor.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/DefaultAfterRollbackProcessor.java @@ -51,6 +51,7 @@ * @author Gary Russell * @author Francois Rosiere * @author Wang Zhiyang + * @author Sanghyeok An * * @since 1.3.5 * @@ -210,7 +211,7 @@ public void processBatch(ConsumerRecords records, List offsets = new HashMap<>(); records.forEach(rec -> offsets.put(new TopicPartition(rec.topic(), rec.partition()), ListenerUtils.createOffsetAndMetadata(container, rec.offset() + 1))); - if (offsets.size() > 0 && this.kafkaTemplate != null && this.kafkaTemplate.isTransactional()) { + if (!offsets.isEmpty() && this.kafkaTemplate != null && this.kafkaTemplate.isTransactional()) { this.kafkaTemplate.sendOffsetsToTransaction(offsets, consumer.groupMetadata()); } clearThreadState(); diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/DeliveryAttemptAwareRetryListener.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/DeliveryAttemptAwareRetryListener.java new file mode 100644 index 0000000000..b35fbb371d --- /dev/null +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/DeliveryAttemptAwareRetryListener.java @@ -0,0 +1,64 @@ +/* + * Copyright 2021-2024 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.kafka.listener; + +import java.nio.ByteBuffer; + +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.common.header.internals.RecordHeader; + +import org.springframework.kafka.support.KafkaHeaders; + +/** + * The DeliveryAttemptAwareRetryListener class for {@link RetryListener} implementations. + * The DeliveryAttemptAwareRetryListener adds the {@link KafkaHeaders}.DELIVERY_ATTEMPT header + * to the record's headers when batch records fail and are retried. + * Note that DeliveryAttemptAwareRetryListener modifies the headers of the original record. + * + * @author Sanghyeok An + * @since 3.3 + */ + +public class DeliveryAttemptAwareRetryListener implements RetryListener { + + @Override + public void failedDelivery(ConsumerRecord record, Exception ex, int deliveryAttempt) { + // Pass + } + + /** + * Invoke after delivery failure for batch records. + * If the {@link KafkaHeaders}.DELIVERY_ATTEMPT header already exists in the {@link ConsumerRecord}'s headers, + * it will be removed. Then, the provided `deliveryAttempt` is added to the {@link ConsumerRecord}'s headers. + * @param records the records. + * @param ex the exception. + * @param deliveryAttempt the delivery attempt, if available. 
+ */ + @Override + public void failedDelivery(ConsumerRecords records, Exception ex, int deliveryAttempt) { + for (ConsumerRecord record : records) { + record.headers().remove(KafkaHeaders.DELIVERY_ATTEMPT); + + byte[] buff = new byte[4]; // NOSONAR (magic #) + ByteBuffer bb = ByteBuffer.wrap(buff); + bb.putInt(deliveryAttempt); + record.headers().add(new RecordHeader(KafkaHeaders.DELIVERY_ATTEMPT, buff)); + } + } + +} diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/ErrorHandlingUtils.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/ErrorHandlingUtils.java index 79d02a1183..ed3a7d2db7 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/ErrorHandlingUtils.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/ErrorHandlingUtils.java @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -49,6 +49,8 @@ * @author Gary Russell * @author Andrii Pelesh * @author Antonio Tomac + * @author Wang Zhiyang + * + * @since 2.8 * */ @@ -245,7 +247,7 @@ public static Exception findRootCause(Exception exception) { * @since 3.0.10 */ public static boolean checkDeserializer(ConsumerFactory consumerFactory, - Properties consumerOverrides, boolean isValue, ClassLoader classLoader) { + Properties consumerOverrides, boolean isValue, @Nullable ClassLoader classLoader) { Object deser = findDeserializerClass(consumerFactory, consumerOverrides, isValue); Class deserializer = null; diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/FailedBatchProcessor.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/FailedBatchProcessor.java index 3ee7da9f90..343b40b1b8 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/FailedBatchProcessor.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/FailedBatchProcessor.java @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -44,12 +44,12 @@ * the listener throws a {@link BatchListenerFailedException}, the offsets prior to the * failed record are committed and the remaining records have seeks performed. When the * retries are exhausted, the failed record is sent to the recoverer instead of being - * included in the seeks. If other exceptions are thrown processing is delegated to the - * fallback handler. + * included in the seeks. If other exceptions are thrown, the fallback handler takes over the processing. * * @author Gary Russell * @author Francois Rosiere * @author Wang Zhiyang + * @author Artem Bilan * @since 2.8 * */ @@ -63,10 +63,10 @@ public abstract class FailedBatchProcessor extends FailedRecordProcessor { * Construct an instance with the provided properties. * @param recoverer the recoverer. * @param backOff the back off. - * @param fallbackHandler the fall back handler. + * @param fallbackHandler the fallback handler.
*/ public FailedBatchProcessor(@Nullable BiConsumer, Exception> recoverer, BackOff backOff, - CommonErrorHandler fallbackHandler) { + CommonErrorHandler fallbackHandler) { this(recoverer, backOff, null, fallbackHandler); } @@ -76,11 +76,11 @@ public FailedBatchProcessor(@Nullable BiConsumer, Exception * @param recoverer the recoverer. * @param backOff the back off. * @param backOffHandler the {@link BackOffHandler} - * @param fallbackHandler the fall back handler. + * @param fallbackHandler the fallback handler. * @since 2.9 */ public FailedBatchProcessor(@Nullable BiConsumer, Exception> recoverer, BackOff backOff, - @Nullable BackOffHandler backOffHandler, CommonErrorHandler fallbackHandler) { + @Nullable BackOffHandler backOffHandler, CommonErrorHandler fallbackHandler) { super(recoverer, backOff, backOffHandler); this.fallbackBatchHandler = fallbackHandler; @@ -103,7 +103,7 @@ public void setLogLevel(Level logLevel) { } /** - * Set to false to not reclassify the exception if different from the previous + * Set to {@code false} to not reclassify the exception if different from the previous * failure. If the changed exception is classified as retryable, the existing back off * sequence is used; a new sequence is not started. Default true. Only applies when * the fallback batch error handler (for exceptions other than @@ -195,7 +195,7 @@ private void fallback(Exception thrownException, ConsumerRecords data, Con this.fallbackBatchHandler.handleBatch(thrownException, data, consumer, container, invokeListener); } - private int findIndex(ConsumerRecords data, ConsumerRecord record) { + private int findIndex(ConsumerRecords data, @Nullable ConsumerRecord record) { if (record == null) { return -1; } @@ -229,11 +229,18 @@ private ConsumerRecords seekOrRecover(Exception thrownException, @N remaining.add(datum); } } - if (offsets.size() > 0) { - commit(consumer, container, offsets); + + try { + if (!offsets.isEmpty()) { + commit(consumer, container, offsets); + } } + catch (Exception ex) { + // Ignore and follow with seek below + } + if (isSeekAfterError()) { - if (remaining.size() > 0) { + if (!remaining.isEmpty()) { SeekUtils.seekOrRecover(thrownException, remaining, consumer, container, false, getFailureTracker(), this.logger, getLogLevel()); ConsumerRecord recovered = remaining.get(0); @@ -247,7 +254,7 @@ private ConsumerRecords seekOrRecover(Exception thrownException, @N return ConsumerRecords.empty(); } else { - if (remaining.size() > 0) { + if (!remaining.isEmpty()) { try { if (getFailureTracker().recovered(remaining.get(0), thrownException, container, consumer)) { @@ -275,7 +282,7 @@ private ConsumerRecords seekOrRecover(Exception thrownException, @N } } - private void commit(Consumer consumer, MessageListenerContainer container, + private static void commit(Consumer consumer, MessageListenerContainer container, Map offsets) { ContainerProperties properties = container.getContainerProperties(); @@ -292,7 +299,7 @@ private void commit(Consumer consumer, MessageListenerContainer container, } @Nullable - private BatchListenerFailedException getBatchListenerFailedException(Throwable throwableArg) { + private static BatchListenerFailedException getBatchListenerFailedException(@Nullable Throwable throwableArg) { if (throwableArg == null || throwableArg instanceof BatchListenerFailedException) { return (BatchListenerFailedException) throwableArg; } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/KafkaMessageListenerContainer.java 
b/spring-kafka/src/main/java/org/springframework/kafka/listener/KafkaMessageListenerContainer.java index 0b57ac238f..fe18836a62 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/KafkaMessageListenerContainer.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/KafkaMessageListenerContainer.java @@ -38,6 +38,7 @@ import java.util.concurrent.BlockingQueue; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedDeque; import java.util.concurrent.CountDownLatch; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ScheduledFuture; @@ -48,6 +49,8 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; +import io.micrometer.observation.Observation; +import io.micrometer.observation.ObservationRegistry; import org.apache.kafka.clients.admin.AdminClientConfig; import org.apache.kafka.clients.consumer.CommitFailedException; import org.apache.kafka.clients.consumer.Consumer; @@ -73,8 +76,8 @@ import org.apache.kafka.common.header.Header; import org.apache.kafka.common.header.internals.RecordHeader; +import org.springframework.aop.support.AopUtils; import org.springframework.beans.BeanUtils; -import org.springframework.beans.factory.ObjectProvider; import org.springframework.context.ApplicationContext; import org.springframework.context.ApplicationEventPublisher; import org.springframework.core.log.LogAccessor; @@ -106,6 +109,8 @@ import org.springframework.kafka.listener.ContainerProperties.AssignmentCommitOption; import org.springframework.kafka.listener.ContainerProperties.EOSMode; import org.springframework.kafka.listener.adapter.AsyncRepliesAware; +import org.springframework.kafka.listener.adapter.KafkaBackoffAwareMessageListenerAdapter; +import org.springframework.kafka.listener.adapter.RecordMessagingMessageListenerAdapter; import org.springframework.kafka.support.Acknowledgment; import org.springframework.kafka.support.KafkaHeaders; import org.springframework.kafka.support.KafkaUtils; @@ -134,10 +139,6 @@ import org.springframework.util.ObjectUtils; import org.springframework.util.StringUtils; -import io.micrometer.observation.Observation; -import io.micrometer.observation.ObservationRegistry; - - /** * Single-threaded Message listener container using the Java {@link Consumer} supporting * auto-partition assignment or user-configured assignment. 
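Stepping back to the DeliveryAttemptAwareRetryListener added above: a minimal usage sketch (the bean wiring, topic, and listener id are illustrative, not part of this change). The retry listener is registered on a DefaultErrorHandler, and a batch listener can read the header back using the same 4-byte big-endian layout that ByteBuffer.putInt writes:

import java.nio.ByteBuffer;
import java.util.List;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.header.Header;

import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.listener.DefaultErrorHandler;
import org.springframework.kafka.listener.DeliveryAttemptAwareRetryListener;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.util.backoff.FixedBackOff;

public class DeliveryAttemptDemo {

	// Wire the new retry listener into the error handler used by the container factory.
	DefaultErrorHandler errorHandler() {
		DefaultErrorHandler handler = new DefaultErrorHandler(new FixedBackOff(1000L, 3));
		handler.setRetryListeners(new DeliveryAttemptAwareRetryListener());
		return handler;
	}

	@KafkaListener(id = "attempt-demo", topics = "demo-topic", batch = "true")
	void listen(List<ConsumerRecord<String, String>> records) {
		for (ConsumerRecord<String, String> rec : records) {
			Header attempt = rec.headers().lastHeader(KafkaHeaders.DELIVERY_ATTEMPT);
			if (attempt != null) {
				// Matches the writer side: a 4-byte big-endian int.
				int deliveryAttempt = ByteBuffer.wrap(attempt.value()).getInt();
				// e.g. branch to alternative handling once deliveryAttempt is high
			}
		}
	}

}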
@@ -163,6 +164,13 @@ * @author Daniel Gentes * @author Soby Chacko * @author Wang Zhiyang + * @author Raphael Rösch + * @author Christian Mergenthaler + * @author Mikael Carlstedt + * @author Borahm Lee + * @author Lokesh Alamuri + * @author Sanghyeok An + * @author Christian Fredriksson */ public class KafkaMessageListenerContainer // NOSONAR line count extends AbstractMessageListenerContainer implements ConsumerPauseResumeEventPublisher { @@ -173,8 +181,6 @@ public class KafkaMessageListenerContainer // NOSONAR line count private static final String RAWTYPES = "rawtypes"; - private static final int DEFAULT_ACK_TIME = 5000; - private static final Map CONSUMER_CONFIG_DEFAULTS = ConsumerConfig.configDef().defaultValues(); private final AbstractMessageListenerContainer thisOrParentContainer; @@ -277,13 +283,8 @@ public Collection getAssignedPartitions() { else if (partitionsListenerConsumer.assignedPartitions != null) { return Collections.unmodifiableCollection(partitionsListenerConsumer.assignedPartitions); } - else { - return null; - } - } - else { - return null; } + return null; } @Override @@ -293,9 +294,7 @@ public Map> getAssignmentsByClientId() { if (partitionsListenerConsumer != null) { return Collections.singletonMap(partitionsListenerConsumer.getClientId(), getAssignedPartitions()); } - else { - return null; - } + return null; } @Override @@ -305,8 +304,7 @@ public boolean isContainerPaused() { @Override public boolean isPartitionPaused(TopicPartition topicPartition) { - return this.listenerConsumer != null && this.listenerConsumer - .isPartitionPaused(topicPartition); + return this.listenerConsumer != null && this.listenerConsumer.isPartitionPaused(topicPartition); } @Override @@ -317,33 +315,28 @@ public boolean isInExpectedState() { @Override public void enforceRebalance() { this.thisOrParentContainer.enforceRebalanceRequested.set(true); - KafkaMessageListenerContainer.ListenerConsumer consumer = this.listenerConsumer; - if (consumer != null) { - consumer.wakeIfNecessary(); - } + consumerWakeIfNecessary(); } @Override public void pause() { super.pause(); - KafkaMessageListenerContainer.ListenerConsumer consumer = this.listenerConsumer; - if (consumer != null) { - consumer.wakeIfNecessary(); - } + consumerWakeIfNecessary(); } @Override public void resume() { super.resume(); - KafkaMessageListenerContainer.ListenerConsumer consumer = this.listenerConsumer; - if (consumer != null) { - consumer.wakeIfNecessary(); - } + consumerWakeIfNecessary(); } @Override public void resumePartition(TopicPartition topicPartition) { super.resumePartition(topicPartition); + consumerWakeIfNecessary(); + } + + private void consumerWakeIfNecessary() { KafkaMessageListenerContainer.ListenerConsumer consumer = this.listenerConsumer; if (consumer != null) { consumer.wakeIfNecessary(); @@ -369,7 +362,6 @@ protected void doStart() { checkTopics(); } ContainerProperties containerProperties = getContainerProperties(); - checkAckMode(containerProperties); Object messageListener = containerProperties.getMessageListener(); AsyncTaskExecutor consumerExecutor = containerProperties.getListenerTaskExecutor(); @@ -380,14 +372,15 @@ protected void doStart() { } GenericMessageListener listener = (GenericMessageListener) messageListener; ListenerType listenerType = determineListenerType(listener); - ObservationRegistry observationRegistry = ObservationRegistry.NOOP; - ApplicationContext applicationContext = getApplicationContext(); - if (applicationContext != null && containerProperties.isObservationEnabled()) { - 
ObjectProvider registry = - applicationContext.getBeanProvider(ObservationRegistry.class); - ObservationRegistry reg = registry.getIfUnique(); - if (reg != null) { - observationRegistry = reg; + ObservationRegistry observationRegistry = containerProperties.getObservationRegistry(); + if (observationRegistry.isNoop()) { + ApplicationContext applicationContext = getApplicationContext(); + if (applicationContext != null && containerProperties.isObservationEnabled()) { + ObservationRegistry reg = applicationContext.getBeanProvider(ObservationRegistry.class) + .getIfUnique(); + if (reg != null) { + observationRegistry = reg; + } } } this.listenerConsumer = new ListenerConsumer(listener, listenerType, observationRegistry); @@ -408,29 +401,12 @@ protected void doStart() { } } - private void checkAckMode(ContainerProperties containerProperties) { - if (!this.consumerFactory.isAutoCommit()) { - AckMode ackMode = containerProperties.getAckMode(); - if (ackMode.equals(AckMode.COUNT) || ackMode.equals(AckMode.COUNT_TIME)) { - Assert.state(containerProperties.getAckCount() > 0, "'ackCount' must be > 0"); - } - if ((ackMode.equals(AckMode.TIME) || ackMode.equals(AckMode.COUNT_TIME)) - && containerProperties.getAckTime() == 0) { - containerProperties.setAckTime(DEFAULT_ACK_TIME); - } - } - } - private ListenerType determineListenerType(GenericMessageListener listener) { - ListenerType listenerType = ListenerUtils.determineListenerType(listener); - if (listener instanceof DelegatingMessageListener) { - Object delegating = listener; - while (delegating instanceof DelegatingMessageListener dml) { - delegating = dml.getDelegate(); - } - listenerType = ListenerUtils.determineListenerType(delegating); + Object delegating = listener; + while (delegating instanceof DelegatingMessageListener dml) { + delegating = dml.getDelegate(); } - return listenerType; + return ListenerUtils.determineListenerType(delegating); } @Override @@ -625,7 +601,6 @@ public String toString() { + "]"; } - private final class ListenerConsumer implements SchedulingAwareRunnable, ConsumerSeekCallback { private static final String COMMITTING = "Committing: "; @@ -648,7 +623,7 @@ private final class ListenerConsumer implements SchedulingAwareRunnable, Consume private final Consumer consumer; - private final Map> offsets = new LinkedHashMap<>(); + private final Map offsets = new LinkedHashMap<>(); private final Collection assignedPartitions = new LinkedHashSet<>(); @@ -678,12 +653,16 @@ private final class ListenerConsumer implements SchedulingAwareRunnable, Consume private final boolean autoCommit; + private final AckMode ackMode; + private final boolean isManualAck; private final boolean isCountAck; private final boolean isTimeOnlyAck; + private final boolean isTimeAck; + private final boolean isManualImmediateAck; private final boolean isAnyManualAck; @@ -696,16 +675,20 @@ private final class ListenerConsumer implements SchedulingAwareRunnable, Consume private final CommonErrorHandler commonErrorHandler; - private final PlatformTransactionManager transactionManager = this.containerProperties.getTransactionManager(); + @Deprecated(since = "3.2", forRemoval = true) + @SuppressWarnings("removal") + private final PlatformTransactionManager transactionManager = + this.containerProperties.getKafkaAwareTransactionManager() != null ? 
+ this.containerProperties.getKafkaAwareTransactionManager() : + this.containerProperties.getTransactionManager(); - @SuppressWarnings(RAWTYPES) - private final KafkaAwareTransactionManager kafkaTxManager = - this.transactionManager instanceof KafkaAwareTransactionManager - ? ((KafkaAwareTransactionManager) this.transactionManager) : null; + private final KafkaAwareTransactionManager kafkaTxManager = + this.transactionManager instanceof KafkaAwareTransactionManager kafkaAwareTransactionManager ? + kafkaAwareTransactionManager : null; private final TransactionTemplate transactionTemplate; - private final String consumerGroupId = getGroupId(); + private final String consumerGroupId = KafkaMessageListenerContainer.this.getGroupId(); private final TaskScheduler taskScheduler; @@ -758,9 +741,11 @@ private final class ListenerConsumer implements SchedulingAwareRunnable, Consume private final MicrometerHolder micrometerHolder; + private final boolean observationEnabled; + private final AtomicBoolean polling = new AtomicBoolean(); - private final boolean subBatchPerPartition; + private final boolean subBatchPerPartition = this.containerProperties.isSubBatchPerPartition(); private final Duration authExceptionRetryInterval = this.containerProperties.getAuthExceptionRetryInterval(); @@ -840,8 +825,6 @@ private final class ListenerConsumer implements SchedulingAwareRunnable, Consume private Producer producer; - private boolean commitRecovered; - private boolean wasIdle; private boolean batchFailed; @@ -862,29 +845,27 @@ private final class ListenerConsumer implements SchedulingAwareRunnable, Consume private volatile long lastPoll = System.currentTimeMillis(); + private final ConcurrentLinkedDeque> failedRecords = new ConcurrentLinkedDeque<>(); + @SuppressWarnings(UNCHECKED) ListenerConsumer(GenericMessageListener listener, ListenerType listenerType, ObservationRegistry observationRegistry) { this.asyncReplies = listener instanceof AsyncRepliesAware hmd && hmd.isAsyncReplies() || this.containerProperties.isAsyncAcks(); - AckMode ackMode = determineAckMode(); - this.isManualAck = ackMode.equals(AckMode.MANUAL); - this.isCountAck = ackMode.equals(AckMode.COUNT) - || ackMode.equals(AckMode.COUNT_TIME); - this.isTimeOnlyAck = ackMode.equals(AckMode.TIME); - this.isManualImmediateAck = - ackMode.equals(AckMode.MANUAL_IMMEDIATE); + this.ackMode = determineAckMode(); + this.isCountAck = AckMode.COUNT.equals(this.ackMode) + || AckMode.COUNT_TIME.equals(this.ackMode); + this.isTimeOnlyAck = AckMode.TIME.equals(this.ackMode); + this.isTimeAck = this.isTimeOnlyAck + || AckMode.COUNT_TIME.equals(this.ackMode); + this.isManualAck = AckMode.MANUAL.equals(this.ackMode); + this.isManualImmediateAck = AckMode.MANUAL_IMMEDIATE.equals(this.ackMode); this.isAnyManualAck = this.isManualAck || this.isManualImmediateAck; - this.isRecordAck = ackMode.equals(AckMode.RECORD); - this.offsetsInThisBatch = - this.isAnyManualAck && this.asyncReplies - ? new ConcurrentHashMap<>() - : null; - this.deferredOffsets = - this.isAnyManualAck && this.asyncReplies - ? new ConcurrentHashMap<>() - : null; + this.isRecordAck = this.ackMode.equals(AckMode.RECORD); + boolean isOutOfCommit = this.isAnyManualAck && this.asyncReplies; + this.offsetsInThisBatch = isOutOfCommit ? new ConcurrentHashMap<>() : null; + this.deferredOffsets = isOutOfCommit ? 
new ConcurrentHashMap<>() : null; this.observationRegistry = observationRegistry; Properties consumerProperties = propertiesFromConsumerPropertyOverrides(); @@ -912,6 +893,7 @@ private final class ListenerConsumer implements SchedulingAwareRunnable, Consume this.isBatchListener = true; this.wantsFullRecords = this.batchListener.wantsPollResult(); this.pollThreadStateProcessor = setUpPollProcessor(true); + this.observationEnabled = false; } else if (listener instanceof MessageListener) { this.listener = (MessageListener) listener; @@ -919,6 +901,17 @@ else if (listener instanceof MessageListener) { this.isBatchListener = false; this.wantsFullRecords = false; this.pollThreadStateProcessor = setUpPollProcessor(false); + this.observationEnabled = this.containerProperties.isObservationEnabled(); + + if (!AopUtils.isAopProxy(this.genericListener) && + this.genericListener instanceof KafkaBackoffAwareMessageListenerAdapter) { + KafkaBackoffAwareMessageListenerAdapter genListener = + (KafkaBackoffAwareMessageListenerAdapter) this.genericListener; + if (genListener.getDelegate() instanceof RecordMessagingMessageListenerAdapter adapterListener) { + // This means that the async retry feature is supported only for SingleRecordListener with @RetryableTopic. + adapterListener.setCallbackForAsyncFailure(this::callbackForAsyncFailure); + } + } } else { throw new IllegalArgumentException("Listener must be one of 'MessageListener', " @@ -968,11 +961,14 @@ else if (listener instanceof MessageListener) { this.maxPollInterval = obtainMaxPollInterval(consumerProperties); this.micrometerHolder = obtainMicrometerHolder(); this.deliveryAttemptAware = setupDeliveryAttemptAware(); - this.subBatchPerPartition = setupSubBatchPerPartition(); this.lastReceivePartition = new HashMap<>(); this.lastAlertPartition = new HashMap<>(); this.wasIdlePartition = new HashMap<>(); this.kafkaAdmin = obtainAdmin(); + + if (isListenerAdapterObservationAware()) { + ((RecordMessagingMessageListenerAdapter) this.listener).setObservationRegistry(observationRegistry); + } } private AckMode determineAckMode() { @@ -999,7 +995,7 @@ private Object determineBootstrapServers(Properties consumerProperties) { @Nullable private KafkaAdmin obtainAdmin() { KafkaAdmin customAdmin = KafkaMessageListenerContainer.this.thisOrParentContainer.getKafkaAdmin(); - if (customAdmin == null && this.containerProperties.isObservationEnabled()) { + if (customAdmin == null && this.observationEnabled) { ApplicationContext applicationContext = getApplicationContext(); if (applicationContext != null) { KafkaAdmin admin = applicationContext.getBeanProvider(KafkaAdmin.class).getIfUnique(); @@ -1079,14 +1075,6 @@ private void checkGroupInstance(Properties properties, ConsumerFactory con } } - private boolean setupSubBatchPerPartition() { - Boolean subBatching = this.containerProperties.getSubBatchPerPartition(); - if (subBatching != null) { - return subBatching; - } - return false; - } - @Nullable private DeliveryAttemptAware setupDeliveryAttemptAware() { DeliveryAttemptAware aware = null; @@ -1201,8 +1189,7 @@ else if (autoCommitOverride != null) { isAutoCommit = KafkaMessageListenerContainer.this.consumerFactory.isAutoCommit(); } Assert.state(!this.isAnyManualAck || !isAutoCommit, - () -> "Consumer cannot be configured for auto commit for ackMode " - + this.containerProperties.getAckMode()); + () -> "Consumer cannot be configured for auto commit for ackMode " + this.ackMode); return isAutoCommit; } @@ -1241,6 +1228,10 @@ else if (timeout instanceof String str) { } } + 
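The setCallbackForAsyncFailure(...) wiring just above is what feeds the failedRecords deque; per the inline comment, only single-record @RetryableTopic listeners take this path. A listener that can produce such failures is one with an asynchronous return type, e.g. (a sketch; names are illustrative):

import java.util.concurrent.CompletableFuture;

import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.RetryableTopic;

public class AsyncRetryDemo {

	@RetryableTopic(attempts = "3")
	@KafkaListener(id = "async-demo", topics = "async-demo-topic")
	CompletableFuture<Void> listen(String payload) {
		// If the returned future completes exceptionally, the failure is handed to
		// callbackForAsyncFailure(...) and replayed by handleAsyncFailure() on a
		// later poll loop instead of being lost.
		return CompletableFuture.runAsync(() -> process(payload));
	}

	private void process(String payload) {
		// business logic
	}

}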
private boolean isListenerAdapterObservationAware() { + return this.listener != null && RecordMessagingMessageListenerAdapter.class.equals(this.listener.getClass()); + } + private void subscribeOrAssignTopics(final Consumer subscribingConsumer) { if (KafkaMessageListenerContainer.this.topicPartitions == null) { ConsumerRebalanceListener rebalanceListener = new ListenerConsumerRebalanceListener(); @@ -1279,7 +1270,7 @@ private MicrometerHolder obtainMicrometerHolder() { MicrometerHolder holder = null; try { if (KafkaUtils.MICROMETER_PRESENT && this.containerProperties.isMicrometerEnabled() - && !this.containerProperties.isObservationEnabled()) { + && !this.observationEnabled) { Function> mergedProvider = cr -> this.containerProperties.getMicrometerTags(); @@ -1328,6 +1319,15 @@ public void run() { boolean failedAuthRetry = false; this.lastReceive = System.currentTimeMillis(); while (isRunning()) { + + try { + handleAsyncFailure(); + } + catch (Exception e) { + ListenerConsumer.this.logger.error( + "Failed to process async retry messages; skipping this attempt, they will be retried on the next poll loop."); + } + try { pollAndInvoke(); if (failedAuthRetry) { @@ -1398,11 +1398,12 @@ protected void initialize() { } publishConsumerStartingEvent(); this.consumerThread = Thread.currentThread(); - setupSeeks(); KafkaUtils.setConsumerGroupId(this.consumerGroupId); + setupSeeks(); this.count = 0; this.last = System.currentTimeMillis(); initAssignedPartitions(); + KafkaMessageListenerContainer.this.thisOrParentContainer.childStarted(KafkaMessageListenerContainer.this); publishConsumerStartedEvent(); } @@ -1468,6 +1469,33 @@ protected void pollAndInvoke() { } } + protected void handleAsyncFailure() { + List> copyFailedRecords = new ArrayList<>(this.failedRecords); + + // Using failedRecords.clear() to remove the copied records could race with + // concurrent additions; this thread could then drop records it never copied. + // Instead, poll exactly as many records as were copied. + int capturedRecordsCount = copyFailedRecords.size(); + for (int i = 0; i < capturedRecordsCount; i++) { + this.failedRecords.pollFirst(); + } + + // If any copied failed record cannot be completed due to an unexpected error, + // give up on retrying the remaining copied records. + for (FailedRecordTuple copyFailedRecord : copyFailedRecords) { + try { + invokeErrorHandlerBySingleRecord(copyFailedRecord); + } + catch (Exception e) { + this.logger.warn(() -> + "Async failed record failed to complete, thus skip it. 
record :" + + copyFailedRecord.toString() + + ", Exception : " + + e.getMessage()); + } + } + } + private void doProcessCommits() { if (!this.autoCommit && !this.isRecordAck) { try { @@ -1578,7 +1606,7 @@ private void fixTxOffsetsIfNeeded() { this.lastCommits.forEach((tp, oamd) -> { long position = this.consumer.position(tp); Long saved = this.savedPositions.get(tp); - if (saved != null && saved.longValue() != position) { + if (saved != null && saved != position) { this.logger.debug(() -> "Skipping TX offset correction - seek(s) have been performed; " + "saved: " + this.savedPositions + ", " + "committed: " + oamd + ", " @@ -1592,18 +1620,11 @@ private void fixTxOffsetsIfNeeded() { if (!toFix.isEmpty()) { this.logger.debug(() -> "Fixing TX offsets: " + toFix); if (this.kafkaTxManager == null) { - if (this.syncCommits) { - commitSync(toFix); - } - else { - commitAsync(toFix); - } + commitOffsets(toFix); } else { this.transactionTemplate.executeWithoutResult(status -> { - doSendOffsets(((KafkaResourceHolder) TransactionSynchronizationManager - .getResource(this.kafkaTxManager.getProducerFactory())) - .getProducer(), toFix); + doSendOffsets(getTxProducer(), toFix); }); } } @@ -1949,7 +1970,7 @@ private void wrapUp(@Nullable Throwable throwable) { this.consumerSeekAwareListener.onPartitionsRevoked(partitions); this.consumerSeekAwareListener.unregisterSeekCallback(); } - this.logger.info(() -> getGroupId() + ": Consumer stopped"); + this.logger.info(() -> this.consumerGroupId + ": Consumer stopped"); publishConsumerStoppedEvent(throwable); } @@ -2080,7 +2101,7 @@ private synchronized void ackInOrder(ConsumerRecord cRecord) { offs.remove(0); ConsumerRecord recordToAck = cRecord; if (!deferred.isEmpty()) { - Collections.sort(deferred, (a, b) -> Long.compare(a.offset(), b.offset())); + deferred.sort((a, b) -> Long.compare(a.offset(), b.offset())); while (!ObjectUtils.isEmpty(deferred) && deferred.get(0).offset() == recordToAck.offset() + 1) { recordToAck = deferred.remove(0); offs.remove(0); @@ -2107,19 +2128,8 @@ else if (cRecord.offset() < offs.get(0)) { } private void ackImmediate(ConsumerRecord cRecord) { - Map commits = Collections.singletonMap( - new TopicPartition(cRecord.topic(), cRecord.partition()), - createOffsetAndMetadata(cRecord.offset() + 1)); - this.commitLogger.log(() -> COMMITTING + commits); - if (this.producer != null) { - doSendOffsets(this.producer, commits); - } - else if (this.syncCommits) { - commitSync(commits); - } - else { - commitAsync(commits); - } + Map commits = buildSingleCommits(cRecord); + commitOffsetsInTransactions(commits); } private void ackImmediate(ConsumerRecords records) { @@ -2128,25 +2138,7 @@ private void ackImmediate(ConsumerRecords records) { commits.put(part, createOffsetAndMetadata(records.records(part) .get(records.records(part).size() - 1).offset() + 1)); } - this.commitLogger.log(() -> COMMITTING + commits); - if (this.producer != null) { - doSendOffsets(this.producer, commits); - } - else if (this.syncCommits) { - commitSync(commits); - } - else { - commitAsync(commits); - } - } - - private void commitAsync(Map commits) { - this.consumer.commitAsync(commits, (offsetsAttempted, exception) -> { - this.commitCallback.onComplete(offsetsAttempted, exception); - if (exception == null && this.fixTxOffsets) { - this.lastCommits.putAll(commits); - } - }); + commitOffsetsInTransactions(commits); } private void invokeListener(final ConsumerRecords records) { @@ -2177,7 +2169,6 @@ private void invokeBatchListener(final ConsumerRecords recordsArg) { } } - 
@SuppressWarnings(RAWTYPES) private void invokeBatchListenerInTx(final ConsumerRecords records, @Nullable final List> recordList) { @@ -2187,9 +2178,7 @@ private void invokeBatchListenerInTx(final ConsumerRecords records, @Override public void doInTransactionWithoutResult(TransactionStatus s) { if (ListenerConsumer.this.kafkaTxManager != null) { - ListenerConsumer.this.producer = ((KafkaResourceHolder) TransactionSynchronizationManager - .getResource(ListenerConsumer.this.kafkaTxManager.getProducerFactory())) - .getProducer(); // NOSONAR nullable + ListenerConsumer.this.producer = getTxProducer(); } RuntimeException aborted = doInvokeBatchListener(records, recordList); if (aborted != null) { @@ -2264,12 +2253,8 @@ private List> createRecordList(final ConsumerRecords @Nullable private RuntimeException doInvokeBatchListener(final ConsumerRecords records, // NOSONAR List> recordList) { - - Object sample = startMicrometerSample(); try { invokeBatchOnMessage(records, recordList); - batchInterceptAfter(records, null); - successTimer(sample, null); if (this.batchFailed) { this.batchFailed = false; if (this.commonErrorHandler != null) { @@ -2282,16 +2267,17 @@ private RuntimeException doInvokeBatchListener(final ConsumerRecords recor } } catch (RuntimeException e) { - failureTimer(sample, null); - batchInterceptAfter(records, e); if (this.commonErrorHandler == null) { throw e; } try { - this.batchFailed = true; invokeBatchErrorHandler(records, recordList, e); commitOffsetsIfNeededAfterHandlingError(records); } + catch (RecordInRetryException rire) { + this.logger.info("Record in retry and not yet recovered"); + return rire; + } catch (KafkaException ke) { ke.selfLog(ERROR_HANDLER_THREW_AN_EXCEPTION, this.logger); return ke; @@ -2316,9 +2302,7 @@ private void commitOffsetsIfNeededAfterHandlingError(final ConsumerRecords || this.producer != null) { if (this.remainingRecords != null) { ConsumerRecord firstUncommitted = this.remainingRecords.iterator().next(); - Iterator> it = records.iterator(); - while (it.hasNext()) { - ConsumerRecord next = it.next(); + for (ConsumerRecord next : records) { if (!next.equals(firstUncommitted)) { this.acks.add(next); } @@ -2438,15 +2422,26 @@ private void invokeBatchOnMessageWithRecordsOrList(final ConsumerRecords r recordList = createRecordList(records); } } - if (this.wantsFullRecords) { - this.batchListener.onMessage(records, // NOSONAR - this.isAnyManualAck - ? new ConsumerBatchAcknowledgment(records, recordList) - : null, - this.consumer); + Object sample = startMicrometerSample(); + try { + if (this.wantsFullRecords) { + this.batchListener.onMessage(records, // NOSONAR + this.isAnyManualAck + ? new ConsumerBatchAcknowledgment(records, recordList) + : null, + this.consumer); + } + else { + doInvokeBatchOnMessage(records, recordList); // NOSONAR + } + batchInterceptAfter(records, null); + successTimer(sample, null); } - else { - doInvokeBatchOnMessage(records, recordList); // NOSONAR + catch (RuntimeException e) { + this.batchFailed = true; + failureTimer(sample, null); + batchInterceptAfter(records, e); + throw e; } } @@ -2508,7 +2503,6 @@ private void invokeRecordListener(final ConsumerRecords records) { * Invoke the listener with each record in a separate transaction. * @param records the records. 
*/ - @SuppressWarnings(RAWTYPES) // NOSONAR complexity private void invokeRecordListenerInTx(final ConsumerRecords records) { Iterator> iterator = records.iterator(); while (iterator.hasNext()) { @@ -2553,9 +2547,7 @@ private void invokeInTransaction(Iterator> iterator, final @Override public void doInTransactionWithoutResult(TransactionStatus s) { if (ListenerConsumer.this.kafkaTxManager != null) { - ListenerConsumer.this.producer = ((KafkaResourceHolder) TransactionSynchronizationManager - .getResource(ListenerConsumer.this.kafkaTxManager.getProducerFactory())) - .getProducer(); // NOSONAR + ListenerConsumer.this.producer = getTxProducer(); } RuntimeException aborted = doInvokeRecordListener(cRecord, iterator); if (aborted != null) { @@ -2658,6 +2650,7 @@ private ConsumerRecords checkEarlyIntercept(ConsumerRecords nextArg) catch (InterruptedException e) { Thread.currentThread().interrupt(); } + this.earlyBatchInterceptor.success(nextArg, this.consumer); } } return next; @@ -2673,6 +2666,8 @@ private ConsumerRecord checkEarlyIntercept(ConsumerRecord recordArg) this.logger.debug(() -> "RecordInterceptor returned null, skipping: " + KafkaUtils.format(recordArg)); ackCurrent(recordArg); + this.earlyRecordInterceptor.success(recordArg, this.consumer); + this.earlyRecordInterceptor.afterRecord(recordArg, this.consumer); } } return cRecord; @@ -2744,6 +2739,13 @@ private void pauseForNackSleep() { this.nackSleepDurationMillis = -1; } + @SuppressWarnings(RAWTYPES) + private Producer getTxProducer() { + return ((KafkaResourceHolder) TransactionSynchronizationManager + .getResource(ListenerConsumer.this.kafkaTxManager.getProducerFactory())) + .getProducer(); // NOSONAR + } + /** * Actually invoke the listener. * @param cRecord the record. @@ -2759,59 +2761,69 @@ private RuntimeException doInvokeRecordListener(final ConsumerRecord cReco Observation observation = KafkaListenerObservation.LISTENER_OBSERVATION.observation( this.containerProperties.getObservationConvention(), DefaultKafkaListenerObservationConvention.INSTANCE, - () -> new KafkaRecordReceiverContext(cRecord, getListenerId(), this::clusterId), + () -> new KafkaRecordReceiverContext(cRecord, getListenerId(), getClientId(), this.consumerGroupId, + this::clusterId), this.observationRegistry); - return observation.observe(() -> { + + observation.start(); + Observation.Scope observationScope = observation.openScope(); + // We cannot use 'try-with-resource' because the resource is closed just before catch block + try { + invokeOnMessage(cRecord); + successTimer(sample, cRecord); + recordInterceptAfter(cRecord, null); + } + catch (RuntimeException e) { + failureTimer(sample, cRecord); + recordInterceptAfter(cRecord, e); + if (!isListenerAdapterObservationAware()) { + observation.error(e); + } + if (this.commonErrorHandler == null) { + throw e; + } try { - invokeOnMessage(cRecord); - successTimer(sample, cRecord); - recordInterceptAfter(cRecord, null); + invokeErrorHandler(cRecord, iterator, e); + commitOffsetsIfNeededAfterHandlingError(cRecord); } - catch (RuntimeException e) { - failureTimer(sample, cRecord); - recordInterceptAfter(cRecord, e); - if (this.commonErrorHandler == null) { - throw e; - } - try { - invokeErrorHandler(cRecord, iterator, e); - commitOffsetsIfNeededAfterHandlingError(cRecord); - } - catch (KafkaException ke) { - ke.selfLog(ERROR_HANDLER_THREW_AN_EXCEPTION, this.logger); - return ke; - } - catch (RuntimeException ee) { - this.logger.error(ee, ERROR_HANDLER_THREW_AN_EXCEPTION); - return ee; - } - catch (Error er) { // 
NOSONAR - this.logger.error(er, "Error handler threw an error"); - throw er; - } + catch (RecordInRetryException rire) { + this.logger.info("Record in retry and not yet recovered"); + return rire; } - return null; - }); + catch (KafkaException ke) { + ke.selfLog(ERROR_HANDLER_THREW_AN_EXCEPTION, this.logger); + return ke; + } + catch (RuntimeException ee) { + this.logger.error(ee, ERROR_HANDLER_THREW_AN_EXCEPTION); + return ee; + } + catch (Error er) { // NOSONAR + this.logger.error(er, "Error handler threw an error"); + throw er; + } + } + finally { + if (!isListenerAdapterObservationAware()) { + observation.stop(); + } + observationScope.close(); + } + return null; } private void commitOffsetsIfNeededAfterHandlingError(final ConsumerRecord cRecord) { if ((!this.autoCommit && this.commonErrorHandler.isAckAfterHandle() && this.consumerGroupId != null) || this.producer != null) { - if (this.isManualAck) { - this.commitRecovered = true; - } if (this.remainingRecords == null || !cRecord.equals(this.remainingRecords.iterator().next())) { if (this.offsetsInThisBatch != null) { // NOSONAR (sync) ackInOrder(cRecord); } else { - ackCurrent(cRecord); + ackCurrent(cRecord, this.isManualAck); } } - if (this.isManualAck) { - this.commitRecovered = false; - } } } @@ -2886,6 +2898,44 @@ private void doInvokeOnMessage(final ConsumerRecord recordArg) { } } + private void invokeErrorHandlerBySingleRecord(FailedRecordTuple failedRecordTuple) { + final ConsumerRecord cRecord = failedRecordTuple.record; + RuntimeException rte = failedRecordTuple.ex; + if (this.commonErrorHandler.seeksAfterHandling() || rte instanceof CommitFailedException) { + try { + if (this.producer == null) { + processCommits(); + } + } + catch (Exception ex) { // NO SONAR + this.logger.error(ex, "Failed to commit before handling error"); + } + List> records = new ArrayList<>(); + records.add(cRecord); + this.commonErrorHandler.handleRemaining(rte, records, this.consumer, + KafkaMessageListenerContainer.this.thisOrParentContainer); + } + else { + boolean handled = false; + try { + handled = this.commonErrorHandler.handleOne(rte, cRecord, this.consumer, + KafkaMessageListenerContainer.this.thisOrParentContainer); + } + catch (Exception ex) { + this.logger.error(ex, "ErrorHandler threw unexpected exception"); + } + Map>> records = new LinkedHashMap<>(); + if (!handled) { + records.computeIfAbsent(new TopicPartition(cRecord.topic(), cRecord.partition()), + tp -> new ArrayList<>()).add(cRecord); + } + if (!records.isEmpty()) { + this.remainingRecords = new ConsumerRecords<>(records); + this.pauseForPending = true; + } + } + } + private void invokeErrorHandler(final ConsumerRecord cRecord, Iterator> iterator, RuntimeException rte) { @@ -2918,11 +2968,11 @@ private void invokeErrorHandler(final ConsumerRecord cRecord, Map>> records = new LinkedHashMap<>(); if (!handled) { records.computeIfAbsent(new TopicPartition(cRecord.topic(), cRecord.partition()), - tp -> new ArrayList>()).add(cRecord); + tp -> new ArrayList<>()).add(cRecord); while (iterator.hasNext()) { ConsumerRecord next = iterator.next(); records.computeIfAbsent(new TopicPartition(next.topic(), next.partition()), - tp -> new ArrayList>()).add(next); + tp -> new ArrayList<>()).add(next); } } if (!records.isEmpty()) { @@ -2993,31 +3043,22 @@ public void checkDeser(final ConsumerRecord cRecord, String headerName) { } public void ackCurrent(final ConsumerRecord cRecord) { + ackCurrent(cRecord, false); + } - if (this.isRecordAck) { - Map offsetsToCommit = - Collections.singletonMap(new 
TopicPartition(cRecord.topic(), cRecord.partition()), - createOffsetAndMetadata(cRecord.offset() + 1)); - if (this.producer == null) { - this.commitLogger.log(() -> COMMITTING + offsetsToCommit); - if (this.syncCommits) { - commitSync(offsetsToCommit); - } - else { - commitAsync(offsetsToCommit); - } - } - else { - this.acks.add(cRecord); - } + public void ackCurrent(final ConsumerRecord cRecord, boolean commitRecovered) { + if (this.isRecordAck && this.producer == null) { + Map offsetsToCommit = buildSingleCommits(cRecord); + this.commitLogger.log(() -> COMMITTING + offsetsToCommit); + commitOffsets(offsetsToCommit); } - else if (this.producer != null - || ((!this.isAnyManualAck || this.commitRecovered) && !this.autoCommit)) { + else if (this.producer != null) { this.acks.add(cRecord); - } - if (this.producer != null) { sendOffsetsToTransaction(); } + else if (!this.autoCommit && (!this.isAnyManualAck || commitRecovered)) { + this.acks.add(cRecord); + } } private void sendOffsetsToTransaction() { @@ -3027,8 +3068,10 @@ private void sendOffsetsToTransaction() { doSendOffsets(this.producer, commits); } - @SuppressWarnings("deprecation") private void doSendOffsets(Producer prod, Map commits) { + if (CollectionUtils.isEmpty(commits)) { + return; + } prod.sendOffsetsToTransaction(commits, this.consumer.groupMetadata()); if (this.fixTxOffsets) { this.lastCommits.putAll(commits); @@ -3038,74 +3081,86 @@ private void doSendOffsets(Producer prod, Map= this.containerProperties.getAckCount(); - if ((!this.isTimeOnlyAck && !this.isCountAck) || countExceeded) { - if (this.isCountAck) { - this.logger.debug(() -> "Committing in " + ackMode.name() + " because count " - + this.count - + " exceeds configured limit of " + this.containerProperties.getAckCount()); - } - commitIfNecessary(); - this.count = 0; - } - else { - timedAcks(ackMode); - } + if (this.isCountAck) { + countAcks(); + } + else if (this.isTimeAck) { + timedAcks(); + } + else if (!this.isManualImmediateAck) { + commitIfNecessary(); + this.count = 0; } } - private void timedAcks(AckMode ackMode) { - long now; - now = System.currentTimeMillis(); - boolean elapsed = now - this.last > this.containerProperties.getAckTime(); - if (AckMode.TIME.equals(ackMode) && elapsed) { - this.logger.debug(() -> "Committing in AckMode.TIME " + - "because time elapsed exceeds configured limit of " + - this.containerProperties.getAckTime()); + private void countAcks() { + boolean countExceeded = this.isCountAck && this.count >= this.containerProperties.getAckCount(); + if (countExceeded) { + this.logger.debug(() -> "Committing in " + this.ackMode.name() + " because count " + + this.count + + " exceeds configured limit of " + this.containerProperties.getAckCount()); commitIfNecessary(); - this.last = now; + this.count = 0; + if (AckMode.COUNT_TIME.equals(this.ackMode)) { + this.last = System.currentTimeMillis(); + } } - else if (AckMode.COUNT_TIME.equals(ackMode) && elapsed) { - this.logger.debug(() -> "Committing in AckMode.COUNT_TIME " + + } + + private void timedAcks() { + long now = System.currentTimeMillis(); + boolean elapsed = this.isTimeAck && now - this.last > this.containerProperties.getAckTime(); + if (elapsed) { + this.logger.debug(() -> "Committing in " + this.ackMode.name() + " " + "because time elapsed exceeds configured limit of " + this.containerProperties.getAckTime()); commitIfNecessary(); this.last = now; - this.count = 0; + if (AckMode.COUNT_TIME.equals(this.ackMode)) { + this.count = 0; + } + } + } + + private boolean
checkPartitionAssignedBeforeSeek(@Nullable Collection assigned, TopicPartition topicPartition) { + if (assigned != null && assigned.contains(topicPartition)) { + return true; } + this.logger.warn("No current assignment for partition '" + topicPartition + + "' due to partition reassignment prior to seeking."); + return false; } private void processSeeks() { - processTimestampSeeks(); + Collection assigned = getAssignedPartitions(); + processTimestampSeeks(assigned); TopicPartitionOffset offset = this.seeks.poll(); while (offset != null) { traceSeek(offset); try { + TopicPartition topicPartition = offset.getTopicPartition(); + if (!checkPartitionAssignedBeforeSeek(assigned, topicPartition)) { + offset = this.seeks.poll(); + continue; + } SeekPosition position = offset.getPosition(); Long whereTo = offset.getOffset(); + Function offsetComputeFunction = offset.getOffsetComputeFunction(); if (position == null) { if (offset.isRelativeToCurrent()) { - whereTo += this.consumer.position(offset.getTopicPartition()); + whereTo += this.consumer.position(topicPartition); whereTo = Math.max(whereTo, 0); } - this.consumer.seek(offset.getTopicPartition(), whereTo); - } - else if (position.equals(SeekPosition.BEGINNING)) { - this.consumer.seekToBeginning(Collections.singletonList(offset.getTopicPartition())); - if (whereTo != null) { - this.consumer.seek(offset.getTopicPartition(), whereTo); + else if (offsetComputeFunction != null) { + whereTo = offsetComputeFunction.apply(this.consumer.position(topicPartition)); } + this.consumer.seek(topicPartition, whereTo); } - else if (position.equals(SeekPosition.TIMESTAMP)) { + else if (SeekPosition.TIMESTAMP.equals(position)) { // possible late addition since the grouped processing above Map offsetsForTimes = this.consumer .offsetsForTimes( - Collections.singletonMap(offset.getTopicPartition(), offset.getOffset())); + Collections.singletonMap(topicPartition, offset.getOffset())); offsetsForTimes.forEach((tp, ot) -> { if (ot != null) { this.consumer.seek(tp, ot.offset()); @@ -3113,10 +3168,15 @@ else if (position.equals(SeekPosition.TIMESTAMP)) { }); } else { - this.consumer.seekToEnd(Collections.singletonList(offset.getTopicPartition())); + if (SeekPosition.BEGINNING.equals(position)) { + this.consumer.seekToBeginning(Collections.singletonList(topicPartition)); + } + else { + this.consumer.seekToEnd(Collections.singletonList(topicPartition)); + } if (whereTo != null) { - whereTo += this.consumer.position(offset.getTopicPartition()); - this.consumer.seek(offset.getTopicPartition(), whereTo); + whereTo += this.consumer.position(topicPartition); + this.consumer.seek(topicPartition, whereTo); } } } @@ -3128,11 +3188,15 @@ else if (position.equals(SeekPosition.TIMESTAMP)) { } } - private void processTimestampSeeks() { + private void processTimestampSeeks(@Nullable Collection assigned) { Iterator seekIterator = this.seeks.iterator(); Map timestampSeeks = null; while (seekIterator.hasNext()) { TopicPartitionOffset tpo = seekIterator.next(); + if (!checkPartitionAssignedBeforeSeek(assigned, tpo.getTopicPartition())) { + seekIterator.remove(); + continue; + } if (SeekPosition.TIMESTAMP.equals(tpo.getPosition())) { if (timestampSeeks == null) { timestampSeeks = new HashMap<>(); @@ -3178,6 +3242,7 @@ private void initPartitionsIfNeeded() { .filter(e -> SeekPosition.TIMESTAMP.equals(e.getValue().seekPosition)) .collect(Collectors.toMap(Entry::getKey, entry -> entry.getValue().offset)); if (!times.isEmpty()) { + times.forEach((key, value) -> partitions.remove(key)); Map 
offsetsForTimes = this.consumer.offsetsForTimes(times); offsetsForTimes.forEach((tp, off) -> { if (off == null) { @@ -3192,7 +3257,7 @@ private void initPartitionsIfNeeded() { if (this.consumerSeekAwareListener != null) { this.consumerSeekAwareListener.onPartitionsAssigned(this.definedPartitions.keySet().stream() .map(tp -> new SimpleEntry<>(tp, this.consumer.position(tp))) - .collect(Collectors.toMap(entry -> entry.getKey(), entry -> entry.getValue())), + .collect(Collectors.toMap(SimpleEntry::getKey, SimpleEntry::getValue)), this.seekCallback); } } @@ -3240,17 +3305,9 @@ private void logReset(TopicPartition topicPartition, long newOffset) { this.logger.debug(() -> "Reset " + topicPartition + " to offset " + newOffset); } - private void updatePendingOffsets() { - ConsumerRecord cRecord = this.acks.poll(); - while (cRecord != null) { - addOffset(cRecord); - cRecord = this.acks.poll(); - } - } - private void addOffset(ConsumerRecord cRecord) { - this.offsets.computeIfAbsent(cRecord.topic(), v -> new ConcurrentHashMap<>()) - .compute(cRecord.partition(), (k, v) -> v == null ? cRecord.offset() : Math.max(v, cRecord.offset())); + this.offsets.compute(new TopicPartition(cRecord.topic(), cRecord.partition()), + (k, v) -> v == null ? cRecord.offset() : Math.max(v, cRecord.offset())); } private void commitIfNecessary() { @@ -3259,12 +3316,7 @@ private void commitIfNecessary() { if (!commits.isEmpty()) { this.commitLogger.log(() -> COMMITTING + commits); try { - if (this.syncCommits) { - commitSync(commits); - } - else { - commitAsync(commits); - } + commitOffsets(commits); } catch (@SuppressWarnings(UNUSED) WakeupException e) { // ignore - not polling @@ -3273,6 +3325,37 @@ private void commitIfNecessary() { } } + private void commitOffsetsInTransactions(Map commits) { + this.commitLogger.log(() -> COMMITTING + commits); + if (this.producer != null) { + doSendOffsets(this.producer, commits); + } + else { + commitOffsets(commits); + } + } + + private void commitOffsets(Map commits) { + if (CollectionUtils.isEmpty(commits)) { + return; + } + if (this.syncCommits) { + commitSync(commits); + } + else { + commitAsync(commits); + } + } + + private void commitAsync(Map commits) { + this.consumer.commitAsync(commits, (offsetsAttempted, exception) -> { + this.commitCallback.onComplete(offsetsAttempted, exception); + if (exception == null && this.fixTxOffsets) { + this.lastCommits.putAll(commits); + } + }); + } + private void commitSync(Map commits) { doCommitSync(commits, 0); } @@ -3283,6 +3366,10 @@ private void doCommitSync(Map commits, int re if (this.fixTxOffsets) { this.lastCommits.putAll(commits); } + if (!this.commitsDuringRebalance.isEmpty()) { + // Remove failed commits during last rebalance that are superseded by these commits + this.commitsDuringRebalance.keySet().removeAll(commits.keySet()); + } } catch (RetriableCommitFailedException e) { if (retries >= this.containerProperties.getCommitRetries()) { @@ -3296,14 +3383,17 @@ private void doCommitSync(Map commits, int re } } + Map buildSingleCommits(ConsumerRecord cRecord) { + return Collections.singletonMap( + new TopicPartition(cRecord.topic(), cRecord.partition()), + createOffsetAndMetadata(cRecord.offset() + 1)); + } + private Map buildCommits() { Map commits = new LinkedHashMap<>(); - for (Entry> entry : this.offsets.entrySet()) { - for (Entry offset : entry.getValue().entrySet()) { - commits.put(new TopicPartition(entry.getKey(), offset.getKey()), - createOffsetAndMetadata(offset.getValue() + 1)); - } - } + 
this.offsets.forEach((topicPartition, offset) -> { + commits.put(topicPartition, createOffsetAndMetadata(offset + 1)); + }); this.offsets.clear(); return commits; } @@ -3318,11 +3408,20 @@ private Collection> getHighestOffsetRecords(ConsumerRecords .values(); } + private void callbackForAsyncFailure(ConsumerRecord cRecord, RuntimeException ex) { + this.failedRecords.addLast(new FailedRecordTuple<>(cRecord, ex)); + } + @Override public void seek(String topic, int partition, long offset) { this.seeks.add(new TopicPartitionOffset(topic, partition, offset)); } + @Override + public void seek(String topic, int partition, Function offsetComputeFunction) { + this.seeks.add(new TopicPartitionOffset(topic, partition, offsetComputeFunction)); + } + @Override public void seekToBeginning(String topic, int partition) { this.seeks.add(new TopicPartitionOffset(topic, partition, SeekPosition.BEGINNING)); @@ -3350,7 +3449,7 @@ public void seekToEnd(Collection partitions) { @Override public void seekRelative(String topic, int partition, long offset, boolean toCurrent) { if (toCurrent) { - this.seeks.add(new TopicPartitionOffset(topic, partition, offset, toCurrent)); + this.seeks.add(new TopicPartitionOffset(topic, partition, offset, true)); } else if (offset >= 0) { this.seeks.add(new TopicPartitionOffset(topic, partition, offset, SeekPosition.BEGINNING)); @@ -3370,6 +3469,11 @@ public void seekToTimestamp(Collection topicParts, long timestam topicParts.forEach(tp -> seekToTimestamp(tp.topic(), tp.partition(), timestamp)); } + @Override + public String getGroupId() { + return this.consumerGroupId; + } + @Override public String toString() { return "KafkaMessageListenerContainer.ListenerConsumer [" @@ -3454,10 +3558,10 @@ public void acknowledge() { if (!this.acked) { Map> offs = ListenerConsumer.this.offsetsInThisBatch; Map>> deferred = ListenerConsumer.this.deferredOffsets; - for (ConsumerRecord cRecord : getHighestOffsetRecords(this.records)) { + for (TopicPartition topicPartition : this.records.partitions()) { if (offs != null) { - offs.remove(new TopicPartition(cRecord.topic(), cRecord.partition())); - deferred.remove(new TopicPartition(cRecord.topic(), cRecord.partition())); + offs.remove(topicPartition); + deferred.remove(topicPartition); } } processAcks(this.records); @@ -3641,8 +3745,7 @@ public void onPartitionsAssigned(Collection partitions) { private void repauseIfNeeded(Collection partitions) { boolean pending = false; synchronized (ListenerConsumer.this) { - Map> pendingOffsets = ListenerConsumer.this.offsetsInThisBatch; - if (!ObjectUtils.isEmpty(pendingOffsets)) { + if (!ObjectUtils.isEmpty(ListenerConsumer.this.offsetsInThisBatch)) { pending = true; } } @@ -3720,7 +3823,7 @@ protected void doInTransactionWithoutResult(TransactionStatus status) { .getProducerFactory()); if (holder != null) { doSendOffsets(holder.getProducer(), - Collections.singletonMap(partition, offsetAndMetadata)); + Collections.singletonMap(partition, offsetAndMetadata)); } } @@ -3767,6 +3870,13 @@ public void seek(String topic, int partition, long offset) { ListenerConsumer.this.consumer.seek(new TopicPartition(topic, partition), offset); } + @Override + public void seek(String topic, int partition, Function offsetComputeFunction) { + ListenerConsumer.this.consumer.seek(new TopicPartition(topic, partition), + offsetComputeFunction.apply( + ListenerConsumer.this.consumer.position(new TopicPartition(topic, partition)))); + } + @Override public void seekToBeginning(String topic, int partition) { 
ListenerConsumer.this.consumer.seekToBeginning( @@ -3874,20 +3984,13 @@ private Long computeBackwardWhereTo(long offset, boolean toCurrent, TopicPartiti } - private static final class OffsetMetadata { - - final Long offset; // NOSONAR - - final boolean relativeToCurrent; // NOSONAR - - final SeekPosition seekPosition; // NOSONAR - - OffsetMetadata(Long offset, boolean relativeToCurrent, SeekPosition seekPosition) { - this.offset = offset; - this.relativeToCurrent = relativeToCurrent; - this.seekPosition = seekPosition; - } - + /** + * Offset metadata record. + * @param offset current offset. + * @param relativeToCurrent relative to current. + * @param seekPosition seek position strategy. + */ + private record OffsetMetadata(Long offset, boolean relativeToCurrent, SeekPosition seekPosition) { } private class StopCallback implements BiConsumer { @@ -3923,4 +4026,6 @@ private static class StopAfterFenceException extends KafkaException { } + private record FailedRecordTuple(ConsumerRecord record, RuntimeException ex) { } + } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/ListenerContainerRegistry.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/ListenerContainerRegistry.java index 968d3a8705..176df09d34 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/ListenerContainerRegistry.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/ListenerContainerRegistry.java @@ -1,5 +1,5 @@ /* - * Copyright 2021-2022 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,6 +18,8 @@ import java.util.Collection; import java.util.Set; +import java.util.function.BiPredicate; +import java.util.function.Predicate; import org.springframework.lang.Nullable; @@ -25,6 +27,7 @@ * A registry for listener containers. * * @author Gary Russell + * @author Joo Hyuk Kim * @since 2.7 * */ @@ -41,6 +44,30 @@ public interface ListenerContainerRegistry { @Nullable MessageListenerContainer getListenerContainer(String id); + /** + * Return all {@link MessageListenerContainer} instances with id matching the predicate or + * empty {@link Collection} if no such container exists. + * @param idMatcher the predicate to match the container id with + * @return the containers or empty {@link Collection} if no container with that id exists + * @since 3.2 + * @see #getListenerContainerIds() + * @see #getListenerContainer(String) + */ + Collection getListenerContainersMatching(Predicate idMatcher); + + /** + * Return all {@link MessageListenerContainer} instances that satisfy the given bi-predicate. + * The {@code BiPredicate} takes the container id and the container itself as arguments. + * This allows for more sophisticated filtering, including properties or state of the container itself. + * @param idAndContainerMatcher the bi-predicate to match the container id and the container + * @return the containers that match the bi-predicate criteria or an empty {@link Collection} if no matching containers exist + * @since 3.2 + * @see #getListenerContainerIds() + * @see #getListenerContainersMatching(Predicate) + */ + Collection getListenerContainersMatching( + BiPredicate idAndContainerMatcher); + /** * Return the {@link MessageListenerContainer} with the specified id or {@code null} * if no such container exists. 
Returns containers that are not registered with the diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/MessageListenerContainer.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/MessageListenerContainer.java index def96b35fe..c8e9870fe6 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/MessageListenerContainer.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/MessageListenerContainer.java @@ -39,6 +39,7 @@ * @author Tomaz Fernandes * @author Francois Rosiere * @author Soby Chacko + * @author Lokesh Alamuri */ public interface MessageListenerContainer extends SmartLifecycle, DisposableBean { @@ -281,6 +282,14 @@ default MessageListenerContainer getContainerFor(String topic, int partition) { default void childStopped(MessageListenerContainer child, ConsumerStoppedEvent.Reason reason) { } + /** + * Notify a parent container that a child container has started. + * @param child the container. + * @since 3.3 + */ + default void childStarted(MessageListenerContainer child) { + } + @Override default void destroy() { stop(); diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/RecordInRetryException.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/RecordInRetryException.java new file mode 100644 index 0000000000..0673b0f006 --- /dev/null +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/RecordInRetryException.java @@ -0,0 +1,46 @@ +/* + * Copyright 2024-2024 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.kafka.listener; + +import javax.annotation.Nullable; + +import org.springframework.core.NestedRuntimeException; + +/** + * Internal {@link NestedRuntimeException} that is used as an exception thrown + * when the record is in retry and not yet recovered during error handling. + * This is to prevent the record from being prematurely committed in the middle of a retry. + * + * Intended only for framework use and thus the package-protected access. + * + * @author Soby Chacko + * @since 3.3.0 + */ +@SuppressWarnings("serial") +class RecordInRetryException extends NestedRuntimeException { + + /** + * Package protected constructor to create an instance with the provided properties. 
+ * + * @param message the detail message. + * @param cause the cause {@link Throwable}, if any. + */ + RecordInRetryException(String message, @Nullable Throwable cause) { + super(message, cause); + } + +} diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/SeekUtils.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/SeekUtils.java index 4a4aa10419..332294357b 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/SeekUtils.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/SeekUtils.java @@ -32,7 +32,6 @@ import org.springframework.core.NestedRuntimeException; import org.springframework.core.log.LogAccessor; -import org.springframework.kafka.KafkaException; import org.springframework.kafka.KafkaException.Level; import org.springframework.kafka.listener.ContainerProperties.AckMode; import org.springframework.kafka.support.KafkaUtils; @@ -46,6 +45,7 @@ * @author Gary Russell * @author Francois Rosiere * @author Wang Zhiyang + * @author Soby Chacko * @since 2.2 * */ @@ -224,7 +224,7 @@ public static void seekOrRecover(Exception thrownException, @Nullable List resultType) { return CompletableFuture.class.isAssignableFrom(resultType); } + /** + * Return {@code true} when the parameter type is Kotlin's {@code Continuation}. + * @param parameterType the parameter type to check. + * @return whether the type is {@code kotlin.coroutines.Continuation}. + * @since 3.2.1 + */ + public static boolean isKotlinContinuation(Class parameterType) { + return "kotlin.coroutines.Continuation".equals(parameterType.getName()); + } } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/BatchMessagingMessageListenerAdapter.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/BatchMessagingMessageListenerAdapter.java index b7241c0fd3..1cec947983 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/BatchMessagingMessageListenerAdapter.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/BatchMessagingMessageListenerAdapter.java @@ -1,5 +1,5 @@ /* - * Copyright 2016-2023 the original author or authors. + * Copyright 2016-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
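The name-based check in AdapterUtils.isKotlinContinuation above avoids a compile-time dependency on kotlin-stdlib: a Kotlin suspend listener compiles to a method whose trailing parameter is kotlin.coroutines.Continuation, so it can be detected reflectively. A sketch (the Method argument is an assumption for illustration):

import java.lang.reflect.Method;

import org.springframework.kafka.listener.adapter.AdapterUtils;

public final class SuspendDetection {

	// True when the method's last parameter is kotlin.coroutines.Continuation,
	// i.e. the Java view of a Kotlin "suspend fun" listener method.
	static boolean isSuspendListener(Method method) {
		int count = method.getParameterCount();
		return count > 0 && AdapterUtils.isKotlinContinuation(method.getParameterTypes()[count - 1]);
	}

	private SuspendDetection() {
	}

}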
@@ -35,7 +35,6 @@ import org.springframework.messaging.support.MessageBuilder; import org.springframework.util.Assert; - /** * A {@link org.springframework.kafka.listener.MessageListener MessageListener} * adapter that invokes a configurable {@link HandlerAdapter}; used when the factory is @@ -55,6 +54,7 @@ * @author Artem Bilan * @author Venil Noronha * @author Wang ZhiYang + * @author Sanghyeok An * @since 1.1 */ public class BatchMessagingMessageListenerAdapter extends MessagingMessageListenerAdapter diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/ContinuationHandlerMethodArgumentResolver.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/ContinuationHandlerMethodArgumentResolver.java index f475f72a58..d17fc19080 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/ContinuationHandlerMethodArgumentResolver.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/ContinuationHandlerMethodArgumentResolver.java @@ -16,12 +16,12 @@ package org.springframework.kafka.listener.adapter; +import reactor.core.publisher.Mono; + import org.springframework.core.MethodParameter; import org.springframework.messaging.Message; import org.springframework.messaging.handler.invocation.HandlerMethodArgumentResolver; -import reactor.core.publisher.Mono; - /** * No-op resolver for method arguments of type {@link kotlin.coroutines.Continuation}. *

@@ -30,6 +30,7 @@ * but for regular {@link HandlerMethodArgumentResolver} contract. * * @author Wang Zhiyang + * @author Huijin Hong * * @since 3.2 * @@ -39,7 +40,7 @@ public class ContinuationHandlerMethodArgumentResolver implements HandlerMethodA @Override public boolean supportsParameter(MethodParameter parameter) { - return "kotlin.coroutines.Continuation".equals(parameter.getParameterType().getName()); + return AdapterUtils.isKotlinContinuation(parameter.getParameterType()); } @Override diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/DelegatingInvocableHandler.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/DelegatingInvocableHandler.java index 9f94c33d54..3770dc6eed 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/DelegatingInvocableHandler.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/DelegatingInvocableHandler.java @@ -47,7 +47,6 @@ import org.springframework.util.Assert; import org.springframework.validation.Validator; - /** * Delegates to an {@link InvocableHandlerMethod} based on the message payload type. * Matches a single, non-annotated parameter or one that is annotated with diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/FilteringBatchMessageListenerAdapter.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/FilteringBatchMessageListenerAdapter.java index a7eb2c03f7..e3f0970851 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/FilteringBatchMessageListenerAdapter.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/FilteringBatchMessageListenerAdapter.java @@ -1,5 +1,5 @@ /* - * Copyright 2016-2021 the original author or authors. + * Copyright 2016-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -36,6 +36,7 @@ * @param the value type. * * @author Gary Russell + * @author Sanghyeok An * */ public class FilteringBatchMessageListenerAdapter @@ -44,6 +45,8 @@ public class FilteringBatchMessageListenerAdapter private final boolean ackDiscarded; + private final boolean consumerAware; + /** * Create an instance with the supplied strategy and delegate listener. * @param delegate the delegate. 
@@ -51,9 +54,7 @@ public class FilteringBatchMessageListenerAdapter */ public FilteringBatchMessageListenerAdapter(BatchMessageListener delegate, RecordFilterStrategy recordFilterStrategy) { - - super(delegate, recordFilterStrategy); - this.ackDiscarded = false; + this(delegate, recordFilterStrategy, false); } /** @@ -71,22 +72,25 @@ public FilteringBatchMessageListenerAdapter(BatchMessageListener delegate, super(delegate, recordFilterStrategy); this.ackDiscarded = ackDiscarded; + this.consumerAware = this.delegateType.equals(ListenerType.ACKNOWLEDGING_CONSUMER_AWARE) || + this.delegateType.equals(ListenerType.CONSUMER_AWARE); } @Override public void onMessage(List> records, @Nullable Acknowledgment acknowledgment, Consumer consumer) { - List> consumerRecords = getRecordFilterStrategy().filterBatch(records); + final RecordFilterStrategy recordFilterStrategy = getRecordFilterStrategy(); + final List> consumerRecords = recordFilterStrategy.filterBatch(records); Assert.state(consumerRecords != null, "filter returned null from filterBatch"); - boolean consumerAware = this.delegateType.equals(ListenerType.ACKNOWLEDGING_CONSUMER_AWARE) - || this.delegateType.equals(ListenerType.CONSUMER_AWARE); - /* - * An empty list goes to the listener if ackDiscarded is false and the listener can ack - * either through the acknowledgment - */ - if (consumerRecords.size() > 0 || consumerAware - || (!this.ackDiscarded && this.delegateType.equals(ListenerType.ACKNOWLEDGING))) { + + if (recordFilterStrategy.ignoreEmptyBatch() && + consumerRecords.isEmpty() && + acknowledgment != null) { + acknowledgment.acknowledge(); + } + else if (!consumerRecords.isEmpty() || this.consumerAware + || (!this.ackDiscarded && this.delegateType.equals(ListenerType.ACKNOWLEDGING))) { invokeDelegate(consumerRecords, acknowledgment, consumer); } else { @@ -98,6 +102,7 @@ public void onMessage(List> records, @Nullable Acknowledgme private void invokeDelegate(List> consumerRecords, Acknowledgment acknowledgment, Consumer consumer) { + switch (this.delegateType) { case ACKNOWLEDGING_CONSUMER_AWARE: this.delegate.onMessage(consumerRecords, acknowledgment, consumer); diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/HandlerAdapter.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/HandlerAdapter.java index 82caa738fd..31f7743bb8 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/HandlerAdapter.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/HandlerAdapter.java @@ -65,6 +65,7 @@ public boolean isAsyncReplies() { return this.asyncReplies; } + @Nullable public Object invoke(Message message, Object... 
providedArgs) throws Exception { //NOSONAR if (this.invokerHandlerMethod != null) { return this.invokerHandlerMethod.invoke(message, providedArgs); // NOSONAR diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/MessagingMessageListenerAdapter.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/MessagingMessageListenerAdapter.java index 76ae8f1453..59440ccda3 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/MessagingMessageListenerAdapter.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/MessagingMessageListenerAdapter.java @@ -28,16 +28,20 @@ import java.util.Map; import java.util.Objects; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; +import java.util.function.BiConsumer; import java.util.stream.Collectors; +import io.micrometer.observation.Observation; +import io.micrometer.observation.ObservationRegistry; import org.apache.commons.logging.LogFactory; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.common.TopicPartition; +import reactor.core.publisher.Mono; import org.springframework.context.expression.MapAccessor; -import org.springframework.core.KotlinDetector; import org.springframework.core.MethodParameter; import org.springframework.core.log.LogAccessor; import org.springframework.expression.BeanResolver; @@ -73,8 +77,7 @@ import org.springframework.util.ClassUtils; import org.springframework.util.ObjectUtils; import org.springframework.util.StringUtils; - -import reactor.core.publisher.Mono; +import org.springframework.util.TypeUtils; /** * An abstract {@link org.springframework.kafka.listener.MessageListener} adapter @@ -90,6 +93,9 @@ * @author Venil Noronha * @author Nathan Xu * @author Wang ZhiYang + * @author Huijin Hong + * @author Soby Chacko + * @author Sanghyeok An */ public abstract class MessagingMessageListenerAdapter implements ConsumerSeekAware, AsyncRepliesAware { @@ -151,6 +157,10 @@ public abstract class MessagingMessageListenerAdapter implements ConsumerS private String correlationHeaderName = KafkaHeaders.CORRELATION_ID; + private ObservationRegistry observationRegistry = ObservationRegistry.NOOP; + + private BiConsumer, RuntimeException> asyncRetryCallback; + /** * Create an instance with the provided bean and method. * @param bean the bean. @@ -245,6 +255,15 @@ public void setHandlerMethod(HandlerAdapter handlerMethod) { this.handlerMethod = handlerMethod; } + /** + * Set the {@link ObservationRegistry} to handle observability. + * @param observationRegistry {@link ObservationRegistry} instance. + * @since 3.3.0 + */ + public void setObservationRegistry(ObservationRegistry observationRegistry) { + this.observationRegistry = observationRegistry; + } + public boolean isAsyncReplies() { return this.handlerMethod != null && this.handlerMethod.isAsyncReplies(); } @@ -302,6 +321,20 @@ public void setBeanResolver(BeanResolver beanResolver) { this.evaluationContext.addPropertyAccessor(new MapAccessor()); } + /** + * Set the retry callback for failures of both {@link CompletableFuture} and {@link Mono}. + * {@link MessagingMessageListenerAdapter#asyncFailure(Object, Acknowledgment, Consumer, Throwable, Message)} + * will invoke {@link MessagingMessageListenerAdapter#asyncRetryCallback} when + * {@link CompletableFuture} or {@link Mono} fails to complete. 
+ * @param asyncRetryCallback the callback for async retry. + * @since 3.3 + */ + public void setCallbackForAsyncFailure( + @Nullable BiConsumer, RuntimeException> asyncRetryCallback) { + + this.asyncRetryCallback = asyncRetryCallback; + } + protected boolean isMessageList() { return this.isMessageList; } @@ -374,21 +407,41 @@ public void onIdleContainer(Map assignments, ConsumerSeekC protected Message toMessagingMessage(ConsumerRecord cRecord, @Nullable Acknowledgment acknowledgment, Consumer consumer) { + return getMessageConverter().toMessage(cRecord, acknowledgment, consumer, getType()); } protected void invoke(Object records, @Nullable Acknowledgment acknowledgment, Consumer consumer, final Message message) { + Throwable listenerError = null; + Object result = null; + Observation currentObservation = getCurrentObservation(); try { - Object result = invokeHandler(records, acknowledgment, message, consumer); + result = invokeHandler(records, acknowledgment, message, consumer); if (result != null) { handleResult(result, records, acknowledgment, consumer, message); } } - catch (ListenerExecutionFailedException e) { // NOSONAR ex flow control + catch (ListenerExecutionFailedException e) { + listenerError = e; + currentObservation.error(e); handleException(records, acknowledgment, consumer, message, e); } + catch (Error e) { + listenerError = e; + currentObservation.error(e); + } + finally { + if (listenerError != null || result == null) { + currentObservation.stop(); + } + } + } + + private Observation getCurrentObservation() { + Observation currentObservation = this.observationRegistry.getCurrentObservation(); + return currentObservation == null ? Observation.NOOP : currentObservation; } /** @@ -400,6 +453,7 @@ protected void invoke(Object records, @Nullable Acknowledgment acknowledgment, C * @param consumer the consumer. * @return the result of invocation. 
*/ + @Nullable protected final Object invokeHandler(Object data, @Nullable Acknowledgment acknowledgment, Message message, Consumer consumer) { @@ -420,7 +474,7 @@ else if (this.hasMetadataParameter) { return this.handlerMethod.invoke(message, data, ack, consumer); } } - catch (org.springframework.messaging.converter.MessageConversionException ex) { + catch (MessageConversionException ex) { throw checkAckArg(ack, message, new MessageConversionException("Cannot handle message", ex)); } catch (MethodArgumentNotValidException ex) { @@ -440,8 +494,7 @@ private RuntimeException checkAckArg(@Nullable Acknowledgment acknowledgment, Me if (this.hasAckParameter && acknowledgment == null) { return new ListenerExecutionFailedException("invokeHandler Failed", new IllegalStateException("No Acknowledgment available as an argument, " - + "the listener container must have a MANUAL AckMode to populate the Acknowledgment.", - ex)); + + "the listener container must have a MANUAL AckMode to populate the Acknowledgment.")); } return new ListenerExecutionFailedException(createMessagingErrorMessage("Listener method could not " + "be invoked with the incoming message", message.getPayload()), ex); @@ -459,7 +512,7 @@ private RuntimeException checkAckArg(@Nullable Acknowledgment acknowledgment, Me */ protected void handleResult(Object resultArg, Object request, @Nullable Acknowledgment acknowledgment, Consumer consumer, @Nullable Message source) { - + final Observation observation = getCurrentObservation(); this.logger.debug(() -> "Listener method returned result [" + resultArg + "] - generating response message for it"); String replyTopic = evaluateReplyTopic(request, source, resultArg); @@ -473,35 +526,42 @@ protected void handleResult(Object resultArg, Object request, @Nullable Acknowle invocationResult.messageReturnType() : this.messageReturnType; - if (result instanceof CompletableFuture completable) { + CompletableFuture completableFutureResult; + + if (monoPresent && result instanceof Mono mono) { + if (acknowledgment == null || !acknowledgment.isOutOfOrderCommit()) { + this.logger.warn("Container 'Acknowledgment' must be async ack for Mono return type " + + "(or Kotlin suspend function); otherwise the container will ack the message immediately"); + } + completableFutureResult = mono.toFuture(); + } + else if (!(result instanceof CompletableFuture)) { + completableFutureResult = CompletableFuture.completedFuture(result); + } + else { + completableFutureResult = (CompletableFuture) result; if (acknowledgment == null || !acknowledgment.isOutOfOrderCommit()) { this.logger.warn("Container 'Acknowledgment' must be async ack for Future return type; " + "otherwise the container will ack the message immediately"); } - completable.whenComplete((r, t) -> { + } + + completableFutureResult.whenComplete((r, t) -> { + try { if (t == null) { asyncSuccess(r, replyTopic, source, messageReturnType); acknowledge(acknowledgment); } else { - asyncFailure(request, acknowledgment, consumer, t, source); + Throwable cause = t instanceof CompletionException ? 
t.getCause() : t; + observation.error(cause); + asyncFailure(request, acknowledgment, consumer, cause, source); } - }); - } - else if (monoPresent && result instanceof Mono mono) { - if (acknowledgment == null || !acknowledgment.isOutOfOrderCommit()) { - this.logger.warn("Container 'Acknowledgment' must be async ack for Mono return type " + - "(or Kotlin suspend function); otherwise the container will ack the message immediately"); } - mono.subscribe( - r -> asyncSuccess(r, replyTopic, source, messageReturnType), - t -> asyncFailure(request, acknowledgment, consumer, t, source), - () -> acknowledge(acknowledgment) - ); - } - else { - sendResponse(result, replyTopic, source, messageReturnType); - } + finally { + observation.stop(); + } + }); } @Nullable @@ -638,6 +698,7 @@ private void sendReplyForMessageSource(Object result, String topic, Message s builder.setHeader(this.correlationHeaderName, correlationId); } setPartition(builder, source); + setKey(builder, source); this.replyTemplate.send(builder.build()); } @@ -664,16 +725,27 @@ protected void asyncFailure(Object request, @Nullable Acknowledgment acknowledgm Throwable t, Message source) { try { + Throwable cause = t instanceof CompletionException ? t.getCause() : t; handleException(request, acknowledgment, consumer, source, - new ListenerExecutionFailedException(createMessagingErrorMessage( - "Async Fail", source.getPayload()), t)); + new ListenerExecutionFailedException(createMessagingErrorMessage( + "Async Fail", source.getPayload()), cause)); } catch (Throwable ex) { this.logger.error(t, () -> "Future, Mono, or suspend function was completed with an exception for " + source); acknowledge(acknowledgment); + if (canAsyncRetry(request, ex) && this.asyncRetryCallback != null) { + @SuppressWarnings("unchecked") + ConsumerRecord record = (ConsumerRecord) request; + this.asyncRetryCallback.accept(record, (RuntimeException) ex); + } } } + private static boolean canAsyncRetry(Object request, Throwable exception) { + // The async retry with @RetryableTopic is only supported for SingleRecord Listener. + return request instanceof ConsumerRecord && exception instanceof RuntimeException; + } + protected void handleException(Object records, @Nullable Acknowledgment acknowledgment, Consumer consumer, Message message, ListenerExecutionFailedException e) { @@ -719,6 +791,14 @@ private void setPartition(MessageBuilder builder, Message source) { } } + private void setKey(MessageBuilder builder, Message source) { + Object key = source.getHeaders().get(KafkaHeaders.RECEIVED_KEY); + // Set the reply record key only for non-batch requests + if (key != null && !(key instanceof List)) { + builder.setHeader(KafkaHeaders.KEY, key); + } + } + @Nullable private byte[] getReplyPartition(Message source) { return source.getHeaders().get(KafkaHeaders.REPLY_PARTITION, byte[].class); @@ -737,7 +817,8 @@ protected final String createMessagingErrorMessage(String description, Object pa * @param method the method. * @return the type. 
*/ - protected Type determineInferredType(Method method) { // NOSONAR complexity + @Nullable + protected Type determineInferredType(@Nullable Method method) { // NOSONAR complexity if (method == null) { return null; } @@ -763,8 +844,8 @@ protected Type determineInferredType(Method method) { // NOSONAR complexity isNotConvertible |= isAck; boolean isConsumer = parameterIsType(parameterType, Consumer.class); isNotConvertible |= isConsumer; - boolean isCoroutines = KotlinDetector.isKotlinType(methodParameter.getParameterType()); - isNotConvertible |= isCoroutines; + boolean isKotlinContinuation = AdapterUtils.isKotlinContinuation(methodParameter.getParameterType()); + isNotConvertible |= isKotlinContinuation; boolean isMeta = parameterIsType(parameterType, ConsumerRecordMetadata.class); this.hasMetadataParameter |= isMeta; isNotConvertible |= isMeta; @@ -783,7 +864,7 @@ protected Type determineInferredType(Method method) { // NOSONAR complexity break; } } - else if (isAck || isCoroutines || isConsumer || annotationHeaderIsGroupId(methodParameter)) { + else if (isAck || isKotlinContinuation || isConsumer || annotationHeaderIsGroupId(methodParameter)) { allowedBatchParameters++; } } @@ -810,53 +891,44 @@ else if (isAck || isCoroutines || isConsumer || annotationHeaderIsGroupId(method private Type extractGenericParameterTypFromMethodParameter(MethodParameter methodParameter) { Type genericParameterType = methodParameter.getGenericParameterType(); if (genericParameterType instanceof ParameterizedType parameterizedType) { - if (parameterizedType.getRawType().equals(Message.class)) { + Type rawType = parameterizedType.getRawType(); + if (rawType.equals(Message.class)) { genericParameterType = parameterizedType.getActualTypeArguments()[0]; } - else if (parameterizedType.getRawType().equals(List.class) - && parameterizedType.getActualTypeArguments().length == 1) { - - Type paramType = getTypeFromWildCardWithUpperBound(parameterizedType.getActualTypeArguments()[0]); - this.isConsumerRecordList = parameterIsType(paramType, ConsumerRecord.class); - boolean messageWithGeneric = rawByParameterIsType(paramType, Message.class); - this.isMessageList = Message.class.equals(paramType) || messageWithGeneric; - if (messageWithGeneric) { + else if (rawType.equals(List.class) && parameterizedType.getActualTypeArguments().length == 1) { + Type paramType = parameterizedType.getActualTypeArguments()[0]; + boolean messageHasGeneric = paramType instanceof ParameterizedType pType + && pType.getRawType().equals(Message.class); + this.isMessageList = TypeUtils.isAssignable(paramType, Message.class) || messageHasGeneric; + this.isConsumerRecordList = TypeUtils.isAssignable(paramType, ConsumerRecord.class); + if (messageHasGeneric) { genericParameterType = ((ParameterizedType) paramType).getActualTypeArguments()[0]; } } else { - this.isConsumerRecords = parameterizedType.getRawType().equals(ConsumerRecords.class); + this.isConsumerRecords = rawType.equals(ConsumerRecords.class); } } return genericParameterType; } - private boolean annotationHeaderIsGroupId(MethodParameter methodParameter) { + private static boolean annotationHeaderIsGroupId(MethodParameter methodParameter) { Header header = methodParameter.getParameterAnnotation(Header.class); return header != null && KafkaHeaders.GROUP_ID.equals(header.value()); } - private Type getTypeFromWildCardWithUpperBound(Type paramType) { - if (paramType instanceof WildcardType wcType - && wcType.getUpperBounds() != null - && wcType.getUpperBounds().length > 0) { - paramType = 
wcType.getUpperBounds()[0]; - } - return paramType; - } - - private boolean isMessageWithNoTypeInfo(Type parameterType) { + private static boolean isMessageWithNoTypeInfo(Type parameterType) { if (parameterType instanceof ParameterizedType pType && pType.getRawType().equals(Message.class)) { return pType.getActualTypeArguments()[0] instanceof WildcardType; } return Message.class.equals(parameterType); // could be Message without a generic type } - private boolean parameterIsType(Type parameterType, Type type) { + private static boolean parameterIsType(Type parameterType, Type type) { return parameterType.equals(type) || rawByParameterIsType(parameterType, type); } - private boolean rawByParameterIsType(Type parameterType, Type type) { + private static boolean rawByParameterIsType(Type parameterType, Type type) { return parameterType instanceof ParameterizedType pType && pType.getRawType().equals(type); } @@ -868,6 +940,7 @@ private boolean rawByParameterIsType(Type parameterType, Type type) { * @since 2.0 */ public record ReplyExpressionRoot(Object request, Object source, Object result) { + } static class NoOpAck implements Acknowledgment { diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/RecordFilterStrategy.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/RecordFilterStrategy.java index f2de862e64..aa06755512 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/RecordFilterStrategy.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/RecordFilterStrategy.java @@ -1,5 +1,5 @@ /* - * Copyright 2016-2021 the original author or authors. + * Copyright 2016-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,11 +16,12 @@ package org.springframework.kafka.listener.adapter; -import java.util.Iterator; import java.util.List; import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.springframework.kafka.listener.BatchMessageListener; + /** * Implementations of this interface can signal that a record about * to be delivered to a message listener should be discarded instead @@ -30,7 +31,7 @@ * @param the value type. * * @author Gary Russell - * + * @author Sanghyeok An */ public interface RecordFilterStrategy { @@ -49,13 +50,20 @@ public interface RecordFilterStrategy { * @since 2.8 */ default List> filterBatch(List> records) { - Iterator> iterator = records.iterator(); - while (iterator.hasNext()) { - if (filter(iterator.next())) { - iterator.remove(); - } - } + records.removeIf(this::filter); return records; } + /** + * Determine whether {@link FilteringBatchMessageListenerAdapter} should invoke + * the {@link BatchMessageListener} when all {@link ConsumerRecord}s in a batch have been filtered out + * resulting in empty list. By default, do invoke the {@link BatchMessageListener} (return false). 
+ * @return true for {@link FilteringBatchMessageListenerAdapter} to not invoke {@link BatchMessageListener} + * when all {@link ConsumerRecord} in a batch filtered out + * @since 3.3 + */ + default boolean ignoreEmptyBatch() { + return false; + } + } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/RecordMessagingMessageListenerAdapter.java b/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/RecordMessagingMessageListenerAdapter.java index ee9b791009..6caa854e45 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/RecordMessagingMessageListenerAdapter.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/RecordMessagingMessageListenerAdapter.java @@ -1,5 +1,5 @@ /* - * Copyright 2002-2023 the original author or authors. + * Copyright 2002-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -28,7 +28,6 @@ import org.springframework.lang.Nullable; import org.springframework.messaging.Message; - /** * A {@link org.springframework.kafka.listener.MessageListener MessageListener} * adapter that invokes a configurable {@link HandlerAdapter}; used when the factory is diff --git a/spring-kafka/src/main/java/org/springframework/kafka/requestreply/AggregatingReplyingKafkaTemplate.java b/spring-kafka/src/main/java/org/springframework/kafka/requestreply/AggregatingReplyingKafkaTemplate.java index ba63cabc37..69d6a39301 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/requestreply/AggregatingReplyingKafkaTemplate.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/requestreply/AggregatingReplyingKafkaTemplate.java @@ -50,6 +50,7 @@ * @param the reply data type. * * @author Gary Russell + * @author Sanghyeok An * @since 2.3 * */ @@ -162,7 +163,7 @@ public void onMessage(List>>> } } }); - if (completed.size() > 0) { + if (!completed.isEmpty()) { super.onMessage(completed); } } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/requestreply/ReplyingKafkaTemplate.java b/spring-kafka/src/main/java/org/springframework/kafka/requestreply/ReplyingKafkaTemplate.java index e6bf35aee7..3ae1ef4212 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/requestreply/ReplyingKafkaTemplate.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/requestreply/ReplyingKafkaTemplate.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2023 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -68,6 +68,7 @@ * * @author Gary Russell * @author Artem Bilan + * @author Borahm Lee * * @since 2.1.3 * @@ -408,7 +409,7 @@ public RequestReplyFuture sendAndReceive(ProducerRecord record) { @Override public RequestReplyFuture sendAndReceive(ProducerRecord record, @Nullable Duration replyTimeout) { - Assert.state(this.running, "Template has not been start()ed"); // NOSONAR (sync) + Assert.state(this.running, "Template has not been started"); // NOSONAR (sync) Duration timeout = replyTimeout; if (timeout == null) { timeout = this.defaultReplyTimeout; diff --git a/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/BackOffValuesGenerator.java b/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/BackOffValuesGenerator.java index 9e451a49a7..cfa5afa4dc 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/BackOffValuesGenerator.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/BackOffValuesGenerator.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2023 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -28,7 +28,6 @@ import org.springframework.retry.backoff.NoBackOffPolicy; import org.springframework.retry.backoff.Sleeper; import org.springframework.retry.backoff.SleepingBackOffPolicy; -import org.springframework.retry.backoff.UniformRandomBackOffPolicy; import org.springframework.retry.support.RetrySynchronizationManager; /** @@ -38,6 +37,7 @@ * * @author Tomaz Fernandes * @author Artem Bilan + * @author Borahm Lee * * @since 2.7 * @@ -53,7 +53,7 @@ public class BackOffValuesGenerator { public BackOffValuesGenerator(int providedMaxAttempts, BackOffPolicy providedBackOffPolicy) { this.numberOfValuesToCreate = getMaxAttempts(providedMaxAttempts) - 1; BackOffPolicy policy = providedBackOffPolicy != null ? providedBackOffPolicy : DEFAULT_BACKOFF_POLICY; - checkBackOffPolicyTipe(policy); + checkBackOffPolicyType(policy); this.backOffPolicy = policy; } @@ -69,7 +69,7 @@ public List generateValues() { : generateFromSleepingBackOffPolicy(this.numberOfValuesToCreate, this.backOffPolicy); } - private void checkBackOffPolicyTipe(BackOffPolicy providedBackOffPolicy) { + private void checkBackOffPolicyType(BackOffPolicy providedBackOffPolicy) { if (!(SleepingBackOffPolicy.class.isAssignableFrom(providedBackOffPolicy.getClass()) || NoBackOffPolicy.class.isAssignableFrom(providedBackOffPolicy.getClass()))) { throw new IllegalArgumentException("Either a SleepingBackOffPolicy or a NoBackOffPolicy must be provided. " + @@ -81,12 +81,6 @@ private List generateFromSleepingBackOffPolicy(int maxAttempts, BackOffPol BackoffRetainerSleeper sleeper = new BackoffRetainerSleeper(); SleepingBackOffPolicy retainingBackOffPolicy = ((SleepingBackOffPolicy) providedBackOffPolicy).withSleeper(sleeper); - - // UniformRandomBackOffPolicy loses the max value when a sleeper is set. 
- if (providedBackOffPolicy instanceof UniformRandomBackOffPolicy) { - ((UniformRandomBackOffPolicy) retainingBackOffPolicy) - .setMaxBackOffPeriod(((UniformRandomBackOffPolicy) providedBackOffPolicy).getMaxBackOffPeriod()); - } BackOffContext backOffContext = retainingBackOffPolicy.start(RetrySynchronizationManager.getContext()); IntStream.range(0, maxAttempts) .forEach(index -> retainingBackOffPolicy.backOff(backOffContext)); diff --git a/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/DeadLetterPublishingRecovererFactory.java b/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/DeadLetterPublishingRecovererFactory.java index f87cb0349b..fe7791adf7 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/DeadLetterPublishingRecovererFactory.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/DeadLetterPublishingRecovererFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2023 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -54,6 +54,7 @@ * * @author Tomaz Fernandes * @author Gary Russell + * @author Soby Chacko * @since 2.7 * */ @@ -237,6 +238,35 @@ public DeadLetterPublishingRecoverer create(String mainListenerId) { return recoverer; } + /** + * Log the exception before sending the record in error to the retry topic. + * This method can be overridden by downstream applications to customize how the error is logged. + * @param exception the exception that caused the error + * @param consumerRecord the original consumer record + * @param nextDestination the next topic where the record goes + * @since 3.3.0 + */ + protected void maybeLogListenerException(Exception exception, ConsumerRecord consumerRecord, DestinationTopic nextDestination) { + if (nextDestination.isDltTopic() + && !ListenerExceptionLoggingStrategy.NEVER.equals(this.loggingStrategy)) { + LOGGER.error(exception, () -> getErrorMessage(consumerRecord) + " and won't be retried. " + + "Sending to DLT with name " + nextDestination.getDestinationName() + "."); + } + else if (nextDestination.isNoOpsTopic() + && !ListenerExceptionLoggingStrategy.NEVER.equals(this.loggingStrategy)) { + LOGGER.error(exception, () -> getErrorMessage(consumerRecord) + " and won't be retried. " + + "No further action will be taken with this record."); + } + else if (ListenerExceptionLoggingStrategy.EACH_ATTEMPT.equals(this.loggingStrategy)) { + LOGGER.error(exception, () -> getErrorMessage(consumerRecord) + ". " + + "Sending to retry topic " + nextDestination.getDestinationName() + "."); + } + else { + LOGGER.debug(exception, () -> getErrorMessage(consumerRecord) + ". " + + "Sending to retry topic " + nextDestination.getDestinationName() + "."); + } + } + private DeadLetterPublishingRecoverer create( Function, KafkaOperations> templateResolver, BiFunction, Exception, TopicPartition> destinationResolver) { @@ -271,27 +301,6 @@ private DeadLetterPublishingRecoverer create( }; } - private void maybeLogListenerException(Exception e, ConsumerRecord cr, DestinationTopic nextDestination) { - if (nextDestination.isDltTopic() - && !ListenerExceptionLoggingStrategy.NEVER.equals(this.loggingStrategy)) { - LOGGER.error(e, () -> getErrorMessage(cr) + " and won't be retried. 
" - + "Sending to DLT with name " + nextDestination.getDestinationName() + "."); - } - else if (nextDestination.isNoOpsTopic() - && !ListenerExceptionLoggingStrategy.NEVER.equals(this.loggingStrategy)) { - LOGGER.error(e, () -> getErrorMessage(cr) + " and won't be retried. " - + "No further action will be taken with this record."); - } - else if (ListenerExceptionLoggingStrategy.EACH_ATTEMPT.equals(this.loggingStrategy)) { - LOGGER.error(e, () -> getErrorMessage(cr) + ". " - + "Sending to retry topic " + nextDestination.getDestinationName() + "."); - } - else { - LOGGER.debug(e, () -> getErrorMessage(cr) + ". " - + "Sending to retry topic " + nextDestination.getDestinationName() + "."); - } - } - private static String getErrorMessage(ConsumerRecord cr) { return "Record: " + getRecordInfo(cr) + " threw an error at topic " + cr.topic(); } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/DefaultDestinationTopicResolver.java b/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/DefaultDestinationTopicResolver.java index 1b4d893191..e2260613ba 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/DefaultDestinationTopicResolver.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/DefaultDestinationTopicResolver.java @@ -24,6 +24,8 @@ import java.util.Map; import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -39,7 +41,6 @@ import org.springframework.lang.Nullable; import org.springframework.util.Assert; - /** * * Default implementation of the {@link DestinationTopicResolver} interface. @@ -50,6 +51,7 @@ * @author Gary Russell * @author Yvette Quinby * @author Adrian Chlebosz + * @author Omer Celik * @since 2.7 * */ @@ -63,6 +65,8 @@ public class DefaultDestinationTopicResolver extends ExceptionClassifier private final Map> sourceDestinationsHolderMap; + private final Lock sourceDestinationsHolderLock = new ReentrantLock(); + private final Clock clock; private ApplicationContext applicationContext; @@ -211,9 +215,13 @@ private DestinationTopicHolder getDestinationHolderFor(String mainListenerId, St } private DestinationTopicHolder getDestinationTopicSynchronized(String mainListenerId, String topic) { - synchronized (this.sourceDestinationsHolderMap) { + try { + this.sourceDestinationsHolderLock.lock(); return doGetDestinationFor(mainListenerId, topic); } + finally { + this.sourceDestinationsHolderLock.unlock(); + } } private DestinationTopicHolder doGetDestinationFor(String mainListenerId, String topic) { @@ -230,11 +238,15 @@ public void addDestinationTopics(String mainListenerId, List d + DefaultDestinationTopicResolver.class.getSimpleName() + " is already refreshed."); } validateDestinations(destinationsToAdd); - synchronized (this.sourceDestinationsHolderMap) { + try { + this.sourceDestinationsHolderLock.lock(); Map map = this.sourceDestinationsHolderMap.computeIfAbsent(mainListenerId, id -> new HashMap<>()); map.putAll(correlatePairSourceAndDestinationValues(destinationsToAdd)); } + finally { + this.sourceDestinationsHolderLock.unlock(); + } } private void validateDestinations(List destinationsToAdd) { diff --git a/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/DestinationTopic.java b/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/DestinationTopic.java index 936a1c4dbf..f8676fe825 100644 --- 
a/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/DestinationTopic.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/DestinationTopic.java @@ -31,6 +31,7 @@ * @author Tomaz Fernandes * @author Gary Russell * @author Adrian Chlebosz + * @author Sanghyeok An * @since 2.7 * */ @@ -46,8 +47,7 @@ public DestinationTopic(String destinationName, Properties properties) { } public DestinationTopic(String destinationName, DestinationTopic sourceDestinationtopic, String suffix, Type type) { - this.destinationName = destinationName; - this.properties = new Properties(sourceDestinationtopic.properties, suffix, type); + this(destinationName, new Properties(sourceDestinationtopic.properties, suffix, type)); } public Long getDestinationDelay() { @@ -148,6 +148,7 @@ public static class Properties { @Nullable private final Boolean autoStartDltHandler; + /** * Create an instance with the provided properties with the DLT container starting * automatically (if the container factory is so configured). diff --git a/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/EndpointCustomizer.java b/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/EndpointCustomizer.java index 72351d9f95..ae6dee6fc2 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/EndpointCustomizer.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/EndpointCustomizer.java @@ -1,5 +1,5 @@ /* - * Copyright 2021 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,21 +24,25 @@ * Customizes main, retry and DLT endpoints in the Retry Topic functionality * and returns the resulting topic names. * + * @param the listener endpoint type. + * * @author Tomaz Fernandes + * @author Wang Zhiyang + * * @since 2.7.2 * * @see EndpointCustomizerFactory * */ @FunctionalInterface -public interface EndpointCustomizer { +public interface EndpointCustomizer> { /** * Customize the endpoint and return the topic names generated for this endpoint. * @param listenerEndpoint The main, retry or DLT endpoint to be customized. * @return A collection containing the topic names generated for this endpoint. */ - Collection customizeEndpointAndCollectTopics(MethodKafkaListenerEndpoint listenerEndpoint); + Collection customizeEndpointAndCollectTopics(T listenerEndpoint); class TopicNamesHolder { diff --git a/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/EndpointCustomizerFactory.java b/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/EndpointCustomizerFactory.java index 4481ef5ca7..f6bc477a4c 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/EndpointCustomizerFactory.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/EndpointCustomizerFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2022 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -19,12 +19,13 @@ import java.lang.reflect.Method; import java.util.Arrays; import java.util.Collection; -import java.util.stream.Collectors; import java.util.stream.Stream; import org.springframework.beans.factory.BeanFactory; import org.springframework.kafka.config.MethodKafkaListenerEndpoint; +import org.springframework.kafka.config.MultiMethodKafkaListenerEndpoint; import org.springframework.kafka.support.EndpointHandlerMethod; +import org.springframework.kafka.support.EndpointHandlerMultiMethod; import org.springframework.kafka.support.TopicPartitionOffset; /** @@ -34,6 +35,8 @@ * * @author Tomaz Fernandes * @author Gary Russell + * @author Wang Zhiyang + * * @since 2.7.2 * * @see RetryTopicConfigurer @@ -62,41 +65,88 @@ public EndpointCustomizerFactory(DestinationTopic.Properties destinationProperti this.retryTopicNamesProviderFactory = retryTopicNamesProviderFactory; } - public final EndpointCustomizer createEndpointCustomizer() { - return addSuffixesAndMethod(this.destinationProperties, this.beanMethod.resolveBean(this.beanFactory), - this.beanMethod.getMethod()); + public final EndpointCustomizer> createEndpointCustomizer() { + return addSuffixesAndMethod(this.destinationProperties); } - protected EndpointCustomizer addSuffixesAndMethod(DestinationTopic.Properties properties, Object bean, Method method) { + /** + * Create MethodKafkaListenerEndpoint's EndpointCustomizer, but not support MultiMethodKafkaListenerEndpoint. + * Replace by {@link #addSuffixesAndMethod(DestinationTopic.Properties)} + * @param properties the destination-topic's properties. + * @param bean the bean. + * @param method the method. + * @return the endpoint customizer. + */ + @Deprecated(since = "3.2", forRemoval = true) + @SuppressWarnings("rawtypes") + protected EndpointCustomizer addSuffixesAndMethod(DestinationTopic.Properties properties, Object bean, + Method method) { + + RetryTopicNamesProviderFactory.RetryTopicNamesProvider namesProvider = + this.retryTopicNamesProviderFactory.createRetryTopicNamesProvider(properties); + return endpoint -> { + Collection topics = + customizeAndRegisterTopics(namesProvider, endpoint); + configurationEndpoint(endpoint, namesProvider, properties, bean); + endpoint.setMethod(method); + return topics; + }; + } + + /** + * Create MethodKafkaListenerEndpoint's EndpointCustomizer and support MultiMethodKafkaListenerEndpoint. + * @param properties the destination-topic's properties. + * @return the endpoint customizer. 
+ * @since 3.2 + */ + protected EndpointCustomizer> addSuffixesAndMethod( + DestinationTopic.Properties properties) { + RetryTopicNamesProviderFactory.RetryTopicNamesProvider namesProvider = this.retryTopicNamesProviderFactory.createRetryTopicNamesProvider(properties); return endpoint -> { - Collection topics = customizeAndRegisterTopics(namesProvider, endpoint); - endpoint.setId(namesProvider.getEndpointId(endpoint)); - endpoint.setGroupId(namesProvider.getGroupId(endpoint)); - if (endpoint.getTopics().isEmpty() && endpoint.getTopicPartitionsToAssign() != null) { - endpoint.setTopicPartitions(getTopicPartitions(properties, namesProvider, - endpoint.getTopicPartitionsToAssign())); + Collection topics = + customizeAndRegisterTopics(namesProvider, endpoint); + configurationEndpoint(endpoint, namesProvider, properties, this.beanMethod.resolveBean(this.beanFactory)); + if (endpoint instanceof MultiMethodKafkaListenerEndpoint multiMethodEndpoint + && this.beanMethod instanceof EndpointHandlerMultiMethod beanMultiMethod) { + multiMethodEndpoint.setDefaultMethod(beanMultiMethod.getDefaultMethod()); + multiMethodEndpoint.setMethods(beanMultiMethod.getMethods()); } else { - endpoint.setTopics(endpoint.getTopics().stream() - .map(namesProvider::getTopicName).toArray(String[]::new)); - } - endpoint.setClientIdPrefix(namesProvider.getClientIdPrefix(endpoint)); - endpoint.setGroup(namesProvider.getGroup(endpoint)); - endpoint.setBean(bean); - endpoint.setMethod(method); - Boolean autoStartDltHandler = properties.autoStartDltHandler(); - if (autoStartDltHandler != null && properties.isDltTopic()) { - endpoint.setAutoStartup(autoStartDltHandler); + endpoint.setMethod(this.beanMethod.getMethod()); } return topics; }; } + private void configurationEndpoint(MethodKafkaListenerEndpoint endpoint, + RetryTopicNamesProviderFactory.RetryTopicNamesProvider namesProvider, + DestinationTopic.Properties properties, Object bean) { + + endpoint.setId(namesProvider.getEndpointId(endpoint)); + endpoint.setGroupId(namesProvider.getGroupId(endpoint)); + if (endpoint.getTopics().isEmpty() && endpoint.getTopicPartitionsToAssign() != null) { + endpoint.setTopicPartitions(getTopicPartitions(properties, namesProvider, + endpoint.getTopicPartitionsToAssign())); + } + else { + endpoint.setTopics(endpoint.getTopics().stream() + .map(namesProvider::getTopicName).toArray(String[]::new)); + } + endpoint.setClientIdPrefix(namesProvider.getClientIdPrefix(endpoint)); + endpoint.setGroup(namesProvider.getGroup(endpoint)); + endpoint.setBean(bean); + Boolean autoStartDltHandler = properties.autoStartDltHandler(); + if (autoStartDltHandler != null && properties.isDltTopic()) { + endpoint.setAutoStartup(autoStartDltHandler); + } + } + private static TopicPartitionOffset[] getTopicPartitions(DestinationTopic.Properties properties, RetryTopicNamesProviderFactory.RetryTopicNamesProvider namesProvider, TopicPartitionOffset[] topicPartitionOffsets) { + return Stream.of(topicPartitionOffsets) .map(tpo -> properties.isMainEndpoint() ? 
getTPOForMainTopic(namesProvider, tpo) @@ -124,7 +174,7 @@ protected Collection customizeAndRegisterTo return getTopics(endpoint) .stream() .map(topic -> new EndpointCustomizer.TopicNamesHolder(topic, namesProvider.getTopicName(topic))) - .collect(Collectors.toList()); + .toList(); } private Collection getTopics(MethodKafkaListenerEndpoint endpoint) { @@ -135,7 +185,7 @@ private Collection getTopics(MethodKafkaListenerEndpoint endpoint) topics = Arrays.stream(topicPartitionsToAssign) .map(TopicPartitionOffset::getTopic) .distinct() - .collect(Collectors.toList()); + .toList(); } } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/ListenerContainerFactoryResolver.java b/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/ListenerContainerFactoryResolver.java index 8521d70915..c78cec38c0 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/ListenerContainerFactoryResolver.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/ListenerContainerFactoryResolver.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2022 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -205,9 +205,7 @@ static class Cache { ConcurrentKafkaListenerContainerFactory resolvedFactory) { synchronized (this.cacheMap) { Key key = cacheKey(factoryFromKafkaListenerAnnotation, config); - if (!this.cacheMap.containsKey(key)) { - this.cacheMap.put(key, resolvedFactory); - } + this.cacheMap.putIfAbsent(key, resolvedFactory); return resolvedFactory; } } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/RetryTopicConfiguration.java b/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/RetryTopicConfiguration.java index 9a1631440b..cdb5c89dc3 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/RetryTopicConfiguration.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/RetryTopicConfiguration.java @@ -91,7 +91,9 @@ public Integer getConcurrency() { static class TopicCreation { private final boolean shouldCreateTopics; + private final int numPartitions; + private final short replicationFactor; TopicCreation(@Nullable Boolean shouldCreate, @Nullable Integer numPartitions, @Nullable Short replicationFactor) { diff --git a/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationBuilder.java b/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationBuilder.java index b0c3cfd45d..d52c1c1ef9 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationBuilder.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationBuilder.java @@ -72,7 +72,6 @@ public class RetryTopicConfigurationBuilder { private RetryTopicConfiguration.TopicCreation topicCreationConfiguration = new RetryTopicConfiguration.TopicCreation(); - private ConcurrentKafkaListenerContainerFactory listenerContainerFactory; @Nullable @@ -401,7 +400,6 @@ public RetryTopicConfigurationBuilder customBackoff(SleepingBackOffPolicy bac return this; } - /** * Configure a {@link FixedBackOffPolicy}. * @param interval the interval. 
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/RetryTopicConfigurer.java b/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/RetryTopicConfigurer.java index 4ad885d2ad..d045788e80 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/RetryTopicConfigurer.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/retrytopic/RetryTopicConfigurer.java @@ -18,6 +18,7 @@ import java.lang.reflect.Method; import java.util.Collection; +import java.util.List; import java.util.function.Consumer; import org.apache.commons.logging.LogFactory; @@ -36,12 +37,12 @@ import org.springframework.kafka.config.MultiMethodKafkaListenerEndpoint; import org.springframework.kafka.support.Acknowledgment; import org.springframework.kafka.support.EndpointHandlerMethod; +import org.springframework.kafka.support.EndpointHandlerMultiMethod; import org.springframework.kafka.support.KafkaUtils; import org.springframework.kafka.support.TopicForRetryable; import org.springframework.lang.NonNull; import org.springframework.lang.Nullable; - /** * *

Configures main, retry and DLT topics based on a main endpoint and provided @@ -115,7 +116,7 @@ * public RetryTopicConfiguration myRetryableTopic(KafkaTemplate<String, MyPojo> template) { * return RetryTopicConfigurationBuilder * .newInstance() - * .fixedBackoff(3000) + * .fixedBackOff(3000) * .maxAttempts(5) * .includeTopics("my-topic", "my-other-topic") * .create(template); @@ -150,6 +151,32 @@ * // ... message processing * } * + *

+ * Since 3.2, the {@link org.springframework.kafka.annotation.RetryableTopic} annotation supports
+ * {@link org.springframework.kafka.annotation.KafkaListener} annotated classes, such as:
+ *
+ *     @RetryableTopic(attempts = 3,
+ *     		backoff = @Backoff(delay = 700, maxDelay = 12000, multiplier = 3))
+ *     @KafkaListener(topics = "my-annotated-topic")
+ *     static class ListenerBean {
+ *          @KafkaHandler
+ *         public void processMessage(MyPojo message) {
+ *        		// ... message processing
+ *         }
+ *     }
+ *
 *
 * Or through meta-annotations, such as:
 *
  *     @RetryableTopic(backoff = @Backoff(delay = 700, maxDelay = 12000, multiplier = 3))
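To make the class-level support concrete, here is a minimal sketch of such a listener. It mirrors the javadoc example above; the listener id, topic name, and the `Order` payload type are illustrative, not taken from the diff:

```java
import org.springframework.kafka.annotation.KafkaHandler;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.RetryableTopic;
import org.springframework.retry.annotation.Backoff;

// Hypothetical listener bean: the class-level @RetryableTopic now applies to
// every @KafkaHandler routed by the class-level @KafkaListener. Before this
// change the configurer rejected multi-method endpoints outright.
@RetryableTopic(backoff = @Backoff(delay = 700, maxDelay = 12000, multiplier = 3))
@KafkaListener(id = "orderListener", topics = "my-annotated-topic")
class OrderListener {

	@KafkaHandler
	void processOrder(Order order) {
		// ... type-specific processing; a thrown exception sends the record
		// through the generated retry topics with the back-off above
	}

	@KafkaHandler(isDefault = true)
	void processOther(Object other) {
		// ... fallback for payloads no other @KafkaHandler matches
	}

}

// Assumed application payload type, only for the example.
record Order(String id) { }
```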
@@ -211,6 +238,7 @@
  * @author Fabio da Silva Jr.
  * @author Gary Russell
  * @author Wang Zhiyang
+ * @author Borahm Lee
  *
  * @since 2.7
  *
@@ -281,7 +309,7 @@ public void processMainAndRetryListeners(EndpointProcessor endpointProcessor,
 											KafkaListenerEndpointRegistrar registrar,
 											@Nullable KafkaListenerContainerFactory factory,
 											String defaultContainerFactoryBeanName) {
-		throwIfMultiMethodEndpoint(mainEndpoint);
+
 		String id = mainEndpoint.getId();
 		if (id == null) {
 			id = "no.id.provided";
@@ -300,6 +328,7 @@ private void configureEndpoints(MethodKafkaListenerEndpoint mainEndpoint,
 									RetryTopicConfiguration configuration,
 									DestinationTopicProcessor.Context context,
 									String defaultContainerFactoryBeanName) {
+
 		this.destinationTopicProcessor
 				.processDestinationTopicProperties(destinationTopicProperties ->
 						processAndRegisterEndpoint(mainEndpoint,
@@ -330,7 +359,13 @@ private void processAndRegisterEndpoint(MethodKafkaListenerEndpoint mainEn
 			endpoint = mainEndpoint;
 		}
 		else {
-			endpoint = new MethodKafkaListenerEndpoint<>();
+			if (mainEndpoint instanceof MultiMethodKafkaListenerEndpoint multi) {
+				endpoint = new MultiMethodKafkaListenerEndpoint<>(multi.getMethods(), multi.getDefaultMethod(),
+						multi.getBean());
+			}
+			else {
+				endpoint = new MethodKafkaListenerEndpoint<>();
+			}
 			endpoint.setId(mainEndpoint.getId());
 			endpoint.setMainListenerId(mainEndpoint.getId());
 		}
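The branch above is the heart of the multi-method support. A rough standalone sketch of the same cloning decision (simplified generics, not framework code; the getters and setters used are the ones visible in this hunk):

```java
import org.springframework.kafka.config.MethodKafkaListenerEndpoint;
import org.springframework.kafka.config.MultiMethodKafkaListenerEndpoint;

// A multi-method main endpoint must be mirrored by a
// MultiMethodKafkaListenerEndpoint carrying the same handler methods and
// default method, otherwise @KafkaHandler routing would be lost on the
// retry and DLT endpoints.
final class RetryEndpointSketch {

	static MethodKafkaListenerEndpoint<Object, Object> endpointFor(
			MethodKafkaListenerEndpoint<Object, Object> mainEndpoint) {

		MethodKafkaListenerEndpoint<Object, Object> endpoint;
		if (mainEndpoint instanceof MultiMethodKafkaListenerEndpoint<Object, Object> multi) {
			endpoint = new MultiMethodKafkaListenerEndpoint<>(multi.getMethods(), multi.getDefaultMethod(),
					multi.getBean());
		}
		else {
			endpoint = new MethodKafkaListenerEndpoint<>();
		}
		endpoint.setId(mainEndpoint.getId());
		endpoint.setMainListenerId(mainEndpoint.getId());
		return endpoint;
	}

}
```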
@@ -345,12 +380,12 @@ private void processAndRegisterEndpoint(MethodKafkaListenerEndpoint mainEn
 				getEndpointHandlerMethod(mainEndpoint, configuration, destinationTopicProperties);
 
 		createEndpointCustomizer(endpointBeanMethod, destinationTopicProperties)
-						.customizeEndpointAndCollectTopics(endpoint)
-						.forEach(topicNamesHolder ->
-								this.destinationTopicProcessor
-										.registerDestinationTopic(topicNamesHolder.getMainTopic(),
-												topicNamesHolder.getCustomizedTopic(),
-												destinationTopicProperties, context));
+				.customizeEndpointAndCollectTopics(endpoint)
+				.forEach(topicNamesHolder ->
+						this.destinationTopicProcessor
+								.registerDestinationTopic(topicNamesHolder.getMainTopic(),
+										topicNamesHolder.getCustomizedTopic(),
+										destinationTopicProperties, context));
 
 		registrar.registerEndpoint(endpoint, resolvedFactory);
 		endpoint.setBeanFactory(this.beanFactory);
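For orientation: the registrar receiving the retry endpoints here is the same `KafkaListenerEndpointRegistrar` that applications can access by implementing `KafkaListenerConfigurer`. A minimal sketch, assuming no further customization is needed:

```java
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.KafkaListenerConfigurer;
import org.springframework.kafka.config.KafkaListenerEndpointRegistrar;

// Sketch only: hook into the same registrar the retry topic configurer uses,
// e.g. to register an endpoint programmatically via registrar.registerEndpoint(...).
@Configuration
class ListenerRegistrarCustomization implements KafkaListenerConfigurer {

	@Override
	public void configureKafkaListeners(KafkaListenerEndpointRegistrar registrar) {
		// custom endpoint registration or registrar tweaks would go here
	}

}
```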
@@ -359,9 +394,10 @@ private void processAndRegisterEndpoint(MethodKafkaListenerEndpoint mainEn
 	protected EndpointHandlerMethod getEndpointHandlerMethod(MethodKafkaListenerEndpoint mainEndpoint,
 														RetryTopicConfiguration configuration,
 														DestinationTopic.Properties props) {
+
 		EndpointHandlerMethod dltHandlerMethod = configuration.getDltHandlerMethod();
-		EndpointHandlerMethod retryBeanMethod = new EndpointHandlerMethod(mainEndpoint.getBean(), mainEndpoint.getMethod());
-		return props.isDltTopic() ? getDltEndpointHandlerMethodOrDefault(dltHandlerMethod) : retryBeanMethod;
+		return props.isDltTopic() ? getDltEndpointHandlerMethodOrDefault(mainEndpoint, dltHandlerMethod)
+				: getRetryEndpointHandlerMethod(mainEndpoint);
 	}
 
 	private Consumer> getTopicCreationFunction(RetryTopicConfiguration config) {
@@ -383,7 +419,7 @@ protected void createNewTopicBeans(Collection topics, RetryTopicConfigur
 		);
 	}
 
-	protected EndpointCustomizer createEndpointCustomizer(
+	protected EndpointCustomizer> createEndpointCustomizer(
 			EndpointHandlerMethod endpointBeanMethod, DestinationTopic.Properties destinationTopicProperties) {
 
 		return new EndpointCustomizerFactory(destinationTopicProperties,
@@ -393,8 +429,28 @@ protected EndpointCustomizer createEndpointCustomizer(
 				.createEndpointCustomizer();
 	}
 
-	private EndpointHandlerMethod getDltEndpointHandlerMethodOrDefault(EndpointHandlerMethod dltEndpointHandlerMethod) {
-		return dltEndpointHandlerMethod != null ? dltEndpointHandlerMethod : DEFAULT_DLT_HANDLER;
+	private EndpointHandlerMethod getDltEndpointHandlerMethodOrDefault(MethodKafkaListenerEndpoint mainEndpoint,
+			@Nullable EndpointHandlerMethod dltEndpointHandlerMethod) {
+
+		EndpointHandlerMethod dltHandlerMethod = dltEndpointHandlerMethod != null
+				? dltEndpointHandlerMethod : DEFAULT_DLT_HANDLER;
+		if (mainEndpoint instanceof MultiMethodKafkaListenerEndpoint) {
+			dltHandlerMethod = new EndpointHandlerMultiMethod(dltHandlerMethod.resolveBean(this.beanFactory),
+					dltHandlerMethod.getMethod(), List.of(dltHandlerMethod.getMethod()));
+		}
+		return dltHandlerMethod;
+	}
+
+	private EndpointHandlerMethod getRetryEndpointHandlerMethod(MethodKafkaListenerEndpoint mainEndpoint) {
+		EndpointHandlerMethod retryBeanMethod;
+		if (mainEndpoint instanceof MultiMethodKafkaListenerEndpoint multi) {
+			retryBeanMethod = new EndpointHandlerMultiMethod(multi.getBean(), multi.getDefaultMethod(),
+					multi.getMethods());
+		}
+		else {
+			retryBeanMethod = new EndpointHandlerMethod(mainEndpoint.getBean(), mainEndpoint.getMethod());
+		}
+		return retryBeanMethod;
 	}
 
 	private KafkaListenerContainerFactory resolveAndConfigureFactoryForMainEndpoint(
@@ -419,12 +475,6 @@ private KafkaListenerContainerFactory resolveAndConfigureFactoryForRetryEndpo
 		return this.listenerContainerFactoryConfigurer.decorateFactory(resolvedFactory);
 	}
 
-	private void throwIfMultiMethodEndpoint(MethodKafkaListenerEndpoint mainEndpoint) {
-		if (mainEndpoint instanceof MultiMethodKafkaListenerEndpoint) {
-			throw new IllegalArgumentException("Retry Topic is not compatible with " + MultiMethodKafkaListenerEndpoint.class);
-		}
-	}
-
 	public static EndpointHandlerMethod createHandlerMethodWith(Object beanOrClass, String methodName) {
 		return new EndpointHandlerMethod(beanOrClass, methodName);
 	}
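With throwIfMultiMethodEndpoint removed and the new EndpointHandlerMultiMethod path in place, class-level listeners with multiple @KafkaHandler methods can now participate in retry topics. A minimal sketch of such a listener follows; the bean name, topic, payload types, and attempt count are illustrative, not taken from this change set:

import org.springframework.kafka.annotation.KafkaHandler;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.RetryableTopic;
import org.springframework.stereotype.Component;

@Component
@RetryableTopic(attempts = "3")
@KafkaListener(id = "orders", topics = "orders")
class OrderEventsListener {

	@KafkaHandler
	void onCreated(String event) {
		// payload-type matched handler; failures are routed through the retry topics
	}

	@KafkaHandler(isDefault = true)
	void onOther(Object event) {
		// default handler, carried over via MultiMethodKafkaListenerEndpoint.getDefaultMethod()
	}

}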
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/security/jaas/KafkaJaasLoginModuleInitializer.java b/spring-kafka/src/main/java/org/springframework/kafka/security/jaas/KafkaJaasLoginModuleInitializer.java
index e0def23d1c..d907d48b98 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/security/jaas/KafkaJaasLoginModuleInitializer.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/security/jaas/KafkaJaasLoginModuleInitializer.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2017-2023 the original author or authors.
+ * Copyright 2017-2024 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,7 +16,6 @@
 
 package org.springframework.kafka.security.jaas;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
@@ -37,6 +36,7 @@
  * @author Marius Bogoevici
  * @author Gary Russell
  * @author Edan Idzerda
+ * @author Soby Chacko
  *
  * @since 1.3
  */
@@ -87,8 +87,6 @@ public enum ControlFlag {
 
 	private final boolean ignoreJavaLoginConfigParamSystemProperty;
 
-	private final File placeholderJaasConfiguration;
-
 	private final Map options = new HashMap<>();
 
 	private String loginModule = "com.sun.security.auth.module.Krb5LoginModule";
@@ -99,8 +97,6 @@ public enum ControlFlag {
 	public KafkaJaasLoginModuleInitializer() throws IOException {
 		// we ignore the system property if it wasn't originally set at launch
 		this.ignoreJavaLoginConfigParamSystemProperty = (System.getProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM) == null);
-		this.placeholderJaasConfiguration = File.createTempFile("kafka-client-jaas-config-placeholder", "conf");
-		this.placeholderJaasConfiguration.deleteOnExit();
 	}
 
 	public void setLoginModule(String loginModule) {
@@ -137,14 +133,6 @@ public void afterSingletonsInstantiated() {
 					new AppConfigurationEntry[] { kafkaClientConfigurationEntry });
 			Configuration.setConfiguration(new InternalConfiguration(configurationEntries,
 					Configuration.getConfiguration()));
-			// Workaround for a 0.9 client issue where even if the Configuration is
-			// set
-			// a system property check is performed.
-			// Since the Configuration already exists, this will be ignored.
-			if (this.placeholderJaasConfiguration != null) {
-				System.setProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM,
-						this.placeholderJaasConfiguration.getAbsolutePath());
-			}
 		}
 	}
 
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/streams/KafkaStreamsInteractiveQueryService.java b/spring-kafka/src/main/java/org/springframework/kafka/streams/KafkaStreamsInteractiveQueryService.java
index 05919ff952..2c011d6c24 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/streams/KafkaStreamsInteractiveQueryService.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/streams/KafkaStreamsInteractiveQueryService.java
@@ -16,13 +16,20 @@
 
 package org.springframework.kafka.streams;
 
+import java.util.Properties;
+
+import org.apache.kafka.common.serialization.Serializer;
 import org.apache.kafka.streams.KafkaStreams;
+import org.apache.kafka.streams.KeyQueryMetadata;
 import org.apache.kafka.streams.StoreQueryParameters;
+import org.apache.kafka.streams.state.HostInfo;
 import org.apache.kafka.streams.state.QueryableStoreType;
 
 import org.springframework.kafka.config.StreamsBuilderFactoryBean;
+import org.springframework.lang.Nullable;
 import org.springframework.retry.support.RetryTemplate;
 import org.springframework.util.Assert;
+import org.springframework.util.StringUtils;
 
 /**
  * Provide a wrapper API around the interactive query stores in Kafka Streams.
@@ -75,20 +82,80 @@ public void setRetryTemplate(RetryTemplate retryTemplate) {
 	 * @return queryable store.
 	 */
 	public <T> T retrieveQueryableStore(String storeName, QueryableStoreType<T> storeType) {
+		populateKafkaStreams();
+		StoreQueryParameters storeQueryParams = StoreQueryParameters.fromNameAndType(storeName, storeType);
+
+		return this.retryTemplate.execute(context -> {
+			try {
+				return this.kafkaStreams.store(storeQueryParams);
+			}
+			catch (Exception e) {
+				throw new IllegalStateException("Error retrieving state store: " + storeName, e);
+			}
+		});
+	}
+
+	private void populateKafkaStreams() {
 		if (this.kafkaStreams == null) {
 			this.kafkaStreams = this.streamsBuilderFactoryBean.getKafkaStreams();
 		}
 		Assert.notNull(this.kafkaStreams, "KafkaStreams cannot be null. " +
 				"Make sure that the corresponding StreamsBuilderFactoryBean has started properly.");
-		StoreQueryParameters storeQueryParams = StoreQueryParameters.fromNameAndType(storeName, storeType);
+	}
+
+	/**
+	 * Retrieve the current {@link HostInfo} on which this Kafka Streams application is running.
+	 * This {@link HostInfo} is different from the Kafka `bootstrap.server` property; it is based on
+	 * the Kafka Streams configuration property `application.server`, through which user-defined REST
+	 * endpoints can be invoked on each Kafka Streams application instance.
+	 * If this property - `application.server` - is not available from the end-user application, then null is returned.
+	 * @return the current {@link HostInfo}
+	 */
+	@Nullable
+	public HostInfo getCurrentKafkaStreamsApplicationHostInfo() {
+		Properties streamsConfiguration = this.streamsBuilderFactoryBean
+				.getStreamsConfiguration();
+		if (streamsConfiguration != null && streamsConfiguration.containsKey("application.server")) {
+			String applicationServer = (String) streamsConfiguration.get("application.server");
+			String[] appServerComponents = StringUtils.split(applicationServer, ":");
+			if (appServerComponents != null) {
+				return new HostInfo(appServerComponents[0], Integer.parseInt(appServerComponents[1]));
+			}
+		}
+		return null;
+	}
 
+	/**
+	 * Retrieve the {@link HostInfo} on which the provided store and key are hosted. This may
+	 * not be the current host that is running the application. Kafka Streams will look
+	 * through all the consumer instances under the same application id and retrieve the
+	 * proper host. Note that end-user applications must provide `application.server` as a
+	 * configuration property on all the application instances when calling this method;
+	 * if it is not available, the lookup fails once the retries are exhausted.
+	 * @param <K> generic type for key
+	 * @param store store name
+	 * @param key key to look for
+	 * @param serializer {@link Serializer} for the key
+	 * @return the {@link HostInfo} where the key for the provided store is hosted currently
+	 */
+	public <K> HostInfo getKafkaStreamsApplicationHostInfo(String store, K key, Serializer<K> serializer) {
+		populateKafkaStreams();
 		return this.retryTemplate.execute(context -> {
+			Throwable throwable = null;
 			try {
-				return this.kafkaStreams.store(storeQueryParams);
+				KeyQueryMetadata keyQueryMetadata = this.kafkaStreams.queryMetadataForKey(store, key, serializer);
+				if (keyQueryMetadata != null) {
+					return keyQueryMetadata.activeHost();
+				}
 			}
 			catch (Exception e) {
-				throw new IllegalStateException("Error retrieving state store: " + storeName, e);
+				throwable = e;
 			}
+			// Even when no exception was thrown above, keyQueryMetadata may still be null for
+			// transient reasons; force an exception below so that the retry kicks in.
+			throw new IllegalStateException(
+					"Error when retrieving state store.", throwable != null ? throwable :
+					new Throwable("KeyQueryMetadata is not yet available."));
 		});
 	}
 
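Taken together, these additions let an application query a local state store and discover which instance hosts a given key. A hedged usage sketch (the store name, key, and wiring are assumptions; QueryableStoreTypes and Serdes are the standard Kafka Streams factories):

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.state.HostInfo;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

import org.springframework.kafka.streams.KafkaStreamsInteractiveQueryService;

class CountsQuery {

	private final KafkaStreamsInteractiveQueryService queryService;

	CountsQuery(KafkaStreamsInteractiveQueryService queryService) {
		this.queryService = queryService;
	}

	Long localCount(String key) {
		// Retries via the service's RetryTemplate until the store is queryable.
		ReadOnlyKeyValueStore<String, Long> store =
				this.queryService.retrieveQueryableStore("counts", QueryableStoreTypes.keyValueStore());
		return store.get(key);
	}

	HostInfo hostFor(String key) {
		// Needs 'application.server' set on every instance; raises IllegalStateException
		// when no metadata is available after the retries are exhausted.
		return this.queryService.getKafkaStreamsApplicationHostInfo("counts", key, Serdes.String().serializer());
	}

}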
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/streams/KafkaStreamsMicrometerListener.java b/spring-kafka/src/main/java/org/springframework/kafka/streams/KafkaStreamsMicrometerListener.java
index e1814c13d1..f0afd5045f 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/streams/KafkaStreamsMicrometerListener.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/streams/KafkaStreamsMicrometerListener.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2020-2022 the original author or authors.
+ * Copyright 2020-2024 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,35 +16,30 @@
 
 package org.springframework.kafka.streams;
 
-import java.util.ArrayList;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
 
-import org.apache.kafka.streams.KafkaStreams;
-
-import org.springframework.kafka.config.StreamsBuilderFactoryBean;
-
-import io.micrometer.core.instrument.ImmutableTag;
 import io.micrometer.core.instrument.MeterRegistry;
 import io.micrometer.core.instrument.Tag;
+import io.micrometer.core.instrument.binder.MeterBinder;
 import io.micrometer.core.instrument.binder.kafka.KafkaStreamsMetrics;
+import org.apache.kafka.streams.KafkaStreams;
+
+import org.springframework.kafka.config.StreamsBuilderFactoryBean;
+import org.springframework.kafka.core.KafkaMetricsSupport;
+import org.springframework.scheduling.TaskScheduler;
 
 /**
  * Creates a {@link KafkaStreamsMetrics} for the {@link KafkaStreams}.
  *
  * @author Gary Russell
+ * @author Artem Bilan
+ *
  * @since 2.5.3
  *
  */
-public class KafkaStreamsMicrometerListener implements StreamsBuilderFactoryBean.Listener {
-
-	private final MeterRegistry meterRegistry;
-
-	private final List tags;
-
-	private final Map metrics = new HashMap<>();
+public class KafkaStreamsMicrometerListener extends KafkaMetricsSupport
+		implements StreamsBuilderFactoryBean.Listener {
 
 	/**
 	 * Construct an instance with the provided registry.
@@ -54,33 +49,51 @@ public KafkaStreamsMicrometerListener(MeterRegistry meterRegistry) {
 		this(meterRegistry, Collections.emptyList());
 	}
 
+	/**
+	 * Construct an instance with the provided registry and task scheduler.
+	 * @param meterRegistry the registry.
+	 * @param taskScheduler the task scheduler.
+	 * @since 3.3
+	 */
+	public KafkaStreamsMicrometerListener(MeterRegistry meterRegistry, TaskScheduler taskScheduler) {
+		this(meterRegistry, Collections.emptyList(), taskScheduler);
+	}
+
 	/**
 	 * Construct an instance with the provided registry and tags.
 	 * @param meterRegistry the registry.
 	 * @param tags the tags.
 	 */
 	public KafkaStreamsMicrometerListener(MeterRegistry meterRegistry, List tags) {
-		this.meterRegistry = meterRegistry;
-		this.tags = tags;
+		super(meterRegistry, tags);
 	}
 
+	/**
+	 * Construct an instance with the provided registry, tags and task scheduler.
+	 * @param meterRegistry the registry.
+	 * @param tags the tags.
+	 * @param taskScheduler the task scheduler.
+	 * @since 3.3
+	 */
+	public KafkaStreamsMicrometerListener(MeterRegistry meterRegistry, List tags, TaskScheduler taskScheduler) {
+		super(meterRegistry, tags, taskScheduler);
+	}
 
 	@Override
 	public synchronized void streamsAdded(String id, KafkaStreams kafkaStreams) {
-		if (!this.metrics.containsKey(id)) {
-			List streamsTags = new ArrayList<>(this.tags);
-			streamsTags.add(new ImmutableTag("spring.id", id));
-			this.metrics.put(id, new KafkaStreamsMetrics(kafkaStreams, streamsTags));
-			this.metrics.get(id).bindTo(this.meterRegistry);
-		}
+		bindClient(id, kafkaStreams);
+	}
+
+	@Override
+	protected MeterBinder createClientMetrics(KafkaStreams client, List tags) {
+		return this.scheduler != null
+				? new KafkaStreamsMetrics(client, tags, this.scheduler)
+				: new KafkaStreamsMetrics(client, tags);
 	}
 
 	@Override
 	public synchronized void streamsRemoved(String id, KafkaStreams streams) {
-		KafkaStreamsMetrics removed = this.metrics.remove(id);
-		if (removed != null) {
-			removed.close();
-		}
+		unbindClient(id, streams);
 	}
 
 }
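A sketch of wiring the new scheduler-aware constructor; the configuration class and bean names are illustrative and not part of this diff:

import io.micrometer.core.instrument.MeterRegistry;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
import org.springframework.kafka.streams.KafkaStreamsMicrometerListener;
import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler;

@Configuration
class StreamsMetricsConfig {

	@Bean
	KafkaStreamsMicrometerListener streamsMetricsListener(MeterRegistry registry,
			ThreadPoolTaskScheduler taskScheduler, StreamsBuilderFactoryBean factoryBean) {

		// The TaskScheduler variant (since 3.3) lets KafkaStreamsMetrics poll metrics
		// on the provided scheduler instead of binding them once, statically.
		KafkaStreamsMicrometerListener listener = new KafkaStreamsMicrometerListener(registry, taskScheduler);
		factoryBean.addListener(listener);
		return listener;
	}

}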
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/AbstractKafkaHeaderMapper.java b/spring-kafka/src/main/java/org/springframework/kafka/support/AbstractKafkaHeaderMapper.java
index f33b135f00..2edba15612 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/support/AbstractKafkaHeaderMapper.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/support/AbstractKafkaHeaderMapper.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2018-2022 the original author or authors.
+ * Copyright 2018-2024 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -24,6 +24,7 @@
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Locale;
 import java.util.Map;
 import java.util.Set;
 import java.util.stream.Collectors;
@@ -43,12 +44,23 @@
  *
  * @author Gary Russell
  * @author Artem Bilan
+ * @author Sanghyeok An
+ * @author Soby Chacko
  *
  * @since 2.1.3
  *
  */
 public abstract class AbstractKafkaHeaderMapper implements KafkaHeaderMapper {
 
+	private static final String[] DEFAULT_RAW_MAPPED_HEADERS = {
+			KafkaHeaders.LISTENER_INFO,
+			"b3",
+			"traceparent",
+			"X-B3-TraceId",
+			"X-B3-SpanId",
+			"X-B3-Sampled",
+			"X-B3-Flags"};
+
 	protected final LogAccessor logger = new LogAccessor(LogFactory.getLog(getClass())); // NOSONAR
 
 	private final List matchers = new ArrayList<>();
@@ -56,7 +68,9 @@ public abstract class AbstractKafkaHeaderMapper implements KafkaHeaderMapper {
 	private final Map rawMappedHeaders = new HashMap<>();
 
 	{
-		this.rawMappedHeaders.put(KafkaHeaders.LISTENER_INFO, true);
+		for (String defaultRawMappedHeader : DEFAULT_RAW_MAPPED_HEADERS) {
+			this.rawMappedHeaders.put(defaultRawMappedHeader, true);
+		}
 	}
 
 	private final boolean outbound;
@@ -153,6 +167,8 @@ public void setCharset(Charset charset) {
 	 * {@code byte[]} for outbound). Inbound headers that match will be mapped as
 	 * {@code byte[]} unless the corresponding boolean in the map value is true,
 	 * in which case it will be mapped as a String.
+	 * Headers mapped by default are: {@code kafka_listenerInfo}, {@code b3}, {@code traceparent},
+	 * {@code X-B3-TraceId}, {@code X-B3-SpanId}, {@code X-B3-Sampled} and {@code X-B3-Flags}.
 	 * @param rawMappedHeaders the header names to not convert and
 	 * @since 2.2.5
 	 * @see #setCharset(Charset)
@@ -160,7 +176,6 @@ public void setCharset(Charset charset) {
 	 */
 	public void setRawMappedHeaders(Map rawMappedHeaders) {
 		if (!ObjectUtils.isEmpty(rawMappedHeaders)) {
-			this.rawMappedHeaders.clear();
 			this.rawMappedHeaders.putAll(rawMappedHeaders);
 		}
 	}
@@ -204,7 +219,7 @@ protected boolean matchesForInbound(String header) {
 		if (this.outbound) {
 			return true;
 		}
-		if (this.matchers.size() == 0) {
+		if (this.matchers.isEmpty()) {
 			return true;
 		}
 		return doesMatch(header);
@@ -255,11 +270,11 @@ else if (value instanceof String) {
 	 * @return the value to add.
 	 */
 	protected Object headerValueToAddIn(Header header) {
-		Object mapped = mapRawIn(header.key(), header.value());
-		if (mapped == null) {
-			mapped = header.value();
+		if (header == null || header.value() == null) {
+			return null;
 		}
-		return mapped;
+		String mapped = mapRawIn(header.key(), header.value());
+		return mapped != null ? mapped : header.value();
 	}
 
 	@Nullable
@@ -271,7 +286,6 @@ private String mapRawIn(String header, byte[] value) {
 		return null;
 	}
 
-
 	/**
 	 * A matcher for headers.
 	 * @since 2.3
@@ -339,13 +353,13 @@ protected SimplePatternBasedHeaderMatcher(String pattern) {
 
 		SimplePatternBasedHeaderMatcher(String pattern, boolean negate) {
 			Assert.notNull(pattern, "Pattern must not be null");
-			this.pattern = pattern.toLowerCase();
+			this.pattern = pattern.toLowerCase(Locale.ROOT);
 			this.negate = negate;
 		}
 
 		@Override
 		public boolean matchHeader(String headerName) {
-			String header = headerName.toLowerCase();
+			String header = headerName.toLowerCase(Locale.ROOT);
 			if (PatternMatchUtils.simpleMatch(this.pattern, header)) {
 				LOGGER.debug(() ->
 						MessageFormat.format(
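Because setRawMappedHeaders now merges with (rather than replaces) the defaults above, custom raw headers can be registered without losing the b3/traceparent tracing headers. A small sketch; the custom header name is illustrative:

import java.util.Map;

import org.springframework.kafka.support.DefaultKafkaHeaderMapper;

class RawHeaderMapping {

	static DefaultKafkaHeaderMapper mapper() {
		DefaultKafkaHeaderMapper mapper = new DefaultKafkaHeaderMapper();
		// false keeps inbound values as byte[]; true would decode them to String.
		// The defaults (kafka_listenerInfo, b3, traceparent, X-B3-*) survive because
		// the setter no longer clears the existing entries.
		mapper.setRawMappedHeaders(Map.of("myRawHeader", false));
		return mapper;
	}

}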
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/Acknowledgment.java b/spring-kafka/src/main/java/org/springframework/kafka/support/Acknowledgment.java
index 47217d53f1..e92151c476 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/support/Acknowledgment.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/support/Acknowledgment.java
@@ -46,7 +46,7 @@ public interface Acknowledgment {
 	 * Must be called on the consumer thread.
 	 * <p>
 	 * @param sleep the duration to sleep; the actual sleep time will be larger of this value
-	 * and the container's {@code maxPollInterval}, which defaults to 5 seconds.
+	 * and the container's {@code pollTimeout}, which defaults to 5 seconds.
 	 * @since 2.8.7
 	 */
 	default void nack(Duration sleep) {
@@ -74,7 +74,7 @@ default void acknowledge(int index) {
 	 * <p>
 	 * @param index the index of the failed record in the batch.
 	 * @param sleep the duration to sleep; the actual sleep time will be larger of this value
-	 * and the container's {@code maxPollInterval}, which defaults to 5 seconds.
+	 * and the container's {@code pollTimeout}, which defaults to 5 seconds.
 	 * @since 2.8.7
 	 */
 	default void nack(int index, Duration sleep) {
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/DefaultKafkaHeaderMapper.java b/spring-kafka/src/main/java/org/springframework/kafka/support/DefaultKafkaHeaderMapper.java
index 7cb5b297ee..0be018fd8d 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/support/DefaultKafkaHeaderMapper.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/support/DefaultKafkaHeaderMapper.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2017-2023 the original author or authors.
+ * Copyright 2017-2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -27,6 +27,9 @@
 import java.util.Map;
 import java.util.Set;
 
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
 import org.apache.kafka.common.header.Header;
 import org.apache.kafka.common.header.Headers;
 import org.apache.kafka.common.header.internals.RecordHeader;
@@ -34,17 +37,6 @@
 import org.springframework.messaging.MessageHeaders;
 import org.springframework.util.Assert;
 import org.springframework.util.ClassUtils;
-import org.springframework.util.MimeType;
-
-import com.fasterxml.jackson.core.JsonProcessingException;
-import com.fasterxml.jackson.core.type.TypeReference;
-import com.fasterxml.jackson.databind.DeserializationContext;
-import com.fasterxml.jackson.databind.JsonNode;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.deser.std.StdNodeBasedDeserializer;
-import com.fasterxml.jackson.databind.module.SimpleModule;
-import com.fasterxml.jackson.databind.node.TextNode;
-import com.fasterxml.jackson.databind.type.TypeFactory;
 
 /**
  * Default header mapper for Apache Kafka.
@@ -55,6 +47,7 @@
  *
  * @author Gary Russell
  * @author Artem Bilan
+ * @author Soby Chacko
  *
  * @since 1.3
  *
@@ -163,8 +156,6 @@ private DefaultKafkaHeaderMapper(boolean outbound, ObjectMapper objectMapper, St
 		Assert.notNull(objectMapper, "'objectMapper' must not be null");
 		Assert.noNullElements(patterns, "'patterns' must not have null elements");
 		this.objectMapper = objectMapper;
-		this.objectMapper
-				.registerModule(new SimpleModule().addDeserializer(MimeType.class, new MimeTypeJsonDeserializer()));
 	}
 
 	/**
@@ -323,6 +314,10 @@ public void toHeaders(Headers source, final Map<String, Object> headers) {
 		else if (headerName.equals(KafkaHeaders.LISTENER_INFO) && matchesForInbound(headerName)) {
 			headers.put(headerName, new String(header.value(), getCharset()));
 		}
+		else if (headerName.equals(KafkaUtils.KEY_DESERIALIZER_EXCEPTION_HEADER) ||
+				headerName.equals(KafkaUtils.VALUE_DESERIALIZER_EXCEPTION_HEADER)) {
+			headers.put(headerName, header);
+		}
 		else if (!(headerName.equals(JSON_TYPES)) && matchesForInbound(headerName)) {
 			if (jsonTypes.containsKey(headerName)) {
 				String requestedType = jsonTypes.get(headerName);
@@ -426,39 +421,6 @@ protected boolean trusted(String requestedType) {
 		return true;
 	}
 
-
-	/**
-	 * The {@link StdNodeBasedDeserializer} extension for {@link MimeType} deserialization.
-	 * It is presented here for backward compatibility when older producers send {@link MimeType}
-	 * headers as serialization version.
-	 */
-	private class MimeTypeJsonDeserializer extends StdNodeBasedDeserializer<MimeType> {
-
-		private static final long serialVersionUID = 1L;
-
-		MimeTypeJsonDeserializer() {
-			super(MimeType.class);
-		}
-
-		@Override
-		public MimeType convert(JsonNode root, DeserializationContext ctxt) throws IOException {
-			if (root instanceof TextNode) {
-				return MimeType.valueOf(root.asText());
-			}
-			else {
-				JsonNode type = root.get("type");
-				JsonNode subType = root.get("subtype");
-				JsonNode parameters = root.get("parameters");
-				Map<String, String> params =
-						DefaultKafkaHeaderMapper.this.objectMapper.readValue(parameters.traverse(),
-								TypeFactory.defaultInstance()
-										.constructMapType(HashMap.class, String.class, String.class));
-				return new MimeType(type.asText(), subType.asText(), params);
-			}
-		}
-
-	}
-
 	/**
 	 * Represents a header that could not be decoded due to an untrusted type.
 	 */
@@ -476,7 +438,6 @@ public NonTrustedHeaderType() {
 			this.untrustedType = untrustedType;
 		}
 
-
 		public void setHeaderValue(byte[] headerValue) { // NOSONAR
 			this.headerValue = headerValue; // NOSONAR array reference
 		}
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/EndpointHandlerMethod.java b/spring-kafka/src/main/java/org/springframework/kafka/support/EndpointHandlerMethod.java
index 9d86bce5fd..fac721f50d 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/support/EndpointHandlerMethod.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/support/EndpointHandlerMethod.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2021 the original author or authors.
+ * Copyright 2021-2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -32,6 +32,8 @@
 *
 * @author Tomaz Fernandes
 * @author Gary Russell
+ * @author Wang Zhiyang
+ *
 * @since 2.7
 *
 */
@@ -39,7 +41,7 @@ public class EndpointHandlerMethod {
 
 	private final Object beanOrClass;
 
-	private final String methodName;
+	private String methodName;
 
 	private Object bean;
 
@@ -52,6 +54,17 @@ public EndpointHandlerMethod(Object beanOrClass, String methodName) {
 		this.methodName = methodName;
 	}
 
+	/**
+	 * Construct an instance for the provided bean.
+	 * @param bean the bean.
+	 * @since 3.2
+	 */
+	public EndpointHandlerMethod(Object bean) {
+		Assert.notNull(bean, () -> "No bean for destination provided!");
+		this.bean = bean;
+		this.beanOrClass = bean.getClass();
+	}
+
 	public EndpointHandlerMethod(Object bean, Method method) {
 		Assert.notNull(bean, () -> "No bean for destination provided!");
 		Assert.notNull(method, () -> "No method for destination bean class provided!");
@@ -66,15 +79,15 @@ public EndpointHandlerMethod(Object bean, Method method) {
 	 * @return the method.
 	 */
 	public Method getMethod() {
-		if (this.beanOrClass instanceof Class) {
-			return forClass((Class<?>) this.beanOrClass);
+		if (this.beanOrClass instanceof Class<?> clazz) {
+			return forClass(clazz);
 		}
 		Assert.state(this.bean != null, "Bean must be resolved before accessing its method");
 		if (this.bean instanceof EndpointHandlerMethod) {
 			try {
 				return Object.class.getMethod("toString");
 			}
-			catch (NoSuchMethodException | SecurityException e) {
+			catch (NoSuchMethodException | SecurityException ignored) {
 			}
 		}
 		return forClass(this.bean.getClass());
@@ -91,13 +104,12 @@ public String getMethodName() {
 	}
 
 	public Object resolveBean(BeanFactory beanFactory) {
-		if (this.bean instanceof EndpointHandlerMethod) {
-			return ((EndpointHandlerMethod) this.bean).beanOrClass;
+		if (this.bean instanceof EndpointHandlerMethod endpointHandlerMethod) {
+			return endpointHandlerMethod.beanOrClass;
 		}
 		if (this.bean == null) {
 			try {
-				if (this.beanOrClass instanceof Class) {
-					Class<?> clazz = (Class<?>) this.beanOrClass;
+				if (this.beanOrClass instanceof Class<?> clazz) {
 					try {
 						this.bean = beanFactory.getBean(clazz);
 					}
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/EndpointHandlerMultiMethod.java b/spring-kafka/src/main/java/org/springframework/kafka/support/EndpointHandlerMultiMethod.java
new file mode 100644
index 0000000000..34595ed18d
--- /dev/null
+++ b/spring-kafka/src/main/java/org/springframework/kafka/support/EndpointHandlerMultiMethod.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2024 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.springframework.kafka.support;
+
+import java.lang.reflect.Method;
+import java.util.List;
+
+/**
+ * Handler multi method for retrying endpoints.
+ *
+ * @author Wang Zhiyang
+ *
+ * @since 3.2
+ *
+ */
+public class EndpointHandlerMultiMethod extends EndpointHandlerMethod {
+
+	private Method defaultMethod;
+
+	private List<Method> methods;
+
+	/**
+	 * Construct an instance for the provided bean, defaultMethod and methods.
+	 * @param bean the bean.
+	 * @param defaultMethod the defaultMethod.
+	 * @param methods the methods.
+	 */
+	public EndpointHandlerMultiMethod(Object bean, Method defaultMethod, List<Method> methods) {
+		super(bean);
+		this.defaultMethod = defaultMethod;
+		this.methods = methods;
+	}
+
+	/**
+	 * Return the method list.
+	 * @return the method list.
+	 */
+	public List<Method> getMethods() {
+		return this.methods;
+	}
+
+	/**
+	 * Set the method list.
+	 * @param methods the method list.
+	 */
+	public void setMethods(List<Method> methods) {
+		this.methods = methods;
+	}
+
+	/**
+	 * Return the default method.
+	 * @return the default method.
+	 */
+	public Method getDefaultMethod() {
+		return this.defaultMethod;
+	}
+
+	/**
+	 * Set the default method.
+	 * @param defaultMethod the default method.
+	 */
+	public void setDefaultMethod(Method defaultMethod) {
+		this.defaultMethod = defaultMethod;
+	}
+
+}
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/JacksonMimeTypeModule.java b/spring-kafka/src/main/java/org/springframework/kafka/support/JacksonMimeTypeModule.java
index c353c20fdb..f876abea65 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/support/JacksonMimeTypeModule.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/support/JacksonMimeTypeModule.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2019 the original author or authors.
+ * Copyright 2019-2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -18,13 +18,13 @@
 
 import java.io.IOException;
 
-import org.springframework.util.MimeType;
-
 import com.fasterxml.jackson.core.JsonGenerator;
 import com.fasterxml.jackson.databind.JsonSerializer;
 import com.fasterxml.jackson.databind.SerializerProvider;
 import com.fasterxml.jackson.databind.module.SimpleModule;
 
+import org.springframework.util.MimeType;
+
 /**
  * A {@link SimpleModule} extension for {@link MimeType} serialization.
  *
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/JacksonUtils.java b/spring-kafka/src/main/java/org/springframework/kafka/support/JacksonUtils.java
index 3121409286..042640c35d 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/support/JacksonUtils.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/support/JacksonUtils.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2019-2022 the original author or authors.
+ * Copyright 2019-2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -16,13 +16,13 @@
 
 package org.springframework.kafka.support;
 
-import org.springframework.util.ClassUtils;
-
 import com.fasterxml.jackson.databind.DeserializationFeature;
 import com.fasterxml.jackson.databind.MapperFeature;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.json.JsonMapper;
 
+import org.springframework.util.ClassUtils;
+
 /**
  * The utilities for Jackson {@link ObjectMapper} instances.
  *
@@ -105,9 +105,8 @@ private static final class JodaModuleProvider {
 
 	private static final class KotlinModuleProvider {
 
-		@SuppressWarnings("deprecation")
 		static final com.fasterxml.jackson.databind.Module MODULE =
-				new com.fasterxml.jackson.module.kotlin.KotlinModule();
+				new com.fasterxml.jackson.module.kotlin.KotlinModule.Builder().build();
 	}
 
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/KafkaMessageHeaderAccessor.java b/spring-kafka/src/main/java/org/springframework/kafka/support/KafkaMessageHeaderAccessor.java
index 96975cbdd7..beecd6b66e 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/support/KafkaMessageHeaderAccessor.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/support/KafkaMessageHeaderAccessor.java
@@ -44,7 +44,7 @@ public KafkaMessageHeaderAccessor(Message<?> message) {
 	/**
 	 * Access the header value when the blocking delivery attempt header is present.
-	 * @return 1 if there is no header present; the decoded header value otherwise.
+	 * @return The decoded header value if the header is present.
 	 * @throws IllegalStateException if the header is not present.
 	 * @see org.springframework.kafka.listener.ContainerProperties#setDeliveryAttemptHeader(boolean)
 	 */
 }
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/KafkaUtils.java b/spring-kafka/src/main/java/org/springframework/kafka/support/KafkaUtils.java
index 616e6b224d..b589445100 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/support/KafkaUtils.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/support/KafkaUtils.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2018-2023 the original author or authors.
+ * Copyright 2018-2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -38,12 +38,31 @@
 *
 * @author Gary Russell
 * @author Wang ZhiYang
+ * @author Soby Chacko
 *
 * @since 2.2
 *
 */
 public final class KafkaUtils {
 
+	/**
+	 * Header name for deserialization exceptions.
+	 * @since 3.0.15
+	 */
+	public static final String DESERIALIZER_EXCEPTION_HEADER_PREFIX = "springDeserializerException";
+
+	/**
+	 * Header name for deserialization exceptions.
+	 * @since 3.0.15
+	 */
+	public static final String KEY_DESERIALIZER_EXCEPTION_HEADER = DESERIALIZER_EXCEPTION_HEADER_PREFIX + "Key";
+
+	/**
+	 * Header name for deserialization exceptions.
+	 * @since 3.0.15
+	 */
+	public static final String VALUE_DESERIALIZER_EXCEPTION_HEADER = DESERIALIZER_EXCEPTION_HEADER_PREFIX + "Value";
+
 	private static Function<ProducerRecord<?, ?>, String> prFormatter = ProducerRecord::toString;
 
 	private static Function<ConsumerRecord<?, ?>, String> crFormatter =
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/Suffixer.java b/spring-kafka/src/main/java/org/springframework/kafka/support/Suffixer.java
index e2cbd543c8..85f5bce0c5 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/support/Suffixer.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/support/Suffixer.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2018-2021 the original author or authors.
+ * Copyright 2018-2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -22,7 +22,6 @@
 
 import org.springframework.util.Assert;
 import org.springframework.util.StringUtils;
 
-
 /**
  * Utility class that suffixes strings.
  *
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/TopicPartitionOffset.java b/spring-kafka/src/main/java/org/springframework/kafka/support/TopicPartitionOffset.java
index 530b909ccb..15b984ffa8 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/support/TopicPartitionOffset.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/support/TopicPartitionOffset.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2019-2022 the original author or authors.
+ * Copyright 2019-2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -17,6 +17,7 @@
 package org.springframework.kafka.support;
 
 import java.util.Objects;
+import java.util.function.Function;
 
 import org.apache.kafka.common.TopicPartition;
 
@@ -41,6 +42,7 @@
 *
 * @author Artem Bilan
 * @author Gary Russell
+ * @author Soby Chacko
 *
 * @since 2.3
 */
@@ -77,6 +79,8 @@ public enum SeekPosition {
 
 	private boolean relativeToCurrent;
 
+	private Function<Long, Long> offsetComputeFunction;
+
 	/**
 	 * Construct an instance with no initial offset management.
 	 * @param topic the topic.
@@ -98,6 +102,19 @@ public TopicPartitionOffset(String topic, int partition, Long offset) {
 		this(topic, partition, offset, false);
 	}
 
+	/**
+	 * Construct an instance with the provided function to compute the offset.
+	 * @param topic the topic.
+	 * @param partition the partition.
+	 * @param offsetComputeFunction function to compute the offset.
+	 * @since 3.2.0
+	 */
+	public TopicPartitionOffset(String topic, int partition, Function<Long, Long> offsetComputeFunction) {
+		this.topicPartition = new TopicPartition(topic, partition);
+		this.offsetComputeFunction = offsetComputeFunction;
+		this.position = null;
+	}
+
 	/**
 	 * Construct an instance with the provided initial offset.
 	 * @param topic the topic.
@@ -198,6 +215,10 @@ public SeekPosition getPosition() {
 		return this.position;
 	}
 
+	public Function<Long, Long> getOffsetComputeFunction() {
+		return this.offsetComputeFunction;
+	}
+
 	@Override
 	public boolean equals(Object o) {
 		if (this == o) {
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/converter/BatchMessagingMessageConverter.java b/spring-kafka/src/main/java/org/springframework/kafka/support/converter/BatchMessagingMessageConverter.java
index cc644b0d61..4125202a43 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/support/converter/BatchMessagingMessageConverter.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/support/converter/BatchMessagingMessageConverter.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2016-2022 the original author or authors.
+ * Copyright 2016-2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -24,14 +24,15 @@
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.logging.LogFactory;
 import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.producer.ProducerRecord;
 import org.apache.kafka.common.header.Headers;
+import org.apache.kafka.common.record.TimestampType;
 import org.apache.kafka.common.utils.Bytes;
 
 import org.springframework.core.log.LogAccessor;
+import org.springframework.core.log.LogMessage;
 import org.springframework.kafka.support.Acknowledgment;
 import org.springframework.kafka.support.DefaultKafkaHeaderMapper;
 import org.springframework.kafka.support.JacksonPresent;
@@ -54,18 +55,24 @@
 * <p>
 * If a {@link RecordMessageConverter} is provided, and the batch type is a {@link ParameterizedType}
 * with a single generic type parameter, each record will be passed to the converter, thus supporting
- * a method signature {@code List<Foo> foos}.
+ * a method signature {@code List<MyObject> myObjects}.
 *
 * @author Marius Bogoevici
 * @author Gary Russell
 * @author Dariusz Szablinski
 * @author Biju Kunjummen
+ * @author Sanghyeok An
+ * @author Hope Kim
+ * @author Borahm Lee
+ * @author Artem Bilan
+ *
 * @since 1.1
 */
 public class BatchMessagingMessageConverter implements BatchMessageConverter {
 
-	protected final LogAccessor logger = new LogAccessor(LogFactory.getLog(getClass())); // NOSONAR
+	protected final LogAccessor logger = new LogAccessor(getClass()); // NOSONAR
 
+	@Nullable
 	private final RecordMessageConverter recordConverter;
 
 	private boolean generateMessageId = false;
@@ -89,7 +96,7 @@ public BatchMessagingMessageConverter() {
 	 * @param recordConverter the converter.
 	 * @since 1.3.2
 	 */
-	public BatchMessagingMessageConverter(RecordMessageConverter recordConverter) {
+	public BatchMessagingMessageConverter(@Nullable RecordMessageConverter recordConverter) {
 		this.recordConverter = recordConverter;
 		if (JacksonPresent.isJackson2Present()) {
 			this.headerMapper = new DefaultKafkaHeaderMapper();
@@ -98,7 +105,7 @@ public BatchMessagingMessageConverter(RecordMessageConverter recordConverter) {
 	/**
 	 * Generate {@link Message} {@code ids} for produced messages. If set to {@code false},
-	 * will try to use a default value. By default set to {@code false}.
+	 * will try to use a default value. By default, set to {@code false}.
 	 * @param generateMessageId true if a message id should be generated
 	 */
 	public void setGenerateMessageId(boolean generateMessageId) {
@@ -107,7 +114,7 @@ public void setGenerateMessageId(boolean generateMessageId) {
 	/**
 	 * Generate {@code timestamp} for produced messages. If set to {@code false}, -1 is
-	 * used instead. By default set to {@code false}.
+	 * used instead. By default, set to {@code false}.
 	 * @param generateTimestamp true if a timestamp should be generated
 	 */
 	public void setGenerateTimestamp(boolean generateTimestamp) {
@@ -123,6 +130,7 @@ public void setHeaderMapper(KafkaHeaderMapper headerMapper) {
 		this.headerMapper = headerMapper;
 	}
 
+	@Nullable
 	@Override
 	public RecordMessageConverter getRecordMessageConverter() {
 		return this.recordConverter;
@@ -142,8 +150,8 @@ public void setRawRecordHeader(boolean rawRecordHeader) {
 	public Message<?> toMessage(List<ConsumerRecord<?, ?>> records, @Nullable Acknowledgment acknowledgment,
 			Consumer<?, ?> consumer, Type type) {
 
-		KafkaMessageHeaders kafkaMessageHeaders = new KafkaMessageHeaders(this.generateMessageId,
-				this.generateTimestamp);
+		KafkaMessageHeaders kafkaMessageHeaders =
+				new KafkaMessageHeaders(this.generateMessageId, this.generateTimestamp);
 		Map<String, Object> rawHeaders = kafkaMessageHeaders.getRawHeaders();
 		List<Object> payloads = new ArrayList<>();
@@ -157,46 +165,38 @@ public Message<?> toMessage(List<ConsumerRecord<?, ?>> records, @Nullable Ackno
 		List<Headers> natives = new ArrayList<>();
 		List<ConsumerRecord<?, ?>> raws = new ArrayList<>();
 		List<ConversionException> conversionFailures = new ArrayList<>();
+
 		addToRawHeaders(rawHeaders, convertedHeaders, natives, raws, conversionFailures);
 		commonHeaders(acknowledgment, consumer, rawHeaders, keys, topics, partitions, offsets,
 				timestampTypes, timestamps);
-		boolean logged = false;
-		String info = null;
+
+		String listenerInfo = null;
 		for (ConsumerRecord<?, ?> record : records) {
-			payloads.add(obtainPayload(type, record, conversionFailures));
-			keys.add(record.key());
-			topics.add(record.topic());
-			partitions.add(record.partition());
-			offsets.add(record.offset());
-			if (record.timestampType() != null) {
-				timestampTypes.add(record.timestampType().name());
-			}
-			timestamps.add(record.timestamp());
-			if (this.headerMapper != null && record.headers() != null) {
-				Map<String, Object> converted = new HashMap<>();
-				this.headerMapper.toHeaders(record.headers(), converted);
-				convertedHeaders.add(converted);
-				Object object = converted.get(KafkaHeaders.LISTENER_INFO);
-				if (object instanceof String) {
-					info = (String) object;
+			addRecordInfo(record, type, payloads, keys, topics, partitions, offsets, timestampTypes, timestamps,
+					conversionFailures);
+			Headers recordHeaders = record.headers();
+			if (this.headerMapper != null && recordHeaders != null) {
+				Map<String, Object> converted = convertHeaders(recordHeaders, convertedHeaders);
+				Object obj = converted.get(KafkaHeaders.LISTENER_INFO);
+				if (obj instanceof String info) {
+					listenerInfo = info;
 				}
 			}
 			else {
-				if (!logged) {
-					this.logger.debug(() ->
-							"No header mapper is available; Jackson is required for the default mapper; "
-									+ "headers (if present) are not mapped but provided raw in "
-									+ KafkaHeaders.NATIVE_HEADERS);
-					logged = true;
-				}
-				natives.add(record.headers());
+				natives.add(recordHeaders);
 			}
 			if (this.rawRecordHeader) {
 				raws.add(record);
 			}
 		}
-		if (info != null) {
-			rawHeaders.put(KafkaHeaders.LISTENER_INFO, info);
+		if (this.headerMapper == null && !natives.isEmpty()) {
+			this.logger.debug(() ->
+					"No header mapper is available; Jackson is required for the default mapper; "
+							+ "headers (if present) are not mapped but provided raw in "
+							+ KafkaHeaders.NATIVE_HEADERS);
+		}
+		if (listenerInfo != null) {
+			rawHeaders.put(KafkaHeaders.LISTENER_INFO, listenerInfo);
 		}
 		return MessageBuilder.createMessage(payloads, kafkaMessageHeaders);
 	}
@@ -216,12 +216,35 @@ private void addToRawHeaders(Map<String, Object> rawHeaders, List<Map<String, O
 
+	private void addRecordInfo(ConsumerRecord<?, ?> record, Type type, List<Object> payloads, List<Object> keys,
+			List<String> topics, List<Integer> partitions, List<Long> offsets, List<String> timestampTypes,
+			List<Long> timestamps, List<ConversionException> conversionFailures) {
+
+		payloads.add(obtainPayload(type, record, conversionFailures));
+		keys.add(record.key());
+		topics.add(record.topic());
+		partitions.add(record.partition());
+		offsets.add(record.offset());
+		timestamps.add(record.timestamp());
+		TimestampType timestampType = record.timestampType();
+		if (timestampType != null) {
+			timestampTypes.add(timestampType.name());
+		}
+	}
+
 	private Object obtainPayload(Type type, ConsumerRecord<?, ?> record, List<ConversionException> conversionFailures) {
 		return this.recordConverter == null || !containerType(type)
 				? extractAndConvertValue(record, type)
 				: convert(record, type, conversionFailures);
 	}
 
+	private Map<String, Object> convertHeaders(Headers headers, List<Map<String, Object>> convertedHeaders) {
+		Map<String, Object> converted = new HashMap<>();
+		this.headerMapper.toHeaders(headers, converted);
+		convertedHeaders.add(converted);
+		return converted;
+	}
+
 	@Override
 	public List<ProducerRecord<?, ?>> fromMessage(Message<?> message, String defaultTopic) {
 		throw new UnsupportedOperationException();
@@ -238,7 +261,6 @@ protected Object extractAndConvertValue(ConsumerRecord<?, ?> record, Type type)
 		return record.value() == null ? KafkaNull.INSTANCE : record.value();
 	}
 
-
 	/**
 	 * Convert the record value.
 	 * @param record the record.
@@ -250,24 +272,29 @@ protected Object extractAndConvertValue(ConsumerRecord<?, ?> record, Type type)
 	protected Object convert(ConsumerRecord<?, ?> record, Type type, List<ConversionException> conversionFailures) {
 		try {
 			Object payload = this.recordConverter
-				.toMessage(record, null, null, ((ParameterizedType) type).getActualTypeArguments()[0]).getPayload();
+					.toMessage(record, null, null, ((ParameterizedType) type).getActualTypeArguments()[0]).getPayload();
 			conversionFailures.add(null);
 			return payload;
 		}
 		catch (ConversionException ex) {
 			byte[] original = null;
-			if (record.value() instanceof byte[]) {
-				original = (byte[]) record.value();
+			if (record.value() instanceof byte[] bytes) {
+				original = bytes;
 			}
-			else if (record.value() instanceof Bytes) {
-				original = ((Bytes) record.value()).get();
+			else if (record.value() instanceof Bytes bytes) {
+				original = bytes.get();
 			}
-			else if (record.value() instanceof String) {
-				original = ((String) record.value()).getBytes(StandardCharsets.UTF_8);
+			else if (record.value() instanceof String string) {
+				original = string.getBytes(StandardCharsets.UTF_8);
 			}
 			if (original != null) {
 				SerializationUtils.deserializationException(record.headers(), original, ex, false);
 				conversionFailures.add(ex);
+				this.logger.warn(ex,
+						LogMessage.format("Could not convert message for topic=%s, partition=%d, offset=%d",
+								record.topic(),
+								record.partition(),
+								record.offset()));
 				return null;
 			}
 			throw new ConversionException("The batch converter can only report conversion failures to the listener "
@@ -282,8 +309,8 @@ else if (record.value() instanceof String) {
 	 * @return true if the conditions are met.
 	 */
 	private boolean containerType(Type type) {
-		return type instanceof ParameterizedType
-				&& ((ParameterizedType) type).getActualTypeArguments().length == 1;
+		return type instanceof ParameterizedType parameterizedType
+				&& parameterizedType.getActualTypeArguments().length == 1;
 	}
 
 }
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/converter/ByteArrayJsonMessageConverter.java b/spring-kafka/src/main/java/org/springframework/kafka/support/converter/ByteArrayJsonMessageConverter.java
index b359ae3f95..e5256cdf51 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/support/converter/ByteArrayJsonMessageConverter.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/support/converter/ByteArrayJsonMessageConverter.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2019-2023 the original author or authors.
+ * Copyright 2019-2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -16,12 +16,12 @@
 
 package org.springframework.kafka.support.converter;
 
-import org.springframework.kafka.support.KafkaNull;
-import org.springframework.messaging.Message;
-
 import com.fasterxml.jackson.core.JsonProcessingException;
 import com.fasterxml.jackson.databind.ObjectMapper;
 
+import org.springframework.kafka.support.KafkaNull;
+import org.springframework.messaging.Message;
+
 /**
  * JSON Message converter - {@code byte[]} on output, String, Bytes, or byte[] on input.
  * Used in conjunction with Kafka
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/converter/BytesJsonMessageConverter.java b/spring-kafka/src/main/java/org/springframework/kafka/support/converter/BytesJsonMessageConverter.java
index 65e3ca86df..58b4ea11ce 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/support/converter/BytesJsonMessageConverter.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/support/converter/BytesJsonMessageConverter.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2018-2023 the original author or authors.
+ * Copyright 2018-2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -16,14 +16,13 @@
 
 package org.springframework.kafka.support.converter;
 
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
 import org.apache.kafka.common.utils.Bytes;
 
 import org.springframework.kafka.support.KafkaNull;
 import org.springframework.messaging.Message;
 
-import com.fasterxml.jackson.core.JsonProcessingException;
-import com.fasterxml.jackson.databind.ObjectMapper;
-
 /**
  * JSON Message converter - {@code Bytes} on output, String, Bytes, or byte[] on input.
  * Used in conjunction with Kafka
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/converter/JsonMessageConverter.java b/spring-kafka/src/main/java/org/springframework/kafka/support/converter/JsonMessageConverter.java
index a32872c18c..201f3b0779 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/support/converter/JsonMessageConverter.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/support/converter/JsonMessageConverter.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2019-2023 the original author or authors.
+ * Copyright 2019-2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -19,6 +19,9 @@
 import java.io.IOException;
 import java.lang.reflect.Type;
 
+import com.fasterxml.jackson.databind.JavaType;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.type.TypeFactory;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.common.header.Headers;
 import org.apache.kafka.common.header.internals.RecordHeaders;
@@ -32,10 +35,6 @@
 import org.springframework.messaging.Message;
 import org.springframework.util.Assert;
 
-import com.fasterxml.jackson.databind.JavaType;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.type.TypeFactory;
-
 /**
  * Base class for JSON message converters; on the consumer side, it can
  * handle {@code byte[]}, {@link Bytes} and {@link String} record values.
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/converter/MappingJacksonParameterizedConverter.java b/spring-kafka/src/main/java/org/springframework/kafka/support/converter/MappingJacksonParameterizedConverter.java
index cb534678ee..7744d5ee9b 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/support/converter/MappingJacksonParameterizedConverter.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/support/converter/MappingJacksonParameterizedConverter.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2021 the original author or authors.
+ * Copyright 2021-2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -19,6 +19,8 @@
 import java.io.IOException;
 import java.lang.reflect.Type;
 
+import com.fasterxml.jackson.databind.JavaType;
+import com.fasterxml.jackson.databind.type.TypeFactory;
 import org.apache.kafka.common.header.Headers;
 import org.apache.kafka.common.utils.Bytes;
 
@@ -32,9 +34,6 @@
 import org.springframework.util.Assert;
 import org.springframework.util.MimeType;
 
-import com.fasterxml.jackson.databind.JavaType;
-import com.fasterxml.jackson.databind.type.TypeFactory;
-
 /**
  * Subclass of {@link MappingJackson2MessageConverter} that can handle parameterized
  * (generic) types.
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/converter/MessagingMessageConverter.java b/spring-kafka/src/main/java/org/springframework/kafka/support/converter/MessagingMessageConverter.java
index 51a7148ec4..339bd17ce0 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/support/converter/MessagingMessageConverter.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/support/converter/MessagingMessageConverter.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2016-2023 the original author or authors.
+ * Copyright 2016-2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -136,7 +136,6 @@ public void setRawRecordHeader(boolean rawRecordHeader) {
 		this.rawRecordHeader = rawRecordHeader;
 	}
 
-
 	protected org.springframework.messaging.converter.MessageConverter getMessagingConverter() {
 		return this.messagingConverter;
 	}
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/converter/ProjectingMessageConverter.java b/spring-kafka/src/main/java/org/springframework/kafka/support/converter/ProjectingMessageConverter.java
index 6a78d3bc03..1a774c3cb9 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/support/converter/ProjectingMessageConverter.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/support/converter/ProjectingMessageConverter.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2018-2019 the original author or authors.
+ * Copyright 2018-2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -21,6 +21,8 @@
 import java.lang.reflect.Type;
 import java.nio.charset.StandardCharsets;
 
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.jayway.jsonpath.spi.mapper.JacksonMappingProvider;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.common.utils.Bytes;
 
@@ -34,9 +36,6 @@
 import org.springframework.messaging.Message;
 import org.springframework.util.Assert;
 
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.jayway.jsonpath.spi.mapper.JacksonMappingProvider;
-
 /**
  * A {@link MessageConverter} implementation that uses a Spring Data
  * {@link ProjectionFactory} to bind incoming messages to projection interfaces.
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/converter/StringJsonMessageConverter.java b/spring-kafka/src/main/java/org/springframework/kafka/support/converter/StringJsonMessageConverter.java
index 869b8d2762..24814cd239 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/support/converter/StringJsonMessageConverter.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/support/converter/StringJsonMessageConverter.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2016-2023 the original author or authors.
+ * Copyright 2016-2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -16,12 +16,12 @@
 
 package org.springframework.kafka.support.converter;
 
-import org.springframework.kafka.support.KafkaNull;
-import org.springframework.messaging.Message;
-
 import com.fasterxml.jackson.core.JsonProcessingException;
 import com.fasterxml.jackson.databind.ObjectMapper;
 
+import org.springframework.kafka.support.KafkaNull;
+import org.springframework.messaging.Message;
+
 /**
  * JSON Message converter - String on output, String, Bytes, or byte[] on input. Used in
  * conjunction with Kafka
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/mapping/DefaultJackson2JavaTypeMapper.java b/spring-kafka/src/main/java/org/springframework/kafka/support/mapping/DefaultJackson2JavaTypeMapper.java
index 42e3519f15..5a9b64df27 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/support/mapping/DefaultJackson2JavaTypeMapper.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/support/mapping/DefaultJackson2JavaTypeMapper.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2017-2021 the original author or authors.
+ * Copyright 2017-2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -20,6 +20,8 @@
 import java.util.List;
 import java.util.Set;
 
+import com.fasterxml.jackson.databind.JavaType;
+import com.fasterxml.jackson.databind.type.TypeFactory;
 import org.apache.kafka.common.header.Headers;
 
 import org.springframework.messaging.converter.MessageConversionException;
@@ -27,9 +29,6 @@
 import org.springframework.util.ClassUtils;
 import org.springframework.util.PatternMatchUtils;
 
-import com.fasterxml.jackson.databind.JavaType;
-import com.fasterxml.jackson.databind.type.TypeFactory;
-
 /**
  * Jackson 2 type mapper.
 *
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/mapping/Jackson2JavaTypeMapper.java b/spring-kafka/src/main/java/org/springframework/kafka/support/mapping/Jackson2JavaTypeMapper.java
index 3f45504a88..91cd1960da 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/support/mapping/Jackson2JavaTypeMapper.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/support/mapping/Jackson2JavaTypeMapper.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2017-2021 the original author or authors.
+ * Copyright 2017-2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -16,9 +16,8 @@
 
 package org.springframework.kafka.support.mapping;
 
-import org.apache.kafka.common.header.Headers;
-
 import com.fasterxml.jackson.databind.JavaType;
+import org.apache.kafka.common.header.Headers;
 
 /**
  * Strategy for setting metadata on messages such that one can create the class that needs
diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/micrometer/KafkaListenerObservation.java b/spring-kafka/src/main/java/org/springframework/kafka/support/micrometer/KafkaListenerObservation.java
index e3055fa763..aaec5c5275 100644
--- a/spring-kafka/src/main/java/org/springframework/kafka/support/micrometer/KafkaListenerObservation.java
+++ b/spring-kafka/src/main/java/org/springframework/kafka/support/micrometer/KafkaListenerObservation.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2022 the original author or authors.
+ * Copyright 2022-2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -22,10 +22,17 @@
 import io.micrometer.observation.ObservationConvention;
 import io.micrometer.observation.docs.ObservationDocumentation;
 
+import org.springframework.lang.NonNull;
+import org.springframework.lang.Nullable;
+import org.springframework.util.StringUtils;
+
 /**
  * Spring for Apache Kafka Observation for listeners.
* * @author Gary Russell + * @author Christian Mergenthaler + * @author Wang Zhiyang + * * @since 3.0 * */ @@ -36,20 +43,21 @@ public enum KafkaListenerObservation implements ObservationDocumentation { */ LISTENER_OBSERVATION { - @Override public Class<? extends ObservationConvention<? extends Observation.Context>> getDefaultConvention() { return DefaultKafkaListenerObservationConvention.class; } @Override - public String getPrefix() { - return "spring.kafka.listener"; + @NonNull + public KeyName[] getLowCardinalityKeyNames() { + return ListenerLowCardinalityTags.values(); } @Override - public KeyName[] getLowCardinalityKeyNames() { - return ListenerLowCardinalityTags.values(); + @NonNull + public KeyName[] getHighCardinalityKeyNames() { + return ListenerHighCardinalityTags.values(); } }; @@ -65,20 +73,147 @@ public enum ListenerLowCardinalityTags implements KeyName { LISTENER_ID { @Override + @NonNull public String asString() { return "spring.kafka.listener.id"; } - } + }, + + /** + * Messaging system. + * @since 3.2 + */ + MESSAGING_SYSTEM { + + @Override + @NonNull + public String asString() { + return "messaging.system"; + } + + }, + + /** + * Messaging operation. + * @since 3.2 + */ + MESSAGING_OPERATION { + + @Override + @NonNull + public String asString() { + return "messaging.operation"; + } + + }, + + /** + * Messaging source name. + * @since 3.2 + */ + MESSAGING_SOURCE_NAME { + + @Override + @NonNull + public String asString() { + return "messaging.source.name"; + } + + }, + + /** + * Messaging source kind. + * @since 3.2 + */ + MESSAGING_SOURCE_KIND { + + @Override + @NonNull + public String asString() { + return "messaging.source.kind"; + } + + }, + + /** + * Messaging consumer group. + * @since 3.2 + */ + MESSAGING_CONSUMER_GROUP { + + @Override + @NonNull + public String asString() { + return "messaging.kafka.consumer.group"; + } + + }, + + } + + /** + * High cardinality tags. + * @since 3.2 + */ + public enum ListenerHighCardinalityTags implements KeyName { + + /** + * Messaging client id. + */ + MESSAGING_CLIENT_ID { + + @Override + @NonNull + public String asString() { + return "messaging.kafka.client_id"; + } + + }, + + /** + * Messaging consumer id (consumer group and client id). + */ + MESSAGING_CONSUMER_ID { + + @Override + @NonNull + public String asString() { + return "messaging.consumer.id"; + } + + }, + + /** + * Messaging partition. + */ + MESSAGING_PARTITION { + + @Override + @NonNull + public String asString() { + return "messaging.kafka.source.partition"; + } + + }, + + /** + * Messaging message offset. + */ + MESSAGING_OFFSET { + + @Override + @NonNull + public String asString() { + return "messaging.kafka.message.offset"; + } + + }, } /** * Default {@link KafkaListenerObservationConvention} for Kafka listener key values.
- * - * @author Gary Russell - * @since 3.0 - * */ public static class DefaultKafkaListenerObservationConvention implements KafkaListenerObservationConvention { @@ -90,8 +225,33 @@ public static class DefaultKafkaListenerObservationConvention implements KafkaLi @Override public KeyValues getLowCardinalityKeyValues(KafkaRecordReceiverContext context) { - return KeyValues.of(KafkaListenerObservation.ListenerLowCardinalityTags.LISTENER_ID.asString(), - context.getListenerId()); + + return KeyValues.of( + ListenerLowCardinalityTags.LISTENER_ID.withValue(context.getListenerId()), + ListenerLowCardinalityTags.MESSAGING_SYSTEM.withValue("kafka"), + ListenerLowCardinalityTags.MESSAGING_OPERATION.withValue("receive"), + ListenerLowCardinalityTags.MESSAGING_SOURCE_NAME.withValue(context.getSource()), + ListenerLowCardinalityTags.MESSAGING_SOURCE_KIND.withValue("topic"), + ListenerLowCardinalityTags.MESSAGING_CONSUMER_GROUP.withValue(context.getGroupId()) + ); + } + + @Override + @NonNull + public KeyValues getHighCardinalityKeyValues(KafkaRecordReceiverContext context) { + String clientId = context.getClientId(); + KeyValues keyValues = KeyValues.of( + ListenerHighCardinalityTags.MESSAGING_PARTITION.withValue(context.getPartition()), + ListenerHighCardinalityTags.MESSAGING_OFFSET.withValue(context.getOffset()), + ListenerHighCardinalityTags.MESSAGING_CONSUMER_ID.withValue(getConsumerId(context, clientId)) + ); + + if (StringUtils.hasText(clientId)) { + keyValues = keyValues + .and(ListenerHighCardinalityTags.MESSAGING_CLIENT_ID.withValue(clientId)); + } + + return keyValues; } @Override @@ -99,9 +259,11 @@ public String getContextualName(KafkaRecordReceiverContext context) { return context.getSource() + " receive"; } - @Override - public String getName() { - return "spring.kafka.listener"; + private static String getConsumerId(KafkaRecordReceiverContext context, @Nullable String clientId) { + if (StringUtils.hasText(clientId)) { + return context.getGroupId() + " - " + clientId; + } + return context.getGroupId(); } } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/micrometer/KafkaRecordReceiverContext.java b/spring-kafka/src/main/java/org/springframework/kafka/support/micrometer/KafkaRecordReceiverContext.java index b3de789176..f93dce1842 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/support/micrometer/KafkaRecordReceiverContext.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/support/micrometer/KafkaRecordReceiverContext.java @@ -1,5 +1,5 @@ /* - * Copyright 2022-2023 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,15 +19,19 @@ import java.nio.charset.StandardCharsets; import java.util.function.Supplier; +import io.micrometer.observation.transport.ReceiverContext; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.common.header.Header; -import io.micrometer.observation.transport.ReceiverContext; +import org.springframework.lang.Nullable; /** * {@link ReceiverContext} for {@link ConsumerRecord}s. * * @author Gary Russell + * @author Christian Mergenthaler + * @author Wang Zhiyang + * + * @since 3.0 * */ @@ -35,9 +39,33 @@ public class KafkaRecordReceiverContext extends ReceiverContext<ConsumerRecord<?, ?>> { private final String listenerId; + private final String clientId; + + private final String groupId; + private final ConsumerRecord<?, ?> record; + /** + * Construct a kafka record receiver context. + * @param record the consumer record. + * @param listenerId the container listener id.
+ * @param clusterId the kafka cluster id. + */ public KafkaRecordReceiverContext(ConsumerRecord<?, ?> record, String listenerId, Supplier<String> clusterId) { + this(record, listenerId, null, null, clusterId); + } + + /** + * Construct a kafka record receiver context. + * @param record the consumer record. + * @param listenerId the container listener id. + * @param clientId the kafka client id. + * @param groupId the consumer group id. + * @param clusterId the kafka cluster id. + * @since 3.2 + */ + public KafkaRecordReceiverContext(ConsumerRecord<?, ?> record, String listenerId, String clientId, String groupId, + Supplier<String> clusterId) { super((carrier, key) -> { Header header = carrier.headers().lastHeader(key); if (header == null || header.value() == null) { @@ -48,6 +76,8 @@ public KafkaRecordReceiverContext(ConsumerRecord<?, ?> record, String listenerId setCarrier(record); this.record = record; this.listenerId = listenerId; + this.clientId = clientId; + this.groupId = groupId; String cluster = clusterId.get(); setRemoteServiceName("Apache Kafka" + (cluster != null ? ": " + cluster : "")); } @@ -60,6 +90,25 @@ public String getListenerId() { return this.listenerId; } + /** + * Return the consumer group id. + * @return the consumer group id. + * @since 3.2 + */ + public String getGroupId() { + return this.groupId; + } + + /** + * Return the client id. + * @return the client id. + * @since 3.2 + */ + @Nullable + public String getClientId() { + return this.clientId; + } + /** * Return the source topic. * @return the source. @@ -70,11 +119,29 @@ public String getSource() { /** * Return the consumer record. - * @return the record the record. + * @return the record. * @since 3.0.6 */ public ConsumerRecord<?, ?> getRecord() { return this.record; } + /** + * Return the partition. + * @return the partition. + * @since 3.2 + */ + public String getPartition() { + return Integer.toString(this.record.partition()); + } + + /** + * Return the offset. + * @return the offset. + * @since 3.2 + */ + public String getOffset() { + return Long.toString(this.record.offset()); + } + } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/micrometer/KafkaRecordSenderContext.java b/spring-kafka/src/main/java/org/springframework/kafka/support/micrometer/KafkaRecordSenderContext.java index ea6005b883..0d18699313 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/support/micrometer/KafkaRecordSenderContext.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/support/micrometer/KafkaRecordSenderContext.java @@ -1,5 +1,5 @@ /* - * Copyright 2022-2023 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,14 +19,16 @@ import java.nio.charset.StandardCharsets; import java.util.function.Supplier; -import org.apache.kafka.clients.producer.ProducerRecord; - import io.micrometer.observation.transport.SenderContext; +import org.apache.kafka.clients.producer.ProducerRecord; /** * {@link SenderContext} for {@link ProducerRecord}s. * * @author Gary Russell + * @author Christian Mergenthaler + * @author Wang Zhiyang + * + * @since 3.0 * */ @@ -64,7 +66,7 @@ public String getDestination() { /** * Return the producer record. - * @return the record the record. + * @return the record.
* @since 3.0.6 */ public ProducerRecord getRecord() { diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/micrometer/KafkaTemplateObservation.java b/spring-kafka/src/main/java/org/springframework/kafka/support/micrometer/KafkaTemplateObservation.java index 60580d86ca..f50710e5d6 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/support/micrometer/KafkaTemplateObservation.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/support/micrometer/KafkaTemplateObservation.java @@ -1,5 +1,5 @@ /* - * Copyright 2022 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -22,11 +22,16 @@ import io.micrometer.observation.ObservationConvention; import io.micrometer.observation.docs.ObservationDocumentation; +import org.springframework.lang.NonNull; + /** * Spring for Apache Kafka Observation for * {@link org.springframework.kafka.core.KafkaTemplate}. * * @author Gary Russell + * @author Christian Mergenthaler + * @author Wang Zhiyang + * * @since 3.0 * */ @@ -43,11 +48,7 @@ public Class> getDefaultConve } @Override - public String getPrefix() { - return "spring.kafka.template"; - } - - @Override + @NonNull public KeyName[] getLowCardinalityKeyNames() { return TemplateLowCardinalityTags.values(); } @@ -56,6 +57,11 @@ public KeyName[] getLowCardinalityKeyNames() { /** * Low cardinality tags. + * + * @author Christian Mergenthaler + * @author Wang Zhiyang + * + * @since 3.2 */ public enum TemplateLowCardinalityTags implements KeyName { @@ -65,10 +71,63 @@ public enum TemplateLowCardinalityTags implements KeyName { BEAN_NAME { @Override + @NonNull public String asString() { return "spring.kafka.template.name"; } + }, + + /** + * Messaging system. + */ + MESSAGING_SYSTEM { + + @Override + @NonNull + public String asString() { + return "messaging.system"; + } + + }, + + /** + * Messaging operation. + */ + MESSAGING_OPERATION { + + @Override + @NonNull + public String asString() { + return "messaging.operation"; + } + + }, + + /** + * Messaging destination name. + */ + MESSAGING_DESTINATION_NAME { + + @Override + @NonNull + public String asString() { + return "messaging.destination.name"; + } + + }, + + /** + * Messaging destination kind. + */ + MESSAGING_DESTINATION_KIND { + + @Override + @NonNull + public String asString() { + return "messaging.destination.kind"; + } + } } @@ -77,6 +136,9 @@ public String asString() { * Default {@link KafkaTemplateObservationConvention} for Kafka template key values. 
* * @author Gary Russell + * @author Christian Mergenthaler + * @author Wang Zhiyang + * * @since 3.0 * */ @@ -90,8 +152,12 @@ public static class DefaultKafkaTemplateObservationConvention implements KafkaTe @Override public KeyValues getLowCardinalityKeyValues(KafkaRecordSenderContext context) { - return KeyValues.of(KafkaTemplateObservation.TemplateLowCardinalityTags.BEAN_NAME.asString(), - context.getBeanName()); + return KeyValues.of( + TemplateLowCardinalityTags.BEAN_NAME.withValue(context.getBeanName()), + TemplateLowCardinalityTags.MESSAGING_SYSTEM.withValue("kafka"), + TemplateLowCardinalityTags.MESSAGING_OPERATION.withValue("publish"), + TemplateLowCardinalityTags.MESSAGING_DESTINATION_KIND.withValue("topic"), + TemplateLowCardinalityTags.MESSAGING_DESTINATION_NAME.withValue(context.getDestination())); } @Override @@ -100,6 +166,7 @@ public String getContextualName(KafkaRecordSenderContext context) { } @Override + @NonNull public String getName() { return "spring.kafka.template"; } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/micrometer/MicrometerHolder.java b/spring-kafka/src/main/java/org/springframework/kafka/support/micrometer/MicrometerHolder.java index 782745925b..42af8dba61 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/support/micrometer/MicrometerHolder.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/support/micrometer/MicrometerHolder.java @@ -1,5 +1,5 @@ /* - * Copyright 2020-2023 the original author or authors. + * Copyright 2020-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,16 +20,16 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.function.Function; -import org.springframework.beans.factory.NoUniqueBeanDefinitionException; -import org.springframework.context.ApplicationContext; -import org.springframework.lang.Nullable; -import org.springframework.util.Assert; - import io.micrometer.core.instrument.MeterRegistry; import io.micrometer.core.instrument.Timer; import io.micrometer.core.instrument.Timer.Builder; import io.micrometer.core.instrument.Timer.Sample; +import org.springframework.beans.factory.NoUniqueBeanDefinitionException; +import org.springframework.context.ApplicationContext; +import org.springframework.lang.Nullable; +import org.springframework.util.Assert; + /** * A wrapper for micrometer timers when available on the class path. 
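// Not part of the patch: a minimal sketch of turning on the observations whose tag
// conventions are changed above. setObservationEnabled() is pre-existing public API;
// only the tag values added by this patch are new.
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.KafkaTemplate;

class ObservationEnableSketch {

    void enable(ConcurrentKafkaListenerContainerFactory<String, String> factory,
            KafkaTemplate<String, String> template) {
        // "spring.kafka.listener" observations then carry messaging.system=kafka,
        // messaging.operation=receive, messaging.source.name=<topic>,
        // messaging.source.kind=topic and messaging.kafka.consumer.group (low cardinality),
        // plus the client-id/consumer-id/partition/offset high-cardinality tags above.
        factory.getContainerProperties().setObservationEnabled(true);
        // "spring.kafka.template" observations carry messaging.operation=publish and the
        // messaging.destination.name/kind tags from the template convention above.
        template.setObservationEnabled(true);
    }
}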
* diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/DelegatingByTopicSerialization.java b/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/DelegatingByTopicSerialization.java index 5cc0fe63e1..54e44c8e99 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/DelegatingByTopicSerialization.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/DelegatingByTopicSerialization.java @@ -25,6 +25,8 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.regex.Pattern; +import org.apache.commons.logging.LogFactory; + import org.springframework.core.log.LogAccessor; import org.springframework.lang.NonNull; import org.springframework.lang.Nullable; @@ -39,6 +41,8 @@ * * @author Gary Russell * @author Wang Zhiyang + * @author Sanghyeok An + * @author Borahm Lee * * @since 2.8 * @@ -47,7 +51,7 @@ public abstract class DelegatingByTopicSerialization implem private static final String UNCHECKED = "unchecked"; - private static final LogAccessor LOGGER = new LogAccessor(DelegatingDeserializer.class); + private static final LogAccessor LOGGER = new LogAccessor(LogFactory.getLog(DelegatingByTopicSerialization.class)); /** * Name of the configuration property containing the serialization selector map for @@ -108,7 +112,7 @@ public void setCaseSensitive(boolean caseSensitive) { @SuppressWarnings(UNCHECKED) protected void configure(Map configs, boolean isKey) { - if (this.delegates.size() > 0) { + if (!this.delegates.isEmpty()) { this.delegates.values().forEach(delegate -> configureDelegate(configs, isKey, delegate)); } this.forKeys = isKey; diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/DelegatingSerializer.java b/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/DelegatingSerializer.java index b7e9c3770b..0e2e0de0d4 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/DelegatingSerializer.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/DelegatingSerializer.java @@ -1,5 +1,5 @@ /* - * Copyright 2019-2022 the original author or authors. + * Copyright 2019-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -187,7 +187,6 @@ public byte[] serialize(String topic, Object data) { throw new UnsupportedOperationException(); } - @Override public byte[] serialize(String topic, Headers headers, Object data) { if (data == null) { diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/JsonDeserializer.java b/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/JsonDeserializer.java index 14b4a218bb..11688f41a5 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/JsonDeserializer.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/JsonDeserializer.java @@ -1,5 +1,5 @@ /* - * Copyright 2015-2023 the original author or authors. + * Copyright 2015-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
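// Not part of the patch: the JsonDeserializer hunks that follow keep its property-based
// configuration contract; a typical consumer-side setup using the class's existing
// constants ("spring.json.trusted.packages" / "spring.json.value.default.type").
// Topic and package names here are illustrative only.
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;

import org.springframework.kafka.support.serializer.JsonDeserializer;

class JsonDeserializerConfigSketch {

    Map<String, Object> consumerProps() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, JsonDeserializer.class);
        props.put(JsonDeserializer.TRUSTED_PACKAGES, "com.example.events");
        props.put(JsonDeserializer.VALUE_DEFAULT_TYPE, "com.example.events.MyEvent");
        return props;
    }
}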
@@ -25,8 +25,15 @@ import java.util.HashSet; import java.util.Map; import java.util.Set; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; import java.util.function.BiFunction; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.JavaType; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectReader; +import com.fasterxml.jackson.databind.type.TypeFactory; import org.apache.kafka.common.errors.SerializationException; import org.apache.kafka.common.header.Headers; import org.apache.kafka.common.serialization.Deserializer; @@ -42,12 +49,6 @@ import org.springframework.util.ClassUtils; import org.springframework.util.StringUtils; -import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.JavaType; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.fasterxml.jackson.databind.type.TypeFactory; - /** * Generic {@link org.apache.kafka.common.serialization.Deserializer Deserializer} for * receiving JSON from Kafka and returning Java objects. @@ -65,6 +66,7 @@ * @author Elliot Kennedy * @author Torsten Schleede * @author Ivan Ponomarev + * @author Omer Celik */ public class JsonDeserializer<T> implements Deserializer<T> { @@ -145,6 +147,8 @@ public class JsonDeserializer<T> implements Deserializer<T> { private boolean configured; + private final Lock trustedPackagesLock = new ReentrantLock(); + /** * Construct an instance with a default {@link ObjectMapper}. */ @@ -178,7 +182,6 @@ public JsonDeserializer(@Nullable TypeReference<? super T> targetType) { this(targetType, true); } - /** * Construct an instance with the provided target type, and a default {@link ObjectMapper}. * @param targetType the target java type to use if no type info headers are present.
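// The next hunks replace the synchronized configure()/addTrustedPackages()/trustedPackages()
// methods with an explicit ReentrantLock. A minimal sketch of that pattern, not part of the
// patch (the motivation, e.g. avoiding monitor pinning on virtual threads, is an assumption):
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

class LockInsteadOfSynchronized {

    private final Lock lock = new ReentrantLock();

    private boolean configured;

    void configureOnce() {
        this.lock.lock(); // the hunks below call lock() just inside the try block instead
        try {
            if (this.configured) {
                return; // the finally block still releases the lock on early return
            }
            // one-time configuration, guarded against concurrent callers
            this.configured = true;
        }
        finally {
            this.lock.unlock();
        }
    }
}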
@@ -399,29 +402,35 @@ public void setTypeResolver(JsonTypeResolver typeResolver) { } @Override - public synchronized void configure(Map<String, ?> configs, boolean isKey) { - if (this.configured) { - return; - } - Assert.state(!this.setterCalled || !configsHasOurKeys(configs), - "JsonDeserializer must be configured with property setters, or via configuration properties; not both"); - doSetUseTypeMapperForKey(isKey); - setUpTypePrecedence(configs); - setupTarget(configs, isKey); - if (configs.containsKey(TRUSTED_PACKAGES) - && configs.get(TRUSTED_PACKAGES) instanceof String) { - this.typeMapper.addTrustedPackages( - StringUtils.delimitedListToStringArray((String) configs.get(TRUSTED_PACKAGES), ",", " \r\n\f\t")); - } - if (configs.containsKey(TYPE_MAPPINGS) && !this.typeMapperExplicitlySet - && this.typeMapper instanceof AbstractJavaTypeMapper) { - ((AbstractJavaTypeMapper) this.typeMapper).setIdClassMapping(createMappings(configs)); + public void configure(Map<String, ?> configs, boolean isKey) { + try { + this.trustedPackagesLock.lock(); + if (this.configured) { + return; + } + Assert.state(!this.setterCalled || !configsHasOurKeys(configs), + "JsonDeserializer must be configured with property setters, or via configuration properties; not both"); + doSetUseTypeMapperForKey(isKey); + setUpTypePrecedence(configs); + setupTarget(configs, isKey); + if (configs.containsKey(TRUSTED_PACKAGES) + && configs.get(TRUSTED_PACKAGES) instanceof String) { + this.typeMapper.addTrustedPackages( + StringUtils.delimitedListToStringArray((String) configs.get(TRUSTED_PACKAGES), ",", " \r\n\f\t")); + } + if (configs.containsKey(TYPE_MAPPINGS) && !this.typeMapperExplicitlySet + && this.typeMapper instanceof AbstractJavaTypeMapper) { + ((AbstractJavaTypeMapper) this.typeMapper).setIdClassMapping(createMappings(configs)); + } + if (configs.containsKey(REMOVE_TYPE_INFO_HEADERS)) { + this.removeTypeHeaders = Boolean.parseBoolean(configs.get(REMOVE_TYPE_INFO_HEADERS).toString()); + } + setUpTypeMethod(configs, isKey); + this.configured = true; } - if (configs.containsKey(REMOVE_TYPE_INFO_HEADERS)) { - this.removeTypeHeaders = Boolean.parseBoolean(configs.get(REMOVE_TYPE_INFO_HEADERS).toString()); + finally { + this.trustedPackagesLock.unlock(); } - setUpTypeMethod(configs, isKey); - this.configured = true; } private boolean configsHasOurKeys(Map<String, ?> configs) { @@ -524,9 +533,15 @@ else if (configs.get(key) instanceof String) { * @param packages the packages. * @since 2.1 */ - public synchronized void addTrustedPackages(String... packages) { - doAddTrustedPackages(packages); - this.setterCalled = true; + public void addTrustedPackages(String... packages) { + try { + this.trustedPackagesLock.lock(); + doAddTrustedPackages(packages); + this.setterCalled = true; + } + finally { + this.trustedPackagesLock.unlock(); + } } private void addMappingsToTrusted(Map<String, Class<?>> mappings) { @@ -706,10 +721,16 @@ public JsonDeserializer<T> typeMapper(Jackson2JavaTypeMapper mapper) { * @return the deserializer. * @since 2.5 */ - public synchronized JsonDeserializer<T> trustedPackages(String... packages) { - Assert.isTrue(!this.typeMapperExplicitlySet, "When using a custom type mapper, set the trusted packages there"); - this.typeMapper.addTrustedPackages(packages); - return this; + public JsonDeserializer<T> trustedPackages(String...
packages) { + try { + this.trustedPackagesLock.lock(); + Assert.isTrue(!this.typeMapperExplicitlySet, "When using a custom type mapper, set the trusted packages there"); + this.typeMapper.addTrustedPackages(packages); + return this; + } + finally { + this.trustedPackagesLock.unlock(); + } } /** diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/JsonSerde.java b/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/JsonSerde.java index 8cdfbb114e..168b9f866a 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/JsonSerde.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/JsonSerde.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2022 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,6 +18,9 @@ import java.util.Map; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.JavaType; +import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.kafka.common.serialization.Serde; import org.springframework.core.ResolvableType; @@ -26,10 +29,6 @@ import org.springframework.lang.Nullable; import org.springframework.util.Assert; -import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.JavaType; -import com.fasterxml.jackson.databind.ObjectMapper; - /** * A {@link org.apache.kafka.common.serialization.Serde} that provides serialization and * deserialization in JSON format. diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/JsonSerializer.java b/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/JsonSerializer.java index aaa8f8de3c..0787bc9a00 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/JsonSerializer.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/JsonSerializer.java @@ -19,7 +19,13 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.JavaType; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectWriter; import org.apache.kafka.common.errors.SerializationException; import org.apache.kafka.common.header.Headers; import org.apache.kafka.common.serialization.Serializer; @@ -33,11 +39,6 @@ import org.springframework.util.ClassUtils; import org.springframework.util.StringUtils; -import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.JavaType; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectWriter; - /** * Generic {@link org.apache.kafka.common.serialization.Serializer Serializer} for sending * Java objects to Kafka as JSON. 
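// Not part of the patch: a configuration sketch for the JsonSerializer hunks below.
// ADD_TYPE_INFO_HEADERS accepts a Boolean or a String (enforced in configure() below);
// TYPE_MAPPINGS maps tokens to classes. The token and class names here are illustrative.
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;

import org.springframework.kafka.support.serializer.JsonSerializer;

class JsonSerializerConfigSketch {

    Map<String, Object> producerProps() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
        props.put(JsonSerializer.ADD_TYPE_INFO_HEADERS, "false"); // or Boolean.FALSE
        props.put(JsonSerializer.TYPE_MAPPINGS, "event:com.example.events.MyEvent");
        return props;
    }
}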
@@ -53,6 +54,7 @@ * @author Gary Russell * @author Elliot Kennedy * @author Wang Zhiyang + * @author Omer Celik */ public class JsonSerializer<T> implements Serializer<T> { @@ -81,6 +83,8 @@ public class JsonSerializer<T> implements Serializer<T> { private boolean configured; + private final Lock globalLock = new ReentrantLock(); + public JsonSerializer() { this((JavaType) null, JacksonUtils.enhancedObjectMapper()); } @@ -147,31 +151,37 @@ public void setUseTypeMapperForKey(boolean isKey) { } @Override - public synchronized void configure(Map<String, ?> configs, boolean isKey) { - if (this.configured) { - return; - } - Assert.state(!this.setterCalled - || (!configs.containsKey(ADD_TYPE_INFO_HEADERS) && !configs.containsKey(TYPE_MAPPINGS)), - "JsonSerializer must be configured with property setters, or via configuration properties; not both"); - setUseTypeMapperForKey(isKey); - if (configs.containsKey(ADD_TYPE_INFO_HEADERS)) { - Object config = configs.get(ADD_TYPE_INFO_HEADERS); - if (config instanceof Boolean configBoolean) { - this.addTypeInfo = configBoolean; + public void configure(Map<String, ?> configs, boolean isKey) { + try { + this.globalLock.lock(); + if (this.configured) { + return; } - else if (config instanceof String configString) { - this.addTypeInfo = Boolean.parseBoolean(configString); + Assert.state(!this.setterCalled + || (!configs.containsKey(ADD_TYPE_INFO_HEADERS) && !configs.containsKey(TYPE_MAPPINGS)), + "JsonSerializer must be configured with property setters, or via configuration properties; not both"); + setUseTypeMapperForKey(isKey); + if (configs.containsKey(ADD_TYPE_INFO_HEADERS)) { + Object config = configs.get(ADD_TYPE_INFO_HEADERS); + if (config instanceof Boolean configBoolean) { + this.addTypeInfo = configBoolean; + } + else if (config instanceof String configString) { + this.addTypeInfo = Boolean.parseBoolean(configString); + } + else { + throw new IllegalStateException(ADD_TYPE_INFO_HEADERS + " must be Boolean or String"); + } } - else { - throw new IllegalStateException(ADD_TYPE_INFO_HEADERS + " must be Boolean or String"); + if (configs.containsKey(TYPE_MAPPINGS) && !this.typeMapperExplicitlySet + && this.typeMapper instanceof AbstractJavaTypeMapper abstractJavaTypeMapper) { + abstractJavaTypeMapper.setIdClassMapping(createMappings((String) configs.get(TYPE_MAPPINGS))); } + this.configured = true; } - if (configs.containsKey(TYPE_MAPPINGS) && !this.typeMapperExplicitlySet - && this.typeMapper instanceof AbstractJavaTypeMapper abstractJavaTypeMapper) { - abstractJavaTypeMapper.setIdClassMapping(createMappings((String) configs.get(TYPE_MAPPINGS))); + finally { + this.globalLock.unlock(); } - this.configured = true; } protected static Map<String, Class<?>> createMappings(String mappings) { diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/JsonTypeResolver.java b/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/JsonTypeResolver.java index fa3d486017..dad78f8f43 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/JsonTypeResolver.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/JsonTypeResolver.java @@ -1,5 +1,5 @@ /* - * Copyright 2020 the original author or authors. + * Copyright 2020-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
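// Not part of the patch: JsonTypeResolver, whose hunk follows, only has its import order
// changed, but it is a functional interface, so a custom resolver can be wired through the
// setTypeResolver() method seen in an earlier hunk. String.class stands in for a real
// domain type; a real resolver would typically inspect the topic or headers.
import com.fasterxml.jackson.databind.type.TypeFactory;

import org.springframework.kafka.support.serializer.JsonDeserializer;

class TypeResolverSketch {

    JsonDeserializer<Object> deserializer() {
        JsonDeserializer<Object> deserializer = new JsonDeserializer<>();
        // (topic, data, headers) -> JavaType, per the interface javadoc below
        deserializer.setTypeResolver((topic, data, headers) ->
                TypeFactory.defaultInstance().constructType(String.class));
        return deserializer;
    }
}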
@@ -16,9 +16,8 @@ package org.springframework.kafka.support.serializer; -import org.apache.kafka.common.header.Headers; - import com.fasterxml.jackson.databind.JavaType; +import org.apache.kafka.common.header.Headers; /** * Determine the {@link JavaType} from the topic/data/headers. diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/SerializationUtils.java b/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/SerializationUtils.java index 48c0121039..12bc17c54b 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/SerializationUtils.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/SerializationUtils.java @@ -1,5 +1,5 @@ /* - * Copyright 2020-2023 the original author or authors. + * Copyright 2020-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -50,19 +50,19 @@ public final class SerializationUtils { * Header name for deserialization exceptions. * @since 2.8 */ - public static final String DESERIALIZER_EXCEPTION_HEADER_PREFIX = "springDeserializerException"; + public static final String DESERIALIZER_EXCEPTION_HEADER_PREFIX = KafkaUtils.DESERIALIZER_EXCEPTION_HEADER_PREFIX; /** * Header name for deserialization exceptions. * @since 2.8 */ - public static final String KEY_DESERIALIZER_EXCEPTION_HEADER = DESERIALIZER_EXCEPTION_HEADER_PREFIX + "Key"; + public static final String KEY_DESERIALIZER_EXCEPTION_HEADER = KafkaUtils.KEY_DESERIALIZER_EXCEPTION_HEADER; /** * Header name for deserialization exceptions. * @since 2.8 */ - public static final String VALUE_DESERIALIZER_EXCEPTION_HEADER = DESERIALIZER_EXCEPTION_HEADER_PREFIX + "Value"; + public static final String VALUE_DESERIALIZER_EXCEPTION_HEADER = KafkaUtils.VALUE_DESERIALIZER_EXCEPTION_HEADER; private SerializationUtils() { } @@ -241,7 +241,6 @@ protected Class<?> resolveClass(ObjectStreamClass desc) throws IOException, Clas return super.resolveClass(desc); } - }; return (DeserializationException) ois.readObject(); } diff --git a/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/ToStringSerializer.java b/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/ToStringSerializer.java index b2691c0870..f80e33ccf6 100644 --- a/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/ToStringSerializer.java +++ b/spring-kafka/src/main/java/org/springframework/kafka/support/serializer/ToStringSerializer.java @@ -1,5 +1,5 @@ /* - * Copyright 2016-2020 the original author or authors. + * Copyright 2016-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -43,7 +43,6 @@ public class ToStringSerializer<T> implements Serializer<T> { */ public static final String ADD_TYPE_INFO_HEADERS = "spring.message.add.type.headers"; - /** * Header for the type of key.
*/ diff --git a/spring-kafka/src/test/java/org/springframework/kafka/annotation/AliasPropertiesTests.java b/spring-kafka/src/test/java/org/springframework/kafka/annotation/AliasPropertiesTests.java index e719213169..1fbded44a8 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/annotation/AliasPropertiesTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/annotation/AliasPropertiesTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2023 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,14 +16,13 @@ package org.springframework.kafka.annotation; -import static org.assertj.core.api.Assertions.assertThat; - import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; import java.lang.reflect.AnnotatedElement; import java.lang.reflect.Method; +import java.util.HashMap; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -53,9 +52,12 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Gary Russell * @author Artem Bilan + * @author Soby Chacko * * @since 2.2 * */ @@ -113,13 +115,15 @@ public static class Config { @Bean public static AnnotationEnhancer mainEnhancer() { + return (attrs, element) -> { - attrs.put("groupId", attrs.get("id") + "." + (element instanceof Class + Map<String, Object> newAttrs = new HashMap<>(attrs); + newAttrs.put("groupId", attrs.get("id") + "." + (element instanceof Class ? ((Class<?>) element).getSimpleName() : ((Method) element).getDeclaringClass().getSimpleName() + "." + ((Method) element).getName())); orderedCalledFirst.set(true); - return attrs; + return newAttrs; }; } diff --git a/spring-kafka/src/test/java/org/springframework/kafka/annotation/AsyncListenerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/annotation/AsyncListenerTests.java index 24d668737e..1b90d4e2cc 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/annotation/AsyncListenerTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/annotation/AsyncListenerTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2023 the original author or authors. + * Copyright 2023-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
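// Not part of the patch: AsyncListenerTests, whose hunks follow, exercises asynchronous
// @KafkaListener return types (CompletableFuture and Mono, per its imports). A minimal
// sketch of such a listener; the id and topic names are illustrative.
import java.util.concurrent.CompletableFuture;

import org.springframework.kafka.annotation.KafkaListener;

class AsyncListenerSketch {

    @KafkaListener(id = "asyncSketch", topics = "asyncTopic")
    CompletableFuture<Void> listen(String data) {
        // completion (and hence acknowledgment handling) happens when the future completes
        return CompletableFuture.runAsync(() -> process(data));
    }

    void process(String data) {
        // application logic
    }
}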
@@ -16,8 +16,6 @@ package org.springframework.kafka.annotation; -import static org.assertj.core.api.Assertions.assertThat; - import java.util.List; import java.util.Map; import java.util.concurrent.CompletableFuture; @@ -27,6 +25,7 @@ import org.apache.kafka.clients.consumer.ConsumerRecord; import org.junit.jupiter.api.Test; +import reactor.core.publisher.Mono; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.context.annotation.Bean; @@ -45,7 +44,7 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; -import reactor.core.publisher.Mono; +import static org.assertj.core.api.Assertions.assertThat; @SpringJUnitConfig @DirtiesContext diff --git a/spring-kafka/src/test/java/org/springframework/kafka/annotation/BatchListenerConversion2Tests.java b/spring-kafka/src/test/java/org/springframework/kafka/annotation/BatchListenerConversion2Tests.java index 5371612e31..af5477e44c 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/annotation/BatchListenerConversion2Tests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/annotation/BatchListenerConversion2Tests.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2020 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,8 +16,6 @@ package org.springframework.kafka.annotation; -import static org.assertj.core.api.Assertions.assertThat; - import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -48,6 +46,8 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Gary Russell * @author Victor Perez Rey diff --git a/spring-kafka/src/test/java/org/springframework/kafka/annotation/BatchListenerConversionTests.java b/spring-kafka/src/test/java/org/springframework/kafka/annotation/BatchListenerConversionTests.java index d7ed4c1996..ea124fb3a6 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/annotation/BatchListenerConversionTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/annotation/BatchListenerConversionTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2023 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
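// Not part of the patch: the batch conversion tests below feed JSON records to listeners
// taking typed List payloads. A minimal sketch of such a converting batch listener;
// Foo stands in for the tests' payload type and "blc1" is one of the test topics.
import java.util.List;

import org.springframework.kafka.annotation.KafkaListener;

class BatchConversionSketch {

    record Foo(String bar) {
    }

    @KafkaListener(id = "batchSketch", topics = "blc1", batch = "true")
    void listen(List<Foo> foos) {
        // each JSON record in the polled batch is converted to a Foo before this call
    }
}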
@@ -16,12 +16,6 @@ package org.springframework.kafka.annotation; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatNoException; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; - import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -74,6 +68,12 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatNoException; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; + /** * @author Gary Russell * @author Artem Bilan @@ -84,7 +84,7 @@ */ @SpringJUnitConfig @DirtiesContext -@EmbeddedKafka(kraft = false, partitions = 1, topics = { "blc1", "blc2", "blc3", "blc4", "blc5", "blc6", "blc6.DLT" }) +@EmbeddedKafka(kraft = false, partitions = 1, topics = { "blc1", "blc2", "blc3", "blc4", "blc5", "blc6", "blc6-dlt" }) public class BatchListenerConversionTests { private static final String DEFAULT_TEST_GROUP_ID = "blc"; @@ -378,7 +378,7 @@ public void listen5(List foos, } } - @KafkaListener(topics = "blc6.DLT", groupId = "blc6.DLT", + @KafkaListener(topics = "blc6-dlt", groupId = "blc6-dlt", properties = ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG + ":org.apache.kafka.common.serialization.StringDeserializer") public void listen5Dlt(String in) { diff --git a/spring-kafka/src/test/java/org/springframework/kafka/annotation/ContainerFactoryTests.java b/spring-kafka/src/test/java/org/springframework/kafka/annotation/ContainerFactoryTests.java index 0c6dc77fd0..030780cc3f 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/annotation/ContainerFactoryTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/annotation/ContainerFactoryTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2021 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
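// Not part of the patch: the rename "blc6.DLT" -> "blc6-dlt" above implies a non-default
// dead-letter topic name (the DeadLetterPublishingRecoverer default appends ".DLT").
// A sketch of steering the recoverer to a "-dlt" suffix via its destination resolver;
// the suffix choice here is an assumption, not something the patch states.
import org.apache.kafka.common.TopicPartition;

import org.springframework.kafka.core.KafkaOperations;
import org.springframework.kafka.listener.DeadLetterPublishingRecoverer;

class DltNamingSketch {

    DeadLetterPublishingRecoverer recoverer(KafkaOperations<Object, Object> template) {
        // same partition, "-dlt"-suffixed topic
        return new DeadLetterPublishingRecoverer(template,
                (record, ex) -> new TopicPartition(record.topic() + "-dlt", record.partition()));
    }
}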
@@ -16,9 +16,6 @@ package org.springframework.kafka.annotation; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; - import java.util.concurrent.atomic.AtomicBoolean; import org.junit.jupiter.api.Test; @@ -33,6 +30,9 @@ import org.springframework.kafka.support.converter.MessageConverter; import org.springframework.kafka.test.utils.KafkaTestUtils; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell * @since 2.2 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/annotation/EnableKafkaIntegrationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/annotation/EnableKafkaIntegrationTests.java index 27808beea3..96f3731ea9 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/annotation/EnableKafkaIntegrationTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/annotation/EnableKafkaIntegrationTests.java @@ -16,18 +16,6 @@ package org.springframework.kafka.annotation; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; -import static org.awaitility.Awaitility.await; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.BDDMockito.willReturn; -import static org.mockito.BDDMockito.willThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; - import java.io.PrintWriter; import java.io.StringWriter; import java.lang.reflect.Type; @@ -50,7 +38,15 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; +import java.util.stream.IntStream; +import io.micrometer.core.instrument.ImmutableTag; +import io.micrometer.core.instrument.MeterRegistry; +import io.micrometer.core.instrument.Timer; +import io.micrometer.core.instrument.search.MeterNotFoundException; +import io.micrometer.core.instrument.simple.SimpleMeterRegistry; +import jakarta.validation.Valid; +import jakarta.validation.constraints.Max; import org.aopalliance.intercept.MethodInterceptor; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -76,6 +72,7 @@ import org.springframework.beans.factory.config.BeanPostProcessor; import org.springframework.beans.factory.config.ConfigurableBeanFactory; import org.springframework.context.ApplicationContext; +import org.springframework.context.ApplicationListener; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.context.annotation.Role; @@ -98,6 +95,7 @@ import org.springframework.kafka.core.MicrometerConsumerListener; import org.springframework.kafka.core.MicrometerProducerListener; import org.springframework.kafka.core.ProducerFactory; +import org.springframework.kafka.event.ConsumerStartedEvent; import org.springframework.kafka.event.ListenerContainerIdleEvent; import org.springframework.kafka.event.ListenerContainerNoLongerIdleEvent; import org.springframework.kafka.listener.AbstractConsumerSeekAware; @@ -149,6 +147,7 @@ import org.springframework.messaging.handler.invocation.HandlerMethodArgumentResolver; import org.springframework.messaging.support.GenericMessage; import 
org.springframework.messaging.support.MessageBuilder; +import org.springframework.scheduling.concurrent.SimpleAsyncTaskScheduler; import org.springframework.stereotype.Component; import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.TestPropertySource; @@ -160,13 +159,17 @@ import org.springframework.validation.Errors; import org.springframework.validation.Validator; -import io.micrometer.core.instrument.ImmutableTag; -import io.micrometer.core.instrument.MeterRegistry; -import io.micrometer.core.instrument.Timer; -import io.micrometer.core.instrument.search.MeterNotFoundException; -import io.micrometer.core.instrument.simple.SimpleMeterRegistry; -import jakarta.validation.Valid; -import jakarta.validation.constraints.Max; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; +import static org.awaitility.Awaitility.await; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.BDDMockito.willReturn; +import static org.mockito.BDDMockito.willThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; /** * @author Gary Russell @@ -176,12 +179,16 @@ * @author Dimitri Penner * @author Nakul Mishra * @author Soby Chacko + * @author Wang Zhiyang + * @author Borahm Lee */ @SpringJUnitConfig @DirtiesContext @EmbeddedKafka(topics = {"annotated1", "annotated2", "annotated3", "annotated3x", "annotated4", "annotated5", "annotated6", "annotated7", "annotated8", "annotated8reply", - "annotated9", "annotated10", + "annotated9", "annotated10", EnableKafkaIntegrationTests.ANNO_TOPIC_PARTITION_SPEL_ONE, + EnableKafkaIntegrationTests.ANNO_TOPIC_PARTITION_SPEL_TWO, + EnableKafkaIntegrationTests.TOPIC_SEEK_POSITION, EnableKafkaIntegrationTests.TOPIC_SEEK_POSITION_TIMESTAMP, "annotated11", "annotated12", "annotated13", "annotated14", "annotated15", "annotated16", "annotated17", "annotated18", "annotated19", "annotated20", "annotated21", "annotated21reply", "annotated22", "annotated22reply", "annotated23", "annotated23reply", "annotated24", "annotated24reply", @@ -189,12 +196,32 @@ "annotated29", "annotated30", "annotated30reply", "annotated31", "annotated32", "annotated33", "annotated34", "annotated35", "annotated36", "annotated37", "foo", "manualStart", "seekOnIdle", "annotated38", "annotated38reply", "annotated39", "annotated40", "annotated41", "annotated42", - "annotated43", "annotated43reply"}) + "annotated43", "annotated43reply", "seekToComputeFn"}, kraft = true) @TestPropertySource(properties = "spel.props=fetch.min.bytes=420000,max.poll.records=10") public class EnableKafkaIntegrationTests { private static final String DEFAULT_TEST_GROUP_ID = "testAnnot"; + private static final String LISTENER_ID_SEEK_POSITION_BEGINNING = "seekPositionBeginning"; + + private static final String LISTENER_ID_SEEK_POSITION_END = "seekPositionEnd"; + + private static final String LISTENER_ID_SEEK_POSITION_TIMESTAMP_0 = "seekPositionTimestamp0"; + + private static final String LISTENER_ID_SEEK_POSITION_TIMESTAMP_1 = "seekPositionTimestamp1"; + + private static final String LISTENER_ID_TOPIC_PARTITION_SPEL_ONE = "seekTopicPartitionSpel1"; + + private static final String LISTENER_ID_TOPIC_PARTITION_SPEL_TWO = "seekTopicPartitionSpel2"; + + public static final String TOPIC_SEEK_POSITION = 
"annotatedPartitionOffset1"; + + public static final String TOPIC_SEEK_POSITION_TIMESTAMP = "annotatedPartitionOffset2"; + + public static final String ANNO_TOPIC_PARTITION_SPEL_ONE = "annotatedTopicPartitionSpel1"; + + public static final String ANNO_TOPIC_PARTITION_SPEL_TWO = "annotatedTopicPartitionSpel2"; + private static final Log logger = LogFactory.getLog(EnableKafkaIntegrationTests.class); @Autowired @@ -248,6 +275,9 @@ public class EnableKafkaIntegrationTests { @Autowired private SeekToLastOnIdleListener seekOnIdleListener; + @Autowired + private SeekToOffsetFromComputeFunction seekToOffsetFromComputeFunction; + @Autowired private MeterRegistry meterRegistry; @@ -273,7 +303,7 @@ public void testAnonymous() { } @Test - public void manyTests() throws Exception { + void manyTests() throws Exception { this.recordFilter.called = false; template.send("annotated1", 0, "foo"); template.send("annotated1", 0, "bar"); @@ -336,48 +366,20 @@ public void manyTests() throws Exception { "listenerConsumer.consumer")); assertThat( KafkaTestUtils.getPropertyValue(this.listener.listen4Consumer, - "fetcher.fetchConfig.maxPollRecords", Integer.class)) + "delegate.fetcher.fetchConfig.maxPollRecords", Integer.class)) .isEqualTo(100); assertThat(this.quxGroup).hasSize(1); assertThat(this.quxGroup.get(0)).isSameAs(manualContainer); List containers = KafkaTestUtils.getPropertyValue(manualContainer, "containers", List.class); assertThat(KafkaTestUtils.getPropertyValue(containers.get(0), "listenerConsumer.consumerGroupId")) .isEqualTo("qux"); - assertThat(KafkaTestUtils.getPropertyValue(containers.get(0), "listenerConsumer.consumer.clientId")) + assertThat(KafkaTestUtils.getPropertyValue(containers.get(0), "listenerConsumer.consumer.delegate.clientId")) .isEqualTo("clientIdViaProps3-0"); template.send("annotated4", 0, "foo"); assertThat(this.listener.noLongerIdleEventLatch.await(60, TimeUnit.SECONDS)).isTrue(); assertThat(this.listener.noLongerIdleEvent.getListenerId()).startsWith("qux-"); - template.send("annotated5", 0, 0, "foo"); - template.send("annotated5", 1, 0, "bar"); - template.send("annotated6", 0, 0, "baz"); - template.send("annotated6", 1, 0, "qux"); - template.flush(); - assertThat(this.listener.latch5.await(60, TimeUnit.SECONDS)).isTrue(); - MessageListenerContainer fizConcurrentContainer = registry.getListenerContainer("fiz"); - assertThat(fizConcurrentContainer).isNotNull(); - MessageListenerContainer fizContainer = (MessageListenerContainer) KafkaTestUtils - .getPropertyValue(fizConcurrentContainer, "containers", List.class).get(0); - TopicPartitionOffset offset = KafkaTestUtils.getPropertyValue(fizContainer, "topicPartitions", - TopicPartitionOffset[].class)[2]; - assertThat(offset.isRelativeToCurrent()).isFalse(); - offset = KafkaTestUtils.getPropertyValue(fizContainer, "topicPartitions", - TopicPartitionOffset[].class)[3]; - assertThat(offset.isRelativeToCurrent()).isTrue(); - assertThat(KafkaTestUtils.getPropertyValue(fizContainer, - "listenerConsumer.consumer.groupId", Optional.class).get()) - .isEqualTo("fiz"); - assertThat(KafkaTestUtils.getPropertyValue(fizContainer, "listenerConsumer.consumer.clientId")) - .isEqualTo("clientIdViaAnnotation-0"); - assertThat(KafkaTestUtils.getPropertyValue(fizContainer, - "listenerConsumer.consumer.fetcher.fetchConfig.maxPollRecords")) - .isEqualTo(10); - assertThat(KafkaTestUtils.getPropertyValue(fizContainer, - "listenerConsumer.consumer.fetcher.fetchConfig.minBytes")) - .isEqualTo(420000); - MessageListenerContainer rebalanceConcurrentContainer = 
registry.getListenerContainer("rebalanceListener"); assertThat(rebalanceConcurrentContainer).isNotNull(); assertThat(rebalanceConcurrentContainer.isAutoStartup()).isFalse(); @@ -398,9 +400,9 @@ public void manyTests() throws Exception { MessageListenerContainer rebalanceContainer = (MessageListenerContainer) KafkaTestUtils .getPropertyValue(rebalanceConcurrentContainer, "containers", List.class).get(0); - assertThat(KafkaTestUtils.getPropertyValue(rebalanceContainer, "listenerConsumer.consumer.groupId")) + assertThat(KafkaTestUtils.getPropertyValue(rebalanceContainer, "listenerConsumer.consumer.delegate.groupId")) .isNotEqualTo("rebalanceListener"); - String clientId = KafkaTestUtils.getPropertyValue(rebalanceContainer, "listenerConsumer.consumer.clientId", + String clientId = KafkaTestUtils.getPropertyValue(rebalanceContainer, "listenerConsumer.consumer.delegate.clientId", String.class); assertThat(clientId).startsWith("rebal-"); assertThat(clientId.indexOf('-')).isEqualTo(clientId.lastIndexOf('-')); @@ -412,6 +414,89 @@ public void manyTests() throws Exception { assertThat(adapter).extracting("recordFilterStrategy").isSameAs(this.lambdaAll); } + @Test + void testAnnotationTopicPartitionOffset() throws Exception { + template.send("annotated5", 0, 0, "foo"); + template.send("annotated5", 1, 0, "bar"); + template.send("annotated6", 0, 0, "baz"); + template.send("annotated6", 1, 0, "qux"); + template.flush(); + MessageListenerContainer fizConcurrentContainer = registry.getListenerContainer("fiz"); + assertThat(fizConcurrentContainer).isNotNull(); + fizConcurrentContainer.start(); + assertThat(this.listener.latch5.await(60, TimeUnit.SECONDS)).isTrue(); + + MessageListenerContainer fizContainer = (MessageListenerContainer) KafkaTestUtils + .getPropertyValue(fizConcurrentContainer, "containers", List.class).get(0); + TopicPartitionOffset offset = KafkaTestUtils.getPropertyValue(fizContainer, "topicPartitions", + TopicPartitionOffset[].class)[2]; + assertThat(offset.isRelativeToCurrent()).isFalse(); + offset = KafkaTestUtils.getPropertyValue(fizContainer, "topicPartitions", + TopicPartitionOffset[].class)[3]; + assertThat(offset.isRelativeToCurrent()).isTrue(); + assertThat(KafkaTestUtils.getPropertyValue(fizContainer, + "listenerConsumer.consumer.delegate.groupId", Optional.class).get()) + .isEqualTo("fiz"); + assertThat(KafkaTestUtils.getPropertyValue(fizContainer, "listenerConsumer.consumer.delegate.clientId")) + .isEqualTo("clientIdViaAnnotation-0"); + assertThat(KafkaTestUtils.getPropertyValue(fizContainer, + "listenerConsumer.consumer.delegate.fetcher.fetchConfig.maxPollRecords")) + .isEqualTo(10); + assertThat(KafkaTestUtils.getPropertyValue(fizContainer, + "listenerConsumer.consumer.delegate.fetcher.fetchConfig.minBytes")) + .isEqualTo(420000); + + template.send(ANNO_TOPIC_PARTITION_SPEL_ONE, 0, 0, "annoTopicPartitionOne0"); + template.send(ANNO_TOPIC_PARTITION_SPEL_ONE, 1, 1, "annoTopicPartitionOne1"); + template.flush(); + MessageListenerContainer spel1 = registry.getListenerContainer(LISTENER_ID_TOPIC_PARTITION_SPEL_ONE); + assertThat(spel1).isNotNull(); + spel1.start(); + assertThat(this.listener.latchSpel1.await(60, TimeUnit.SECONDS)).isTrue(); + + template.send(ANNO_TOPIC_PARTITION_SPEL_TWO, 0, 0, "annoTopicPartitionTwo0"); + template.send(ANNO_TOPIC_PARTITION_SPEL_TWO, 1, 1, "annoTopicPartitionTwo1"); + template.flush(); + MessageListenerContainer spel2 = registry.getListenerContainer(LISTENER_ID_TOPIC_PARTITION_SPEL_ONE); + assertThat(spel2).isNotNull(); + spel2.start(); + 
assertThat(this.listener.latchSpel2.await(60, TimeUnit.SECONDS)).isTrue(); + + template.send(TOPIC_SEEK_POSITION, 1, 1, "seekPosition2"); + template.flush(); + MessageListenerContainer seekPositionBeginning = registry.getListenerContainer(LISTENER_ID_SEEK_POSITION_BEGINNING); + assertThat(seekPositionBeginning).isNotNull(); + seekPositionBeginning.start(); + assertThat(this.listener.latchSpBeginning.await(60, TimeUnit.SECONDS)).isTrue(); + + template.send(TOPIC_SEEK_POSITION, 0, 0, "seekPosition1"); + template.flush(); + MessageListenerContainer seekPositionEnd = registry.getListenerContainer(LISTENER_ID_SEEK_POSITION_END); + assertThat(seekPositionEnd).isNotNull(); + seekPositionEnd.start(); + assertThat(this.listener.latchSpEndConsumerStarted.await(60, TimeUnit.SECONDS)).isTrue(); + template.send(TOPIC_SEEK_POSITION, 0, 0, "seekPosition2"); + template.flush(); + assertThat(this.listener.latchSpEnd.await(60, TimeUnit.SECONDS)).isTrue(); + + template.send(TOPIC_SEEK_POSITION_TIMESTAMP, 0, 0, "sp3"); + template.flush(); + MessageListenerContainer seekPositionTimestamp0 = registry.getListenerContainer(LISTENER_ID_SEEK_POSITION_TIMESTAMP_0); + assertThat(seekPositionTimestamp0).isNotNull(); + seekPositionTimestamp0.start(); + assertThat(this.listener.latchSpTimestamp0ConsumerStarted.await(60, TimeUnit.SECONDS)).isTrue(); + template.send(TOPIC_SEEK_POSITION_TIMESTAMP, 0, 0, "sp4"); + template.flush(); + assertThat(this.listener.latchSpTimestamp0.await(60, TimeUnit.SECONDS)).isTrue(); + + template.send(TOPIC_SEEK_POSITION_TIMESTAMP, 1, 1, "sp5"); + template.flush(); + MessageListenerContainer seekPositionTimestamp1 = registry.getListenerContainer(LISTENER_ID_SEEK_POSITION_TIMESTAMP_1); + assertThat(seekPositionTimestamp1).isNotNull(); + seekPositionTimestamp1.start(); + assertThat(this.listener.latchSpTimestamp1.await(60, TimeUnit.SECONDS)).isTrue(); + } + @Test public void testAutoStartup() { MessageListenerContainer listenerContainer = registry.getListenerContainer("manualStart"); @@ -531,13 +616,13 @@ public void testJson() throws Exception { MessageListenerContainer buzContainer = (MessageListenerContainer) KafkaTestUtils .getPropertyValue(buzConcurrentContainer, "containers", List.class).get(0); assertThat(KafkaTestUtils.getPropertyValue(buzContainer, - "listenerConsumer.consumer.groupId", Optional.class).get()) + "listenerConsumer.consumer.delegate.groupId", Optional.class).get()) .isEqualTo("buz.explicitGroupId"); assertThat(KafkaTestUtils.getPropertyValue(buzContainer, - "listenerConsumer.consumer.fetcher.fetchConfig.maxPollRecords")) + "listenerConsumer.consumer.delegate.fetcher.fetchConfig.maxPollRecords")) .isEqualTo(5); assertThat(KafkaTestUtils.getPropertyValue(buzContainer, - "listenerConsumer.consumer.fetcher.fetchConfig.minBytes")) + "listenerConsumer.consumer.delegate.fetcher.fetchConfig.minBytes")) .isEqualTo(123456); } @@ -997,7 +1082,7 @@ public void testSeekToLastOnIdle() throws InterruptedException { assertThat(this.seekOnIdleListener.latch3.await(10, TimeUnit.SECONDS)).isTrue(); this.registry.getListenerContainer("seekOnIdle").stop(); assertThat(this.seekOnIdleListener.latch4.await(10, TimeUnit.SECONDS)).isTrue(); - assertThat(KafkaTestUtils.getPropertyValue(this.seekOnIdleListener, "callbacks", Map.class)).hasSize(0); + assertThat(KafkaTestUtils.getPropertyValue(this.seekOnIdleListener, "topicToCallbacks", Map.class)).hasSize(0); } @SuppressWarnings({"unchecked", "rawtypes"}) @@ -1069,6 +1154,13 @@ void classLevelTwoInstancesSameClass() { 
assertThat(this.registry.getListenerContainer("multiTwoTwo")).isNotNull(); } + @Test + void seekToOffsetComputedFromFunction() throws InterruptedException { + IntStream.range(0, 10).forEach(i -> template.send("seekToComputeFn", 0, i, "my-data")); + assertThat(this.seekToOffsetFromComputeFunction.latch1.await(10, TimeUnit.SECONDS)).isTrue(); + assertThat(this.seekToOffsetFromComputeFunction.latch2.await(10, TimeUnit.SECONDS)).isTrue(); + } + @Configuration @EnableKafka @EnableTransactionManagement(proxyTargetClass = true) @@ -1397,8 +1489,9 @@ public DefaultKafkaConsumerFactory bytesStringConsumerFactory() Map configs = consumerConfigs(); configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class); DefaultKafkaConsumerFactory cf = new DefaultKafkaConsumerFactory<>(configs); - cf.addListener(new MicrometerConsumerListener(meterRegistry(), - Collections.singletonList(new ImmutableTag("consumerTag", "bytesString")))); + cf.addListener(new MicrometerConsumerListener<>(meterRegistry(), + Collections.singletonList(new ImmutableTag("consumerTag", "bytesString")), + new SimpleAsyncTaskScheduler())); return cf; } @@ -1425,6 +1518,11 @@ public SeekToLastOnIdleListener seekOnIdle() { return new SeekToLastOnIdleListener(); } + @Bean + public SeekToOffsetFromComputeFunction seekToOffsetFromComputeFunction() { + return new SeekToOffsetFromComputeFunction(); + } + @Bean public IfaceListener ifaceListener() { return new IfaceListenerImpl(); @@ -1483,7 +1581,8 @@ public ProducerFactory bytesStringProducerFactory() { configs.put(ProducerConfig.CLIENT_ID_CONFIG, "bsPF"); DefaultKafkaProducerFactory pf = new DefaultKafkaProducerFactory<>(configs); pf.addListener(new MicrometerProducerListener<>(meterRegistry(), - Collections.singletonList(new ImmutableTag("producerTag", "bytesString")))); + Collections.singletonList(new ImmutableTag("producerTag", "bytesString")), + new SimpleAsyncTaskScheduler())); return pf; } @@ -1830,7 +1929,7 @@ public void listen(String in) { } @Component - static class Listener implements ConsumerSeekAware { + static class Listener implements ConsumerSeekAware, ApplicationListener { private final ThreadLocal seekCallBack = new ThreadLocal<>(); @@ -1846,7 +1945,23 @@ static class Listener implements ConsumerSeekAware { final CountDownLatch latch4 = new CountDownLatch(1); - final CountDownLatch latch5 = new CountDownLatch(1); + final CountDownLatch latch5 = new CountDownLatch(4); + + final CountDownLatch latchSpel1 = new CountDownLatch(2); + + final CountDownLatch latchSpel2 = new CountDownLatch(2); + + final CountDownLatch latchSpBeginning = new CountDownLatch(1); + + final CountDownLatch latchSpEnd = new CountDownLatch(1); + + final CountDownLatch latchSpEndConsumerStarted = new CountDownLatch(1); + + final CountDownLatch latchSpTimestamp0 = new CountDownLatch(1); + + final CountDownLatch latchSpTimestamp0ConsumerStarted = new CountDownLatch(1); + + final CountDownLatch latchSpTimestamp1 = new CountDownLatch(1); final CountDownLatch latch6 = new CountDownLatch(1); @@ -2055,17 +2170,67 @@ public void eventHandler(ListenerContainerNoLongerIdleEvent event) { noLongerIdleEventLatch.countDown(); } - @KafkaListener(id = "fiz", topicPartitions = { + @KafkaListener(id = "fiz", autoStartup = "false", topicPartitions = { @TopicPartition(topic = "annotated5", partitions = {"#{'${foo:0,1}'.split(',')}"}), @TopicPartition(topic = "annotated6", partitions = "0", partitionOffsets = @PartitionOffset(partition = "${xxx:1}", initialOffset = "${yyy:0}", relativeToCurrent = 
"${zzz:true}")) }, clientIdPrefix = "${foo.xxx:clientIdViaAnnotation}", properties = "#{'${spel.props}'.split(',')}") - public void listen5(ConsumerRecord record) { + void listen5(ConsumerRecord record) { this.capturedRecord = record; this.latch5.countDown(); } + @KafkaListener(id = LISTENER_ID_TOPIC_PARTITION_SPEL_ONE, topicPartitions = { + @TopicPartition(topic = ANNO_TOPIC_PARTITION_SPEL_ONE, partitions = "#{0 + 0}, #{0 + 1}") + }) + void annotationTopicPartitionSpelOne(ConsumerRecord record) { + this.latchSpel1.countDown(); + } + + @KafkaListener(id = LISTENER_ID_TOPIC_PARTITION_SPEL_TWO, topicPartitions = { + @TopicPartition(topic = ANNO_TOPIC_PARTITION_SPEL_TWO, partitions = "#{new Integer[]{0,1}}") + }) + void annotationTopicPartitionSpelTwo(ConsumerRecord record) { + this.latchSpel2.countDown(); + } + + @KafkaListener(id = LISTENER_ID_SEEK_POSITION_END, autoStartup = "false", topicPartitions = { + @TopicPartition(topic = TOPIC_SEEK_POSITION, partitionOffsets = + @PartitionOffset(partition = "${p:0}", initialOffset = "1", seekPosition = "END") + ) + }) + void annotationPartitionOffsetSeekPositionEnd(ConsumerRecord record) { + this.latchSpEnd.countDown(); + } + + @KafkaListener(id = LISTENER_ID_SEEK_POSITION_BEGINNING, autoStartup = "false", topicPartitions = { + @TopicPartition(topic = TOPIC_SEEK_POSITION, partitionOffsets = + @PartitionOffset(partition = "${p:1}", initialOffset = "1", seekPosition = "${sp:BEGINNING}") + ) + }) + void annotationPartitionOffsetSeekPositionBeginning(ConsumerRecord record) { + this.latchSpBeginning.countDown(); + } + + @KafkaListener(id = LISTENER_ID_SEEK_POSITION_TIMESTAMP_0, autoStartup = "false", topicPartitions = { + @TopicPartition(topic = TOPIC_SEEK_POSITION_TIMESTAMP, partitionOffsets = + @PartitionOffset(partition = "0", initialOffset = "9999999999000", seekPosition = "TIMESTAMP") + ) + }) + void annotationPartitionOffsetSeekPositionTimestampNoMatch(ConsumerRecord record) { + this.latchSpTimestamp0.countDown(); + } + + @KafkaListener(id = LISTENER_ID_SEEK_POSITION_TIMESTAMP_1, autoStartup = "false", topicPartitions = { + @TopicPartition(topic = TOPIC_SEEK_POSITION_TIMESTAMP, partitionOffsets = + @PartitionOffset(partition = "1", initialOffset = "723916800000", seekPosition = "TIMESTAMP") + ) + }) + void annotationPartitionOffsetSeekPositionTimestamp(ConsumerRecord record) { + this.latchSpTimestamp1.countDown(); + } + @KafkaListener(id = "buz", topics = "annotated10", containerFactory = "kafkaJsonListenerContainerFactory", groupId = "buz.explicitGroupId", properties = "#{@buzProps}") public void listen6(Foo foo) { @@ -2313,6 +2478,19 @@ public void registerSeekCallback(ConsumerSeekCallback callback) { this.seekCallBack.set(callback); } + @Override + @SuppressWarnings("rawtypes") + public void onApplicationEvent(ConsumerStartedEvent event) { + KafkaMessageListenerContainer container = event.getSource(KafkaMessageListenerContainer.class); + String listenerId = container.getListenerId(); + if ((LISTENER_ID_SEEK_POSITION_END + "-0").equals(listenerId)) { + this.latchSpEndConsumerStarted.countDown(); + } + else if ((LISTENER_ID_SEEK_POSITION_TIMESTAMP_0 + "-0").equals(listenerId)) { + this.latchSpTimestamp0ConsumerStarted.countDown(); + } + } + } static class ProxyListenerPostProcessor implements BeanPostProcessor { @@ -2333,6 +2511,31 @@ public Object postProcessBeforeInitialization(Object bean, String beanName) thro } } + public static class SeekToOffsetFromComputeFunction extends AbstractConsumerSeekAware { + + CountDownLatch latch1 = new 
CountDownLatch(10); + + CountDownLatch latch2 = new CountDownLatch(1); + + @KafkaListener(id = "seekToComputeFn", topics = "seekToComputeFn") + public void listen(String in) throws InterruptedException { + if (latch2.getCount() > 0) { // if latch2 is zero, the test condition is met + if (latch1.getCount() == 0) { // Seek happened on the consumer + latch2.countDown(); + } + if (latch1.getCount() > 0) { + latch1.countDown(); + if (latch1.getCount() == 0) { + List<ConsumerSeekCallback> seekToComputeFunctions = getSeekCallbacksFor( + new org.apache.kafka.common.TopicPartition("seekToComputeFn", 0)); + assertThat(seekToComputeFunctions).isNotEmpty(); + seekToComputeFunctions.forEach(callback -> callback.seek("seekToComputeFn", 0, current -> 0L)); + } + } + } + } + } + public static class SeekToLastOnIdleListener extends AbstractConsumerSeekAware { final CountDownLatch latch1 = new CountDownLatch(10); @@ -2375,14 +2578,15 @@ public void onIdleContainer(Map<TopicPartition, Long> assignments } public void rewindAllOneRecord() { - getSeekCallbacks() - .forEach((tp, callback) -> - callback.seekRelative(tp.topic(), tp.partition(), -1, true)); + getTopicsAndCallbacks() + .forEach((tp, callbacks) -> + callbacks.forEach(callback -> callback.seekRelative(tp.topic(), tp.partition(), -1, true)) + ); } public void rewindOnePartitionOneRecord(String topic, int partition) { - getSeekCallbackFor(new org.apache.kafka.common.TopicPartition(topic, partition)) - .seekRelative(topic, partition, -1, true); + getSeekCallbacksFor(new org.apache.kafka.common.TopicPartition(topic, partition)) + .forEach(callback -> callback.seekRelative(topic, partition, -1, true)); } @Override @@ -2611,7 +2815,6 @@ public static class Foo implements Bar { private String bar; - public Foo() { } diff --git a/spring-kafka/src/test/java/org/springframework/kafka/config/KafkaListenerEndpointRegistryTests.java b/spring-kafka/src/test/java/org/springframework/kafka/config/KafkaListenerEndpointRegistryTests.java index bccf4bd6de..2150737fb1 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/config/KafkaListenerEndpointRegistryTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/config/KafkaListenerEndpointRegistryTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
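The SeekToOffsetFromComputeFunction class above exercises two related API changes in AbstractConsumerSeekAware: getSeekCallbackFor(tp) is superseded by getSeekCallbacksFor(tp), which returns a List because every concurrent consumer assigned the partition registers its own callback, and ConsumerSeekCallback.seek(...) now also accepts a function that computes the target offset from the current one (the rewind* methods of SeekToLastOnIdleListener show the same list-based pattern). A minimal sketch of the idiom, with a hypothetical listener id and topic:

import java.util.List;

import org.apache.kafka.common.TopicPartition;

import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.listener.AbstractConsumerSeekAware;

public class RewindingListener extends AbstractConsumerSeekAware {

	@KafkaListener(id = "rewinder", topics = "sketch-topic") // hypothetical id and topic
	void listen(String in) {
		// One callback per concurrent consumer currently assigned to the partition.
		List<ConsumerSeekCallback> callbacks = getSeekCallbacksFor(new TopicPartition("sketch-topic", 0));
		// The third argument computes the target offset from the current position.
		callbacks.forEach(callback -> callback.seek("sketch-topic", 0, current -> Math.max(current - 1, 0)));
	}

}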
@@ -16,18 +16,35 @@ package org.springframework.kafka.config; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.BDDMockito.given; -import static org.mockito.Mockito.mock; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.function.BiPredicate; +import java.util.function.Predicate; +import java.util.stream.Stream; +import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.springframework.context.support.GenericApplicationContext; +import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; import org.springframework.kafka.listener.MessageListenerContainer; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell - * @since 2.8.9 + * @author Joo Hyuk Kim + * @author Artem Bilan * + * @since 2.8.9 */ public class KafkaListenerEndpointRegistryTests { @@ -47,4 +64,138 @@ void unregister() { assertThat(unregistered).isSameAs(container); } + @DisplayName("getListenerContainersMatching throws on null predicate") + @Test + void getListenerContainersMatchingThrowsOnNullPredicate() { + // Given + KafkaListenerEndpointRegistry registry = new KafkaListenerEndpointRegistry(); + // When & Then + assertThatIllegalArgumentException() + .isThrownBy(() -> registry.getListenerContainersMatching((Predicate) null)) + .withMessage("'idMatcher' cannot be null"); + } + + @DisplayName("getListenerContainersMatching with BiPredicate throws on null biPredicate") + @Test + void getListenerContainersMatchingBiPredicateThrowsOnNullBiPredicate() { + // Given + KafkaListenerEndpointRegistry registry = new KafkaListenerEndpointRegistry(); + // When & Then + assertThatIllegalArgumentException() + .isThrownBy(() -> registry.getListenerContainersMatching((BiPredicate) null)) + .withMessage("'idAndContainerMatcher' cannot be null"); + } + + @DisplayName("getListenerContainersMatching should return unmodifiable list") + @Test + void testGetListenerContainersMatchingReturnsUnmodifiableList() { + // Given + KafkaListenerEndpointRegistry registry = new KafkaListenerEndpointRegistry(); + registerListenerWithId(registry, "foo"); + // When + Collection listeners = registry.getListenerContainersMatching(s -> true); + // Then + assertThatExceptionOfType(UnsupportedOperationException.class) + .isThrownBy(() -> listeners.add(mock(MessageListenerContainer.class))); + } + + @ParameterizedTest(name = "getListenerContainersMatching({0}, {1}) = {2}") + @MethodSource("paramsForGetListenerContainersMatching") + void getListenerContainersMatching(List names, Predicate idMatcher, int expectedCount) { + // Given + KafkaListenerEndpointRegistry registry = new KafkaListenerEndpointRegistry(); + registerWithListenerIds(registry, names); + // When + Collection listeners = registry.getListenerContainersMatching(idMatcher); + // Then + assertThat(listeners).hasSize(expectedCount); + } + + /** + * Provides parameters for the getListenerContainersMatching test. + * Each set of parameters includes a list of names, a predicate, and the expected count of matching containers. 
+ */ + private static Stream paramsForGetListenerContainersMatching() { + List names = List.of("foo", "bar", "baz"); + return Stream.of( + // Case : Two names start with "b" + Arguments.of(names, (Predicate) id -> id.startsWith("b"), 2), + // Case : One name starts with "f" + Arguments.of(names, (Predicate) id -> id.startsWith("f"), 1), + // Case : Empty list + Arguments.of(new ArrayList<>(), (Predicate) id -> id.startsWith("b"), 0), + // Case : All names match as the predicate always returns true + Arguments.of(names, (Predicate) id -> true, 3), + // Case : No names match as the predicate always returns false + Arguments.of(names, (Predicate) id -> false, 0) + ); + } + + @ParameterizedTest(name = "getListenerContainersMatching with BiPredicate for {0}, expecting {2} matches") + @MethodSource("paramsForGetListenerContainersMatchingBiPredicate") + void getListenerContainersMatchingBiPredicate(List names, BiPredicate idAndContainerMatcher, int expectedCount) { + // Given + KafkaListenerEndpointRegistry registry = new KafkaListenerEndpointRegistry(); + registerWithListenerIds(registry, names); + // When + Collection listeners = registry.getListenerContainersMatching(idAndContainerMatcher); + // Then + assertThat(listeners).hasSize(expectedCount); + } + + @Test + void verifyUnregisteredListenerContainer() { + KafkaListenerEndpointRegistry registry = new KafkaListenerEndpointRegistry(); + GenericApplicationContext applicationContext = new GenericApplicationContext(); + ConcurrentMessageListenerContainer listenerContainerMock = mock(ConcurrentMessageListenerContainer.class); + given(listenerContainerMock.getListenerId()).willReturn("testListenerContainer"); + applicationContext.registerBean(ConcurrentMessageListenerContainer.class, () -> listenerContainerMock); + applicationContext.refresh(); + registry.setApplicationContext(applicationContext); + // Lazy-load from application context + assertThat(registry.getUnregisteredListenerContainer("testListenerContainer")).isNotNull(); + // From internal map + assertThat(registry.getUnregisteredListenerContainer("testListenerContainer")).isNotNull(); + } + + /** + * Provides parameters for the getListenerContainersMatchingBiPredicate test. + * Each set of parameters includes a list of names, a bi-predicate, and the expected count of matching containers. 
+ */ + private static Stream paramsForGetListenerContainersMatchingBiPredicate() { + List names = List.of("foo", "bar", "baz"); + return Stream.of( + // Case : Filters for names starting with "b" and containers that are "running" + Arguments.of(names, + (BiPredicate) (id, container) -> id.startsWith("b") && container.isRunning(), 2), + // Case : Filters for names starting with "f" and containers that are "running" + Arguments.of(names, + (BiPredicate) (id, container) -> id.startsWith("f") && container.isRunning(), 1), + // Case : Filters in an empty list of names + Arguments.of(new ArrayList<>(), + (BiPredicate) (id, container) -> id.startsWith("b") && container.isRunning(), 0), + // Case : Filters where all containers are considered "running" + Arguments.of(names, + (BiPredicate) (id, container) -> container.isRunning(), 3), + // Case : Filters where no containers are considered "running" + Arguments.of(names, + (BiPredicate) (id, container) -> !container.isRunning(), 0) + ); + } + + private static void registerWithListenerIds(KafkaListenerEndpointRegistry registry, List names) { + names.forEach(name -> registerListenerWithId(registry, name)); + } + + private static void registerListenerWithId(KafkaListenerEndpointRegistry registry, String id) { + KafkaListenerEndpoint endpoint = mock(KafkaListenerEndpoint.class); + @SuppressWarnings("unchecked") + KafkaListenerContainerFactory factory = mock(KafkaListenerContainerFactory.class); + given(endpoint.getId()).willReturn(id); + MessageListenerContainer container = mock(MessageListenerContainer.class); + given(container.isRunning()).willReturn(true); + given(factory.createListenerContainer(endpoint)).willReturn(container); + registry.registerListenerContainer(endpoint, factory); + } + } diff --git a/spring-kafka/src/test/java/org/springframework/kafka/config/KafkaStreamsCustomizerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/config/KafkaStreamsCustomizerTests.java index d3b02aea2c..18af1eaf27 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/config/KafkaStreamsCustomizerTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/config/KafkaStreamsCustomizerTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2023 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
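The parameterized tests above pin down the contract of the new KafkaListenerEndpointRegistry.getListenerContainersMatching(...): it accepts either a Predicate over the listener id or a BiPredicate over the id and the container, rejects null matchers, and returns an unmodifiable collection. A short usage sketch; the id prefix and the pause action are purely illustrative:

import java.util.Collection;

import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
import org.springframework.kafka.listener.MessageListenerContainer;

public final class ListenerMatchingSketch {

	// Pause every running listener whose id starts with the given prefix.
	static void pauseRunning(KafkaListenerEndpointRegistry registry, String prefix) {
		Collection<MessageListenerContainer> matched = registry
				.getListenerContainersMatching((id, container) -> id.startsWith(prefix) && container.isRunning());
		matched.forEach(MessageListenerContainer::pause);
	}

}

Relatedly, verifyUnregisteredListenerContainer shows that containers declared as plain beans, rather than created through the registry, can now be looked up lazily with getUnregisteredListenerContainer(id).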
@@ -16,16 +16,17 @@ package org.springframework.kafka.config; -import static org.assertj.core.api.Assertions.assertThat; - import java.util.Collections; -import java.util.HashMap; import java.util.Map; import java.util.Properties; import java.util.concurrent.atomic.AtomicBoolean; +import io.micrometer.core.instrument.ImmutableTag; +import io.micrometer.core.instrument.MeterRegistry; +import io.micrometer.core.instrument.simple.SimpleMeterRegistry; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.streams.KafkaClientSupplier; import org.apache.kafka.streams.KafkaStreams; import org.apache.kafka.streams.StreamsBuilder; import org.apache.kafka.streams.StreamsConfig; @@ -48,16 +49,18 @@ import org.springframework.kafka.streams.KafkaStreamsMicrometerListener; import org.springframework.kafka.test.EmbeddedKafkaBroker; import org.springframework.kafka.test.context.EmbeddedKafka; +import org.springframework.kafka.test.utils.KafkaTestUtils; +import org.springframework.scheduling.concurrent.SimpleAsyncTaskScheduler; import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; -import io.micrometer.core.instrument.ImmutableTag; -import io.micrometer.core.instrument.MeterRegistry; -import io.micrometer.core.instrument.simple.SimpleMeterRegistry; +import static org.assertj.core.api.Assertions.assertThat; /** * @author Nurettin Yilmaz * @author Artem Bilan + * @author Almog Gavra + * @author Sanghyeok An * * @since 2.1.5 */ @@ -88,13 +91,14 @@ public void testKafkaStreamsCustomizer(@Autowired KafkaStreamsConfiguration conf assertThat(STATE_LISTENER.getCurrentState()).isEqualTo(state); Properties properties = configuration.asProperties(); assertThat(properties.get(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG)) - .isEqualTo(Collections.singletonList(config.broker.getBrokersAsString())); + .isEqualTo(config.broker.getBrokersAsString()); assertThat(properties.get(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG)) .isEqualTo(Foo.class); assertThat(properties.get(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG)) .isEqualTo(1000); assertThat(this.config.builderConfigured.get()).isTrue(); assertThat(this.config.topologyConfigured.get()).isTrue(); + assertThat(this.config.ksInitialized.get()).isTrue(); assertThat(this.meterRegistry.get("kafka.consumer.coordinator.join.total") .tag("customTag", "stream") .tag("spring.id", KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_BUILDER_BEAN_NAME) @@ -118,6 +122,8 @@ public static class KafkaStreamsConfig { final AtomicBoolean topologyConfigured = new AtomicBoolean(); + final AtomicBoolean ksInitialized = new AtomicBoolean(); + @Autowired EmbeddedKafkaBroker broker; @@ -151,24 +157,42 @@ public void configureTopology(Topology topology) { }); streamsBuilderFactoryBean.addListener(new KafkaStreamsMicrometerListener(meterRegistry(), - Collections.singletonList(new ImmutableTag("customTag", "stream")))); + Collections.singletonList(new ImmutableTag("customTag", "stream")), + new SimpleAsyncTaskScheduler())); return streamsBuilderFactoryBean; } @SuppressWarnings("deprecation") @Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME) public KafkaStreamsConfiguration kStreamsConfigs() { - Map props = new HashMap<>(); - props.put(StreamsConfig.APPLICATION_ID_CONFIG, APPLICATION_ID); - props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, - 
Collections.singletonList(this.broker.getBrokersAsString())); + Map props = + KafkaTestUtils.streamsProps(APPLICATION_ID, this.broker.getBrokersAsString()); props.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG, Foo.class); props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 1000); return new KafkaStreamsConfiguration(props); } private KafkaStreamsCustomizer customizer() { - return kafkaStreams -> kafkaStreams.setStateListener(STATE_LISTENER); + return new KafkaStreamsCustomizer() { + @Override + public KafkaStreams initKafkaStreams( + final Topology topology, + final Properties properties, + final KafkaClientSupplier clientSupplier + ) { + ksInitialized.set(true); + return KafkaStreamsCustomizer.super.initKafkaStreams( + topology, + properties, + clientSupplier + ); + } + + @Override + public void customize(final KafkaStreams kafkaStreams) { + kafkaStreams.setStateListener(STATE_LISTENER); + } + }; } @Bean diff --git a/spring-kafka/src/test/java/org/springframework/kafka/config/RecordMessagingMessageListenerAdapterTests.java b/spring-kafka/src/test/java/org/springframework/kafka/config/RecordMessagingMessageListenerAdapterTests.java index 0ba7308bdd..4dbed481ca 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/config/RecordMessagingMessageListenerAdapterTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/config/RecordMessagingMessageListenerAdapterTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2020-2021 the original author or authors. + * Copyright 2020-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,8 +16,6 @@ package org.springframework.kafka.config; -import static org.assertj.core.api.Assertions.assertThat; - import java.lang.reflect.Method; import java.util.ArrayList; import java.util.List; @@ -35,6 +33,8 @@ import org.springframework.messaging.handler.annotation.support.DefaultMessageHandlerMethodFactory; import org.springframework.messaging.handler.annotation.support.MessageHandlerMethodFactory; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Gary Russell * @since 2.5 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/config/StreamsBuilderFactoryBeanInMemoryStateStoreTests.java b/spring-kafka/src/test/java/org/springframework/kafka/config/StreamsBuilderFactoryBeanInMemoryStateStoreTests.java new file mode 100644 index 0000000000..a2e8163edb --- /dev/null +++ b/spring-kafka/src/test/java/org/springframework/kafka/config/StreamsBuilderFactoryBeanInMemoryStateStoreTests.java @@ -0,0 +1,99 @@ +/* + * Copyright 2024 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.springframework.kafka.config; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.Map; + +import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.common.utils.Bytes; +import org.apache.kafka.streams.StreamsBuilder; +import org.apache.kafka.streams.StreamsConfig; +import org.apache.kafka.streams.kstream.KStream; +import org.apache.kafka.streams.kstream.KTable; +import org.apache.kafka.streams.kstream.Materialized; +import org.apache.kafka.streams.state.BuiltInDslStoreSuppliers; +import org.apache.kafka.streams.state.KeyValueStore; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Value; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.kafka.annotation.EnableKafkaStreams; +import org.springframework.kafka.annotation.KafkaStreamsDefaultConfiguration; +import org.springframework.kafka.test.EmbeddedKafkaBroker; +import org.springframework.kafka.test.context.EmbeddedKafka; +import org.springframework.test.annotation.DirtiesContext; +import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * @author Cédric Schaller + * @author Soby Chacko + */ +@SpringJUnitConfig +@DirtiesContext +@EmbeddedKafka +public class StreamsBuilderFactoryBeanInMemoryStateStoreTests { + + private static Path stateStoreDir; + + @BeforeAll + static void beforeAll() throws IOException { + stateStoreDir = Files.createTempDirectory(StreamsBuilderFactoryBeanInMemoryStateStoreTests.class.getSimpleName()); + } + + @Test + void testStateStoreIsInMemory() { + // Testing that an in-memory state store is used requires accessing the internal state of StreamsBuilder via reflection + // Therefore, we check the non-existence of RocksDB files instead + assertThat(stateStoreDir).isEmptyDirectory(); + } + + @Configuration + @EnableKafkaStreams + static class KafkaStreamsConfig { + + @Value("${" + EmbeddedKafkaBroker.SPRING_EMBEDDED_KAFKA_BROKERS + "}") + private String brokerAddresses; + + @Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME) + public KafkaStreamsConfiguration kStreamsConfigWithInMemoryStateStores() { + Map<String, Object> props = new HashMap<>(); + props.put(StreamsConfig.APPLICATION_ID_CONFIG, "should-be-stored-in-memory"); + props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, this.brokerAddresses); + props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.StringSerde.class); + props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.StringSerde.class); + props.put(StreamsConfig.STATE_DIR_CONFIG, stateStoreDir.toString()); + return new KafkaStreamsConfiguration(props); + } + + @Bean + public KTable<String, Long> table(StreamsBuilder builder) { + KStream<String, String> stream = builder.stream("source-topic"); + return stream + .groupByKey() + .count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("store") + .withStoreType(BuiltInDslStoreSuppliers.IN_MEMORY)); + } + } +} diff --git a/spring-kafka/src/test/java/org/springframework/kafka/config/StreamsBuilderFactoryBeanTests.java b/spring-kafka/src/test/java/org/springframework/kafka/config/StreamsBuilderFactoryBeanTests.java index 9da17583e5..a0fd441738 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/config/StreamsBuilderFactoryBeanTests.java +++
b/spring-kafka/src/test/java/org/springframework/kafka/config/StreamsBuilderFactoryBeanTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2020 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,10 +16,6 @@ package org.springframework.kafka.config; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; - import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; @@ -50,12 +46,17 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; + /** * @author Pawel Szymczyk * @author Artem Bilan * @author Gary Russell * @author Denis Washington * @author Soby Chacko + * @author Sanghyeok An */ @SpringJUnitConfig @DirtiesContext @@ -102,12 +103,34 @@ protected StreamsBuilder createInstance() { streamsBuilderFactoryBean.afterPropertiesSet(); StreamsBuilder builder = streamsBuilderFactoryBean.getObject(); builder.stream(Pattern.compile("foo")); + streamsBuilderFactoryBean.afterSingletonsInstantiated(); streamsBuilderFactoryBean.start(); StreamsBuilder streamsBuilder = streamsBuilderFactoryBean.getObject(); verify(streamsBuilder).build(kafkaStreamsConfiguration.asProperties()); assertThat(streamsBuilderFactoryBean.getTopology()).isNotNull(); } + @Test + public void testGetTopologyBeforeKafkaStreamsStart() throws Exception { + // Given + streamsBuilderFactoryBean = new StreamsBuilderFactoryBean(kafkaStreamsConfiguration) { + @Override + protected StreamsBuilder createInstance() { + return spy(super.createInstance()); + } + }; + streamsBuilderFactoryBean.afterPropertiesSet(); + StreamsBuilder builder = streamsBuilderFactoryBean.getObject(); + builder.stream(Pattern.compile("test-topic")); + + // When + streamsBuilderFactoryBean.afterSingletonsInstantiated(); + + // Then + assertThat(streamsBuilderFactoryBean.getTopology()).isNotNull(); + assertThat(streamsBuilderFactoryBean.isRunning()).isFalse(); + } + @Configuration @EnableKafkaStreams public static class KafkaStreamsConfig { diff --git a/spring-kafka/src/test/java/org/springframework/kafka/config/StreamsBuilderFactoryLateConfigTests.java b/spring-kafka/src/test/java/org/springframework/kafka/config/StreamsBuilderFactoryLateConfigTests.java index 9abc3f3321..241ef161f8 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/config/StreamsBuilderFactoryLateConfigTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/config/StreamsBuilderFactoryLateConfigTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2020 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
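Two Streams-side behaviors above are worth calling out. First, the anonymous customizer in KafkaStreamsCustomizerTests uses the new KafkaStreamsCustomizer.initKafkaStreams(Topology, Properties, KafkaClientSupplier) default method, which lets a customizer take over construction of the KafkaStreams instance instead of only mutating it afterwards (the test records that the hook ran and delegates to the default). A sketch of a standalone customizer built on that hook; the state-listener body is illustrative:

import java.util.Properties;

import org.apache.kafka.streams.KafkaClientSupplier;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.Topology;

import org.springframework.kafka.config.KafkaStreamsCustomizer;

public class CreatingCustomizer implements KafkaStreamsCustomizer {

	@Override
	public KafkaStreams initKafkaStreams(Topology topology, Properties properties,
			KafkaClientSupplier clientSupplier) {
		// Construct the client yourself here (e.g. a subclass); delegating to
		// KafkaStreamsCustomizer.super.initKafkaStreams(...) keeps the standard behavior.
		return new KafkaStreams(topology, properties, clientSupplier);
	}

	@Override
	public void customize(KafkaStreams kafkaStreams) {
		kafkaStreams.setStateListener((newState, oldState) -> { });
	}

}

Second, testGetTopologyBeforeKafkaStreamsStart relies on StreamsBuilderFactoryBean building its Topology during afterSingletonsInstantiated(), so getTopology() already returns a non-null, inspectable topology while isRunning() is still false, that is, before the KafkaStreams client starts.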
@@ -16,13 +16,9 @@ package org.springframework.kafka.config; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatExceptionOfType; -import static org.assertj.core.api.Assertions.assertThatIllegalStateException; - import java.util.Properties; -import java.util.regex.Pattern; +import org.apache.kafka.streams.StreamsBuilder; import org.apache.kafka.streams.StreamsConfig; import org.junit.jupiter.api.Test; @@ -39,10 +35,15 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assertions.assertThatIllegalStateException; + /** * @author Soby Chacko * @author Artem Bilan * @author Gary Russell + * @author Sanghyeok An */ @SpringJUnitConfig @DirtiesContext @@ -72,11 +73,12 @@ public void testStreamBuilderFactoryCannotBeInstantiatedWhenAutoStart() { @Test public void testStreamsBuilderFactoryWithConfigProvidedLater() throws Exception { + boolean isAutoStartUp = true; Properties props = new Properties(); props.put(StreamsConfig.APPLICATION_ID_CONFIG, APPLICATION_ID); props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, this.brokerAddresses); streamsBuilderFactoryBean.setStreamsConfiguration(props); - streamsBuilderFactoryBean.getObject().stream(Pattern.compile("foo")); + streamsBuilderFactoryBean.setAutoStartup(isAutoStartUp); assertThat(streamsBuilderFactoryBean.isRunning()).isFalse(); streamsBuilderFactoryBean.start(); @@ -95,6 +97,23 @@ public StreamsBuilderFactoryBean defaultKafkaStreamsBuilder() { return streamsBuilderFactoryBean; } + @Bean + public KafkaStreamsService kafkaStreamsService(StreamsBuilder streamsBuilder) { + return new KafkaStreamsService(streamsBuilder); + } + } + static class KafkaStreamsService { + private final StreamsBuilder streamsBuilder; + + KafkaStreamsService(StreamsBuilder streamsBuilder) { + this.streamsBuilder = streamsBuilder; + buildPipeline(); + } + + void buildPipeline() { + this.streamsBuilder.stream("test-topic"); + } + } } diff --git a/spring-kafka/src/test/java/org/springframework/kafka/core/DefaultKafkaConsumerFactoryTests.java b/spring-kafka/src/test/java/org/springframework/kafka/core/DefaultKafkaConsumerFactoryTests.java index f361973b77..cb6aede96d 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/core/DefaultKafkaConsumerFactoryTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/core/DefaultKafkaConsumerFactoryTests.java @@ -16,10 +16,7 @@ package org.springframework.kafka.core; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; - +import java.time.Duration; import java.util.AbstractMap; import java.util.ArrayList; import java.util.Collections; @@ -40,10 +37,14 @@ import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.serialization.StringDeserializer; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import org.springframework.aop.framework.ProxyFactory; import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.context.ApplicationContext; import org.springframework.context.annotation.Configuration; +import org.springframework.core.env.Environment; import 
org.springframework.kafka.core.ConsumerFactory.Listener; import org.springframework.kafka.listener.ContainerProperties; import org.springframework.kafka.listener.KafkaMessageListenerContainer; @@ -56,10 +57,18 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + /** * @author Gary Russell * @author Chris Gilbert * @author Artem Bilan + * @author Adrian Gygax + * @author Soby Chacko + * @author Yaniv Nahoum * * @since 1.0.6 */ @@ -113,13 +122,13 @@ public void testBootstrapServersSupplier() { DefaultKafkaConsumerFactory target = new DefaultKafkaConsumerFactory(originalConfig) { - @Override protected Consumer createRawConsumer(Map configProps) { configPassedToKafkaConsumer.putAll(configProps); return null; } }; + target.setApplicationContext(createApplicationContextWithApplicationName()); target.setBootstrapServersSupplier(() -> "foo"); target.createConsumer(null, null, null, null); assertThat(configPassedToKafkaConsumer.get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG)).isEqualTo("foo"); @@ -143,6 +152,7 @@ protected KafkaConsumer createKafkaConsumer(Map return null; } }; + target.setApplicationContext(createApplicationContextWithApplicationName()); target.createConsumer(null, null, null, overrides); assertThat(configPassedToKafkaConsumer.get("config1")).isEqualTo("overridden"); assertThat(configPassedToKafkaConsumer.get("config2")).isSameAs(originalConfig.get("config2")); @@ -165,6 +175,7 @@ protected KafkaConsumer createKafkaConsumer(Map return null; } }; + target.setApplicationContext(createApplicationContextWithApplicationName()); target.createConsumer(null, null, "-1", null); assertThat(configPassedToKafkaConsumer.get(ConsumerConfig.CLIENT_ID_CONFIG)).isEqualTo("original-1"); } @@ -198,6 +209,7 @@ protected KafkaConsumer createKafkaConsumer(Map return null; } }; + target.setApplicationContext(createApplicationContextWithApplicationName()); target.createConsumer(null, "overridden", null, null); assertThat(configPassedToKafkaConsumer.get(ConsumerConfig.CLIENT_ID_CONFIG)).isEqualTo("overridden"); } @@ -214,6 +226,7 @@ protected KafkaConsumer createKafkaConsumer(Map return null; } }; + target.setApplicationContext(createApplicationContextWithApplicationName()); target.createConsumer(null, "overridden", null, null); assertThat(configPassedToKafkaConsumer.get(ConsumerConfig.CLIENT_ID_CONFIG)).isEqualTo("overridden"); } @@ -231,6 +244,7 @@ protected KafkaConsumer createKafkaConsumer(Map return null; } }; + target.setApplicationContext(createApplicationContextWithApplicationName()); target.createConsumer(null, "overridden", "-1", null); assertThat(configPassedToKafkaConsumer.get(ConsumerConfig.CLIENT_ID_CONFIG)).isEqualTo("overridden-1"); } @@ -250,10 +264,27 @@ protected KafkaConsumer createKafkaConsumer(Map return null; } }; + target.setApplicationContext(createApplicationContextWithApplicationName()); target.createConsumer(null, "overridden", "-1", overrides); assertThat(configPassedToKafkaConsumer.get(ConsumerConfig.CLIENT_ID_CONFIG)).isEqualTo("overridden-1"); } + @Test + public void testApplicationNameIfNoGroupIdAsClientIdWhenCreatingConsumer() { + final Map configPassedToKafkaConsumer = new HashMap<>(); + DefaultKafkaConsumerFactory target = + new DefaultKafkaConsumerFactory(Map.of()) { + + @Override + protected 
KafkaConsumer createKafkaConsumer(Map configProps) { + configPassedToKafkaConsumer.putAll(configProps); + return null; + } + }; + target.setApplicationContext(createApplicationContextWithApplicationName()); + target.createConsumer(null, null, "-1", null); + assertThat(configPassedToKafkaConsumer.get(ConsumerConfig.CLIENT_ID_CONFIG)).isEqualTo("appname-consumer-1"); + } @Test public void testOverriddenGroupIdWhenCreatingConsumer() { @@ -355,7 +386,7 @@ public void testNestedTxProducerIsCached() throws Exception { latch.countDown(); }); KafkaTransactionManager tm = new KafkaTransactionManager<>(pfTx); - containerProps.setTransactionManager(tm); + containerProps.setKafkaAwareTransactionManager(tm); KafkaMessageListenerContainer container = new KafkaMessageListenerContainer<>(cf, containerProps); container.start(); @@ -406,7 +437,7 @@ public void testNestedTxProducerIsFixed() throws Exception { latch.countDown(); }); KafkaTransactionManager tm = new KafkaTransactionManager<>(pfTx); - containerProps.setTransactionManager(tm); + containerProps.setKafkaAwareTransactionManager(tm); KafkaMessageListenerContainer container = new KafkaMessageListenerContainer<>(cf, containerProps); container.start(); @@ -430,8 +461,9 @@ public void testNestedTxProducerIsFixed() throws Exception { } @SuppressWarnings({ "rawtypes", "unchecked" }) - @Test - void listener() { + @ParameterizedTest + @ValueSource(booleans = { true, false }) + void listener(boolean closeWithTimeout) { Map consumerConfig = KafkaTestUtils.consumerProps("txCache1Group", "false", this.embeddedKafka); consumerConfig.put(ConsumerConfig.CLIENT_ID_CONFIG, "foo-0"); DefaultKafkaConsumerFactory cf = new DefaultKafkaConsumerFactory(consumerConfig); @@ -456,8 +488,13 @@ public void consumerRemoved(String id, Consumer consumer) { Consumer consumer = cf.createConsumer(); assertThat(adds).hasSize(1); assertThat(adds.get(0)).isEqualTo("cf.foo-0"); - assertThat(removals).hasSize(0); - consumer.close(); + assertThat(removals).isEmpty(); + if (closeWithTimeout) { + consumer.close(Duration.ofSeconds(10)); + } + else { + consumer.close(); + } assertThat(removals).hasSize(1); } @@ -466,16 +503,25 @@ public void consumerRemoved(String id, Consumer consumer) { void configDeserializer() { Deserializer key = mock(Deserializer.class); Deserializer value = mock(Deserializer.class); - Map config = new HashMap<>(); + Map config = KafkaTestUtils.consumerProps("mockGroup", "false", this.embeddedKafka); DefaultKafkaConsumerFactory cf = new DefaultKafkaConsumerFactory(config, key, value); Deserializer keyDeserializer = cf.getKeyDeserializer(); assertThat(keyDeserializer).isSameAs(key); + cf.createKafkaConsumer(config); verify(key).configure(config, true); Deserializer valueDeserializer = cf.getValueDeserializer(); assertThat(valueDeserializer).isSameAs(value); verify(value).configure(config, false); } + private static ApplicationContext createApplicationContextWithApplicationName() { + final Environment environment = mock(Environment.class); + given(environment.getProperty("spring.application.name")).willReturn("appname"); + final ApplicationContext applicationContext = mock(ApplicationContext.class); + given(applicationContext.getEnvironment()).willReturn(environment); + return applicationContext; + } + @Configuration public static class Config { diff --git a/spring-kafka/src/test/java/org/springframework/kafka/core/DefaultKafkaProducerFactoryTests.java b/spring-kafka/src/test/java/org/springframework/kafka/core/DefaultKafkaProducerFactoryTests.java index 
95b41e085c..144d42357a 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/core/DefaultKafkaProducerFactoryTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/core/DefaultKafkaProducerFactoryTests.java @@ -16,20 +16,6 @@ package org.springframework.kafka.core; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatCode; -import static org.assertj.core.api.Assertions.assertThatExceptionOfType; -import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.isNull; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - import java.time.Duration; import java.util.ArrayList; import java.util.Collections; @@ -55,16 +41,32 @@ import org.springframework.context.ApplicationContext; import org.springframework.context.event.ContextStoppedEvent; +import org.springframework.core.env.Environment; import org.springframework.kafka.core.ProducerFactory.Listener; import org.springframework.kafka.test.utils.KafkaTestUtils; import org.springframework.kafka.transaction.KafkaTransactionManager; import org.springframework.transaction.CannotCreateTransactionException; import org.springframework.transaction.support.TransactionTemplate; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatCode; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.isNull; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + /** * @author Gary Russell - * @since 1.3.5 + * @author Adrian Gygax * + * @since 1.3.5 */ public class DefaultKafkaProducerFactoryTests { @@ -704,7 +706,6 @@ protected Producer createRawProducer(Map configs) { assertThat(configPassedToKafkaConsumer.get(ProducerConfig.TRANSACTIONAL_ID_CONFIG)).isEqualTo("tx.1"); } - @SuppressWarnings({ "rawtypes", "unchecked" }) @Test void configUpdates() { @@ -778,4 +779,26 @@ protected Producer createRawProducer(Map rawConf assertThat(producerConfigs).containsEntry("linger.ms", 200); } + @Test + void testDefaultClientIdPrefixIsSpringBootApplicationName() { + final DefaultKafkaProducerFactory pf = new DefaultKafkaProducerFactory<>(Map.of()); + final Environment environment = mock(Environment.class); + given(environment.getProperty("spring.application.name")).willReturn("appname"); + final ApplicationContext applicationContext = mock(ApplicationContext.class); + given(applicationContext.getEnvironment()).willReturn(environment); + pf.setApplicationContext(applicationContext); + assertThat(pf.getProducerConfigs()).containsEntry(ProducerConfig.CLIENT_ID_CONFIG, "appname-producer-1"); + } + + @Test + void testExplicitClientIdPrefixOverridesDefault() { + final DefaultKafkaProducerFactory pf = new 
DefaultKafkaProducerFactory<>(Map.of(ProducerConfig.CLIENT_ID_CONFIG, "clientId")); + final Environment environment = mock(Environment.class); + given(environment.getProperty("spring.application.name")).willReturn("appname"); + final ApplicationContext applicationContext = mock(ApplicationContext.class); + given(applicationContext.getEnvironment()).willReturn(environment); + pf.setApplicationContext(applicationContext); + assertThat(pf.getProducerConfigs()).containsEntry(ProducerConfig.CLIENT_ID_CONFIG, "clientId-1"); + } + } diff --git a/spring-kafka/src/test/java/org/springframework/kafka/core/DefaultTransactionIdSuffixStrategyTests.java b/spring-kafka/src/test/java/org/springframework/kafka/core/DefaultTransactionIdSuffixStrategyTests.java index 1f8db2a18e..8e928b93ec 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/core/DefaultTransactionIdSuffixStrategyTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/core/DefaultTransactionIdSuffixStrategyTests.java @@ -16,10 +16,6 @@ package org.springframework.kafka.core; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatExceptionOfType; -import static org.assertj.core.api.Assertions.assertThatNoException; - import java.util.Map; import java.util.Queue; @@ -27,6 +23,10 @@ import org.springframework.kafka.test.utils.KafkaTestUtils; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assertions.assertThatNoException; + /** * @author Ilya Starchenko * @@ -50,7 +50,6 @@ void acquireSuffixWithCache() { assertThat(suffix).isNotNull(); } - @Test void acquireSuffixWithCacheExhausted() { String txIdPrefix = "txIdPrefix"; diff --git a/spring-kafka/src/test/java/org/springframework/kafka/core/KafkaAdminBadContextTests.java b/spring-kafka/src/test/java/org/springframework/kafka/core/KafkaAdminBadContextTests.java index bdf18ceb87..43642878ca 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/core/KafkaAdminBadContextTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/core/KafkaAdminBadContextTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2023 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,8 +16,6 @@ package org.springframework.kafka.core; -import static org.assertj.core.api.Assertions.assertThatIllegalStateException; - import java.util.HashMap; import java.util.Map; @@ -31,6 +29,8 @@ import org.springframework.kafka.test.EmbeddedKafkaBroker; import org.springframework.kafka.test.EmbeddedKafkaZKBroker; +import static org.assertj.core.api.Assertions.assertThatIllegalStateException; + /** * @author Gary Russell * @since 1.3 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/core/KafkaAdminTests.java b/spring-kafka/src/test/java/org/springframework/kafka/core/KafkaAdminTests.java index cb8a3885de..9322e30f59 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/core/KafkaAdminTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/core/KafkaAdminTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2023 the original author or authors. + * Copyright 2017-2024 the original author or authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,12 +16,6 @@ package org.springframework.kafka.core; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatIllegalStateException; -import static org.awaitility.Awaitility.await; -import static org.mockito.BDDMockito.given; -import static org.mockito.Mockito.mock; - import java.lang.reflect.Method; import java.util.Arrays; import java.util.Collections; @@ -34,6 +28,7 @@ import java.util.concurrent.atomic.AtomicReference; import org.apache.kafka.clients.CommonClientConfigs; +import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.clients.admin.AdminClient; import org.apache.kafka.clients.admin.AdminClientConfig; import org.apache.kafka.clients.admin.AlterConfigOp; @@ -54,8 +49,10 @@ import org.springframework.beans.DirectFieldAccessor; import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.context.ApplicationContext; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; +import org.springframework.core.env.Environment; import org.springframework.kafka.config.TopicBuilder; import org.springframework.kafka.core.KafkaAdmin.NewTopics; import org.springframework.kafka.test.EmbeddedKafkaBroker; @@ -64,8 +61,17 @@ import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; import org.springframework.util.ReflectionUtils; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatIllegalStateException; +import static org.awaitility.Awaitility.await; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell + * @author Adrian Gygax + * @author Anders Swanson + * * @since 1.3 */ @SpringJUnitConfig @@ -282,7 +288,7 @@ void nullClusterId() { KafkaAdmin admin = new KafkaAdmin(Map.of()) { @Override - AdminClient createAdmin() { + protected Admin createAdmin() { return mock; } @@ -290,6 +296,31 @@ AdminClient createAdmin() { assertThat(admin.clusterId()).isEqualTo("null"); } + @Test + void getAdminConfigWithNoClientId() { + KafkaAdmin kafkaAdmin = new KafkaAdmin(Map.of()); + assertThat(kafkaAdmin.getAdminConfig()).isEmpty(); + } + + @Test + void getAdminConfigWithExplicitClientId() { + Map config = Map.of(AdminClientConfig.CLIENT_ID_CONFIG, "admin"); + KafkaAdmin kafkaAdmin = new KafkaAdmin(config); + assertThat(kafkaAdmin.getAdminConfig()).containsExactlyInAnyOrderEntriesOf(config); + } + + @Test + void getAdminConfigWithApplicationNameAsClientId() { + Map config = Map.of(); + KafkaAdmin kafkaAdmin = new KafkaAdmin(config); + final Environment environment = mock(Environment.class); + given(environment.getProperty("spring.application.name")).willReturn("appname"); + final ApplicationContext applicationContext = mock(ApplicationContext.class); + given(applicationContext.getEnvironment()).willReturn(environment); + kafkaAdmin.setApplicationContext(applicationContext); + assertThat(kafkaAdmin.getAdminConfig()).containsOnly(Map.entry(AdminClientConfig.CLIENT_ID_CONFIG, "appname-admin-0")); + } + @Configuration public static class Config { diff --git a/spring-kafka/src/test/java/org/springframework/kafka/core/KafkaTemplateTests.java b/spring-kafka/src/test/java/org/springframework/kafka/core/KafkaTemplateTests.java index 98ab9c3c01..2853eeef1d 100644 --- 
a/spring-kafka/src/test/java/org/springframework/kafka/core/KafkaTemplateTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/core/KafkaTemplateTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2016-2023 the original author or authors. + * Copyright 2016-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,24 +16,6 @@ package org.springframework.kafka.core; -import static org.assertj.core.api.Assertions.allOf; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatExceptionOfType; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoInteractions; -import static org.springframework.kafka.test.assertj.KafkaConditions.key; -import static org.springframework.kafka.test.assertj.KafkaConditions.keyValue; -import static org.springframework.kafka.test.assertj.KafkaConditions.partition; -import static org.springframework.kafka.test.assertj.KafkaConditions.timestamp; -import static org.springframework.kafka.test.assertj.KafkaConditions.value; - import java.time.Duration; import java.util.ArrayList; import java.util.Collections; @@ -97,6 +79,24 @@ import org.springframework.messaging.Message; import org.springframework.messaging.support.MessageBuilder; +import static org.assertj.core.api.Assertions.allOf; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.springframework.kafka.test.assertj.KafkaConditions.key; +import static org.springframework.kafka.test.assertj.KafkaConditions.keyValue; +import static org.springframework.kafka.test.assertj.KafkaConditions.partition; +import static org.springframework.kafka.test.assertj.KafkaConditions.timestamp; +import static org.springframework.kafka.test.assertj.KafkaConditions.value; + /** * @author Gary Russell * @author Artem Bilan @@ -132,7 +132,6 @@ public void producerRemoved(String id, Producer producer) { private static final ProducerPostProcessor noopProducerPostProcessor = processor -> processor; - @BeforeAll public static void setUp() { embeddedKafka = EmbeddedKafkaCondition.getBroker(); @@ -349,6 +348,7 @@ public void onError(ProducerRecord producerRecord, RecordMetada } } + PL pl1 = new PL(); PL pl2 = new PL(); CompositeProducerListener cpl = new CompositeProducerListener<>(new PL[]{ pl1, pl2 }); @@ -638,5 +638,4 @@ void testReceiveWhenOffsetIsInvalid(Long offset) { .withMessage("Offset supplied in TopicPartitionOffset is invalid: " + tpoWithNullOffset); } - } diff --git a/spring-kafka/src/test/java/org/springframework/kafka/core/KafkaTemplateTransactionTests.java 
b/spring-kafka/src/test/java/org/springframework/kafka/core/KafkaTemplateTransactionTests.java index 3db8bfa773..641ea558eb 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/core/KafkaTemplateTransactionTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/core/KafkaTemplateTransactionTests.java @@ -16,24 +16,6 @@ package org.springframework.kafka.core; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatExceptionOfType; -import static org.assertj.core.api.Assertions.assertThatIllegalStateException; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.isNull; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willThrow; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.springframework.kafka.test.assertj.KafkaConditions.key; -import static org.springframework.kafka.test.assertj.KafkaConditions.value; - import java.time.Duration; import java.util.Collections; import java.util.Iterator; @@ -85,6 +67,24 @@ import org.springframework.transaction.support.DefaultTransactionStatus; import org.springframework.transaction.support.TransactionTemplate; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assertions.assertThatIllegalStateException; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.isNull; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willThrow; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.springframework.kafka.test.assertj.KafkaConditions.key; +import static org.springframework.kafka.test.assertj.KafkaConditions.value; + /** * @author Gary Russell * @author Nakul Mishra diff --git a/spring-kafka/src/test/java/org/springframework/kafka/core/RoutingKafkaTemplateTests.java b/spring-kafka/src/test/java/org/springframework/kafka/core/RoutingKafkaTemplateTests.java index 608ae2fe68..1db2b0561f 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/core/RoutingKafkaTemplateTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/core/RoutingKafkaTemplateTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2020-2023 the original author or authors. + * Copyright 2020-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
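The client-id defaulting pinned down above in DefaultKafkaConsumerFactoryTests, DefaultKafkaProducerFactoryTests, and KafkaAdminTests follows one rule: when no client.id is configured, the factory derives a prefix from the spring.application.name environment property (yielding ids such as appname-consumer-1, appname-producer-1, or appname-admin-0), while an explicitly configured client.id still wins. A sketch under those assumptions; the bootstrap address is illustrative:

import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;

import org.springframework.context.ApplicationContext;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;

public final class ClientIdDefaultingSketch {

	static DefaultKafkaProducerFactory<String, String> producerFactory(ApplicationContext context) {
		DefaultKafkaProducerFactory<String, String> pf = new DefaultKafkaProducerFactory<>(Map.of(
				ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092",
				ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class,
				ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class));
		// With no client.id in the configs, producers created from this factory get ids
		// like "appname-producer-1", taken from spring.application.name in the Environment.
		pf.setApplicationContext(context);
		return pf;
	}

}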
@@ -16,14 +16,6 @@ package org.springframework.kafka.core; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.BDDMockito.given; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - import java.util.LinkedHashMap; import java.util.Map; import java.util.concurrent.CompletableFuture; @@ -34,6 +26,14 @@ import org.springframework.kafka.test.utils.KafkaTestUtils; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + /** * @author Gary Russell * @author Nathan Xu diff --git a/spring-kafka/src/test/java/org/springframework/kafka/core/TransactionSynchronizationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/core/TransactionSynchronizationTests.java index 2b0eaea06d..83dec1b12e 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/core/TransactionSynchronizationTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/core/TransactionSynchronizationTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2023 the original author or authors. + * Copyright 2023-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,12 +16,6 @@ package org.springframework.kafka.core; -import static org.assertj.core.api.Assertions.assertThatExceptionOfType; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.BDDMockito.given; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; - import org.apache.kafka.clients.producer.Producer; import org.junit.jupiter.api.Test; @@ -34,6 +28,12 @@ import org.springframework.transaction.support.TransactionSynchronizationManager; import org.springframework.transaction.support.TransactionTemplate; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + /** * @author Gary Russell * @since 2.9.7 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/core/reactive/ReactiveKafkaProducerTemplateIntegrationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/core/reactive/ReactiveKafkaProducerTemplateIntegrationTests.java index bd9f1e9896..abbd12fb0a 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/core/reactive/ReactiveKafkaProducerTemplateIntegrationTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/core/reactive/ReactiveKafkaProducerTemplateIntegrationTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2019-2023 the original author or authors. + * Copyright 2019-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,9 +16,6 @@ package org.springframework.kafka.core.reactive; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; - import java.time.Duration; import java.time.Instant; import java.util.AbstractMap.SimpleImmutableEntry; @@ -44,6 +41,16 @@ import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.reactivestreams.Subscription; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.kafka.receiver.ReceiverOptions; +import reactor.kafka.receiver.ReceiverRecord; +import reactor.kafka.sender.KafkaSender; +import reactor.kafka.sender.SenderOptions; +import reactor.kafka.sender.SenderRecord; +import reactor.kafka.sender.SenderResult; +import reactor.test.StepVerifier; +import reactor.util.function.Tuple2; import org.springframework.kafka.support.DefaultKafkaHeaderMapper; import org.springframework.kafka.support.KafkaHeaderMapper; @@ -56,16 +63,8 @@ import org.springframework.messaging.MessageHeaders; import org.springframework.messaging.support.MessageBuilder; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.kafka.receiver.ReceiverOptions; -import reactor.kafka.receiver.ReceiverRecord; -import reactor.kafka.sender.KafkaSender; -import reactor.kafka.sender.SenderOptions; -import reactor.kafka.sender.SenderRecord; -import reactor.kafka.sender.SenderResult; -import reactor.test.StepVerifier; -import reactor.util.function.Tuple2; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; /** * @author Mark Norkin diff --git a/spring-kafka/src/test/java/org/springframework/kafka/core/reactive/ReactiveKafkaProducerTemplateTransactionIntegrationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/core/reactive/ReactiveKafkaProducerTemplateTransactionIntegrationTests.java index 5d82eb01a1..48f30db38e 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/core/reactive/ReactiveKafkaProducerTemplateTransactionIntegrationTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/core/reactive/ReactiveKafkaProducerTemplateTransactionIntegrationTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2019-2023 the original author or authors. + * Copyright 2019-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,9 +16,6 @@ package org.springframework.kafka.core.reactive; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; - import java.time.Duration; import java.time.Instant; import java.util.ArrayList; @@ -40,17 +37,6 @@ import org.junit.jupiter.api.TestInfo; import org.reactivestreams.Publisher; import org.reactivestreams.Subscription; - -import org.springframework.core.log.LogAccessor; -import org.springframework.kafka.support.KafkaUtils; -import org.springframework.kafka.support.converter.MessagingMessageConverter; -import org.springframework.kafka.test.condition.EmbeddedKafkaCondition; -import org.springframework.kafka.test.condition.LogLevels; -import org.springframework.kafka.test.condition.LogLevelsCondition; -import org.springframework.kafka.test.context.EmbeddedKafka; -import org.springframework.kafka.test.utils.JUnitUtils; -import org.springframework.kafka.test.utils.KafkaTestUtils; - import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import reactor.core.scheduler.Schedulers; @@ -63,6 +49,19 @@ import reactor.kafka.sender.internals.DefaultKafkaSender; import reactor.test.StepVerifier; +import org.springframework.core.log.LogAccessor; +import org.springframework.kafka.support.KafkaUtils; +import org.springframework.kafka.support.converter.MessagingMessageConverter; +import org.springframework.kafka.test.condition.EmbeddedKafkaCondition; +import org.springframework.kafka.test.condition.LogLevels; +import org.springframework.kafka.test.condition.LogLevelsCondition; +import org.springframework.kafka.test.context.EmbeddedKafka; +import org.springframework.kafka.test.utils.JUnitUtils; +import org.springframework.kafka.test.utils.KafkaTestUtils; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; + /** * @author Mark Norkin * @author Gary Russell diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/ABSwitchClusterTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/ABSwitchClusterTests.java index 2f6f58b701..afdf0d3d57 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/ABSwitchClusterTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/ABSwitchClusterTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2020 the original author or authors. + * Copyright 2020-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,11 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.mock; - import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -44,6 +39,11 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell * @since 2.6 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/AbstractConsumerSeekAwareTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/AbstractConsumerSeekAwareTests.java new file mode 100644 index 0000000000..bec23890fa --- /dev/null +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/AbstractConsumerSeekAwareTests.java @@ -0,0 +1,195 @@ +/* + * Copyright 2024 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.kafka.listener; + +import java.time.Duration; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import org.apache.kafka.common.TopicPartition; +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.kafka.annotation.EnableKafka; +import org.springframework.kafka.annotation.KafkaListener; +import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; +import org.springframework.kafka.core.ConsumerFactory; +import org.springframework.kafka.core.DefaultKafkaConsumerFactory; +import org.springframework.kafka.core.DefaultKafkaProducerFactory; +import org.springframework.kafka.core.KafkaTemplate; +import org.springframework.kafka.core.ProducerFactory; +import org.springframework.kafka.listener.AbstractConsumerSeekAwareTests.Config.MultiGroupListener; +import org.springframework.kafka.listener.ConsumerSeekAware.ConsumerSeekCallback; +import org.springframework.kafka.test.EmbeddedKafkaBroker; +import org.springframework.kafka.test.context.EmbeddedKafka; +import org.springframework.kafka.test.utils.KafkaTestUtils; +import org.springframework.stereotype.Component; +import org.springframework.test.annotation.DirtiesContext; +import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; + +/** + * @author Borahm Lee + * @author Artem Bilan + * @since 3.3 
+ */ +@DirtiesContext +@SpringJUnitConfig +@EmbeddedKafka(topics = {AbstractConsumerSeekAwareTests.TOPIC}, + partitions = 9, + brokerProperties = "group.initial.rebalance.delay.ms:4000") +class AbstractConsumerSeekAwareTests { + + static final String TOPIC = "Seek"; + + @Autowired + Config config; + + @Autowired + KafkaTemplate<String, String> template; + + @Autowired + MultiGroupListener multiGroupListener; + + @Test + public void checkCallbacksAndTopicPartitions() { + await().timeout(Duration.ofSeconds(15)) + .untilAsserted(() -> { + Map<ConsumerSeekCallback, List<TopicPartition>> callbacksAndTopics = + multiGroupListener.getCallbacksAndTopics(); + Set<ConsumerSeekCallback> registeredCallbacks = callbacksAndTopics.keySet(); + Set<TopicPartition> registeredTopicPartitions = + callbacksAndTopics.values() + .stream() + .flatMap(Collection::stream) + .collect(Collectors.toSet()); + + Map<TopicPartition, List<ConsumerSeekCallback>> topicsAndCallbacks = + multiGroupListener.getTopicsAndCallbacks(); + Set<TopicPartition> getTopicPartitions = topicsAndCallbacks.keySet(); + Set<ConsumerSeekCallback> getCallbacks = + topicsAndCallbacks.values() + .stream() + .flatMap(Collection::stream) + .collect(Collectors.toSet()); + + assertThat(registeredCallbacks).containsExactlyInAnyOrderElementsOf(getCallbacks).isNotEmpty(); + assertThat(registeredTopicPartitions).containsExactlyInAnyOrderElementsOf(getTopicPartitions); + }); + } + + @Test + void seekForAllGroups() throws Exception { + template.send(TOPIC, "test-data"); + template.send(TOPIC, "test-data"); + assertThat(MultiGroupListener.latch1.await(15, TimeUnit.SECONDS)).isTrue(); + assertThat(MultiGroupListener.latch2.await(15, TimeUnit.SECONDS)).isTrue(); + + MultiGroupListener.latch1 = new CountDownLatch(2); + MultiGroupListener.latch2 = new CountDownLatch(2); + + multiGroupListener.seekToBeginning(); + assertThat(MultiGroupListener.latch1.await(15, TimeUnit.SECONDS)).isTrue(); + assertThat(MultiGroupListener.latch2.await(15, TimeUnit.SECONDS)).isTrue(); + } + + @Test + void seekForSpecificGroup() throws Exception { + template.send(TOPIC, "test-data"); + template.send(TOPIC, "test-data"); + assertThat(MultiGroupListener.latch1.await(15, TimeUnit.SECONDS)).isTrue(); + assertThat(MultiGroupListener.latch2.await(15, TimeUnit.SECONDS)).isTrue(); + + MultiGroupListener.latch1 = new CountDownLatch(2); + MultiGroupListener.latch2 = new CountDownLatch(2); + + multiGroupListener.seekToBeginningFor("group2"); + assertThat(MultiGroupListener.latch2.await(15, TimeUnit.SECONDS)).isTrue(); + assertThat(MultiGroupListener.latch1.await(1, TimeUnit.SECONDS)).isFalse(); + assertThat(MultiGroupListener.latch1.getCount()).isEqualTo(2); + } + + @EnableKafka + @Configuration + static class Config { + + @Autowired + EmbeddedKafkaBroker broker; + + @Bean + ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory( + ConsumerFactory<String, String> consumerFactory) { + ConcurrentKafkaListenerContainerFactory<String, String> factory = + new ConcurrentKafkaListenerContainerFactory<>(); + factory.setConsumerFactory(consumerFactory); + return factory; + } + + @Bean + ConsumerFactory<String, String> consumerFactory() { + return new DefaultKafkaConsumerFactory<>(KafkaTestUtils.consumerProps("test-group", "false", this.broker)); + } + + @Bean + ProducerFactory<String, String> producerFactory() { + return new DefaultKafkaProducerFactory<>(KafkaTestUtils.producerProps(this.broker)); + } + + @Bean + KafkaTemplate<String, String> template(ProducerFactory<String, String> pf) { + return new KafkaTemplate<>(pf); + } + + @Component + static class MultiGroupListener extends AbstractConsumerSeekAware { + + static CountDownLatch latch1 = new CountDownLatch(2); + + static CountDownLatch latch2 = new CountDownLatch(2); + + @KafkaListener(groupId =
"group1", topics = TOPIC, concurrency = "2") + void listenForGroup1(String in) { + latch1.countDown(); + } + + @KafkaListener(groupId = "group2", topics = TOPIC, concurrency = "7") + void listenForGroup2(String in) { + latch2.countDown(); + } + + void seekToBeginningFor(String groupId) { + getCallbacksAndTopics().forEach((cb, topics) -> { + if (groupId.equals(cb.getGroupId())) { + topics.forEach(tp -> cb.seekToBeginning(tp.topic(), tp.partition())); + } + }); + } + + } + + } + +} diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/AsyncAckAfterHandleTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/AsyncAckAfterHandleTests.java index ab436e457e..d301b72b6b 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/AsyncAckAfterHandleTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/AsyncAckAfterHandleTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2023 the original author or authors. + * Copyright 2023-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,8 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; - import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -47,6 +45,8 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Gary Russell * @since 3.0 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/CommitOnAssignmentTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/CommitOnAssignmentTests.java index 7de877420a..20170278ad 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/CommitOnAssignmentTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/CommitOnAssignmentTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2020-2021 the original author or authors. + * Copyright 2020-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,17 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.BDDMockito.willReturn; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.withSettings; - import java.time.Duration; import java.util.Collection; import java.util.Collections; @@ -61,6 +50,17 @@ import org.springframework.test.annotation.DirtiesContext.ClassMode; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.BDDMockito.willReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.withSettings; + /** * @author Gary Russell * @since 2.3.6 @@ -120,7 +120,7 @@ void testLatestOnlyTx() throws InterruptedException { latch.countDown(); return null; }).given(producer).sendOffsetsToTransaction(any(), any(ConsumerGroupMetadata.class)); - props.setTransactionManager(tm); + props.setKafkaAwareTransactionManager(tm); this.registry.start(); assertThat(latch.await(10, TimeUnit.SECONDS)).isTrue(); } @@ -135,8 +135,7 @@ void testLatestOnlyNoTx() throws InterruptedException { KafkaTransactionManager tm = new KafkaTransactionManager<>(pf); Producer producer = mock(Producer.class); given(pf.createProducer(any())).willReturn(producer); - CountDownLatch latch = new CountDownLatch(1); - props.setTransactionManager(tm); + props.setKafkaAwareTransactionManager(tm); this.registry.start(); assertThat(this.config.commitLatch.await(10, TimeUnit.SECONDS)).isTrue(); verify(producer, never()).sendOffsetsToTransaction(any(), any(ConsumerGroupMetadata.class)); diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/CommonContainerStoppingErrorHandler1Tests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/CommonContainerStoppingErrorHandler1Tests.java index 70ba8c8c69..1ddc1e216c 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/CommonContainerStoppingErrorHandler1Tests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/CommonContainerStoppingErrorHandler1Tests.java @@ -1,5 +1,5 @@ /* - * Copyright 2021 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
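// Several hunks here replace ContainerProperties.setTransactionManager(tm) with
// setKafkaAwareTransactionManager(tm), the non-deprecated accessor for
// KafkaAwareTransactionManager implementations such as KafkaTransactionManager.
// A minimal configuration sketch, assuming an externally supplied producerProps map
// and an illustrative topic name:

import java.util.Map;

import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.transaction.KafkaTransactionManager;

public class TransactionalContainerConfig {

	ContainerProperties transactionalContainerProperties(Map<String, Object> producerProps) {
		DefaultKafkaProducerFactory<String, String> pf = new DefaultKafkaProducerFactory<>(producerProps);
		pf.setTransactionIdPrefix("tx-"); // enables transactions on the factory
		KafkaTransactionManager<String, String> tm = new KafkaTransactionManager<>(pf);
		ContainerProperties props = new ContainerProperties("some-topic");
		props.setKafkaAwareTransactionManager(tm); // formerly setTransactionManager(tm)
		return props;
	}

}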
@@ -16,14 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -60,6 +52,14 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell * @since 2.8 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/CommonContainerStoppingErrorHandler2Tests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/CommonContainerStoppingErrorHandler2Tests.java index 543960c2bb..695ae04409 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/CommonContainerStoppingErrorHandler2Tests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/CommonContainerStoppingErrorHandler2Tests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2023 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,13 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -58,6 +51,13 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell * @since 2.8 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/CommonContainerStoppingErrorHandler3Tests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/CommonContainerStoppingErrorHandler3Tests.java index 6fba8b9d99..2d9d4ef2c9 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/CommonContainerStoppingErrorHandler3Tests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/CommonContainerStoppingErrorHandler3Tests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2021 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,14 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; - import java.time.Duration; import java.util.Arrays; import java.util.Collection; @@ -59,6 +51,14 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell * @since 2.8 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/CommonDelegatingErrorHandlerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/CommonDelegatingErrorHandlerTests.java index 1a25253a41..c994e4a1ea 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/CommonDelegatingErrorHandlerTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/CommonDelegatingErrorHandlerTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2021-2022 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,17 +16,12 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; - import java.io.IOException; import java.util.Collections; import java.util.Map; import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.junit.jupiter.api.Test; @@ -34,18 +29,29 @@ import org.springframework.kafka.core.KafkaProducerException; import org.springframework.kafka.test.utils.KafkaTestUtils; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; + /** * Tests for {@link CommonDelegatingErrorHandler}. 
* * @author Gary Russell * @author Adrian Chlebosz + * @author Antonin Arquey + * @author Dan Blackney * @since 2.8 * */ public class CommonDelegatingErrorHandlerTests { @Test - void testRecordDelegates() { + void testHandleRemainingDelegates() { var def = mock(CommonErrorHandler.class); var one = mock(CommonErrorHandler.class); var two = mock(CommonErrorHandler.class); @@ -69,7 +75,7 @@ void testRecordDelegates() { } @Test - void testBatchDelegates() { + void testHandleBatchDelegates() { var def = mock(CommonErrorHandler.class); var one = mock(CommonErrorHandler.class); var two = mock(CommonErrorHandler.class); @@ -92,6 +98,54 @@ void testBatchDelegates() { verify(one).handleBatch(any(), any(), any(), any(), any()); } + @Test + void testHandleOtherExceptionDelegates() { + var def = mock(CommonErrorHandler.class); + var one = mock(CommonErrorHandler.class); + var two = mock(CommonErrorHandler.class); + var three = mock(CommonErrorHandler.class); + var eh = new CommonDelegatingErrorHandler(def); + eh.setErrorHandlers(Map.of(IllegalStateException.class, one, IllegalArgumentException.class, two)); + eh.addDelegate(RuntimeException.class, three); + + eh.handleOtherException(wrap(new IOException()), mock(Consumer.class), + mock(MessageListenerContainer.class), true); + verify(def).handleOtherException(any(), any(), any(), anyBoolean()); + eh.handleOtherException(wrap(new KafkaException("test")), mock(Consumer.class), + mock(MessageListenerContainer.class), true); + verify(three).handleOtherException(any(), any(), any(), anyBoolean()); + eh.handleOtherException(wrap(new IllegalArgumentException()), mock(Consumer.class), + mock(MessageListenerContainer.class), true); + verify(two).handleOtherException(any(), any(), any(), anyBoolean()); + eh.handleOtherException(wrap(new IllegalStateException()), mock(Consumer.class), + mock(MessageListenerContainer.class), true); + verify(one).handleOtherException(any(), any(), any(), anyBoolean()); + } + + @Test + void testHandleOneDelegates() { + var def = mock(CommonErrorHandler.class); + var one = mock(CommonErrorHandler.class); + var two = mock(CommonErrorHandler.class); + var three = mock(CommonErrorHandler.class); + var eh = new CommonDelegatingErrorHandler(def); + eh.setErrorHandlers(Map.of(IllegalStateException.class, one, IllegalArgumentException.class, two)); + eh.addDelegate(RuntimeException.class, three); + + eh.handleOne(wrap(new IOException()), mock(ConsumerRecord.class), mock(Consumer.class), + mock(MessageListenerContainer.class)); + verify(def).handleOne(any(), any(), any(), any()); + eh.handleOne(wrap(new KafkaException("test")), mock(ConsumerRecord.class), mock(Consumer.class), + mock(MessageListenerContainer.class)); + verify(three).handleOne(any(), any(), any(), any()); + eh.handleOne(wrap(new IllegalArgumentException()), mock(ConsumerRecord.class), mock(Consumer.class), + mock(MessageListenerContainer.class)); + verify(two).handleOne(any(), any(), any(), any()); + eh.handleOne(wrap(new IllegalStateException()), mock(ConsumerRecord.class), mock(Consumer.class), + mock(MessageListenerContainer.class)); + verify(one).handleOne(any(), any(), any(), any()); + } + @Test void testDelegateForThrowableIsAppliedWhenCauseTraversingIsEnabled() { var defaultHandler = mock(CommonErrorHandler.class); @@ -173,6 +227,48 @@ void testDefaultDelegateIsApplied() { verify(defaultHandler).handleRemaining(any(), any(), any(), any()); } + @Test + void testAddIncompatibleAckAfterHandleDelegate() { + var defaultHandler = mock(CommonErrorHandler.class); + 
given(defaultHandler.isAckAfterHandle()).willReturn(true); + var delegatingErrorHandler = new CommonDelegatingErrorHandler(defaultHandler); + var delegate = mock(CommonErrorHandler.class); + given(delegate.isAckAfterHandle()).willReturn(false); + + assertThatThrownBy(() -> delegatingErrorHandler.addDelegate(IllegalStateException.class, delegate)) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage("All delegates must return the same value when calling 'isAckAfterHandle()'"); + } + + @Test + void testAddIncompatibleSeeksAfterHandlingDelegate() { + var defaultHandler = mock(CommonErrorHandler.class); + given(defaultHandler.seeksAfterHandling()).willReturn(true); + var delegatingErrorHandler = new CommonDelegatingErrorHandler(defaultHandler); + var delegate = mock(CommonErrorHandler.class); + given(delegate.seeksAfterHandling()).willReturn(false); + + assertThatThrownBy(() -> delegatingErrorHandler.addDelegate(IllegalStateException.class, delegate)) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage("All delegates must return the same value when calling 'seeksAfterHandling()'"); + } + + @Test + void testAddMultipleDelegatesWithOneIncompatible() { + var defaultHandler = mock(CommonErrorHandler.class); + given(defaultHandler.seeksAfterHandling()).willReturn(true); + var delegatingErrorHandler = new CommonDelegatingErrorHandler(defaultHandler); + var one = mock(CommonErrorHandler.class); + given(one.seeksAfterHandling()).willReturn(true); + var two = mock(CommonErrorHandler.class); + given(two.seeksAfterHandling()).willReturn(false); + Map<Class<? extends Throwable>, CommonErrorHandler> delegates = Map.of(IllegalStateException.class, one, IOException.class, two); + + assertThatThrownBy(() -> delegatingErrorHandler.setErrorHandlers(delegates)) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage("All delegates must return the same value when calling 'seeksAfterHandling()'"); + } + private Exception wrap(Exception ex) { return new ListenerExecutionFailedException("test", ex); } diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/CommonMixedErrorHandlerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/CommonMixedErrorHandlerTests.java index 0d6c7bdf26..3ba31798d6 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/CommonMixedErrorHandlerTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/CommonMixedErrorHandlerTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2021-2022 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
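// The tests added above pin down CommonDelegatingErrorHandler's delegate validation:
// addDelegate(..) and setErrorHandlers(..) throw IllegalArgumentException when a
// delegate answers isAckAfterHandle() or seeksAfterHandling() differently from the
// default handler. A compatible-setup sketch (the exception class and back-off values
// are illustrative):

import org.springframework.kafka.listener.CommonDelegatingErrorHandler;
import org.springframework.kafka.listener.DefaultErrorHandler;
import org.springframework.util.backoff.FixedBackOff;

public class DelegatingErrorHandlerConfig {

	CommonDelegatingErrorHandler delegatingErrorHandler() {
		CommonDelegatingErrorHandler handler = new CommonDelegatingErrorHandler(new DefaultErrorHandler());
		// Compatible: another DefaultErrorHandler answers both predicates the same way.
		handler.addDelegate(IllegalStateException.class, new DefaultErrorHandler(new FixedBackOff(0L, 2L)));
		// A delegate with a different isAckAfterHandle()/seeksAfterHandling() answer
		// would be rejected at this point with IllegalArgumentException.
		return handler;
	}

}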
@@ -16,12 +16,12 @@ package org.springframework.kafka.listener; +import org.junit.jupiter.api.Test; + import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoMoreInteractions; -import org.junit.jupiter.api.Test; - /** * @author Gary Russell * @since 2.8 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/ConcurrentMessageListenerContainerMockTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/ConcurrentMessageListenerContainerMockTests.java index 2b82126b81..8df4c0bea2 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/ConcurrentMessageListenerContainerMockTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/ConcurrentMessageListenerContainerMockTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2019-2023 the original author or authors. + * Copyright 2019-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,21 +16,8 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.atLeastOnce; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - import java.time.Duration; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -73,11 +60,25 @@ import org.springframework.kafka.transaction.KafkaAwareTransactionManager; import org.springframework.lang.Nullable; import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor; -import org.springframework.transaction.PlatformTransactionManager; import org.springframework.transaction.support.TransactionSynchronizationManager; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + /** * @author Gary Russell + * @author Wang Zhiyang + * @author Soby Chacko + * * @since 2.2.4 * */ @@ -130,7 +131,7 @@ else if (event instanceof ConsumerFailedToStartEvent) { exec.destroy(); } - @SuppressWarnings({ "rawtypes", "unchecked", "deprecation" }) + @SuppressWarnings({ "rawtypes", "unchecked" }) @Test void testCorrectContainerForConsumerError() throws InterruptedException { ConsumerFactory consumerFactory = mock(ConsumerFactory.class); @@ -200,10 +201,8 @@ void delayedIdleEvent() throws InterruptedException { containerProperties); CountDownLatch latch1 = new CountDownLatch(1); CountDownLatch latch2 = new CountDownLatch(2); - AtomicReference eventTime = new AtomicReference<>(); container.setApplicationEventPublisher(event -> { if (event instanceof ListenerContainerIdleEvent) { - 
eventTime.set(System.currentTimeMillis()); latch1.countDown(); latch2.countDown(); } @@ -263,7 +262,7 @@ void testSyncRelativeSeeks() throws InterruptedException { TopicPartition tp1 = new TopicPartition("foo", 1); TopicPartition tp2 = new TopicPartition("foo", 2); TopicPartition tp3 = new TopicPartition("foo", 3); - List assignments = Arrays.asList(tp0, tp1, tp2, tp3); + List assignments = List.of(tp0, tp1, tp2, tp3); willAnswer(invocation -> { ((ConsumerRebalanceListener) invocation.getArgument(1)) .onPartitionsAssigned(assignments); @@ -292,6 +291,106 @@ void testSyncRelativeSeeks() throws InterruptedException { container.stop(); } + @SuppressWarnings({ "rawtypes", "unchecked" }) + @Test + void seekOffsetFromComputeFnOnInitAssignmentAndIdleContainer() throws InterruptedException { + ConsumerFactory consumerFactory = mock(ConsumerFactory.class); + final Consumer consumer = mock(Consumer.class); + TestMessageListener3 listener = new TestMessageListener3(); + ConsumerRecords empty = new ConsumerRecords<>(Collections.emptyMap()); + willAnswer(invocation -> { + Thread.sleep(10); + return empty; + }).given(consumer).poll(any()); + TopicPartition tp0 = new TopicPartition("test-topic", 0); + TopicPartition tp1 = new TopicPartition("test-topic", 1); + TopicPartition tp2 = new TopicPartition("test-topic", 2); + TopicPartition tp3 = new TopicPartition("test-topic", 3); + List assignments = List.of(tp0, tp1, tp2, tp3); + willAnswer(invocation -> { + ((ConsumerRebalanceListener) invocation.getArgument(1)) + .onPartitionsAssigned(assignments); + return null; + }).given(consumer).subscribe(any(Collection.class), any()); + given(consumer.position(any())).willReturn(30L); // current offset position is always 30 + given(consumerFactory.createConsumer("grp", "", "-0", KafkaTestUtils.defaultPropertyOverrides())) + .willReturn(consumer); + ContainerProperties containerProperties = new ContainerProperties("test-topic"); + containerProperties.setGroupId("grp"); + containerProperties.setMessageListener(listener); + containerProperties.setIdleEventInterval(10L); + containerProperties.setMissingTopicsFatal(false); + ConcurrentMessageListenerContainer container = new ConcurrentMessageListenerContainer(consumerFactory, + containerProperties); + container.start(); + assertThat(listener.latch.await(10, TimeUnit.SECONDS)).isTrue(); + verify(consumer).seek(tp0, 20L); + verify(consumer).seek(tp1, 21L); + verify(consumer).seek(tp2, 22L); + verify(consumer).seek(tp3, 23L); + + verify(consumer).seek(tp0, 30L); + verify(consumer).seek(tp1, 30L); + verify(consumer).seek(tp2, 30L); + verify(consumer).seek(tp3, 30L); + container.stop(); + } + + @SuppressWarnings({ "rawtypes", "unchecked" }) + @Test + void seekOffsetFromComputeFnFromActiveListener() throws InterruptedException { + ConsumerFactory consumerFactory = mock(ConsumerFactory.class); + final Consumer consumer = mock(Consumer.class); + TestMessageListener4 listener = new TestMessageListener4(); + CountDownLatch latch = new CountDownLatch(2); + TopicPartition tp0 = new TopicPartition("test-topic", 0); + TopicPartition tp1 = new TopicPartition("test-topic", 1); + TopicPartition tp2 = new TopicPartition("test-topic", 2); + TopicPartition tp3 = new TopicPartition("test-topic", 3); + List assignments = List.of(tp0, tp1, tp2, tp3); + Map>> recordMap = new HashMap<>(); + recordMap.put(tp0, Collections.singletonList(new ConsumerRecord("test-topic", 0, 0, null, "test-data"))); + recordMap.put(tp1, Collections.singletonList(new ConsumerRecord("test-topic", 1, 0, null, 
"test-data"))); + recordMap.put(tp2, Collections.singletonList(new ConsumerRecord("test-topic", 2, 0, null, "test-data"))); + recordMap.put(tp3, Collections.singletonList(new ConsumerRecord("test-topic", 3, 0, null, "test-data"))); + ConsumerRecords records = new ConsumerRecords<>(recordMap); + willAnswer(invocation -> { + Thread.sleep(10); + if (listener.latch.getCount() <= 0) { + latch.countDown(); + } + return records; + }).given(consumer).poll(any()); + willAnswer(invocation -> { + ((ConsumerRebalanceListener) invocation.getArgument(1)) + .onPartitionsAssigned(assignments); + return null; + }).given(consumer).subscribe(any(Collection.class), any()); + given(consumer.position(tp0)).willReturn(30L); // current offset 30, target 20 (see hard-coded in onMessage) + given(consumer.position(tp1)).willReturn(10L); // current 10, target 21 + given(consumer.position(tp2)).willReturn(22L); // current 22, target 22 + given(consumer.position(tp3)).willReturn(22L); // current 22, target 23 + given(consumer.beginningOffsets(any())).willReturn(assignments.stream() + .collect(Collectors.toMap(tp -> tp, tp -> 0L))); + given(consumer.endOffsets(any())).willReturn(assignments.stream() + .collect(Collectors.toMap(tp -> tp, tp -> 100L))); + given(consumerFactory.createConsumer("grp", "", "-0", KafkaTestUtils.defaultPropertyOverrides())) + .willReturn(consumer); + ContainerProperties containerProperties = new ContainerProperties("test-topic"); + containerProperties.setGroupId("grp"); + containerProperties.setMessageListener(listener); + containerProperties.setMissingTopicsFatal(false); + ConcurrentMessageListenerContainer container = new ConcurrentMessageListenerContainer(consumerFactory, + containerProperties); + container.start(); + assertThat(latch.await(10, TimeUnit.SECONDS)).isTrue(); + verify(consumer).seek(tp0, 20L); + verify(consumer).seek(tp1, 10L); + verify(consumer).seek(tp2, 22L); + verify(consumer).seek(tp3, 22L); + container.stop(); + } + @SuppressWarnings({ "rawtypes", "unchecked" }) @Test @DisplayName("Seek from activeListener") @@ -304,7 +403,7 @@ void testAsyncRelativeSeeks() throws InterruptedException { TopicPartition tp1 = new TopicPartition("foo", 1); TopicPartition tp2 = new TopicPartition("foo", 2); TopicPartition tp3 = new TopicPartition("foo", 3); - List assignments = Arrays.asList(tp0, tp1, tp2, tp3); + List assignments = List.of(tp0, tp1, tp2, tp3); Map>> recordMap = new HashMap<>(); recordMap.put(tp0, Collections.singletonList(new ConsumerRecord("foo", 0, 0, null, "bar"))); recordMap.put(tp1, Collections.singletonList(new ConsumerRecord("foo", 1, 0, null, "bar"))); @@ -343,7 +442,7 @@ void testAsyncRelativeSeeks() throws InterruptedException { verify(consumer).seekToEnd(Collections.singletonList(tp2)); verify(consumer).seek(tp2, 70L); // position - 30 (seekToEnd ignored by mock) verify(consumer).seekToBeginning(Collections.singletonList(tp3)); - verify(consumer).seek(tp3, 30L); + verify(consumer).seek(tp3, 130L); // position + 30 (seekToBeginning ignored by mock) container.stop(); } @@ -363,7 +462,7 @@ void testSyncTimestampSeeks() throws InterruptedException { TopicPartition tp1 = new TopicPartition("foo", 1); TopicPartition tp2 = new TopicPartition("foo", 2); TopicPartition tp3 = new TopicPartition("foo", 3); - List assignments = Arrays.asList(tp0, tp1, tp2, tp3); + List assignments = List.of(tp0, tp1, tp2, tp3); willAnswer(invocation -> { ((ConsumerRebalanceListener) invocation.getArgument(1)) .onPartitionsAssigned(assignments); @@ -410,7 +509,7 @@ void 
testAsyncTimestampSeeks() throws InterruptedException { TopicPartition tp1 = new TopicPartition("foo", 1); TopicPartition tp2 = new TopicPartition("foo", 2); TopicPartition tp3 = new TopicPartition("foo", 3); - List assignments = Arrays.asList(tp0, tp1, tp2, tp3); + List assignments = List.of(tp0, tp1, tp2, tp3); Map>> recordMap = new HashMap<>(); recordMap.put(tp0, Collections.singletonList(new ConsumerRecord("foo", 0, 0, null, "bar"))); recordMap.put(tp1, Collections.singletonList(new ConsumerRecord("foo", 1, 0, null, "bar"))); @@ -508,7 +607,9 @@ void testBatchInterceptBeforeTx1() throws InterruptedException { } @SuppressWarnings({ "rawtypes", "unchecked" }) - void testIntercept(boolean beforeTx, AssignmentCommitOption option, boolean batch) throws InterruptedException { + void testIntercept(boolean beforeTx, @Nullable AssignmentCommitOption option, boolean batch) + throws InterruptedException { + ConsumerFactory consumerFactory = mock(ConsumerFactory.class); final Consumer consumer = mock(Consumer.class); TopicPartition tp0 = new TopicPartition("foo", 0); @@ -523,7 +624,7 @@ void testIntercept(boolean beforeTx, AssignmentCommitOption option, boolean batc Thread.sleep(10); return firstOrSecondPoll.incrementAndGet() < 3 ? records : empty; }).given(consumer).poll(any()); - List assignments = Arrays.asList(tp0); + List assignments = List.of(tp0); willAnswer(invocation -> { ((ConsumerRebalanceListener) invocation.getArgument(1)) .onPartitionsAssigned(assignments); @@ -560,7 +661,7 @@ void testIntercept(boolean beforeTx, AssignmentCommitOption option, boolean batc given(tm.getProducerFactory()).willReturn(pf); Producer producer = mock(Producer.class); given(pf.createProducer()).willReturn(producer); - containerProperties.setTransactionManager(tm); + containerProperties.setKafkaAwareTransactionManager(tm); List order = new ArrayList<>(); CountDownLatch latch = new CountDownLatch(option == null ? 2 : 3); willAnswer(inv -> { @@ -661,104 +762,6 @@ public void failure(ConsumerRecords records, Exception exception, Consumer consu } } - @Test - @SuppressWarnings({ "rawtypes", "unchecked" }) - void testInterceptInTxNonKafkaTM() throws InterruptedException { - ConsumerFactory consumerFactory = mock(ConsumerFactory.class); - final Consumer consumer = mock(Consumer.class); - TopicPartition tp0 = new TopicPartition("foo", 0); - ConsumerRecord record1 = new ConsumerRecord("foo", 0, 0L, "bar", "baz"); - ConsumerRecords records = new ConsumerRecords( - Collections.singletonMap(tp0, Collections.singletonList(record1))); - ConsumerRecords empty = new ConsumerRecords(Collections.emptyMap()); - AtomicInteger firstOrSecondPoll = new AtomicInteger(); - willAnswer(invocation -> { - Thread.sleep(10); - return firstOrSecondPoll.incrementAndGet() < 2 ? 
records : empty; - }).given(consumer).poll(any()); - List assignments = Arrays.asList(tp0); - willAnswer(invocation -> { - ((ConsumerRebalanceListener) invocation.getArgument(1)) - .onPartitionsAssigned(assignments); - return null; - }).given(consumer).subscribe(any(Collection.class), any()); - given(consumer.position(any())).willReturn(0L); - given(consumerFactory.createConsumer("grp", "", "-0", KafkaTestUtils.defaultPropertyOverrides())) - .willReturn(consumer); - ContainerProperties containerProperties = new ContainerProperties("foo"); - containerProperties.setGroupId("grp"); - AtomicReference>> received = new AtomicReference<>(); - containerProperties.setMessageListener((MessageListener) rec -> { - }); - containerProperties.setMissingTopicsFatal(false); - List order = new ArrayList<>(); - AtomicReference latch = new AtomicReference<>(new CountDownLatch(2)); - PlatformTransactionManager tm = mock(PlatformTransactionManager.class); - willAnswer(inv -> { - order.add("tx"); - latch.get().countDown(); - return null; - }).given(tm).getTransaction(any()); - containerProperties.setTransactionManager(tm); - ConcurrentMessageListenerContainer container = new ConcurrentMessageListenerContainer(consumerFactory, - containerProperties); - AtomicReference successCalled = new AtomicReference<>(new CountDownLatch(1)); - container.setRecordInterceptor(new RecordInterceptor() { - - @Override - @Nullable - public ConsumerRecord intercept(ConsumerRecord rec, Consumer consumer) { - order.add("interceptor"); - latch.get().countDown(); - return rec; - } - - @Override - public void success(ConsumerRecord record, Consumer consumer) { - order.add("success"); - successCalled.get().countDown(); - } - - }); - container.setBatchInterceptor(new BatchInterceptor() { - - @Override - @Nullable - public ConsumerRecords intercept(ConsumerRecords recs, Consumer consumer) { - order.add("b.interceptor"); - latch.get().countDown(); - return new ConsumerRecords(Collections.singletonMap(tp0, Collections.singletonList(record1))); - } - - @Override - public void success(ConsumerRecords records, Consumer consumer) { - order.add("b.success"); - successCalled.get().countDown(); - } - - }); - container.setInterceptBeforeTx(false); - container.start(); - try { - assertThat(latch.get().await(10, TimeUnit.SECONDS)).isTrue(); - assertThat(successCalled.get().await(10, TimeUnit.SECONDS)).isTrue(); - assertThat(order).containsExactly("tx", "interceptor", "success"); - container.stop(); - latch.set(new CountDownLatch(2)); - successCalled.set(new CountDownLatch(1)); - container.getContainerProperties().setMessageListener((BatchMessageListener) recs -> { - }); - firstOrSecondPoll.set(0); - container.start(); - assertThat(latch.get().await(10, TimeUnit.SECONDS)).isTrue(); - assertThat(successCalled.get().await(10, TimeUnit.SECONDS)).isTrue(); - assertThat(order).containsExactly("tx", "interceptor", "success", "tx", "b.interceptor", "b.success"); - } - finally { - container.stop(); - } - } - @SuppressWarnings({ "rawtypes", "unchecked" }) @Test void testNoCommitOnAssignmentWithEarliest() throws InterruptedException { @@ -771,7 +774,7 @@ void testNoCommitOnAssignmentWithEarliest() throws InterruptedException { return records; }).given(consumer).poll(any()); TopicPartition tp0 = new TopicPartition("foo", 0); - List assignments = Arrays.asList(tp0); + List assignments = List.of(tp0); willAnswer(invocation -> { ((ConsumerRebalanceListener) invocation.getArgument(1)) .onPartitionsAssigned(assignments); @@ -814,7 +817,7 @@ private void 
testInitialCommitIBasedOnCommitted(boolean committed) throws Interr
 			return records;
 		}).given(consumer).poll(any());
 		TopicPartition tp0 = new TopicPartition("foo", 0);
-		List<TopicPartition> assignments = Arrays.asList(tp0);
+		List<TopicPartition> assignments = List.of(tp0);
 		willAnswer(invocation -> {
 			((ConsumerRebalanceListener) invocation.getArgument(1))
 					.onPartitionsAssigned(assignments);
@@ -865,7 +868,7 @@ void removeFromPartitionPauseRequestedWhenNotAssigned() throws InterruptedExcept
 			return null;
 		}).given(consumer).pause(any());
 		TopicPartition tp0 = new TopicPartition("foo", 0);
-		List<TopicPartition> assignments = Arrays.asList(tp0);
+		List<TopicPartition> assignments = List.of(tp0);
 		AtomicReference<ConsumerRebalanceListener> rebal = new AtomicReference<>();
 		willAnswer(invocation -> {
 			rebal.set(invocation.getArgument(1));
@@ -911,14 +914,14 @@ void pruneRevokedPartitionsFromRemainingRecordsWhenSeekAfterErrorFalseLegacyAssi
 		TopicPartition tp1 = new TopicPartition("foo", 1);
 		TopicPartition tp2 = new TopicPartition("foo", 2);
 		TopicPartition tp3 = new TopicPartition("foo", 3);
-		List<TopicPartition> allAssignments = Arrays.asList(tp0, tp1, tp2, tp3);
+		List<TopicPartition> allAssignments = List.of(tp0, tp1, tp2, tp3);
 		Map<TopicPartition, List<ConsumerRecord<Integer, String>>> allRecordMap = new HashMap<>();
 		allRecordMap.put(tp0, Collections.singletonList(new ConsumerRecord("foo", 0, 0, null, "bar")));
 		allRecordMap.put(tp1, Collections.singletonList(new ConsumerRecord("foo", 1, 0, null, "bar")));
 		allRecordMap.put(tp2, Collections.singletonList(new ConsumerRecord("foo", 2, 0, null, "bar")));
 		allRecordMap.put(tp3, Collections.singletonList(new ConsumerRecord("foo", 3, 0, null, "bar")));
 		ConsumerRecords<Integer, String> allRecords = new ConsumerRecords<>(allRecordMap);
-		List<TopicPartition> afterRevokeAssignments = Arrays.asList(tp1, tp3);
+		List<TopicPartition> afterRevokeAssignments = List.of(tp1, tp3);
 		Map<TopicPartition, List<ConsumerRecord<Integer, String>>> afterRevokeRecordMap = new HashMap<>();
 		afterRevokeRecordMap.put(tp1, Collections.singletonList(new ConsumerRecord("foo", 1, 0, null, "bar")));
 		afterRevokeRecordMap.put(tp3, Collections.singletonList(new ConsumerRecord("foo", 3, 0, null, "bar")));
@@ -979,10 +982,11 @@ public boolean handleOne(Exception thrownException, ConsumerRecord record,
 					Thread.sleep(50);
 					pollLatch.countDown();
 					switch (pollPhase.getAndIncrement()) {
-						case 0:
+						case 0 -> {
 							rebal.get().onPartitionsAssigned(allAssignments);
 							return allRecords;
-						case 1:
+						}
+						case 1 -> {
 							rebal.get().onPartitionsRevoked(allAssignments);
 							rebal.get().onPartitionsAssigned(afterRevokeAssignments);
 							rebalLatch.countDown();
@@ -991,11 +995,13 @@ public boolean handleOne(Exception thrownException, ConsumerRecord record,
 								return ConsumerRecords.empty();
 							}
 							return afterRevokeRecords;
-						default:
+						}
+						default -> {
 							if (paused.get()) {
 								return ConsumerRecords.empty();
 							}
 							return afterRevokeRecords;
+						}
 					}
 				}).given(consumer).poll(any());
 		container.start();
@@ -1023,7 +1029,7 @@ void pruneRevokedPartitionsFromRemainingRecordsWhenSeekAfterErrorFalseCoopAssign
 		TopicPartition tp1 = new TopicPartition("foo", 1);
 		TopicPartition tp2 = new TopicPartition("foo", 2);
 		TopicPartition tp3 = new TopicPartition("foo", 3);
-		List<TopicPartition> allAssignments = Arrays.asList(tp0, tp1, tp2, tp3);
+		List<TopicPartition> allAssignments = List.of(tp0, tp1, tp2, tp3);
 		Map<TopicPartition, List<ConsumerRecord<Integer, String>>> allRecordMap = new LinkedHashMap<>();
 		ConsumerRecord record0 = new ConsumerRecord("foo", 0, 0, null, "bar");
 		ConsumerRecord record1 = new ConsumerRecord("foo", 1, 0, null, "bar");
 		allRecordMap.put(tp2, Collections.singletonList(new ConsumerRecord("foo", 2, 0, null, "bar")));
 		allRecordMap.put(tp3, Collections.singletonList(new ConsumerRecord("foo", 3, 0, null, "bar")));
 		ConsumerRecords<Integer, String> allRecords = new ConsumerRecords<>(allRecordMap);
-		List<TopicPartition> revokedAssignments = Arrays.asList(tp0, tp2);
+		List<TopicPartition> revokedAssignments = List.of(tp0, tp2);
 
 		AtomicInteger pollPhase = new AtomicInteger();
 		Consumer<Integer, String> consumer = mock(Consumer.class);
@@ -1044,9 +1050,7 @@ void pruneRevokedPartitionsFromRemainingRecordsWhenSeekAfterErrorFalseCoopAssign
 			return null;
 		}).given(consumer).subscribe(any(Collection.class), any());
 		CountDownLatch pauseLatch = new CountDownLatch(1);
-		AtomicBoolean paused = new AtomicBoolean();
 		willAnswer(inv -> {
-			paused.set(true);
 			pauseLatch.countDown();
 			return null;
 		}).given(consumer).pause(any());
@@ -1087,17 +1091,20 @@ public boolean handleOne(Exception thrownException, ConsumerRecord record,
 					Thread.sleep(50);
 					pollLatch.countDown();
 					switch (pollPhase.getAndIncrement()) {
-						case 0:
+						case 0 -> {
 							rebal.get().onPartitionsAssigned(allAssignments);
 							return allRecords;
-						case 1:
+						}
+						case 1 -> {
 							rebal.get().onPartitionsRevoked(revokedAssignments);
 							rebal.get().onPartitionsAssigned(Collections.emptyList());
 							rebalLatch.countDown();
 							continueLatch.await(10, TimeUnit.SECONDS);
 							return ConsumerRecords.empty();
-						default:
+						}
+						default -> {
 							return ConsumerRecords.empty();
+						}
 					}
 				}).given(consumer).poll(any());
 		container.start();
@@ -1128,14 +1135,14 @@ public boolean handleOne(Exception thrownException, ConsumerRecord record,
 	void pruneRevokedPartitionsFromPendingOutOfOrderCommitsLegacyAssignor() throws InterruptedException {
 		TopicPartition tp0 = new TopicPartition("foo", 0);
 		TopicPartition tp1 = new TopicPartition("foo", 1);
-		List<TopicPartition> allAssignments = Arrays.asList(tp0, tp1);
+		List<TopicPartition> allAssignments = List.of(tp0, tp1);
 		Map<TopicPartition, List<ConsumerRecord<Integer, String>>> allRecordMap = new HashMap<>();
 		allRecordMap.put(tp0, List.of(new ConsumerRecord("foo", 0, 0, null, "bar"),
 				new ConsumerRecord("foo", 0, 1, null, "bar")));
 		allRecordMap.put(tp1, List.of(new ConsumerRecord("foo", 1, 0, null, "bar"),
 				new ConsumerRecord("foo", 1, 1, null, "bar")));
 		ConsumerRecords<Integer, String> allRecords = new ConsumerRecords<>(allRecordMap);
-		List<TopicPartition> afterRevokeAssignments = Arrays.asList(tp1);
+		List<TopicPartition> afterRevokeAssignments = List.of(tp1);
 
 		AtomicInteger pollPhase = new AtomicInteger();
 		Consumer<Integer, String> consumer = mock(Consumer.class);
@@ -1147,9 +1154,7 @@ void pruneRevokedPartitionsFromPendingOutOfOrderCommitsLegacyAssignor() throws I
 			return null;
 		}).given(consumer).subscribe(any(Collection.class), any());
 		CountDownLatch pauseLatch = new CountDownLatch(1);
-		AtomicBoolean paused = new AtomicBoolean();
 		willAnswer(inv -> {
-			paused.set(true);
 			pauseLatch.countDown();
 			return null;
 		}).given(consumer).pause(any());
@@ -1171,17 +1176,20 @@ void pruneRevokedPartitionsFromPendingOutOfOrderCommitsLegacyAssignor() throws I
 			Thread.sleep(50);
 			pollLatch.countDown();
 			switch (pollPhase.getAndIncrement()) {
-				case 0:
+				case 0 -> {
 					rebal.get().onPartitionsAssigned(allAssignments);
 					return allRecords;
-				case 1:
+				}
+				case 1 -> {
 					rebal.get().onPartitionsRevoked(allAssignments);
 					rebal.get().onPartitionsAssigned(afterRevokeAssignments);
 					rebalLatch.countDown();
 					continueLatch.await(10, TimeUnit.SECONDS);
 					return ConsumerRecords.empty();
-				default:
+				}
+				default -> {
 					return ConsumerRecords.empty();
+				}
 			}
 		}).given(consumer).poll(any());
 		container.start();
@@ -1206,14 +1214,13 @@ void pruneRevokedPartitionsFromPendingOutOfOrderCommitsLegacyAssignor() throws I
 	void pruneRevokedPartitionsFromPendingOutOfOrderCommitsCoopAssignor() throws InterruptedException {
 		TopicPartition tp0 = new TopicPartition("foo", 0);
 		TopicPartition tp1 = new TopicPartition("foo", 1);
-		List<TopicPartition> allAssignments = Arrays.asList(tp0, tp1);
+		List<TopicPartition> allAssignments = List.of(tp0, tp1);
 		Map<TopicPartition, List<ConsumerRecord<Integer, String>>> allRecordMap = new HashMap<>();
 		allRecordMap.put(tp0, List.of(new ConsumerRecord("foo", 0, 0, null, "bar"),
 				new ConsumerRecord("foo", 0, 1, null, "bar")));
 		allRecordMap.put(tp1, List.of(new ConsumerRecord("foo", 1, 0, null, "bar"),
 				new ConsumerRecord("foo", 1, 1, null, "bar")));
 		ConsumerRecords<Integer, String> allRecords = new ConsumerRecords<>(allRecordMap);
-		List<TopicPartition> afterRevokeAssignments = Arrays.asList(tp1);
 
 		AtomicInteger pollPhase = new AtomicInteger();
 		Consumer<Integer, String> consumer = mock(Consumer.class);
@@ -1225,9 +1232,7 @@ void pruneRevokedPartitionsFromPendingOutOfOrderCommitsCoopAssignor() throws Int
 			return null;
 		}).given(consumer).subscribe(any(Collection.class), any());
 		CountDownLatch pauseLatch = new CountDownLatch(1);
-		AtomicBoolean paused = new AtomicBoolean();
 		willAnswer(inv -> {
-			paused.set(true);
 			pauseLatch.countDown();
 			return null;
 		}).given(consumer).pause(any());
@@ -1249,17 +1254,20 @@ void pruneRevokedPartitionsFromPendingOutOfOrderCommitsCoopAssignor() throws Int
 			Thread.sleep(50);
 			pollLatch.countDown();
 			switch (pollPhase.getAndIncrement()) {
-				case 0:
+				case 0 -> {
 					rebal.get().onPartitionsAssigned(allAssignments);
 					return allRecords;
-				case 1:
+				}
+				case 1 -> {
 					rebal.get().onPartitionsRevoked(List.of(tp0));
 					rebal.get().onPartitionsAssigned(List.of(new TopicPartition("foo", 2)));
 					rebalLatch.countDown();
 					continueLatch.await(10, TimeUnit.SECONDS);
 					return ConsumerRecords.empty();
-				default:
+				}
+				default -> {
 					return ConsumerRecords.empty();
+				}
 			}
 		}).given(consumer).poll(any());
 		container.start();
@@ -1285,7 +1293,7 @@ private AcknowledgingMessageListener ackOffset1() {
 
 			@Override
 			public void onMessage(ConsumerRecord rec, @Nullable Acknowledgment ack) {
-				if (rec.offset() == 1) {
+				if (rec.offset() == 1 && ack != null) {
 					ack.acknowledge();
 				}
 			}
@@ -1299,7 +1307,7 @@ public void onMessage(Object data) {
 
 	public static class TestMessageListener1 implements MessageListener, ConsumerSeekAware {
 
-		private static ThreadLocal<ConsumerSeekCallback> callbacks = new ThreadLocal<>();
+		private static final ThreadLocal<ConsumerSeekCallback> callbacks = new ThreadLocal<>();
 
 		CountDownLatch latch = new CountDownLatch(1);
@@ -1335,7 +1343,7 @@ public void onIdleContainer(Map assignments, ConsumerSeekC
 
 	public static class TestMessageListener2 implements MessageListener, ConsumerSeekAware {
 
-		private static ThreadLocal<ConsumerSeekCallback> callbacks = new ThreadLocal<>();
+		private static final ThreadLocal<ConsumerSeekCallback> callbacks = new ThreadLocal<>();
 
 		CountDownLatch latch = new CountDownLatch(1);
@@ -1374,4 +1382,80 @@ public void onIdleContainer(Map assignments, ConsumerSeekC
 
 	}
 
+	public static class TestMessageListener3 implements MessageListener, ConsumerSeekAware {
+
+		private static final ThreadLocal<ConsumerSeekCallback> callbacks = new ThreadLocal<>();
+
+		CountDownLatch latch = new CountDownLatch(2);
+
+		@Override
+		public void onMessage(ConsumerRecord data) {
+
+		}
+
+		@Override
+		public void registerSeekCallback(ConsumerSeekCallback callback) {
+			callbacks.set(callback);
+		}
+
+		@Override
+		public void onPartitionsAssigned(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) {
+			if (latch.getCount() > 0) {
+				int absoluteTarget1 = 20;
+				int absoluteTarget2 = 21;
+				int absoluteTarget3 = 22;
+				int absoluteTarget4 = 23;
+				callback.seek("test-topic", 0, current -> current > absoluteTarget1 ? absoluteTarget1 : current);
+				callback.seek("test-topic", 1, current -> current > absoluteTarget2 ? absoluteTarget2 : current);
+				callback.seek("test-topic", 2, current -> current > absoluteTarget3 ? absoluteTarget3 : current);
+				callback.seek("test-topic", 3, current -> current > absoluteTarget4 ? absoluteTarget4 : current);
+			}
+			this.latch.countDown();
+		}
+
+		@Override
+		public void onIdleContainer(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) {
+			if (latch.getCount() > 0) {
+				int absoluteTarget = 31;
+				callback.seek("test-topic", 0, current -> current > absoluteTarget ? absoluteTarget : current);
+				callback.seek("test-topic", 1, current -> current > absoluteTarget ? absoluteTarget : current);
+				callback.seek("test-topic", 2, current -> current > absoluteTarget ? absoluteTarget : current);
+				callback.seek("test-topic", 3, current -> current > absoluteTarget ? absoluteTarget : current);
+			}
+			this.latch.countDown();
+		}
+
+	}
+
+	public static class TestMessageListener4 implements MessageListener, ConsumerSeekAware {
+
+		private static final ThreadLocal<ConsumerSeekCallback> callbacks = new ThreadLocal<>();
+
+		CountDownLatch latch = new CountDownLatch(1);
+
+		@Override
+		public void onMessage(ConsumerRecord data) {
+			ConsumerSeekCallback callback = callbacks.get();
+			if (latch.getCount() > 0) {
+
+				int absoluteTarget1 = 20;
+				int absoluteTarget2 = 21;
+				int absoluteTarget3 = 22;
+				int absoluteTarget4 = 23;
+
+				callback.seek("test-topic", 0, current -> current > absoluteTarget1 ? absoluteTarget1 : current);
+				callback.seek("test-topic", 1, current -> current > absoluteTarget2 ? absoluteTarget2 : current);
+				callback.seek("test-topic", 2, current -> current > absoluteTarget3 ? absoluteTarget3 : current);
+				callback.seek("test-topic", 3, current -> current > absoluteTarget4 ? absoluteTarget4 : current);
+			}
+			this.latch.countDown();
+		}
+
+		@Override
+		public void registerSeekCallback(ConsumerSeekCallback callback) {
+			callbacks.set(callback);
+		}
+
+	}
+
 }
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/ConcurrentMessageListenerContainerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/ConcurrentMessageListenerContainerTests.java
index bfff9e77dc..bc0e1044d6 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/listener/ConcurrentMessageListenerContainerTests.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/ConcurrentMessageListenerContainerTests.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2016-2023 the original author or authors.
+ * Copyright 2016-2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
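The TestMessageListener3 and TestMessageListener4 fixtures added above exercise the Function-based seek overload on ConsumerSeekAware.ConsumerSeekCallback: the function receives the consumer's current position for the partition and returns the offset to seek to, so returning the current value makes the seek a no-op. A minimal production-style sketch of the same pattern, assuming an AbstractConsumerSeekAware subclass; the class name, topic, and offset cap are illustrative, not part of the patch:

import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;

import org.springframework.kafka.listener.AbstractConsumerSeekAware;
import org.springframework.kafka.listener.MessageListener;

public class CappedStartListener extends AbstractConsumerSeekAware
		implements MessageListener<String, String> {

	private static final long MAX_START_OFFSET = 20; // illustrative cap

	@Override
	public void onPartitionsAssigned(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) {
		super.onPartitionsAssigned(assignments, callback); // keep the base class callback bookkeeping
		// Rewind any partition whose current position is past the cap; otherwise stay put.
		assignments.keySet().forEach(tp -> callback.seek(tp.topic(), tp.partition(),
				current -> current > MAX_START_OFFSET ? MAX_START_OFFSET : current));
	}

	@Override
	public void onMessage(ConsumerRecord<String, String> data) {
		// process the record
	}

}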
@@ -16,12 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.BDDMockito.given; -import static org.mockito.Mockito.mock; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -59,6 +53,8 @@ import org.springframework.kafka.core.DefaultKafkaProducerFactory; import org.springframework.kafka.core.KafkaTemplate; import org.springframework.kafka.core.ProducerFactory; +import org.springframework.kafka.event.ConcurrentContainerStoppedEvent; +import org.springframework.kafka.event.ConsumerStoppedEvent; import org.springframework.kafka.event.ContainerStoppedEvent; import org.springframework.kafka.event.KafkaEvent; import org.springframework.kafka.support.TopicPartitionOffset; @@ -67,6 +63,14 @@ import org.springframework.kafka.test.context.EmbeddedKafka; import org.springframework.kafka.test.utils.ContainerTestUtils; import org.springframework.kafka.test.utils.KafkaTestUtils; +import org.springframework.lang.Nullable; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.mock; /** * @author Gary Russell @@ -75,15 +79,17 @@ * @author Marius Bogoevici * @author Artem Yakshin * @author Vladimir Tsanev + * @author Soby Chacko + * @author Lokesh Alamuri */ -@EmbeddedKafka(topics = { ConcurrentMessageListenerContainerTests.topic1, +@EmbeddedKafka(topics = {ConcurrentMessageListenerContainerTests.topic1, ConcurrentMessageListenerContainerTests.topic2, ConcurrentMessageListenerContainerTests.topic4, ConcurrentMessageListenerContainerTests.topic5, ConcurrentMessageListenerContainerTests.topic6, ConcurrentMessageListenerContainerTests.topic7, ConcurrentMessageListenerContainerTests.topic8, ConcurrentMessageListenerContainerTests.topic9, ConcurrentMessageListenerContainerTests.topic10, ConcurrentMessageListenerContainerTests.topic11, - ConcurrentMessageListenerContainerTests.topic12 }, - brokerProperties = "group.initial.rebalance.delay.ms:500") + ConcurrentMessageListenerContainerTests.topic12, ConcurrentMessageListenerContainerTests.topic13}, + brokerProperties = "group.initial.rebalance.delay.ms:500") public class ConcurrentMessageListenerContainerTests { private final LogAccessor logger = new LogAccessor(LogFactory.getLog(this.getClass())); @@ -110,6 +116,8 @@ public class ConcurrentMessageListenerContainerTests { public static final String topic12 = "testTopic12"; + public static final String topic13 = "testTopic13"; + private static EmbeddedKafkaBroker embeddedKafka; @BeforeAll @@ -155,11 +163,15 @@ protected Consumer createKafkaConsumer(String groupId, String c container.setChangeConsumerThreadName(true); BlockingQueue events = new LinkedBlockingQueue<>(); CountDownLatch stopLatch = new CountDownLatch(4); + CountDownLatch concurrentContainerStopLatch = new CountDownLatch(1); container.setApplicationEventPublisher(e -> { events.add((KafkaEvent) e); if (e instanceof ContainerStoppedEvent) { stopLatch.countDown(); } + if (e instanceof ConcurrentContainerStoppedEvent) { + concurrentContainerStopLatch.countDown(); + } }); CountDownLatch intercepted = new CountDownLatch(4); container.setRecordInterceptor((record, consumer) -> { @@ 
-198,11 +210,14 @@ protected Consumer createKafkaConsumer(String groupId, String c assertThat(container.metrics()).isNotNull(); Set> children = new HashSet<>(containers); assertThat(container.isInExpectedState()).isTrue(); - container.getContainers().get(0).stopAbnormally(() -> { }); + MessageListenerContainer childContainer = container.getContainers().get(0); + container.getContainers().get(0).stopAbnormally(() -> { + }); assertThat(container.isInExpectedState()).isFalse(); container.getContainers().get(0).start(); container.stop(); assertThat(stopLatch.await(10, TimeUnit.SECONDS)).isTrue(); + assertThat(concurrentContainerStopLatch.await(10, TimeUnit.SECONDS)).isTrue(); assertThat(container.isInExpectedState()).isTrue(); events.forEach(e -> { assertThat(e.getContainer(MessageListenerContainer.class)).isSameAs(container); @@ -214,12 +229,22 @@ protected Consumer createKafkaConsumer(String groupId, String c assertThat(children).contains((KafkaMessageListenerContainer) e.getSource()); } } + else if (e instanceof ConcurrentContainerStoppedEvent concurrentContainerStoppedEvent) { + assertThat(concurrentContainerStoppedEvent.getSource()).isSameAs(container); + assertThat(concurrentContainerStoppedEvent.getContainer(MessageListenerContainer.class)) + .isSameAs(container); + assertThat(concurrentContainerStoppedEvent.getReason()).isEqualTo(ConsumerStoppedEvent.Reason.NORMAL); + } else { assertThat(children).contains((KafkaMessageListenerContainer) e.getSource()); } }); assertThat(overrides.get().getProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)).isNull(); this.logger.info("Stop auto"); + assertThat(childContainer.isRunning()).isFalse(); + assertThat(container.isRunning()).isFalse(); + // Fenced container. Throws exception + assertThatExceptionOfType(IllegalStateException.class).isThrownBy(() -> childContainer.start()); } @Test @@ -230,13 +255,13 @@ public void testAutoCommitWithRebalanceListener() throws Exception { DefaultKafkaConsumerFactory cf = new DefaultKafkaConsumerFactory(props) { @Override - protected Consumer createKafkaConsumer(String groupId, String clientIdPrefix, - String clientIdSuffixArg, Properties properties) { + protected Consumer createKafkaConsumer(@Nullable String groupId, @Nullable String clientIdPrefix, + @Nullable String clientIdSuffixArg, @Nullable Properties properties) { overrides.set(properties); Consumer created = super.createKafkaConsumer(groupId, clientIdPrefix, clientIdSuffixArg, properties); - assertThat(KafkaTestUtils.getPropertyValue(created, "requestTimeoutMs", Long.class)).isEqualTo(23000L); + assertThat(KafkaTestUtils.getPropertyValue(created, "delegate.requestTimeoutMs", Integer.class)).isEqualTo(23000); return created; } @@ -569,18 +594,19 @@ public void testConcurrencyWithPartitions() { Consumer consumer = mock(Consumer.class); given(cf.createConsumer(anyString(), anyString(), anyString(), any())).willReturn(consumer); given(consumer.poll(any(Duration.class))) - .willAnswer(new Answer>() { + .willAnswer(new Answer>() { - @Override - public ConsumerRecords answer(InvocationOnMock invocation) throws Throwable { - Thread.sleep(100); - return null; - } + @Override + public ConsumerRecords answer(InvocationOnMock invocation) throws Throwable { + Thread.sleep(100); + return null; + } - }); + }); ContainerProperties containerProps = new ContainerProperties(topic1PartitionS); containerProps.setGroupId("grp"); - containerProps.setMessageListener((MessageListener) message -> { }); + containerProps.setMessageListener((MessageListener) message -> { + }); 
containerProps.setMissingTopicsFatal(false); ConcurrentMessageListenerContainer container = @@ -650,7 +676,6 @@ public boolean handleOne(Exception thrownException, ConsumerRecord record, } - @Test public void testAckOnErrorRecord() throws Exception { logger.info("Start ack on error"); @@ -730,7 +755,7 @@ public boolean seeksAfterHandling() { } @Test - public void testAckOnErrorManualImmediate() throws Exception { + public void testAckOnErrorManualImmediate() throws Exception { //ackOnError should not affect manual commits testAckOnErrorWithManualImmediateGuts(topic10, true); testAckOnErrorWithManualImmediateGuts(topic11, false); @@ -795,4 +820,279 @@ private void testAckOnErrorWithManualImmediateGuts(String topic, boolean ackOnEr logger.info("Stop ack on error with ManualImmediate ack mode"); } + @Test + public void testIsChildRunning() throws Exception { + this.logger.info("Start isChildRunning"); + Map props = KafkaTestUtils.consumerProps("test1", "true", + embeddedKafka); + DefaultKafkaConsumerFactory cf = new DefaultKafkaConsumerFactory<>(props) { + + @Override + protected Consumer createKafkaConsumer(String groupId, String clientIdPrefix, + String clientIdSuffixArg, Properties properties) { + return super.createKafkaConsumer(groupId, clientIdPrefix, clientIdSuffixArg, properties); + } + }; + ContainerProperties containerProps = new ContainerProperties(topic13); + containerProps.setLogContainerConfig(true); + containerProps.setClientId("client"); + containerProps.setAckMode(ContainerProperties.AckMode.RECORD); + + final CountDownLatch secondRunLatch = new CountDownLatch(5); + final Set listenerThreadNames = new ConcurrentSkipListSet<>(); + final List payloads = new ArrayList<>(); + final CountDownLatch processingLatch = new CountDownLatch(1); + final CountDownLatch firstLatch = new CountDownLatch(1); + + AtomicBoolean first = new AtomicBoolean(true); + + containerProps.setMessageListener((MessageListener) message -> { + if (first.getAndSet(false)) { + try { + firstLatch.await(100, TimeUnit.SECONDS); + } + catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + ConcurrentMessageListenerContainerTests.this.logger.info("auto: " + message); + listenerThreadNames.add(Thread.currentThread().getName()); + payloads.add(message.value()); + secondRunLatch.countDown(); + processingLatch.countDown(); + }); + + ConcurrentMessageListenerContainer container = + new ConcurrentMessageListenerContainer<>(cf, containerProps); + container.setConcurrency(2); + container.setBeanName("testAuto"); + container.setChangeConsumerThreadName(true); + BlockingQueue events = new LinkedBlockingQueue<>(); + CountDownLatch concurrentContainerStopLatch = new CountDownLatch(1); + CountDownLatch concurrentContainerSecondStopLatch = new CountDownLatch(1); + CountDownLatch consumerStoppedEventLatch = new CountDownLatch(1); + + container.setApplicationEventPublisher(e -> { + events.add((KafkaEvent) e); + if (e instanceof ConcurrentContainerStoppedEvent) { + concurrentContainerStopLatch.countDown(); + concurrentContainerSecondStopLatch.countDown(); + } + if (e instanceof ConsumerStoppedEvent) { + consumerStoppedEventLatch.countDown(); + } + }); + + CountDownLatch interceptedSecondRun = new CountDownLatch(5); + container.setRecordInterceptor((record, consumer) -> { + interceptedSecondRun.countDown(); + return record; + }); + + container.start(); + + MessageListenerContainer childContainer0 = container.getContainers().get(0); + MessageListenerContainer childContainer1 = container.getContainers().get(1); + + 
ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic()); + assertThat(container.getAssignedPartitions()).hasSize(2); + Map> assignments = container.getAssignmentsByClientId(); + assertThat(assignments).hasSize(2); + assertThat(assignments.get("client-0")).isNotNull(); + assertThat(assignments.get("client-1")).isNotNull(); + + Map senderProps = KafkaTestUtils.producerProps(embeddedKafka); + ProducerFactory pf = new DefaultKafkaProducerFactory<>(senderProps); + KafkaTemplate template = new KafkaTemplate<>(pf); + template.setDefaultTopic(topic13); + template.sendDefault(0, 0, "foo"); + template.sendDefault(1, 2, "bar"); + template.sendDefault(0, 0, "baz"); + template.sendDefault(1, 2, "qux"); + template.flush(); + + assertThat(container.metrics()).isNotNull(); + assertThat(container.isInExpectedState()).isTrue(); + assertThat(childContainer0.isRunning()).isTrue(); + assertThat(childContainer1.isRunning()).isTrue(); + assertThat(container.isChildRunning()).isTrue(); + + assertThat(processingLatch.await(60, TimeUnit.SECONDS)).isTrue(); + + container.stop(); + + assertThat(container.isChildRunning()).isTrue(); + assertThat(container.isRunning()).isFalse(); + assertThat(childContainer0.isRunning()).isFalse(); + assertThat(childContainer1.isRunning()).isFalse(); + + assertThat(consumerStoppedEventLatch.await(30, TimeUnit.SECONDS)).isTrue(); + + // This returns true since one container is still processing message. Key validation for this test case. + assertThat(container.isChildRunning()).isTrue(); + + firstLatch.countDown(); + + assertThat(listenerThreadNames).containsAnyOf("testAuto-0", "testAuto-1"); + + assertThat(concurrentContainerStopLatch.await(10, TimeUnit.SECONDS)).isTrue(); + assertThat(container.isInExpectedState()).isTrue(); + events.forEach(e -> { + assertThat(e.getContainer(MessageListenerContainer.class)).isSameAs(container); + if (e instanceof ConcurrentContainerStoppedEvent concurrentContainerStoppedEvent) { + assertThat(concurrentContainerStoppedEvent.getSource()).isSameAs(container); + assertThat(concurrentContainerStoppedEvent.getContainer(MessageListenerContainer.class)) + .isSameAs(container); + assertThat(concurrentContainerStoppedEvent.getReason()). 
+ isEqualTo(ConsumerStoppedEvent.Reason.NORMAL); + } + }); + assertThat(container.isChildRunning()).isFalse(); + assertThat(payloads).containsAnyOf("foo", "bar", "qux", "baz"); + + template.sendDefault(0, 0, "FOO"); + template.sendDefault(1, 2, "BAR"); + template.sendDefault(0, 0, "BAZ"); + template.sendDefault(1, 2, "QUX"); + template.flush(); + + container.start(); + + assertThat(secondRunLatch.await(60, TimeUnit.SECONDS)).isTrue(); + assertThat(interceptedSecondRun.await(10, TimeUnit.SECONDS)).isTrue(); + + container.stop(); + assertThat(concurrentContainerSecondStopLatch.await(30, TimeUnit.SECONDS)).isTrue(); + assertThat(payloads).containsAnyOf("FOO", "BAR", "QUX", "BAZ"); + + this.logger.info("Stop isChildRunning"); + } + + @Test + public void testContainerStartStop() throws Exception { + this.logger.info("Start containerStartStop"); + Map props = KafkaTestUtils.consumerProps("test1", "true", + embeddedKafka); + AtomicReference overrides = new AtomicReference<>(); + DefaultKafkaConsumerFactory cf = new DefaultKafkaConsumerFactory(props) { + + @Override + protected Consumer createKafkaConsumer(String groupId, String clientIdPrefix, + String clientIdSuffixArg, Properties properties) { + overrides.set(properties); + return super.createKafkaConsumer(groupId, clientIdPrefix, clientIdSuffixArg, properties); + } + }; + ContainerProperties containerProps = new ContainerProperties(topic1); + containerProps.setLogContainerConfig(true); + containerProps.setClientId("client"); + containerProps.setAckMode(ContainerProperties.AckMode.RECORD); + + final List payloads = new ArrayList<>(); + + containerProps.setMessageListener((MessageListener) message -> { + payloads.add(message.value()); + }); + + ConcurrentMessageListenerContainer container = + new ConcurrentMessageListenerContainer<>(cf, containerProps); + container.setConcurrency(2); + container.setBeanName("testAuto"); + container.setChangeConsumerThreadName(true); + BlockingQueue events = new LinkedBlockingQueue<>(); + CountDownLatch concurrentContainerStopLatch = new CountDownLatch(1); + CountDownLatch concurrentContainerSecondStopLatch = new CountDownLatch(2); + CountDownLatch consumerStoppedEventLatch = new CountDownLatch(1); + + container.setApplicationEventPublisher(e -> { + events.add((KafkaEvent) e); + if (e instanceof ConcurrentContainerStoppedEvent) { + concurrentContainerStopLatch.countDown(); + concurrentContainerSecondStopLatch.countDown(); + } + if (e instanceof ConsumerStoppedEvent) { + consumerStoppedEventLatch.countDown(); + } + }); + + container.setCommonErrorHandler(null); + + container.start(); + + KafkaMessageListenerContainer childContainer0 = container.getContainers().get(0); + KafkaMessageListenerContainer childContainer1 = container.getContainers().get(1); + + ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic()); + assertThat(container.getAssignedPartitions()).hasSize(2); + Map> assignments = container.getAssignmentsByClientId(); + assertThat(assignments).hasSize(2); + assertThat(assignments.get("client-0")).isNotNull(); + assertThat(assignments.get("client-1")).isNotNull(); + + Map senderProps = KafkaTestUtils.producerProps(embeddedKafka); + ProducerFactory pf = new DefaultKafkaProducerFactory<>(senderProps); + + KafkaTemplate template = new KafkaTemplate<>(pf); + template.setDefaultTopic(topic1); + template.sendDefault(0, 0, "foo"); + template.sendDefault(1, 2, "bar"); + template.sendDefault(0, 0, "baz"); + template.sendDefault(1, 2, "qux"); + template.flush(); + + 
assertThat(container.metrics()).isNotNull(); + assertThat(container.isInExpectedState()).isTrue(); + assertThat(childContainer0.isRunning()).isTrue(); + assertThat(childContainer1.isRunning()).isTrue(); + assertThat(container.isChildRunning()).isTrue(); + + childContainer0.stop(); + + assertThat(consumerStoppedEventLatch.await(30, TimeUnit.SECONDS)).isTrue(); + + assertThat(container.isChildRunning()).isTrue(); + assertThat(childContainer1.isRunning()).isTrue(); + assertThat(childContainer0.isRunning()).isFalse(); + assertThat(container.isRunning()).isTrue(); + + //Ignore this start + container.start(); + + assertThat(container.isChildRunning()).isTrue(); + assertThat(childContainer1.isRunning()).isTrue(); + assertThat(childContainer0.isRunning()).isFalse(); + assertThat(container.getContainers()). + contains(childContainer0); + assertThat(container.getContainers()). + contains(childContainer1); + + container.stop(); + + assertThat(container.isRunning()).isFalse(); + // child container1 is stopped. + assertThat(childContainer1.isRunning()).isFalse(); + assertThat(childContainer0.isRunning()).isFalse(); + + assertThat(concurrentContainerStopLatch.await(30, TimeUnit.SECONDS)).isTrue(); + + assertThat(container.getContainers()). + doesNotContain(childContainer0); + assertThat(container.getContainers()). + doesNotContain(childContainer1); + + // Accept this start + container.start(); + assertThat(container.getContainers()). + doesNotContain(childContainer0); + assertThat(container.getContainers()). + doesNotContain(childContainer1); + + container.getContainers().forEach(containerForEach -> containerForEach.stop()); + assertThat(container.getContainers()).isNotEmpty(); + container.stop(); + assertThat(concurrentContainerSecondStopLatch.await(30, TimeUnit.SECONDS)).isTrue(); + + this.logger.info("Stop containerStartStop"); + } + } diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/ConsumerAwareRebalanceListenerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/ConsumerAwareRebalanceListenerTests.java index 605f5b3322..2044f5cd1a 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/ConsumerAwareRebalanceListenerTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/ConsumerAwareRebalanceListenerTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
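The new assertions above pin down two behaviors of ConcurrentMessageListenerContainer: a ConcurrentContainerStoppedEvent is published only after every child container has stopped (isChildRunning() stays true while a child thread is still draining in-flight work), and a child stopped together with its parent is fenced, so calling start() on it directly throws IllegalStateException. A sketch of consuming the new event in application code; the bean and logger names are illustrative:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.springframework.context.ApplicationListener;
import org.springframework.kafka.event.ConcurrentContainerStoppedEvent;
import org.springframework.stereotype.Component;

@Component
public class StoppedEventLogger implements ApplicationListener<ConcurrentContainerStoppedEvent> {

	private static final Logger LOG = LoggerFactory.getLogger(StoppedEventLogger.class);

	@Override
	public void onApplicationEvent(ConcurrentContainerStoppedEvent event) {
		// getReason() mirrors ConsumerStoppedEvent.Reason (NORMAL for a clean stop()).
		LOG.info("All child containers of {} stopped; reason: {}", event.getSource(), event.getReason());
	}

}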
@@ -16,14 +16,14 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; - import java.util.Collection; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.kafka.common.TopicPartition; import org.junit.jupiter.api.Test; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Gary Russell * @author Michal Domagala @@ -46,7 +46,6 @@ public void onPartitionsAssigned(Collection partitions) { assertThat(called.get()).isTrue(); } - @Test void nonConsumerAwareTestRevoked() { AtomicBoolean called = new AtomicBoolean(); @@ -61,7 +60,6 @@ public void onPartitionsRevoked(Collection partitions) { assertThat(called.get()).isTrue(); } - @Test void nonConsumerAwareTestLost() { AtomicBoolean called = new AtomicBoolean(); diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/ConsumerSeekAwareTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/ConsumerSeekAwareTests.java index 1442578917..67ec34823a 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/ConsumerSeekAwareTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/ConsumerSeekAwareTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2020-2023 the original author or authors. + * Copyright 2020-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,10 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; - import java.util.Collections; import java.util.LinkedHashMap; import java.util.LinkedList; @@ -34,8 +30,13 @@ import org.springframework.kafka.listener.ConsumerSeekAware.ConsumerSeekCallback; import org.springframework.kafka.test.utils.KafkaTestUtils; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + /** * @author Gary Russell + * @author Borahm Lee * @since 2.6 * */ @@ -46,6 +47,7 @@ public class ConsumerSeekAwareTests { void beginningEndAndBulkSeekToTimestamp() throws Exception { class CSA extends AbstractConsumerSeekAware { } + AbstractConsumerSeekAware csa = new CSA(); var exec1 = Executors.newSingleThreadExecutor(); var exec2 = Executors.newSingleThreadExecutor(); @@ -103,8 +105,8 @@ class CSA extends AbstractConsumerSeekAware { }; exec1.submit(revoke2).get(); exec2.submit(revoke2).get(); - assertThat(KafkaTestUtils.getPropertyValue(csa, "callbacks", Map.class)).isEmpty(); - assertThat(KafkaTestUtils.getPropertyValue(csa, "callbacksToTopic", Map.class)).isEmpty(); + assertThat(KafkaTestUtils.getPropertyValue(csa, "topicToCallbacks", Map.class)).isEmpty(); + assertThat(KafkaTestUtils.getPropertyValue(csa, "callbackToTopics", Map.class)).isEmpty(); var checkTL = (Callable) () -> { csa.unregisterSeekCallback(); assertThat(KafkaTestUtils.getPropertyValue(csa, "callbackForThread", Map.class).get(Thread.currentThread())) diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/ContainerCustomizationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/ContainerCustomizationTests.java index b93ff04406..73ed759cff 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/ContainerCustomizationTests.java +++ 
b/spring-kafka/src/test/java/org/springframework/kafka/listener/ContainerCustomizationTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2019-2023 the original author or authors. + * Copyright 2019-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,9 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; - import java.nio.charset.StandardCharsets; import java.util.stream.Stream; @@ -39,10 +36,14 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; + /** * Tests for container customizations. * * @author Francois Rosiere + * @author Soby Chacko * @since 3.1 */ @SuppressWarnings("unused") @@ -129,7 +130,7 @@ public void postProcessor(String foo) { id = CONTAINER_CUSTOMIZER_AND_POST_PROCESSOR, topics = TOPIC, containerFactory = "containerFactoryWithCustomizer", - containerPostProcessor = "infoContainerPostProcessor") + containerPostProcessor = "#{__listener.infoContainerPostProcessor}") public void containerCustomizerAndPostProcessor(String foo) { } diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/ContainerEnforceRebalanceTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/ContainerEnforceRebalanceTests.java index 8c6215ec7a..1dd127a24f 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/ContainerEnforceRebalanceTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/ContainerEnforceRebalanceTests.java @@ -16,9 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; - import java.time.Duration; import java.util.Collection; import java.util.concurrent.CountDownLatch; @@ -47,6 +44,9 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; + /** * @author Soby Chacko * @since 3.1.2 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/ContainerGroupSequencerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/ContainerGroupSequencerTests.java index fd08de1177..c476913167 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/ContainerGroupSequencerTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/ContainerGroupSequencerTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2021 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
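The ContainerCustomizationTests change a few hunks up switches the containerPostProcessor attribute from a plain bean name to a SpEL expression resolved against the listener bean itself via the __listener pseudo-variable. A hedged sketch of that style, assuming ContainerPostProcessor keeps the three-parameter generic shape; the listener id, topic, and getter name are illustrative:

import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.config.ContainerPostProcessor;
import org.springframework.kafka.listener.AbstractMessageListenerContainer;
import org.springframework.stereotype.Component;

@Component
public class PostProcessedListener {

	// Exposed as a getter so "#{__listener.postProcessor}" can resolve it on this bean.
	public ContainerPostProcessor<String, String, AbstractMessageListenerContainer<String, String>> getPostProcessor() {
		return container -> container.getContainerProperties().setLogContainerConfig(true);
	}

	@KafkaListener(id = "ppListener", topics = "pp-topic",
			containerPostProcessor = "#{__listener.postProcessor}")
	public void listen(String in) {
		// handle the record
	}

}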
@@ -16,8 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; - import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -48,6 +46,8 @@ import org.springframework.kafka.test.utils.KafkaTestUtils; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Gary Russell * @since 2.7.3 @@ -100,7 +100,6 @@ public static class Config { final List receivedAt = Collections.synchronizedList(new ArrayList<>()); - @KafkaListener(id = "one", topics = "ContainerGroupSequencerTests", containerGroup = "g1", concurrency = "2") public void listen1(String in) { LOGGER.debug(in); diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/ContainerPauseImmediateTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/ContainerPauseImmediateTests.java index f0433f3df2..4d10d6c0fa 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/ContainerPauseImmediateTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/ContainerPauseImmediateTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022-2023 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,14 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -60,6 +52,14 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell * @since 2.9 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/DeadLetterPublishingRecovererTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/DeadLetterPublishingRecovererTests.java index d3624dfab0..b9da1a86a1 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/DeadLetterPublishingRecovererTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/DeadLetterPublishingRecovererTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2020-2023 the original author or authors. + * Copyright 2020-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
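The ContainerGroupSequencerTests hunks above depend on listeners declaring containerGroup on @KafkaListener. A sketch of wiring the sequencer itself so that one group starts only after the previous group's containers go idle; the group names and the 5-second idle delay are illustrative, and the constructor shape follows the reference documentation:

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
import org.springframework.kafka.listener.ContainerGroupSequencer;

@Configuration
public class SequencerConfig {

	@Bean
	ContainerGroupSequencer sequencer(KafkaListenerEndpointRegistry registry) {
		// Start the "g1" containers first; "g2" starts after "g1" has been idle for 5 seconds.
		return new ContainerGroupSequencer(registry, 5000, "g1", "g2");
	}

}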
@@ -16,27 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.then; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.BDDMockito.willCallRealMethod; -import static org.mockito.BDDMockito.willReturn; -import static org.mockito.Mockito.atLeastOnce; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.ObjectOutputStream; -import java.io.UncheckedIOException; import java.time.Duration; import java.util.Collections; import java.util.HashMap; @@ -77,9 +56,27 @@ import org.springframework.kafka.support.serializer.SerializationUtils; import org.springframework.kafka.test.utils.KafkaTestUtils; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.then; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.BDDMockito.willCallRealMethod; +import static org.mockito.BDDMockito.willReturn; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + /** * @author Gary Russell * @author Tomaz Fernandes + * @author Soby Chacko * @since 2.4.3 * */ @@ -99,14 +96,14 @@ void testTxNoTx() { DeadLetterPublishingRecoverer recoverer = new DeadLetterPublishingRecoverer(template); ConsumerRecord record = new ConsumerRecord<>("foo", 0, 0L, "bar", "baz"); Consumer consumer = mock(Consumer.class); - given(consumer.partitionsFor("foo.DLT", Duration.ofSeconds(5))) + given(consumer.partitionsFor("foo-dlt", Duration.ofSeconds(5))) .willReturn(Collections.singletonList(new PartitionInfo("foo", 0, null, null, null))); recoverer.accept(record, consumer, new RuntimeException()); verify(template, never()).executeInTransaction(any()); ArgumentCaptor captor = ArgumentCaptor.forClass(ProducerRecord.class); verify(template).send(captor.capture()); assertThat(captor.getValue().partition()).isEqualTo(0); - verify(consumer).partitionsFor("foo.DLT", Duration.ofSeconds(5)); + verify(consumer).partitionsFor("foo-dlt", Duration.ofSeconds(5)); record = new ConsumerRecord<>("foo", 1, 0L, "bar", "baz"); recoverer.accept(record, consumer, new RuntimeException()); @@ -174,9 +171,9 @@ void valueHeaderStripped() { DeadLetterPublishingRecoverer recoverer = new DeadLetterPublishingRecoverer(template); Headers headers = new RecordHeaders(); headers.add(SerializationTestUtils.deserializationHeader(SerializationUtils.VALUE_DESERIALIZER_EXCEPTION_HEADER, - header(false))); + SerializationTestUtils.header(false))); 
headers.add(SerializationTestUtils.deserializationHeader(SerializationUtils.KEY_DESERIALIZER_EXCEPTION_HEADER, - header(true))); + SerializationTestUtils.header(true))); Headers custom = new RecordHeaders(); custom.add(new RecordHeader("foo", "bar".getBytes())); recoverer.setHeadersFunction((rec, ex) -> custom); @@ -206,7 +203,7 @@ void keyHeaderStripped() { DeadLetterPublishingRecoverer recoverer = new DeadLetterPublishingRecoverer(template); Headers headers = new RecordHeaders(); headers.add(SerializationTestUtils.deserializationHeader(SerializationUtils.KEY_DESERIALIZER_EXCEPTION_HEADER, - header(true))); + SerializationTestUtils.header(true))); CompletableFuture future = new CompletableFuture(); future.complete(new Object()); willReturn(future).given(template).send(any(ProducerRecord.class)); @@ -225,9 +222,9 @@ void keyDeserOnly() { KafkaOperations template = mock(KafkaOperations.class); DeadLetterPublishingRecoverer recoverer = new DeadLetterPublishingRecoverer(template); Headers headers = new RecordHeaders(); - DeserializationException deserEx = createDeserEx(true); + DeserializationException deserEx = SerializationTestUtils.createDeserEx(true); headers.add(SerializationTestUtils.deserializationHeader(SerializationUtils.KEY_DESERIALIZER_EXCEPTION_HEADER, - header(true, deserEx))); + SerializationTestUtils.header(deserEx))); CompletableFuture future = new CompletableFuture(); future.complete(new Object()); willReturn(future).given(template).send(any(ProducerRecord.class)); @@ -250,9 +247,9 @@ void headersNotStripped() { recoverer.setRetainExceptionHeader(true); Headers headers = new RecordHeaders(); headers.add(SerializationTestUtils.deserializationHeader(SerializationUtils.VALUE_DESERIALIZER_EXCEPTION_HEADER, - header(false))); + SerializationTestUtils.header(false))); headers.add(SerializationTestUtils.deserializationHeader(SerializationUtils.KEY_DESERIALIZER_EXCEPTION_HEADER, - header(true))); + SerializationTestUtils.header(true))); CompletableFuture future = new CompletableFuture(); future.complete(new Object()); willReturn(future).given(template).send(any(ProducerRecord.class)); @@ -302,27 +299,6 @@ void tombstoneWithMultiTemplatesExplicit() { verify(template2).send(any(ProducerRecord.class)); } - private byte[] header(boolean isKey) { - return header(isKey, createDeserEx(isKey)); - } - - private DeserializationException createDeserEx(boolean isKey) { - return new DeserializationException( - isKey ? "testK" : "testV", - isKey ? 
"key".getBytes() : "value".getBytes(), isKey, null); - } - - private byte[] header(boolean isKey, DeserializationException deserEx) { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - try { - new ObjectOutputStream(baos).writeObject(deserEx); - } - catch (IOException e) { - throw new UncheckedIOException(e); - } - return baos.toByteArray(); - } - @SuppressWarnings({"unchecked", "rawtypes"}) @Test void allOriginalHeaders() { diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultAfterRollbackProcessorTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultAfterRollbackProcessorTests.java index 5cbf0496dc..2ea0689671 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultAfterRollbackProcessorTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultAfterRollbackProcessorTests.java @@ -16,17 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -50,6 +39,17 @@ import org.springframework.util.backoff.BackOffExecution; import org.springframework.util.backoff.FixedBackOff; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + /** * @author Gary Russell * @author Francois Rosiere diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerBatchIntegrationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerBatchIntegrationTests.java index 98d1a46409..3d8396ae6e 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerBatchIntegrationTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerBatchIntegrationTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2020-2022 the original author or authors. + * Copyright 2020-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,8 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; - import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -46,6 +44,8 @@ import org.springframework.kafka.test.utils.KafkaTestUtils; import org.springframework.util.backoff.FixedBackOff; +import static org.assertj.core.api.Assertions.assertThat; + /** * {@link DefaultErrorHandler} tests for batch listeners. 
* @author Gary Russell @@ -61,11 +61,11 @@ public class DefaultErrorHandlerBatchIntegrationTests { public static final String topic1 = "dehTopic1"; - public static final String topic1DLT = "dehTopic1.DLT"; + public static final String topic1DLT = "dehTopic1-dlt"; public static final String topic2 = "dehTopic2"; - public static final String topic2DLT = "dehTopic2.DLT"; + public static final String topic2DLT = "dehTopic2-dlt"; private static EmbeddedKafkaBroker embeddedKafka; @@ -133,7 +133,7 @@ public void recoveryAndDlt() throws Exception { "baz", "qux", "fiz", "buz", "qux", "fiz", "buz"); - props.put(ConsumerConfig.GROUP_ID_CONFIG, "recoverBatch.dlt"); + props.put(ConsumerConfig.GROUP_ID_CONFIG, "recoverBatch-dlt"); DefaultKafkaConsumerFactory dltcf = new DefaultKafkaConsumerFactory<>(props); Consumer consumer = dltcf.createConsumer(); embeddedKafka.consumeFromAnEmbeddedTopic(consumer, topic1DLT); @@ -215,7 +215,7 @@ public void accept(ConsumerRecord record, Consumer consumer, Excepti "baz", "qux", "fiz", "buz", "qux", "fiz", "buz"); - props.put(ConsumerConfig.GROUP_ID_CONFIG, "recoverBatch2.dlt"); + props.put(ConsumerConfig.GROUP_ID_CONFIG, "recoverBatch2-dlt"); DefaultKafkaConsumerFactory dltcf = new DefaultKafkaConsumerFactory<>(props); Consumer consumer = dltcf.createConsumer(); embeddedKafka.consumeFromAnEmbeddedTopic(consumer, topic2DLT); diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerBatchTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerBatchTests.java index c02763451b..00c7c62e21 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerBatchTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerBatchTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2020-2022 the original author or authors. + * Copyright 2020-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,18 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatExceptionOfType; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -67,6 +55,18 @@ import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; import org.springframework.util.backoff.FixedBackOff; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + /** * {@link DefaultErrorHandler} tests for batch listeners. 
* diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerNoSeeksBatchAckTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerNoSeeksBatchAckTests.java index f66710e395..a1514bdc1c 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerNoSeeksBatchAckTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerNoSeeksBatchAckTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022-2023 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,14 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -61,6 +53,14 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell * @since 2.9 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerNoSeeksBatchListenerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerNoSeeksBatchListenerTests.java index 0af228ae86..c9b5448ac1 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerNoSeeksBatchListenerTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerNoSeeksBatchListenerTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022-2023 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
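Several hunks above (DeadLetterPublishingRecovererTests and DefaultErrorHandlerBatchIntegrationTests) now expect dead-letter topics suffixed with "-dlt" instead of the ".DLT" default. A hedged sketch of producing that naming with the destination-resolver constructor; the method and parameter names are illustrative:

import org.apache.kafka.common.TopicPartition;

import org.springframework.kafka.core.KafkaOperations;
import org.springframework.kafka.listener.DeadLetterPublishingRecoverer;

public class DltConfig {

	DeadLetterPublishingRecoverer recoverer(KafkaOperations<Object, Object> template) {
		// Route each failed record to "<topic>-dlt", keeping the original partition.
		return new DeadLetterPublishingRecoverer(template,
				(record, ex) -> new TopicPartition(record.topic() + "-dlt", record.partition()));
	}

}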
@@ -16,15 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.BDDMockito.willReturn; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -64,6 +55,15 @@ import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; import org.springframework.util.backoff.FixedBackOff; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.BDDMockito.willReturn; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell * @author Wang Zhiyang diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerNoSeeksRecordAckNoResumeContainerPausedTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerNoSeeksRecordAckNoResumeContainerPausedTests.java index e3df826388..3c322315df 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerNoSeeksRecordAckNoResumeContainerPausedTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerNoSeeksRecordAckNoResumeContainerPausedTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022-2023 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,17 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -64,6 +53,17 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; + /** * @author Gary Russell * @since 2.9 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerNoSeeksRecordAckNoResumePartitionTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerNoSeeksRecordAckNoResumePartitionTests.java index e57dd10644..3ed9100dc9 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerNoSeeksRecordAckNoResumePartitionTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerNoSeeksRecordAckNoResumePartitionTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022-2023 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,17 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -66,6 +55,17 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; + /** * @author Gary Russell * @since 2.9 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerNoSeeksRecordAckTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerNoSeeksRecordAckTests.java index 6c4fd1d3f5..0dc769b96e 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerNoSeeksRecordAckTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerNoSeeksRecordAckTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022-2023 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,17 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -67,6 +56,17 @@ import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; import org.springframework.util.backoff.FixedBackOff; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; + /** * @author Gary Russell * @since 2.9 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerRecordTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerRecordTests.java index 69fc54ebcf..84804d43c5 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerRecordTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerRecordTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2019-2023 the original author or authors. + * Copyright 2019-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,19 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatExceptionOfType; -import static org.assertj.core.api.Assertions.assertThatIllegalStateException; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -45,16 +32,29 @@ import org.junit.jupiter.api.Test; import org.mockito.InOrder; -import org.springframework.kafka.KafkaException; import org.springframework.kafka.support.converter.ConversionException; import org.springframework.kafka.support.serializer.DeserializationException; import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler; import org.springframework.util.backoff.FixedBackOff; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assertions.assertThatIllegalStateException; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + /** * {@link DefaultErrorHandler} tests for record listeners. 
* * @author Gary Russell + * @author Soby Chacko * @since 2.8 * */ @@ -155,7 +155,7 @@ public void recoveryFailed(ConsumerRecord record, Exception original, Exce List<ConsumerRecord<?, ?>> records = Arrays.asList(record1, record2); IllegalStateException illegalState = new IllegalStateException(); Consumer<?, ?> consumer = mock(Consumer.class); - assertThatExceptionOfType(KafkaException.class).isThrownBy(() -> handler.handleRemaining(illegalState, records, + assertThatExceptionOfType(RecordInRetryException.class).isThrownBy(() -> handler.handleRemaining(illegalState, records, consumer, mock(MessageListenerContainer.class))) .withCause(illegalState); handler.handleRemaining(new DeserializationException("intended", null, false, illegalState), records, @@ -214,7 +214,7 @@ void testEarlyExitBackOff() { MessageListenerContainer container = mock(MessageListenerContainer.class); given(container.isRunning()).willReturn(false); long t1 = System.currentTimeMillis(); - assertThatExceptionOfType(KafkaException.class).isThrownBy(() -> handler.handleRemaining(illegalState, + assertThatExceptionOfType(RecordInRetryException.class).isThrownBy(() -> handler.handleRemaining(illegalState, records, consumer, container)); assertThat(System.currentTimeMillis() < t1 + 5_000); } @@ -230,7 +230,7 @@ void testNoEarlyExitBackOff() { MessageListenerContainer container = mock(MessageListenerContainer.class); given(container.isRunning()).willReturn(true); long t1 = System.currentTimeMillis(); - assertThatExceptionOfType(KafkaException.class).isThrownBy(() -> handler.handleRemaining(illegalState, + assertThatExceptionOfType(RecordInRetryException.class).isThrownBy(() -> handler.handleRemaining(illegalState, records, consumer, container)); assertThat(System.currentTimeMillis() >= t1 + 200); } diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerSeekAfterCommitExceptionBatchAckTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerSeekAfterCommitExceptionBatchAckTests.java index 84bf2c1cb6..4436e12f3b 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerSeekAfterCommitExceptionBatchAckTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerSeekAfterCommitExceptionBatchAckTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
@@ -16,18 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -68,6 +56,18 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + /** * @author Gary Russell * @since 2.9 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerSeekAfterCommitExceptionBatchListenerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerSeekAfterCommitExceptionBatchListenerTests.java index 69f8624fbf..55374e805d 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerSeekAfterCommitExceptionBatchListenerTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerSeekAfterCommitExceptionBatchListenerTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,17 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.ArgumentMatchers.isNull; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.BDDMockito.willReturn; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -69,6 +58,17 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.ArgumentMatchers.isNull; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.BDDMockito.willReturn; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; + /** * @author Gary Russell * @since 2.9 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerSeekAfterCommitExceptionTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerSeekAfterCommitExceptionTests.java index 26db166fe8..49afaecda7 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerSeekAfterCommitExceptionTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/DefaultErrorHandlerSeekAfterCommitExceptionTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,18 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -69,6 +57,18 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + /** * @author Gary Russell * @since 2.9 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/DeliveryAttemptAwareRetryListenerIntegrationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/DeliveryAttemptAwareRetryListenerIntegrationTests.java new file mode 100644 index 0000000000..58c91cb749 --- /dev/null +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/DeliveryAttemptAwareRetryListenerIntegrationTests.java @@ -0,0 +1,269 @@ +/* + * Copyright 2019-2024 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.springframework.kafka.listener; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.common.header.Header; +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.kafka.annotation.EnableKafka; +import org.springframework.kafka.annotation.KafkaListener; +import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; +import org.springframework.kafka.core.ConsumerFactory; +import org.springframework.kafka.core.DefaultKafkaConsumerFactory; +import org.springframework.kafka.core.DefaultKafkaProducerFactory; +import org.springframework.kafka.core.KafkaTemplate; +import org.springframework.kafka.core.ProducerFactory; +import org.springframework.kafka.support.KafkaHeaders; +import org.springframework.kafka.test.EmbeddedKafkaBroker; +import org.springframework.kafka.test.context.EmbeddedKafka; +import org.springframework.kafka.test.utils.KafkaTestUtils; +import org.springframework.test.annotation.DirtiesContext; +import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import org.springframework.util.backoff.FixedBackOff; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +/** + * @author Sanghyeok An + * @since 3.3.0 + */ + +@SpringJUnitConfig +@DirtiesContext +@EmbeddedKafka +class DeliveryAttemptAwareRetryListenerIntegrationTests { + + static final String MAIN_TOPIC_CONTAINER_FACTORY0 = "deliveryMyTestKafkaListenerContainerFactory0"; + + static final String TEST_TOPIC0 = "myBatchDeliveryAttemptTopic0"; + + static final int MAX_ATTEMPT_COUNT0 = 3; + + static final CountDownLatch latch0 = new CountDownLatch(MAX_ATTEMPT_COUNT0 + 1); + + static final String MAIN_TOPIC_CONTAINER_FACTORY1 = "deliveryMyTestKafkaListenerContainerFactory1"; + + static final String TEST_TOPIC1 = "myBatchDeliveryAttemptTopic1"; + + static final int MAX_ATTEMPT_COUNT1 = 10; + + static final CountDownLatch latch1 = new CountDownLatch(MAX_ATTEMPT_COUNT1 + 1); + + @Autowired + private KafkaTemplate<String, String> kafkaTemplate; + + @Test + void should_have_delivery_attempt_header_in_each_consumer_record(@Autowired TestTopicListener0 listener) { + + // Given + String msg1 = "1"; + String msg2 = "2"; + String msg3 = "3"; + + // When + kafkaTemplate.send(TEST_TOPIC0, msg1); + kafkaTemplate.send(TEST_TOPIC0, msg2); + kafkaTemplate.send(TEST_TOPIC0, msg3); + + // Then + assertThat(awaitLatch(latch0)).isTrue(); + + Map<Integer, Integer> deliveryAttemptCountMap = convertToMap(listener.receivedHeaders); + + for (int attemptCnt = 1; attemptCnt <= MAX_ATTEMPT_COUNT0; attemptCnt++) { + assertThat(deliveryAttemptCountMap.get(attemptCnt)).isGreaterThan(0); + } + } + + @Test + void should_have_delivery_attempt_header_in_each_consumer_record_with_more_bigger_max_attempt(@Autowired TestTopicListener1 listener) { + // Given + String msg1 = "1"; + String msg2 = "2"; + String msg3 = "3"; + + // When + kafkaTemplate.send(TEST_TOPIC1, msg1); + kafkaTemplate.send(TEST_TOPIC1, msg2); + kafkaTemplate.send(TEST_TOPIC1, msg3); + + // Then + assertThat(awaitLatch(latch1)).isTrue(); + + Map<Integer, Integer> deliveryAttemptCountMap =
convertToMap(listener.receivedHeaders); + + for (int attemptCnt = 1; attemptCnt <= MAX_ATTEMPT_COUNT1; attemptCnt++) { + assertThat(deliveryAttemptCountMap.get(attemptCnt)).isGreaterThan(0); + } + } + + private Map<Integer, Integer> convertToMap(List<Header>
headers) { + Map<Integer, Integer> map = new HashMap<>(); + for (Header header : headers) { + int attemptCount = ByteBuffer.wrap(header.value()).getInt(); + Integer cnt = map.getOrDefault(attemptCount, 0); + map.put(attemptCount, cnt + 1); + } + return map; + } + + private boolean awaitLatch(CountDownLatch latch) { + try { + return latch.await(60, TimeUnit.SECONDS); + } + catch (Exception e) { + fail(e.getMessage()); + throw new RuntimeException(e); + } + } + + private static CommonErrorHandler createErrorHandler(int interval, int maxAttemptCount) { + FixedBackOff fixedBackOff = new FixedBackOff(interval, maxAttemptCount); + DefaultErrorHandler errorHandler = new DefaultErrorHandler(fixedBackOff); + errorHandler.setRetryListeners(new DeliveryAttemptAwareRetryListener()); + return errorHandler; + } + + private static ConcurrentKafkaListenerContainerFactory<String, String> createListenerContainerFactory( + ConsumerFactory<String, String> consumerFactory, CommonErrorHandler errorHandler) { + + ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>(); + factory.setConsumerFactory(consumerFactory); + factory.setCommonErrorHandler(errorHandler); + + ContainerProperties containerProperties = factory.getContainerProperties(); + containerProperties.setDeliveryAttemptHeader(true); + return factory; + }
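Taken together, the two helpers above are the whole recipe for the new DeliveryAttemptAwareRetryListener: register it as a retry listener on the DefaultErrorHandler, and enable the delivery-attempt header on the container. A minimal sketch of the same wiring as a single factory bean, assuming a String/String consumer factory (the bean name and back-off values here are illustrative, not taken from the patch):

@Bean
ConcurrentKafkaListenerContainerFactory<String, String> attemptAwareFactory(ConsumerFactory<String, String> consumerFactory) {
	// retry 3 times, 1 second apart, before giving up on a record
	DefaultErrorHandler errorHandler = new DefaultErrorHandler(new FixedBackOff(1000L, 3L));
	// stamps KafkaHeaders.DELIVERY_ATTEMPT on every retried record
	errorHandler.setRetryListeners(new DeliveryAttemptAwareRetryListener());
	ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
	factory.setConsumerFactory(consumerFactory);
	factory.setCommonErrorHandler(errorHandler);
	// the container must also be asked to populate the delivery-attempt header
	factory.getContainerProperties().setDeliveryAttemptHeader(true);
	return factory;
}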
+ + static class TestTopicListener0 { + final List<Header> receivedHeaders = new ArrayList<>(); + + @KafkaListener( + topics = TEST_TOPIC0, + containerFactory = MAIN_TOPIC_CONTAINER_FACTORY0, + batch = "true") + public void listen(List<ConsumerRecord<String, String>> records) { + for (ConsumerRecord<String, String> record : records) { + Iterable<Header>
headers = record.headers().headers(KafkaHeaders.DELIVERY_ATTEMPT); + for (Header header : headers) { + receivedHeaders.add(header); + } + } + latch0.countDown(); + throw new RuntimeException("Failed."); + } + } + + static class TestTopicListener1 { + final List<Header>
receivedHeaders = new ArrayList<>(); + + @KafkaListener( + topics = TEST_TOPIC1, + containerFactory = MAIN_TOPIC_CONTAINER_FACTORY1, + batch = "true") + public void listen(List<ConsumerRecord<String, String>> records) { + for (ConsumerRecord<String, String> record : records) { + Iterable<Header>
headers = record.headers().headers(KafkaHeaders.DELIVERY_ATTEMPT); + for (Header header : headers) { + receivedHeaders.add(header); + } + } + latch1.countDown(); + throw new RuntimeException("Failed."); + } + } + + @Configuration + static class TestConfiguration { + + @Bean + TestTopicListener0 testTopicListener0() { + return new TestTopicListener0(); + } + + @Bean + TestTopicListener1 testTopicListener1() { + return new TestTopicListener1(); + } + } + + @Configuration + static class KafkaProducerConfig { + + @Autowired + EmbeddedKafkaBroker broker; + + @Bean + ProducerFactory<String, String> producerFactory() { + Map<String, Object> props = KafkaTestUtils.producerProps( + this.broker.getBrokersAsString()); + return new DefaultKafkaProducerFactory<>(props); + } + + @Bean("customKafkaTemplate") + KafkaTemplate<String, String> kafkaTemplate() { + return new KafkaTemplate<>(producerFactory()); + } + } + + @EnableKafka + @Configuration + static class KafkaConsumerConfig { + + @Autowired + EmbeddedKafkaBroker broker; + + @Bean + ConsumerFactory<String, String> consumerFactory() { + Map<String, Object> props = KafkaTestUtils.consumerProps( + this.broker.getBrokersAsString(), + "DeliveryAttemptAwareRetryListenerIntegrationTestsGroupId", + "true"); + return new DefaultKafkaConsumerFactory<>(props); + } + + @Bean + ConcurrentKafkaListenerContainerFactory<String, String> + deliveryMyTestKafkaListenerContainerFactory0(ConsumerFactory<String, String> consumerFactory) { + CommonErrorHandler errorHandler = createErrorHandler(1, MAX_ATTEMPT_COUNT0); + return createListenerContainerFactory(consumerFactory, errorHandler); + } + + @Bean + ConcurrentKafkaListenerContainerFactory<String, String> + deliveryMyTestKafkaListenerContainerFactory1(ConsumerFactory<String, String> consumerFactory) { + CommonErrorHandler errorHandler = createErrorHandler(1, MAX_ATTEMPT_COUNT1); + return createListenerContainerFactory(consumerFactory, errorHandler); + } + + } + +} diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/DeliveryAttemptAwareRetryListenerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/DeliveryAttemptAwareRetryListenerTests.java new file mode 100644 index 0000000000..184c159e7c --- /dev/null +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/DeliveryAttemptAwareRetryListenerTests.java @@ -0,0 +1,139 @@ +/* + * Copyright 2019-2024 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.springframework.kafka.listener; + +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.header.Header; +import org.junit.jupiter.api.Test; + +import org.springframework.kafka.support.KafkaHeaders; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * @author Sanghyeok An + * @since 3.3 + */ + +class DeliveryAttemptAwareRetryListenerTests { + + @Test + void should_have_single_header_and_header_value_should_be_1() { + // Given + TopicPartition tpForTopicA = new TopicPartition("topicA", 1); + TopicPartition tpForTopicB = new TopicPartition("topicB", 1); + + ConsumerRecord<String, String> record1 = new ConsumerRecord<>("topicA", 1, 1, "key", "value1"); + ConsumerRecord<String, String> record2 = new ConsumerRecord<>("topicA", 1, 2, "key", "value2"); + ConsumerRecord<String, String> record3 = new ConsumerRecord<>("topicA", 1, 3, "key", "value3"); + + ConsumerRecord<String, String> record4 = new ConsumerRecord<>("topicB", 1, 1, "key", "value4"); + ConsumerRecord<String, String> record5 = new ConsumerRecord<>("topicB", 1, 2, "key", "value5"); + ConsumerRecord<String, String> record6 = new ConsumerRecord<>("topicB", 1, 3, "key", "value6"); + + Map<TopicPartition, List<ConsumerRecord<String, String>>> map = new HashMap<>(); + + List<ConsumerRecord<String, String>> topicARecords = List.of(record1, record2, record3); + List<ConsumerRecord<String, String>> topicBRecords = List.of(record4, record5, record6); + + map.put(tpForTopicA, topicARecords); + map.put(tpForTopicB, topicBRecords); + + ConsumerRecords<String, String> consumerRecords = new ConsumerRecords<>(map); + final DeliveryAttemptAwareRetryListener listener = new DeliveryAttemptAwareRetryListener(); + Exception ex = new RuntimeException("Dummy Exception"); + + // Given : Expected Value + int expectedDeliveryAttemptInHeader = 1; + + // When + listener.failedDelivery(consumerRecords, ex, 1); + + // Then + for (ConsumerRecord<String, String> consumerRecord : consumerRecords) { + int deliveryAttemptHeaderCount = 0; + Iterable<Header> headers = consumerRecord.headers().headers(KafkaHeaders.DELIVERY_ATTEMPT); + + for (Header header : headers) { + int deliveryAttempt = ByteBuffer.wrap(header.value()).getInt(); + deliveryAttemptHeaderCount++; + + // Assertion + assertThat(deliveryAttempt).isEqualTo(expectedDeliveryAttemptInHeader); + assertThat(deliveryAttemptHeaderCount).isEqualTo(1); + } + } + }
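The listener replaces the header on every failed delivery rather than appending to it, so a record always carries exactly one KafkaHeaders.DELIVERY_ATTEMPT header holding the latest count; that is what the single-header assertions in the two tests verify. A consumer could read the current attempt along these lines (a sketch, assuming the record has already passed through the retry listener at least once):

Header attemptHeader = record.headers().lastHeader(KafkaHeaders.DELIVERY_ATTEMPT);
// the value is the 4-byte big-endian int written by the listener
int deliveryAttempt = ByteBuffer.wrap(attemptHeader.value()).getInt();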
+ + @Test + void should_have_single_header_and_header_value_should_be_4() { + // Given + TopicPartition tpForTopicA = new TopicPartition("topicA", 1); + TopicPartition tpForTopicB = new TopicPartition("topicB", 1); + + ConsumerRecord<String, String> record1 = new ConsumerRecord<>("topicA", 1, 1, "key", "value1"); + ConsumerRecord<String, String> record2 = new ConsumerRecord<>("topicA", 1, 2, "key", "value2"); + ConsumerRecord<String, String> record3 = new ConsumerRecord<>("topicA", 1, 3, "key", "value3"); + + ConsumerRecord<String, String> record4 = new ConsumerRecord<>("topicB", 1, 1, "key", "value4"); + ConsumerRecord<String, String> record5 = new ConsumerRecord<>("topicB", 1, 2, "key", "value5"); + ConsumerRecord<String, String> record6 = new ConsumerRecord<>("topicB", 1, 3, "key", "value6"); + + Map<TopicPartition, List<ConsumerRecord<String, String>>> map = new HashMap<>(); + + List<ConsumerRecord<String, String>> topicARecords = List.of(record1, record2, record3); + List<ConsumerRecord<String, String>> topicBRecords = List.of(record4, record5, record6); + + map.put(tpForTopicA, topicARecords); + map.put(tpForTopicB, topicBRecords); + + ConsumerRecords<String, String> consumerRecords = new ConsumerRecords<>(map); + final DeliveryAttemptAwareRetryListener listener = new DeliveryAttemptAwareRetryListener(); + Exception ex = new RuntimeException("Dummy Exception"); + + // Given : Expected Value + int expectedDeliveryAttemptInHeader = 4; + + // When + for (int deliveryAttempt = 1; deliveryAttempt < 5; deliveryAttempt++) { + listener.failedDelivery(consumerRecords, ex, deliveryAttempt); + } + + // Then + for (ConsumerRecord<String, String> consumerRecord : consumerRecords) { + int deliveryAttemptHeaderCount = 0; + Iterable<Header>
headers = consumerRecord.headers().headers(KafkaHeaders.DELIVERY_ATTEMPT); + for (Header header : headers) { + int deliveryAttempt = ByteBuffer.wrap(header.value()).getInt(); + deliveryAttemptHeaderCount++; + + // Assertion + assertThat(deliveryAttempt).isEqualTo(expectedDeliveryAttemptInHeader); + assertThat(deliveryAttemptHeaderCount).isEqualTo(1); + } + } + + } + +} diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/ErrorHandlingCoverageTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/ErrorHandlingCoverageTests.java index b0a916bbb0..306746f88b 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/ErrorHandlingCoverageTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/ErrorHandlingCoverageTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,12 +16,12 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; - import java.util.Map; import org.junit.jupiter.api.Test; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Gary Russell * @since 2.9.3 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/ErrorHandlingDeserializerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/ErrorHandlingDeserializerTests.java index 876c76c94a..a30b8519b4 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/ErrorHandlingDeserializerTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/ErrorHandlingDeserializerTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2023 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,8 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; - import java.io.ByteArrayInputStream; import java.io.ObjectInputStream; import java.util.Map; @@ -60,6 +58,8 @@ import org.springframework.validation.Errors; import org.springframework.validation.Validator; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Gary Russell * @since 2.2 @@ -130,6 +130,7 @@ public String deserialize(String topic, Headers headers, byte[] data) { } } + ErrorHandlingDeserializer<String> ehd = new ErrorHandlingDeserializer<>(new MyDes()); Headers headers = new RecordHeaders(); ehd.deserialize("foo", headers, new byte[1]); diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/ErrorHandlingUtilsTest.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/ErrorHandlingUtilsTest.java index c0e576b1c4..96382450a3 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/ErrorHandlingUtilsTest.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/ErrorHandlingUtilsTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2023 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
@@ -16,15 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.BDDMockito.willReturn; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.reset; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoInteractions; - import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -45,6 +36,15 @@ import org.springframework.util.backoff.BackOff; import org.springframework.util.backoff.FixedBackOff; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.BDDMockito.willReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; + /** * @author Antonio Tomac * @since 3.0.9 @@ -53,15 +53,24 @@ class ErrorHandlingUtilsTest { private final Exception thrownException = new RuntimeException("initial cause"); + private final Consumer<String, String> consumer = mock(Consumer.class); + private final MessageListenerContainer container = mock(MessageListenerContainer.class); + private final Runnable listener = mock(Runnable.class); + private final BackOff backOff = new FixedBackOff(1000, 3); + private final CommonErrorHandler seeker = mock(CommonErrorHandler.class); + @SuppressWarnings("unchecked") private final BiConsumer<ConsumerRecord<?, ?>, Exception> recoverer = mock(BiConsumer.class); + private final LogAccessor logger = new LogAccessor(LogFactory.getLog(ErrorHandlingUtilsTest.class)); + private final List<RetryListener> retryListeners = new ArrayList<>(); + private final BinaryExceptionClassifier classifier = BinaryExceptionClassifier.defaultClassifier(); private final ConsumerRecords<String, String> consumerRecords = recordsOf( diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/ExceptionClassifierTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/ExceptionClassifierTests.java index 0ef5688e50..d5165d77f1 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/ExceptionClassifierTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/ExceptionClassifierTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,10 +16,10 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; - import org.junit.jupiter.api.Test; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Gary Russell * @since 2.8.4 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/FailedBatchProcessorTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/FailedBatchProcessorTests.java index 5cd79076b9..e9d8a6c4a8 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/FailedBatchProcessorTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/FailedBatchProcessorTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2023 the original author or authors. + * Copyright 2023-2024 the original author or authors.
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,14 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatIllegalStateException; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.BDDMockito.willThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; - import java.util.List; import java.util.Map; import java.util.function.BiConsumer; @@ -34,6 +26,7 @@ import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.RebalanceInProgressException; import org.junit.jupiter.api.Test; import org.mockito.ArgumentCaptor; @@ -42,8 +35,21 @@ import org.springframework.util.backoff.BackOff; import org.springframework.util.backoff.FixedBackOff; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assertions.assertThatIllegalStateException; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.BDDMockito.willReturn; +import static org.mockito.BDDMockito.willThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; + /** * @author Gary Russell + * @author Francois Rosiere + * @author Soby Chacko * @since 3.0.3 * */ @@ -52,15 +58,6 @@ public class FailedBatchProcessorTests { @SuppressWarnings({ "rawtypes", "unchecked" }) @Test void indexOutOfBounds() { - class TestFBP extends FailedBatchProcessor { - - TestFBP(BiConsumer<ConsumerRecord<?, ?>, Exception> recoverer, BackOff backOff, - CommonErrorHandler fallbackHandler) { - - super(recoverer, backOff, fallbackHandler); - } - - } CommonErrorHandler mockEH = mock(CommonErrorHandler.class); willThrow(new IllegalStateException("fallback")).given(mockEH).handleBatch(any(), any(), any(), any(), any()); @@ -83,15 +80,6 @@ records, mock(Consumer.class), mock(MessageListenerContainer.class), mock(Runnab @SuppressWarnings({ "rawtypes", "unchecked" }) @Test void recordNotPresent() { - class TestFBP extends FailedBatchProcessor { - - TestFBP(BiConsumer<ConsumerRecord<?, ?>, Exception> recoverer, BackOff backOff, - CommonErrorHandler fallbackHandler) { - - super(recoverer, backOff, fallbackHandler); - } - - } CommonErrorHandler mockEH = mock(CommonErrorHandler.class); willThrow(new IllegalStateException("fallback")).given(mockEH).handleBatch(any(), any(), any(), any(), any()); @@ -114,4 +102,35 @@ records, mock(Consumer.class), mock(MessageListenerContainer.class), mock(Runnab assertThat(output).contains("Record not found in batch: topic-42@123;"); }
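The new test that follows pins down the behavior this patch introduces: when commitSync() fails with a RebalanceInProgressException while a batch is still inside its back-off window, the failure surfaces as a RecordInRetryException rather than a generic KafkaException. Code that needs to distinguish "still retrying" from a terminal failure can catch it explicitly; a sketch, reusing the testFBP/records/consumer/mockMLC shapes set up in the test below:

try {
	testFBP.handle(new BatchListenerFailedException("topic", rec2), records, consumer, mockMLC, mock(Runnable.class));
}
catch (RecordInRetryException inRetry) {
	// expected while the failed record still has back-off attempts left; not a terminal error
}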
+ @SuppressWarnings({ "rawtypes", "unchecked" }) + @Test + void testExceptionDuringCommit() { + CommonErrorHandler mockEH = mock(CommonErrorHandler.class); + willThrow(new IllegalStateException("ise")).given(mockEH).handleBatch(any(), any(), any(), any(), any()); + + ConsumerRecord rec1 = new ConsumerRecord("topic", 0, 0L, null, null); + ConsumerRecord rec2 = new ConsumerRecord("topic", 0, 1L, null, null); + ConsumerRecord rec3 = new ConsumerRecord("topic", 0, 2L, null, null); + + ConsumerRecords records = new ConsumerRecords(Map.of(new TopicPartition("topic", 0), List.of(rec1, rec2, rec3))); + TestFBP testFBP = new TestFBP((rec, ex) -> { }, new FixedBackOff(2L, 2L), mockEH); + final Consumer consumer = mock(Consumer.class); + willThrow(new RebalanceInProgressException("rebalance in progress")).given(consumer).commitSync(anyMap(), any()); + final MessageListenerContainer mockMLC = mock(MessageListenerContainer.class); + willReturn(new ContainerProperties("topic")).given(mockMLC).getContainerProperties(); + assertThatExceptionOfType(RecordInRetryException.class).isThrownBy(() -> + testFBP.handle(new BatchListenerFailedException("topic", rec2), + records, consumer, mockMLC, mock(Runnable.class)) + ).withMessage("Record in retry and not yet recovered"); + } + + static class TestFBP extends FailedBatchProcessor { + + TestFBP(BiConsumer<ConsumerRecord<?, ?>, Exception> recoverer, BackOff backOff, + CommonErrorHandler fallbackHandler) { + + super(recoverer, backOff, fallbackHandler); + } + + } } diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/FailedRecordProcessorTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/FailedRecordProcessorTests.java index 8d72fa9d8d..59585a087f 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/FailedRecordProcessorTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/FailedRecordProcessorTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2020-2022 the original author or authors. + * Copyright 2020-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,8 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; - import java.util.Collections; import java.util.List; @@ -27,6 +25,8 @@ import org.springframework.kafka.support.TopicPartitionOffset; import org.springframework.util.backoff.FixedBackOff; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Gary Russell * @since 2.3.6 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/FailedRecordTrackerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/FailedRecordTrackerTests.java index c50db09044..700c450c5d 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/FailedRecordTrackerTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/FailedRecordTrackerTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2019-2023 the original author or authors. + * Copyright 2019-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
@@ -16,10 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.BDDMockito.given; -import static org.mockito.Mockito.mock; - import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -38,6 +34,10 @@ import org.springframework.util.backoff.BackOffExecution; import org.springframework.util.backoff.FixedBackOff; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell * @since 2.2.5 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/FallbackBatchErrorHandlerIntegrationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/FallbackBatchErrorHandlerIntegrationTests.java index 257cacfc06..eb9899945b 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/FallbackBatchErrorHandlerIntegrationTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/FallbackBatchErrorHandlerIntegrationTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2020-2022 the original author or authors. + * Copyright 2020-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,8 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; - import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -49,6 +47,8 @@ import org.springframework.kafka.test.utils.KafkaTestUtils; import org.springframework.util.backoff.FixedBackOff; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Gary Russell * @since 2.3.7 @@ -63,11 +63,11 @@ public class FallbackBatchErrorHandlerIntegrationTests { public static final String topic1 = "retryTopic1"; - public static final String topic1DLT = "retryTopic1.DLT"; + public static final String topic1DLT = "retryTopic1-dlt"; public static final String topic2 = "retryTopic2"; - public static final String topic2DLT = "retryTopic2.DLT"; + public static final String topic2DLT = "retryTopic2-dlt"; private static EmbeddedKafkaBroker embeddedKafka; @@ -141,7 +141,7 @@ public void publishEvent(Object event) { assertThat(recoverLatch.await(10, TimeUnit.SECONDS)).isTrue(); assertThat(failedGroupId.get()).isEqualTo("retryBatch"); - props.put(ConsumerConfig.GROUP_ID_CONFIG, "retryBatch.dlt"); + props.put(ConsumerConfig.GROUP_ID_CONFIG, "retryBatch-dlt"); DefaultKafkaConsumerFactory dltcf = new DefaultKafkaConsumerFactory<>(props); Consumer consumer = dltcf.createConsumer(); embeddedKafka.consumeFromAnEmbeddedTopic(consumer, topic1DLT); @@ -219,7 +219,7 @@ public void accept(ConsumerRecord record, Exception exception) { assertThat(recoverLatch.await(10, TimeUnit.SECONDS)).isTrue(); assertThat(failedGroupId.get()).isEqualTo("retryBatch2"); - props.put(ConsumerConfig.GROUP_ID_CONFIG, "retryBatch2.dlt"); + props.put(ConsumerConfig.GROUP_ID_CONFIG, "retryBatch2-dlt"); DefaultKafkaConsumerFactory dltcf = new DefaultKafkaConsumerFactory<>(props); Consumer consumer = dltcf.createConsumer(); embeddedKafka.consumeFromAnEmbeddedTopic(consumer, topic2DLT); diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/FallbackBatchErrorHandlerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/FallbackBatchErrorHandlerTests.java index 40a9f535d5..53288d6325 100644 --- 
a/spring-kafka/src/test/java/org/springframework/kafka/listener/FallbackBatchErrorHandlerTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/FallbackBatchErrorHandlerTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2020-2023 the original author or authors. + * Copyright 2020-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,21 +16,6 @@ package org.springframework.kafka.listener; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatExceptionOfType; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.BDDMockito.willThrow; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; - import java.lang.reflect.Field; import java.util.ArrayList; import java.util.Collections; @@ -52,6 +37,20 @@ import org.springframework.util.ReflectionUtils; import org.springframework.util.backoff.FixedBackOff; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.BDDMockito.willThrow; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; + /** * @author Gary Russell * @since 2.3.7 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/HeaderMethodArgumentResolverTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/HeaderMethodArgumentResolverTests.java index 88423b6b84..4c85c267db 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/HeaderMethodArgumentResolverTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/HeaderMethodArgumentResolverTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2021 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,8 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; - import java.nio.ByteBuffer; import java.util.Map; @@ -29,6 +27,8 @@ import org.springframework.messaging.handler.invocation.InvocableHandlerMethod; import org.springframework.messaging.support.GenericMessage; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Gary Russell * @since 2.8 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/KafkaListenerEndpointRegistryLifecycleTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/KafkaListenerEndpointRegistryLifecycleTests.java index 8401db4a06..0ea1ac330d 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/KafkaListenerEndpointRegistryLifecycleTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/KafkaListenerEndpointRegistryLifecycleTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2019 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,10 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.RETURNS_DEEP_STUBS; -import static org.mockito.Mockito.mock; - import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -38,6 +34,10 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.RETURNS_DEEP_STUBS; +import static org.mockito.Mockito.mock; + /** * @author Asi Bross * diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/KafkaMessageListenerContainerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/KafkaMessageListenerContainerTests.java index 7909189e78..6d961b286b 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/KafkaMessageListenerContainerTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/KafkaMessageListenerContainerTests.java @@ -16,23 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; -import static org.assertj.core.api.Assertions.assertThatIllegalStateException; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.isNull; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.BDDMockito.willThrow; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -125,12 +108,29 @@ import org.springframework.kafka.test.context.EmbeddedKafka; import org.springframework.kafka.test.utils.ContainerTestUtils; import org.springframework.kafka.test.utils.KafkaTestUtils; +import 
org.springframework.kafka.transaction.KafkaAwareTransactionManager; import org.springframework.lang.NonNull; import org.springframework.lang.Nullable; import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler; -import org.springframework.transaction.PlatformTransactionManager; import org.springframework.util.backoff.FixedBackOff; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; +import static org.assertj.core.api.Assertions.assertThatIllegalStateException; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.isNull; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.BDDMockito.willThrow; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + /** * Tests for the listener container. * @@ -143,6 +143,9 @@ * @author Daniel Gentes * @author Soby Chacko * @author Wang Zhiyang + * @author Mikael Carlstedt + * @author Borahm Lee + * @author Sanghyeok An */ @EmbeddedKafka(topics = { KafkaMessageListenerContainerTests.topic1, KafkaMessageListenerContainerTests.topic2, KafkaMessageListenerContainerTests.topic3, KafkaMessageListenerContainerTests.topic4, @@ -813,7 +816,7 @@ else if (polled.get() == 2) { latch1.countDown(); latch2.countDown(); acks.add(ack); - if (latch1.getCount() == 0 && records1.values().size() > 0 + if (latch1.getCount() == 0 && !records1.isEmpty() && records1.values().iterator().next().size() == 4) { acks.get(3).acknowledge(); acks.get(2).acknowledge(); @@ -1448,7 +1451,6 @@ public void onMessage(List> data) { throw new IllegalStateException(); } - }); final CountDownLatch commitLatch = new CountDownLatch(1); @@ -1520,6 +1522,7 @@ public void onIdleContainer(Map assignments, ConsumerSeekC } } + Listener messageListener = new Listener(); containerProps.setMessageListener(messageListener); containerProps.setSyncCommits(true); @@ -1599,6 +1602,7 @@ public void onIdleContainer(Map assignments, ConsumerSeekC } } + Listener messageListener = new Listener(); containerProps.setMessageListener(messageListener); containerProps.setSyncCommits(true); @@ -2593,20 +2597,23 @@ public void onPartitionsAssigned(Map assignments, Consumer public void onMessage(ConsumerRecord<Integer, String> data) { if (data.partition() == 0 && data.offset() == 0) { TopicPartition topicPartition = new TopicPartition(data.topic(), data.partition()); - final ConsumerSeekCallback seekCallbackFor = getSeekCallbackFor(topicPartition); - assertThat(seekCallbackFor).isNotNull(); - seekCallbackFor.seekToBeginning(records.keySet()); - Iterator<TopicPartition> iterator = records.keySet().iterator(); - seekCallbackFor.seekToBeginning(Collections.singletonList(iterator.next())); - seekCallbackFor.seekToBeginning(Collections.singletonList(iterator.next())); - seekCallbackFor.seekToEnd(records.keySet()); - iterator = records.keySet().iterator(); - seekCallbackFor.seekToEnd(Collections.singletonList(iterator.next())); - seekCallbackFor.seekToEnd(Collections.singletonList(iterator.next())); + final List<ConsumerSeekCallback> seekCallbacksFor = getSeekCallbacksFor(topicPartition); + assertThat(seekCallbacksFor).isNotEmpty(); + seekCallbacksFor.forEach(callback -> { +
callback.seekToBeginning(records.keySet()); + Iterator<TopicPartition> iterator = records.keySet().iterator(); + callback.seekToBeginning(Collections.singletonList(iterator.next())); + callback.seekToBeginning(Collections.singletonList(iterator.next())); + callback.seekToEnd(records.keySet()); + iterator = records.keySet().iterator(); + callback.seekToEnd(Collections.singletonList(iterator.next())); + callback.seekToEnd(Collections.singletonList(iterator.next())); + }); } } } + Listener messageListener = new Listener(); containerProps.setMessageListener(messageListener); containerProps.setMissingTopicsFatal(false); @@ -2769,7 +2776,6 @@ public void rePausePartitionAfterRebalance() throws Exception { rebal.get().onPartitionsAssigned(Set.of(tp0, tp1)); return null; }).given(consumer).subscribe(eq(foos), any(ConsumerRebalanceListener.class)); - final CountDownLatch resumeLatch = new CountDownLatch(1); ContainerProperties containerProps = new ContainerProperties("foo"); containerProps.setGroupId("grp"); containerProps.setAckMode(AckMode.RECORD); @@ -2780,7 +2786,6 @@ public void rePausePartitionAfterRebalance() throws Exception { KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf, containerProps); container.start(); - InOrder inOrder = inOrder(consumer); assertThat(firstPoll.await(10, TimeUnit.SECONDS)).isNotNull(); container.pausePartition(tp0); container.pausePartition(tp1); @@ -2811,7 +2816,6 @@ public void resumePartitionAfterRevokeAndReAssign() throws Exception { ConsumerFactory<Integer, String> cf = mock(ConsumerFactory.class); Consumer<Integer, String> consumer = mock(Consumer.class); given(cf.createConsumer(eq("grp"), eq("clientId"), isNull(), any())).willReturn(consumer); - AtomicBoolean first = new AtomicBoolean(true); TopicPartition tp0 = new TopicPartition("foo", 0); TopicPartition tp1 = new TopicPartition("foo", 1); given(consumer.assignment()).willReturn(Set.of(tp0, tp1)); @@ -2938,6 +2942,7 @@ public void onPartitionsAssigned(Map assignments, Consumer } } + containerProps.setMessageListener(new Listener()); containerProps.setMissingTopicsFatal(false); KafkaMessageListenerContainer<Integer, String> container = @@ -3462,7 +3467,6 @@ public void testCooperativeRebalance() throws Exception { containerProps.setGroupId("grp"); containerProps.setClientId("clientId"); containerProps.setMessageListener((MessageListener<Integer, String>) msg -> { }); - Properties consumerProps = new Properties(); KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf, containerProps); container.start(); @@ -3473,25 +3477,33 @@ @Test void testCommitRebalanceInProgressBatch() throws Exception { - testCommitRebalanceInProgressGuts(AckMode.BATCH, 2, commits -> { - assertThat(commits).hasSize(3); + testCommitRebalanceInProgressGuts(AckMode.BATCH, 3, commits -> { + assertThat(commits).hasSize(5); assertThat(commits.get(0)).hasSize(2); // assignment - assertThat(commits.get(1)).hasSize(2); // batch commit - assertThat(commits.get(2)).hasSize(2); // GH-2489: offsets for both partition should be re-committed before partition 1 is revoked + assertThat(commits.get(1)).hasSize(2); // batch commit which should fail due to rebalance in progress + assertThat(commits.get(2)).hasSize(2); // commit retry which should fail due to rebalance in progress + assertThat(commits.get(3)).hasSize(1); // GH-3186: additional batch commit with only one partition which should be successful + assertThat(commits.get(4)).hasSize(1); // GH-2489: offsets for both uncommitted partition should be re-committed
before partition 0 is revoked + assertThat(commits.get(4).get(new TopicPartition("foo", 0))) + .isNotNull() + .extracting(OffsetAndMetadata::offset) + .isEqualTo(2L); }); } @Test void testCommitRebalanceInProgressRecord() throws Exception { - testCommitRebalanceInProgressGuts(AckMode.RECORD, 5, commits -> { - assertThat(commits).hasSize(6); + testCommitRebalanceInProgressGuts(AckMode.RECORD, 6, commits -> { + assertThat(commits).hasSize(8); assertThat(commits.get(0)).hasSize(2); // assignment - assertThat(commits.get(1)).hasSize(1); // 4 individual commits + assertThat(commits.get(1)).hasSize(1); // 4 individual commits which should fail due to rebalance in progress assertThat(commits.get(2)).hasSize(1); assertThat(commits.get(3)).hasSize(1); assertThat(commits.get(4)).hasSize(1); - assertThat(commits.get(5)).hasSize(2); // GH-2489: offsets for both partition should be re-committed before partition 1 is revoked - assertThat(commits.get(5).get(new TopicPartition("foo", 1))) + assertThat(commits.get(5)).hasSize(2); // commit retry which should fail due to rebalance in progress + assertThat(commits.get(6)).hasSize(1); // GH-3186: additional commit which should be successful + assertThat(commits.get(7)).hasSize(1); // GH-2489: offsets for both partition should be re-committed before partition 0 is revoked + assertThat(commits.get(7).get(new TopicPartition("foo", 0))) .isNotNull() .extracting(OffsetAndMetadata::offset) .isEqualTo(2L); @@ -3515,25 +3527,37 @@ private void testCommitRebalanceInProgressGuts(AckMode ackMode, int exceptions, records.put(new TopicPartition("foo", 1), Arrays.asList( new ConsumerRecord<>("foo", 1, 0L, 1, "foo"), new ConsumerRecord<>("foo", 1, 1L, 1, "bar"))); + final Map<TopicPartition, List<ConsumerRecord<Integer, String>>> additionalRecords = Collections.singletonMap( + new TopicPartition("foo", 1), + Collections.singletonList(new ConsumerRecord<>("foo", 1, 2L, 1, "foo"))); ConsumerRecords<Integer, String> consumerRecords = new ConsumerRecords<>(records); + ConsumerRecords<Integer, String> additionalConsumerRecords = new ConsumerRecords<>(additionalRecords); ConsumerRecords<Integer, String> emptyRecords = new ConsumerRecords<>(Collections.emptyMap()); - AtomicBoolean first = new AtomicBoolean(true); - AtomicInteger rebalance = new AtomicInteger(); + AtomicInteger pollIteration = new AtomicInteger(); AtomicReference<ConsumerRebalanceListener> rebal = new AtomicReference<>(); - CountDownLatch latch = new CountDownLatch(2); + CountDownLatch latch = new CountDownLatch(3); given(consumer.poll(any(Duration.class))).willAnswer(i -> { Thread.sleep(50); - int call = rebalance.getAndIncrement(); + int call = pollIteration.getAndIncrement(); + final ConsumerRecords<Integer, String> result; if (call == 0) { rebal.get().onPartitionsRevoked(Collections.emptyList()); rebal.get().onPartitionsAssigned(records.keySet()); + result = consumerRecords; } else if (call == 1) { + result = additionalConsumerRecords; + } + else if (call == 2) { rebal.get().onPartitionsRevoked(Collections.singletonList(topicPartition0)); rebal.get().onPartitionsAssigned(Collections.emptyList()); + result = emptyRecords; + } + else { + result = emptyRecords; } latch.countDown(); - return first.getAndSet(false) ?
consumerRecords : emptyRecords; + return result; }); willAnswer(invoc -> { rebal.set(invoc.getArgument(1)); @@ -3606,7 +3630,6 @@ else if (call == 1) { }).given(consumer).subscribe(any(Collection.class), any(ConsumerRebalanceListener.class)); List> commits = new ArrayList<>(); AtomicBoolean firstCommit = new AtomicBoolean(true); - AtomicInteger commitCount = new AtomicInteger(); willAnswer(invoc -> { commits.add(invoc.getArgument(0, Map.class)); if (!firstCommit.getAndSet(false)) { @@ -3888,6 +3911,11 @@ public void testInvokeRecordInterceptorAllSkipped(AckMode ackMode, boolean early latch.countDown(); return null; }).given(consumer).commitSync(any(), any()); + CountDownLatch closeLatch = new CountDownLatch(1); + willAnswer(inv -> { + closeLatch.countDown(); + return null; + }).given(consumer).close(); TopicPartitionOffset[] topicPartition = new TopicPartitionOffset[] { new TopicPartitionOffset("foo", 0) }; @@ -3898,7 +3926,11 @@ public void testInvokeRecordInterceptorAllSkipped(AckMode ackMode, boolean early containerProps.setMessageListener((MessageListener) msg -> { }); containerProps.setClientId("clientId"); + if (early) { + containerProps.setKafkaAwareTransactionManager(mock(KafkaAwareTransactionManager.class)); + } + CountDownLatch afterRecordLatch = new CountDownLatch(2); RecordInterceptor recordInterceptor = spy(new RecordInterceptor() { @Override @@ -3909,6 +3941,10 @@ public ConsumerRecord intercept(ConsumerRecord return null; } + public void afterRecord(ConsumerRecord record, Consumer consumer) { + afterRecordLatch.countDown(); + } + }); KafkaMessageListenerContainer container = @@ -3917,12 +3953,15 @@ public ConsumerRecord intercept(ConsumerRecord container.setInterceptBeforeTx(early); container.start(); assertThat(latch.await(10, TimeUnit.SECONDS)).isTrue(); + assertThat(afterRecordLatch.await(10, TimeUnit.SECONDS)).isTrue(); + container.stop(); + assertThat(closeLatch.await(10, TimeUnit.SECONDS)).isTrue(); InOrder inOrder = inOrder(recordInterceptor, consumer); inOrder.verify(recordInterceptor).setupThreadState(eq(consumer)); inOrder.verify(consumer).poll(Duration.ofMillis(ContainerProperties.DEFAULT_POLL_TIMEOUT)); inOrder.verify(recordInterceptor).intercept(eq(firstRecord), eq(consumer)); - if (ackMode.equals(AckMode.RECORD)) { + if (AckMode.RECORD.equals(ackMode)) { inOrder.verify(consumer).commitSync(eq(Map.of(new TopicPartition("foo", 0), new OffsetAndMetadata(1L))), any(Duration.class)); } @@ -3930,15 +3969,25 @@ public ConsumerRecord intercept(ConsumerRecord verify(consumer, never()).commitSync(eq(Map.of(new TopicPartition("foo", 0), new OffsetAndMetadata(1L))), any(Duration.class)); } + inOrder.verify(recordInterceptor).success(eq(firstRecord), eq(consumer)); + inOrder.verify(recordInterceptor).afterRecord(eq(firstRecord), eq(consumer)); inOrder.verify(recordInterceptor).intercept(eq(secondRecord), eq(consumer)); - inOrder.verify(consumer).commitSync(eq(Map.of(new TopicPartition("foo", 0), new OffsetAndMetadata(2L))), - any(Duration.class)); - container.stop(); + if (AckMode.RECORD.equals(ackMode)) { + inOrder.verify(consumer).commitSync(eq(Map.of(new TopicPartition("foo", 0), new OffsetAndMetadata(2L))), + any(Duration.class)); + } + inOrder.verify(recordInterceptor).success(eq(secondRecord), eq(consumer)); + inOrder.verify(recordInterceptor).afterRecord(eq(secondRecord), eq(consumer)); + if (AckMode.BATCH.equals(ackMode)) { + inOrder.verify(consumer).commitSync(eq(Map.of(new TopicPartition("foo", 0), new OffsetAndMetadata(2L))), + any(Duration.class)); + } + 
inOrder.verify(consumer).close(); } @ParameterizedTest(name = "{index} testInvokeBatchInterceptorAllSkipped early intercept {0}") @ValueSource(booleans = { true, false }) - @SuppressWarnings({"unchecked"}) + @SuppressWarnings("unchecked") public void testInvokeBatchInterceptorAllSkipped(boolean early) throws Exception { ConsumerFactory cf = mock(ConsumerFactory.class); Consumer consumer = mock(Consumer.class); given(cf.createConsumer(eq("grp"), eq("clientId"), isNull(), any())).willReturn(consumer); @@ -3968,8 +4017,8 @@ public void testInvokeBatchInterceptorAllSkipped(boolean early) throws Exception containerProps.setMessageListener((BatchMessageListener) msgs -> { }); containerProps.setClientId("clientId"); - if (!early) { - containerProps.setTransactionManager(mock(PlatformTransactionManager.class)); + if (early) { + containerProps.setKafkaAwareTransactionManager(mock(KafkaAwareTransactionManager.class)); } BatchInterceptor interceptor = spy(new BatchInterceptor() { @@ -4215,6 +4264,80 @@ public void clearThreadState(Consumer consumer) { container.stop(); } + @Test + @SuppressWarnings("unchecked") + public void invokeBatchInterceptorSuccessFailureOnRetry() throws Exception { + ConsumerFactory<Integer, String> cf = mock(ConsumerFactory.class); + Consumer<Integer, String> consumer = mock(Consumer.class); + given(cf.createConsumer(eq("grp"), eq("clientId"), isNull(), any())).willReturn(consumer); + ConsumerRecord<Integer, String> firstRecord = new ConsumerRecord<>("test-topic", 0, 0L, 1, "data-1"); + ConsumerRecord<Integer, String> secondRecord = new ConsumerRecord<>("test-topic", 0, 1L, 1, "data-2"); + Map<TopicPartition, List<ConsumerRecord<Integer, String>>> records = new HashMap<>(); + records.put(new TopicPartition("test-topic", 0), List.of(firstRecord, secondRecord)); + ConsumerRecords<Integer, String> consumerRecords = new ConsumerRecords<>(records); + AtomicInteger invocation = new AtomicInteger(0); + given(consumer.poll(any(Duration.class))).willAnswer(i -> { + if (invocation.getAndIncrement() == 0) { + return consumerRecords; + } + else { + // Subsequent polls after the first one return empty records.
+ return new ConsumerRecords<>(Map.of()); + } + }); + TopicPartitionOffset[] topicPartition = new TopicPartitionOffset[] { + new TopicPartitionOffset("test-topic", 0) }; + + CountDownLatch latch = new CountDownLatch(4); // 3 failures, 1 success + BatchMessageListener<Integer, String> batchMessageListener = spy( + new BatchMessageListener<Integer, String>() { // Cannot be lambda: Mockito doesn't mock final classes + + @Override + public void onMessage(List<ConsumerRecord<Integer, String>> data) { + latch.countDown(); + if (latch.getCount() > 0) { + throw new IllegalArgumentException("Failed record"); + } + } + + }); + + ContainerProperties containerProps = new ContainerProperties(topicPartition); + containerProps.setGroupId("grp"); + containerProps.setAckMode(ContainerProperties.AckMode.BATCH); + containerProps.setMissingTopicsFatal(false); + containerProps.setMessageListener(batchMessageListener); + containerProps.setClientId("clientId"); + + BatchInterceptor<Integer, String> batchInterceptor = spy(new BatchInterceptor<Integer, String>() { + + @Override + public ConsumerRecords<Integer, String> intercept(ConsumerRecords<Integer, String> records, + Consumer<Integer, String> consumer) { + return records; + } + + }); + + KafkaMessageListenerContainer<Integer, String> container = + new KafkaMessageListenerContainer<>(cf, containerProps); + container.setCommonErrorHandler(new DefaultErrorHandler(new FixedBackOff(0, 3))); + container.setBatchInterceptor(batchInterceptor); + container.start(); + assertThat(latch.await(10, TimeUnit.SECONDS)).isTrue(); + + InOrder inOrder = inOrder(batchInterceptor, batchMessageListener, consumer); + for (int i = 0; i < 3; i++) { + inOrder.verify(batchInterceptor).intercept(eq(consumerRecords), eq(consumer)); + inOrder.verify(batchMessageListener).onMessage(eq(List.of(firstRecord, secondRecord))); + inOrder.verify(batchInterceptor).failure(eq(consumerRecords), any(), eq(consumer)); + } + inOrder.verify(batchInterceptor).intercept(eq(consumerRecords), eq(consumer)); + inOrder.verify(batchMessageListener).onMessage(eq(List.of(firstRecord, secondRecord))); + inOrder.verify(batchInterceptor).success(eq(consumerRecords), eq(consumer)); + container.stop(); + } + @Test public void testOffsetAndMetadataWithoutProvider() throws InterruptedException { testOffsetAndMetadata(null, new OffsetAndMetadata(1)); diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/ListenerContainerPauseServiceTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/ListenerContainerPauseServiceTests.java index 0aa653964a..d3dcf56b59 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/ListenerContainerPauseServiceTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/ListenerContainerPauseServiceTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
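
For context on the invokeBatchInterceptorSuccessFailureOnRetry test added above: a BatchInterceptor's failure(..) callback runs after every delivery attempt that throws, success(..) runs once after the attempt that completes normally, and the retries are driven by the container's DefaultErrorHandler back-off. A minimal sketch of that wiring outside a test follows; the topic, group, and class names are illustrative assumptions, not part of this patch.

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;

import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.listener.BatchInterceptor;
import org.springframework.kafka.listener.BatchMessageListener;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.listener.DefaultErrorHandler;
import org.springframework.kafka.listener.KafkaMessageListenerContainer;
import org.springframework.util.backoff.FixedBackOff;

public class BatchInterceptorRetryWiring {

    KafkaMessageListenerContainer<Integer, String> container(ConsumerFactory<Integer, String> cf) {
        ContainerProperties props = new ContainerProperties("example-topic"); // illustrative topic
        props.setGroupId("example-group");
        props.setMessageListener((BatchMessageListener<Integer, String>) batch -> {
            // throwing here triggers interceptor.failure(..); returning normally triggers success(..)
        });
        KafkaMessageListenerContainer<Integer, String> container =
                new KafkaMessageListenerContainer<>(cf, props);
        container.setCommonErrorHandler(new DefaultErrorHandler(new FixedBackOff(0L, 3L))); // up to 3 redeliveries
        container.setBatchInterceptor(new BatchInterceptor<Integer, String>() {

            @Override
            public ConsumerRecords<Integer, String> intercept(ConsumerRecords<Integer, String> records,
                    Consumer<Integer, String> consumer) {
                return records; // invoked before every delivery attempt, including retries
            }

            @Override
            public void success(ConsumerRecords<Integer, String> records, Consumer<Integer, String> consumer) {
                // invoked once, after the attempt that did not throw
            }

            @Override
            public void failure(ConsumerRecords<Integer, String> records, Exception exception,
                    Consumer<Integer, String> consumer) {
                // invoked after each attempt that threw
            }

        });
        return container;
    }

}
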
@@ -16,16 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.then; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - import java.time.Duration; import java.time.Instant; import java.util.concurrent.atomic.AtomicBoolean; @@ -39,6 +29,16 @@ import org.springframework.scheduling.TaskScheduler; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.then; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + /** * Unit test for {@link ListenerContainerPauseService}. * diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/ListenerErrorHandlerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/ListenerErrorHandlerTests.java index 53e2e66a82..b69f93374e 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/ListenerErrorHandlerTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/ListenerErrorHandlerTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022-2023 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,11 +16,6 @@ package org.springframework.kafka.listener; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.BDDMockito.willThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; - import java.lang.reflect.Method; import java.util.Collections; import java.util.List; @@ -34,6 +29,11 @@ import org.springframework.kafka.listener.adapter.RecordMessagingMessageListenerAdapter; import org.springframework.kafka.support.Acknowledgment; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.BDDMockito.willThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + /** * @author Gary Russell * @since 2.9 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/ListenerUtilsTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/ListenerUtilsTests.java index ae31c9d5b1..1158763c07 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/ListenerUtilsTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/ListenerUtilsTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,13 +16,13 @@ package org.springframework.kafka.listener; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.junit.jupiter.api.Test; + import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.BDDMockito.given; import static org.mockito.Mockito.mock; -import org.apache.kafka.clients.consumer.OffsetAndMetadata; -import org.junit.jupiter.api.Test; - /** * @author Gary Russell * @author Francois Rosiere diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/LoggingErrorHandlerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/LoggingErrorHandlerTests.java index 56ca58828d..81a2ba29de 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/LoggingErrorHandlerTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/LoggingErrorHandlerTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022-2023 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,17 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -60,6 +49,17 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; + /** * @author Gary Russell * @since 2.9 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualAckPartialBatchTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualAckPartialBatchTests.java index 10a3e4e979..91c9d5b8f3 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualAckPartialBatchTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualAckPartialBatchTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2022 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,14 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -63,6 +55,14 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell * @since 2.3 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualAssignmentInitialSeekTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualAssignmentInitialSeekTests.java index add66d5f06..ebab607e3f 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualAssignmentInitialSeekTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualAssignmentInitialSeekTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2021 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,14 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.atLeastOnce; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; - import java.lang.reflect.Method; import java.time.Duration; import java.util.Arrays; @@ -56,6 +48,14 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell * @since 2.0.1 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualNackBatchTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualNackBatchTests.java index 725d154d4a..6d85836952 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualNackBatchTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualNackBatchTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2022 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,14 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -64,6 +56,14 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell * @since 2.3 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualNackBatchTxTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualNackBatchTxTests.java index e87854623a..7ed4b0d0a6 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualNackBatchTxTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualNackBatchTxTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2022 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,16 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.isNull; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -70,6 +60,16 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.isNull; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell * @since 2.3 @@ -279,7 +279,7 @@ public ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory() { factory.setConsumerFactory(consumerFactory()); factory.getContainerProperties().setAckMode(AckMode.MANUAL); factory.getContainerProperties().setMissingTopicsFatal(false); - factory.getContainerProperties().setTransactionManager(tm()); + factory.getContainerProperties().setKafkaAwareTransactionManager(tm()); factory.setBatchListener(true); return factory; } diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualNackPauseResumeTests.java 
b/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualNackPauseResumeTests.java index 4df3e49156..fae1db1b77 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualNackPauseResumeTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualNackPauseResumeTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2022 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,13 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.mock; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -65,6 +58,13 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell * @since 2.3 @@ -77,6 +77,7 @@ public class ManualNackPauseResumeTests { @SuppressWarnings("rawtypes") @Autowired private Consumer consumer; + @Autowired private Config config; diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualNackRecordTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualNackRecordTests.java index a5427e6d95..05be09062c 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualNackRecordTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualNackRecordTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2022 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
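
A recurring change in this patch (ManualNackBatchTxTests above; the SeekToCurrentOnError*TXTests and SubBatchPerPartition*Tests below) is the move from setTransactionManager(tm()) to setKafkaAwareTransactionManager(tm()): the container property now takes the narrower KafkaAwareTransactionManager type rather than any PlatformTransactionManager. Since KafkaTransactionManager implements that interface, a typical factory configuration looks roughly like this sketch (the generics and method name are illustrative assumptions):

import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.transaction.KafkaTransactionManager;

public class TxFactoryConfig {

    ConcurrentKafkaListenerContainerFactory<Integer, String> kafkaListenerContainerFactory(
            ConsumerFactory<Integer, String> cf, ProducerFactory<Integer, String> pf) {
        ConcurrentKafkaListenerContainerFactory<Integer, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(cf);
        // KafkaTransactionManager implements KafkaAwareTransactionManager, so it can be
        // handed to the narrower setter that these hunks migrate to
        factory.getContainerProperties().setKafkaAwareTransactionManager(new KafkaTransactionManager<>(pf));
        return factory;
    }

}
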
@@ -16,14 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -65,6 +57,14 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell * @since 2.3 @@ -77,6 +77,7 @@ public class ManualNackRecordTests { @SuppressWarnings("rawtypes") @Autowired private Consumer consumer; + @Autowired private Config config; diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualNackRecordZeroSleepTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualNackRecordZeroSleepTests.java index 586c59897e..20454460cd 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualNackRecordZeroSleepTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/ManualNackRecordZeroSleepTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,14 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -64,6 +56,14 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell * @since 2.8.5 @@ -76,6 +76,7 @@ public class ManualNackRecordZeroSleepTests { @SuppressWarnings("rawtypes") @Autowired private Consumer consumer; + @Autowired private Config config; diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/MissingGroupIdTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/MissingGroupIdTests.java index 4914fac6c4..0bc4b0eac9 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/MissingGroupIdTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/MissingGroupIdTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2019 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,8 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThatExceptionOfType; - import java.util.Collections; import org.apache.kafka.clients.consumer.ConsumerConfig; @@ -39,6 +37,8 @@ import org.springframework.kafka.test.condition.EmbeddedKafkaCondition; import org.springframework.kafka.test.context.EmbeddedKafka; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; + /** * @author Gary Russell * @since 2.1.5 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/MissingTopicCheckOverrideAdminConfigTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/MissingTopicCheckOverrideAdminConfigTests.java index d6bfef9a25..6ec62b5bf3 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/MissingTopicCheckOverrideAdminConfigTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/MissingTopicCheckOverrideAdminConfigTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2023 the original author or authors. + * Copyright 2023-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,13 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThatNoException; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; - import java.util.Map; import java.util.Properties; @@ -37,6 +30,13 @@ import org.springframework.kafka.test.context.EmbeddedKafka; import org.springframework.kafka.test.utils.KafkaTestUtils; +import static org.assertj.core.api.Assertions.assertThatNoException; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; + /** * @author Gary Russell * @since 3.0 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/MissingTopicsTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/MissingTopicsTests.java index 264cf30a82..a8832cc178 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/MissingTopicsTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/MissingTopicsTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2019 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,9 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - import java.util.Map; import org.junit.jupiter.api.BeforeAll; @@ -30,6 +27,9 @@ import org.springframework.kafka.test.context.EmbeddedKafka; import org.springframework.kafka.test.utils.KafkaTestUtils; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + /** * @author Gary Russell * @since 2.2 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/MockConsumerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/MockConsumerTests.java index ae5c79a465..cff9111046 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/MockConsumerTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/MockConsumerTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2023 the original author or authors. + * Copyright 2023-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
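
The MissingTopics* hunks above only move imports and bump copyright years, but the property those tests exercise, missingTopicsFatal, also appears throughout the container tests in this patch. A minimal sketch of what it controls, with an invented topic and group id:

import org.springframework.kafka.listener.ContainerProperties;

public class MissingTopicsConfig {

    ContainerProperties containerProperties() {
        ContainerProperties props = new ContainerProperties("possibly-missing-topic");
        props.setGroupId("grp");
        // false: log a warning and start the container even if the topic does not exist yet;
        // true: fail container startup when a subscribed topic is absent from the broker
        props.setMissingTopicsFatal(false);
        return props;
    }

}
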
@@ -16,8 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; - import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -47,6 +45,8 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Gary Russell * @since 3.0.7 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/PauseContainerManualAssignmentTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/PauseContainerManualAssignmentTests.java index 380e96e45e..2f2f9b5eec 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/PauseContainerManualAssignmentTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/PauseContainerManualAssignmentTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022-2023 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,17 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -68,6 +57,17 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; + /** * @author Gary Russell * @since 2.9 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/PauseContainerWhileErrorHandlerIsRetryingTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/PauseContainerWhileErrorHandlerIsRetryingTests.java index 3d4a3bc9b6..b2e45c2f53 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/PauseContainerWhileErrorHandlerIsRetryingTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/PauseContainerWhileErrorHandlerIsRetryingTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2023-2023 the original author or authors. + * Copyright 2023-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,12 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.spy; - import java.time.Duration; import java.util.LinkedHashSet; import java.util.List; @@ -56,6 +50,12 @@ import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; import org.springframework.util.backoff.FixedBackOff; +import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.spy; + /** * @author Antonio Tomac * @author Gary Russell @@ -153,7 +153,6 @@ void produce(int... records) { } } - @KafkaListener(id = "id", groupId = "grp", topics = "foo") public void process(List batch, Acknowledgment acknowledgment) { batch.forEach((msg) -> { diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/RemainingRecordsErrorHandlerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/RemainingRecordsErrorHandlerTests.java index a4d7e96ff8..c1a0bc9423 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/RemainingRecordsErrorHandlerTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/RemainingRecordsErrorHandlerTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2019-2023 the original author or authors. + * Copyright 2019-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,14 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -60,6 +52,14 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell * @since 2.1.12 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/SeekToCurrentOnErrorBatchModeTXTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/SeekToCurrentOnErrorBatchModeTXTests.java index cc33e586cd..78e70bbc33 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/SeekToCurrentOnErrorBatchModeTXTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/SeekToCurrentOnErrorBatchModeTXTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2021 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
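
The hunk above shows PauseContainerWhileErrorHandlerIsRetryingTests' batch @KafkaListener taking a manual Acknowledgment. For reference, a stand-alone listener of that shape looks roughly like the sketch below; it assumes a listener container factory configured with AckMode.MANUAL, and the id and topic are invented:

import java.util.List;

import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;

public class BatchAckListener {

    @KafkaListener(id = "batchAck", topics = "example-topic", batch = "true")
    public void process(List<String> batch, Acknowledgment acknowledgment) {
        batch.forEach(msg -> {
            // handle each record value
        });
        acknowledgment.acknowledge(); // commits offsets for the whole batch
    }

}
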
@@ -16,16 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.isNull; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.BDDMockito.willReturn; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -67,6 +57,16 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.isNull; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.BDDMockito.willReturn; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell * @since 2.0.1 @@ -247,7 +247,7 @@ public ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory() { factory.setConsumerFactory(consumerFactory()); factory.setCommonErrorHandler(new DefaultErrorHandler()); factory.getContainerProperties().setAckMode(AckMode.BATCH); - factory.getContainerProperties().setTransactionManager(tm()); + factory.getContainerProperties().setKafkaAwareTransactionManager(tm()); return factory; } diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/SeekToCurrentOnErrorBatchModeTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/SeekToCurrentOnErrorBatchModeTests.java index 90962275a9..78108df7f6 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/SeekToCurrentOnErrorBatchModeTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/SeekToCurrentOnErrorBatchModeTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2021 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,14 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -61,6 +53,14 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell * @since 2.0.1 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/SeekToCurrentOnErrorRecordModeTXTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/SeekToCurrentOnErrorRecordModeTXTests.java index 96f4604200..82fd5a2ebc 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/SeekToCurrentOnErrorRecordModeTXTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/SeekToCurrentOnErrorRecordModeTXTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2021 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,17 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.isNull; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.BDDMockito.willReturn; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -68,6 +57,17 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.isNull; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.BDDMockito.willReturn; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell * @since 2.0.1 @@ -248,7 +248,7 @@ public ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory() { factory.setConsumerFactory(consumerFactory()); factory.setCommonErrorHandler(new DefaultErrorHandler()); factory.getContainerProperties().setAckMode(AckMode.RECORD); - factory.getContainerProperties().setTransactionManager(tm()); + factory.getContainerProperties().setKafkaAwareTransactionManager(tm()); return factory; } diff --git 
a/spring-kafka/src/test/java/org/springframework/kafka/listener/SeekToCurrentOnErrorRecordModeTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/SeekToCurrentOnErrorRecordModeTests.java index b4ad2b3731..31bad8b2c3 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/SeekToCurrentOnErrorRecordModeTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/SeekToCurrentOnErrorRecordModeTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2022 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,14 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -63,6 +55,14 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell * @since 2.0.1 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/SeekToCurrentRecovererTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/SeekToCurrentRecovererTests.java index bb474f0031..2b4ef419f9 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/SeekToCurrentRecovererTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/SeekToCurrentRecovererTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2021 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,19 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatExceptionOfType; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; - import java.time.Duration; import java.util.ArrayList; import java.util.Collections; @@ -52,7 +39,6 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; -import org.springframework.kafka.KafkaException; import org.springframework.kafka.core.DefaultKafkaConsumerFactory; import org.springframework.kafka.core.DefaultKafkaProducerFactory; import org.springframework.kafka.core.KafkaOperations; @@ -68,8 +54,22 @@ import org.springframework.lang.Nullable; import org.springframework.util.backoff.FixedBackOff; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; + /** * @author Gary Russell + * @author Soby Chacko * @since 2.2 * */ @@ -153,7 +153,7 @@ public void accept(ConsumerRecord record, @Nullable Consumer consume assertThat(recoverLatch.await(10, TimeUnit.SECONDS)).isTrue(); assertThat(failedGroupId.get()).isEqualTo("seekTestMaxFailures"); - props.put(ConsumerConfig.GROUP_ID_CONFIG, "seekTestMaxFailures.dlt"); + props.put(ConsumerConfig.GROUP_ID_CONFIG, "seekTestMaxFailures-dlt"); DefaultKafkaConsumerFactory dltcf = new DefaultKafkaConsumerFactory<>(props); Consumer consumer = dltcf.createConsumer(); embeddedKafka.consumeFromAnEmbeddedTopic(consumer, topic1DLT); @@ -180,7 +180,7 @@ public void seekToCurrentErrorHandlerRecovers() { records.add(new ConsumerRecord<>("foo", 0, 0, null, "foo")); records.add(new ConsumerRecord<>("foo", 0, 1, null, "bar")); Consumer consumer = mock(Consumer.class); - assertThatExceptionOfType(KafkaException.class).isThrownBy(() -> + assertThatExceptionOfType(RecordInRetryException.class).isThrownBy(() -> eh.handleRemaining(new RuntimeException(), records, consumer, null)); verify(consumer).seek(new TopicPartition("foo", 0), 0L); verifyNoMoreInteractions(consumer); @@ -227,14 +227,14 @@ public void recoveryFailed(ConsumerRecord record, Exception original, Exce records.add(new ConsumerRecord<>("foo", 0, 0, null, "foo")); records.add(new ConsumerRecord<>("foo", 0, 1, null, "bar")); Consumer consumer = mock(Consumer.class); - assertThatExceptionOfType(KafkaException.class).isThrownBy( + assertThatExceptionOfType(RecordInRetryException.class).isThrownBy( () -> eh.handleRemaining(new RuntimeException(), records, consumer, null)); verify(consumer).seek(new TopicPartition("foo", 0), 0L); verifyNoMoreInteractions(consumer); - 
assertThatExceptionOfType(KafkaException.class).isThrownBy( + assertThatExceptionOfType(RecordInRetryException.class).isThrownBy( () -> eh.handleRemaining(new RuntimeException(), records, consumer, null)); verify(consumer, times(2)).seek(new TopicPartition("foo", 0), 0L); - assertThatExceptionOfType(KafkaException.class).isThrownBy( + assertThatExceptionOfType(RecordInRetryException.class).isThrownBy( () -> eh.handleRemaining(new RuntimeException(), records, consumer, null)); verify(consumer, times(3)).seek(new TopicPartition("foo", 0), 0L); eh.handleRemaining(new RuntimeException(), records, consumer, null); @@ -267,11 +267,11 @@ public void seekToCurrentErrorHandlerRecovererFailsBackOffNotReset() { records.add(new ConsumerRecord<>("foo", 0, 0, null, "foo")); records.add(new ConsumerRecord<>("foo", 0, 1, null, "bar")); Consumer consumer = mock(Consumer.class); - assertThatExceptionOfType(KafkaException.class).isThrownBy( + assertThatExceptionOfType(RecordInRetryException.class).isThrownBy( () -> eh.handleRemaining(new RuntimeException(), records, consumer, null)); verify(consumer).seek(new TopicPartition("foo", 0), 0L); verifyNoMoreInteractions(consumer); - assertThatExceptionOfType(KafkaException.class).isThrownBy( + assertThatExceptionOfType(RecordInRetryException.class).isThrownBy( () -> eh.handleRemaining(new RuntimeException(), records, consumer, null)); verify(consumer, times(2)).seek(new TopicPartition("foo", 0), 0L); eh.handleRemaining(new RuntimeException(), records, consumer, null); // immediate re-attempt recovery @@ -308,7 +308,7 @@ private void seekToCurrentErrorHandlerRecoversManualAcks(boolean syncCommits) { OffsetCommitCallback commitCallback = (offsets, ex) -> { }; properties.setCommitCallback(commitCallback); given(container.getContainerProperties()).willReturn(properties); - assertThatExceptionOfType(KafkaException.class).isThrownBy(() -> + assertThatExceptionOfType(RecordInRetryException.class).isThrownBy(() -> eh.handleRemaining(new RuntimeException(), records, consumer, container)); verify(consumer).seek(new TopicPartition("foo", 0), 0L); verify(consumer).seek(new TopicPartition("foo", 1), 0L); @@ -340,7 +340,7 @@ public void testNeverRecover() { records.add(new ConsumerRecord<>("foo", 0, 1, null, "bar")); Consumer consumer = mock(Consumer.class); for (int i = 0; i < 20; i++) { - assertThatExceptionOfType(KafkaException.class).isThrownBy(() -> + assertThatExceptionOfType(RecordInRetryException.class).isThrownBy(() -> eh.handleRemaining(new RuntimeException(), records, consumer, null)); } verify(consumer, times(20)).seek(new TopicPartition("foo", 0), 0L); diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/SubBatchPerPartitionTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/SubBatchPerPartitionTests.java index fc0a781ff6..a08c8c44b8 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/SubBatchPerPartitionTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/SubBatchPerPartitionTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2022 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
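
The SeekToCurrentRecovererTests changes above replace the expected KafkaException with the more specific RecordInRetryException: while back-off attempts remain, DefaultErrorHandler.handleRemaining(..) seeks the failed records back and signals the still-in-retry state with that type. A hedged sketch of the asserted behavior, mirroring the test with an invented record:

import java.util.ArrayList;
import java.util.List;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;

import org.springframework.kafka.listener.DefaultErrorHandler;
import org.springframework.kafka.listener.RecordInRetryException;
import org.springframework.util.backoff.FixedBackOff;

import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
import static org.mockito.Mockito.mock;

public class RecordInRetrySketch {

    @SuppressWarnings("unchecked")
    void retryIsSignalledWithRecordInRetryException() {
        DefaultErrorHandler eh = new DefaultErrorHandler(new FixedBackOff(0L, 1L)); // retries remain
        List<ConsumerRecord<?, ?>> records = new ArrayList<>();
        records.add(new ConsumerRecord<>("foo", 0, 0L, null, "bar"));
        Consumer<?, ?> consumer = mock(Consumer.class);
        // the record is seeked back, and the in-retry state surfaces as RecordInRetryException
        assertThatExceptionOfType(RecordInRetryException.class)
                .isThrownBy(() -> eh.handleRemaining(new RuntimeException(), records, consumer, null));
    }

}
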
@@ -16,17 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.atLeastOnce; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -62,10 +51,21 @@ import org.springframework.kafka.test.EmbeddedKafkaBroker; import org.springframework.kafka.test.context.EmbeddedKafka; import org.springframework.kafka.test.utils.KafkaTestUtils; +import org.springframework.kafka.transaction.KafkaAwareTransactionManager; import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.annotation.DirtiesContext.ClassMode; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; -import org.springframework.transaction.PlatformTransactionManager; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; /** * @author Gary Russell @@ -156,7 +156,7 @@ void defaults() { containerProps = new ContainerProperties("sbpp"); containerProps.setMessageListener(mock(MessageListener.class)); - containerProps.setTransactionManager(mock(PlatformTransactionManager.class)); + containerProps.setKafkaAwareTransactionManager(mock(KafkaAwareTransactionManager.class)); container = new KafkaMessageListenerContainer<>(cf, containerProps); container.start(); assertThat(KafkaTestUtils.getPropertyValue(container, "listenerConsumer.subBatchPerPartition")) diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/SubBatchPerPartitionTxRollbackTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/SubBatchPerPartitionTxRollbackTests.java index 1d3cd8dab7..851469c1ba 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/SubBatchPerPartitionTxRollbackTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/SubBatchPerPartitionTxRollbackTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2022 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,16 +16,6 @@ package org.springframework.kafka.listener; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.isNull; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.BDDMockito.willReturn; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -67,6 +57,16 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.isNull; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.BDDMockito.willReturn; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell * @since 2.3.2 @@ -220,7 +220,7 @@ public ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory() { ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory(); factory.setConsumerFactory(consumerFactory()); factory.getContainerProperties().setAckMode(AckMode.BATCH); - factory.getContainerProperties().setTransactionManager(tm()); + factory.getContainerProperties().setKafkaAwareTransactionManager(tm()); factory.setBatchListener(true); factory.getContainerProperties().setSubBatchPerPartition(true); return factory; diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/SubBatchPerPartitionTxTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/SubBatchPerPartitionTxTests.java index 859bfc25be..41d224e3d1 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/listener/SubBatchPerPartitionTxTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/SubBatchPerPartitionTxTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2022 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,16 +16,6 @@
 package org.springframework.kafka.listener;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.ArgumentMatchers.isNull;
-import static org.mockito.BDDMockito.given;
-import static org.mockito.BDDMockito.willAnswer;
-import static org.mockito.BDDMockito.willReturn;
-import static org.mockito.Mockito.inOrder;
-import static org.mockito.Mockito.mock;
-
 import java.time.Duration;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -67,6 +57,16 @@
 import org.springframework.test.annotation.DirtiesContext;
 import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
 
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.ArgumentMatchers.isNull;
+import static org.mockito.BDDMockito.given;
+import static org.mockito.BDDMockito.willAnswer;
+import static org.mockito.BDDMockito.willReturn;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.mock;
+
 /**
  * @author Gary Russell
 * @since 2.3.2
@@ -206,7 +206,7 @@ public ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory() {
 		ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory();
 		factory.setConsumerFactory(consumerFactory());
 		factory.getContainerProperties().setAckMode(AckMode.BATCH);
-		factory.getContainerProperties().setTransactionManager(tm());
+		factory.getContainerProperties().setKafkaAwareTransactionManager(tm());
 		factory.getContainerProperties().setSubBatchPerPartition(true);
 		factory.setBatchListener(true);
 		return factory;
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/TestOOMError.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/TestOOMError.java
index 0b8c7627db..80c5bf5900 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/listener/TestOOMError.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/TestOOMError.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2018-2022 the original author or authors.
+ * Copyright 2018-2024 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
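The TestOOMError hunk just below raises a stop latch from 1 to 2: stopping a ConcurrentMessageListenerContainer publishes one ContainerStoppedEvent per child container plus one for the concurrent (parent) container itself, so with concurrency 1 two events arrive. A sketch of that counting pattern, assuming an already-configured container variable:

    import java.util.concurrent.CountDownLatch;

    import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
    import org.springframework.kafka.listener.ContainerStoppedEvent;

    class StopEventCounting {

        static CountDownLatch watchForStop(ConcurrentMessageListenerContainer<Integer, String> container) {
            // one event from the child container + one from the concurrent (parent) container
            CountDownLatch stopLatch = new CountDownLatch(2);
            container.setApplicationEventPublisher(event -> {
                if (event instanceof ContainerStoppedEvent) {
                    stopLatch.countDown();
                }
            });
            return stopLatch;
        }
    }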
@@ -16,13 +16,6 @@
 package org.springframework.kafka.listener;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.ArgumentMatchers.isNull;
-import static org.mockito.BDDMockito.given;
-import static org.mockito.Mockito.mock;
-
 import java.time.Duration;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -42,6 +35,13 @@
 import org.springframework.kafka.listener.ContainerProperties.AckMode;
 import org.springframework.kafka.support.TopicPartitionOffset;
 
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.ArgumentMatchers.isNull;
+import static org.mockito.BDDMockito.given;
+import static org.mockito.Mockito.mock;
+
 /**
  * @author Gary Russell
 * @since 2.1.11
@@ -77,7 +77,8 @@ public void testOOMCMLC() throws Exception {
 		containerProps.setClientId("clientId");
 		ConcurrentMessageListenerContainer container =
 				new ConcurrentMessageListenerContainer<>(cf, containerProps);
-		CountDownLatch stopLatch = new CountDownLatch(1);
+		// the concurrent (parent) container publishes one stop event; the child containers publish one each
+		CountDownLatch stopLatch = new CountDownLatch(2);
 		container.setApplicationEventPublisher(e -> {
 			if (e instanceof ContainerStoppedEvent) {
 				stopLatch.countDown();
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/TransactionalContainerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/TransactionalContainerTests.java
index 8d2a13b364..352433fac5 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/listener/TransactionalContainerTests.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/TransactionalContainerTests.java
@@ -16,26 +16,6 @@
 package org.springframework.kafka.listener;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.assertThatIllegalStateException;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyBoolean;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.ArgumentMatchers.anyMap;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.ArgumentMatchers.isNull;
-import static org.mockito.BDDMockito.given;
-import static org.mockito.BDDMockito.willAnswer;
-import static org.mockito.BDDMockito.willReturn;
-import static org.mockito.BDDMockito.willThrow;
-import static org.mockito.Mockito.inOrder;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyNoMoreInteractions;
-
 import java.time.Duration;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -95,16 +75,35 @@
 import org.springframework.kafka.transaction.KafkaTransactionManager;
 import org.springframework.messaging.MessageHeaders;
 import org.springframework.transaction.TransactionDefinition;
-import org.springframework.transaction.TransactionException;
-import org.springframework.transaction.support.AbstractPlatformTransactionManager;
 import org.springframework.transaction.support.DefaultTransactionDefinition;
-import org.springframework.transaction.support.DefaultTransactionStatus;
 import org.springframework.util.backoff.FixedBackOff;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatIllegalStateException;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.ArgumentMatchers.anyMap;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.ArgumentMatchers.isNull;
+import static org.mockito.BDDMockito.given;
+import static org.mockito.BDDMockito.willAnswer;
+import static org.mockito.BDDMockito.willReturn;
+import static org.mockito.BDDMockito.willThrow;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+
 /**
  * @author Gary Russell
  * @author Artem Bilan
  * @author Wang Zhiyang
+ * @author Soby Chacko
+ * @author Raphael Rösch
  *
  * @since 1.3
  *
@@ -112,7 +111,8 @@
 @EmbeddedKafka(topics = { TransactionalContainerTests.topic1, TransactionalContainerTests.topic2,
 		TransactionalContainerTests.topic3, TransactionalContainerTests.topic3DLT, TransactionalContainerTests.topic4,
 		TransactionalContainerTests.topic5, TransactionalContainerTests.topic6, TransactionalContainerTests.topic7,
-		TransactionalContainerTests.topic8, TransactionalContainerTests.topic8DLT },
+		TransactionalContainerTests.topic8, TransactionalContainerTests.topic8DLT, TransactionalContainerTests.topic9,
+		TransactionalContainerTests.topic10},
 		brokerProperties = { "transaction.state.log.replication.factor=1", "transaction.state.log.min.isr=1" })
 public class TransactionalContainerTests {
@@ -122,7 +122,7 @@ public class TransactionalContainerTests {
 
 	public static final String topic3 = "txTopic3";
 
-	public static final String topic3DLT = "txTopic3.DLT";
+	public static final String topic3DLT = "txTopic3-dlt";
 
 	public static final String topic4 = "txTopic4";
 
@@ -134,10 +134,12 @@ public class TransactionalContainerTests {
 
 	public static final String topic8 = "txTopic8";
 
-	public static final String topic8DLT = "txTopic8.DLT";
+	public static final String topic8DLT = "txTopic8-dlt";
 
 	public static final String topic9 = "txTopic9";
 
+	public static final String topic10 = "txTopic10";
+
 	private static EmbeddedKafkaBroker embeddedKafka;
 
 	@BeforeAll
@@ -221,7 +223,7 @@ private void testConsumeAndProduceTransactionGuts(boolean handleError, AckMode a
 		ContainerProperties props = new ContainerProperties("foo");
 		props.setAckMode(ackMode);
 		props.setGroupId("group");
-		props.setTransactionManager(tm);
+		props.setKafkaAwareTransactionManager(tm);
 		props.setAssignmentCommitOption(AssignmentCommitOption.ALWAYS);
 		props.setEosMode(eosMode);
 		props.setStopContainerWhenFenced(stopWhenFenced);
@@ -329,7 +331,7 @@ public void testConsumeAndProduceTransactionRollback() throws Exception {
 		ContainerProperties props = new ContainerProperties(new TopicPartitionOffset("foo", 0),
 				new TopicPartitionOffset("foo", 1));
 		props.setGroupId("group");
-		props.setTransactionManager(tm);
+		props.setKafkaAwareTransactionManager(tm);
 		props.setDeliveryAttemptHeader(true);
 		final KafkaTemplate template = new KafkaTemplate(pf);
 		AtomicReference delivery = new AtomicReference();
@@ -400,7 +402,7 @@ public void testConsumeAndProduceTransactionRollbackBatch() throws Exception {
 		ContainerProperties props = new ContainerProperties(new TopicPartitionOffset("foo", 0),
 				new TopicPartitionOffset("foo", 1));
 		props.setGroupId("group");
-		props.setTransactionManager(tm);
+		props.setKafkaAwareTransactionManager(tm);
 		props.setSubBatchPerPartition(false);
 		final KafkaTemplate template = new KafkaTemplate(pf);
 		props.setMessageListener((BatchMessageListener) recordlist -> {
@@ -462,7 +464,7 @@ public void testConsumeAndProduceTransactionExternalTM() throws Exception {
 		given(pf.createProducer(isNull())).willReturn(producer);
 		ContainerProperties props = new ContainerProperties(new TopicPartitionOffset("foo", 0));
 		props.setGroupId("group");
-		props.setTransactionManager(new SomeOtherTransactionManager());
+		props.setKafkaAwareTransactionManager(new KafkaTransactionManager<>(pf));
 		final KafkaTemplate template = new KafkaTemplate(pf);
 		ConsumerGroupMetadata meta = mock(ConsumerGroupMetadata.class);
 		props.setMessageListener((MessageListener) m -> {
@@ -529,7 +531,7 @@ public void testRollbackRecord() throws Exception {
 
 		@SuppressWarnings({ "rawtypes" })
 		KafkaTransactionManager tm = new KafkaTransactionManager(pf);
-		containerProps.setTransactionManager(tm);
+		containerProps.setKafkaAwareTransactionManager(tm);
 		KafkaMessageListenerContainer container =
 				new KafkaMessageListenerContainer<>(cf, containerProps);
 		container.setBeanName("testRollbackRecord");
@@ -595,12 +597,7 @@ public void testFixLagKTM() throws InterruptedException {
 		testFixLagGuts(topic6, 1);
 	}
 
-	@Test
-	public void testFixLagOtherTM() throws InterruptedException {
-		testFixLagGuts(topic7, 2);
-	}
-
-	@SuppressWarnings("unchecked")
+	@SuppressWarnings({"unchecked"})
 	private void testFixLagGuts(String topic, int whichTm) throws InterruptedException {
 		Map props = KafkaTestUtils.consumerProps("txTest2", "false", embeddedKafka);
 		props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
@@ -617,10 +614,8 @@ private void testFixLagGuts(String topic, int whichTm) throws InterruptedExcepti
 			case 0:
 				break;
 			case 1:
-				containerProps.setTransactionManager(new KafkaTransactionManager<>(pf));
+				containerProps.setKafkaAwareTransactionManager(new KafkaTransactionManager<>(pf));
 				break;
-			case 2:
-				containerProps.setTransactionManager(new SomeOtherTransactionManager());
 		}
 
 		final KafkaTemplate template = new KafkaTemplate<>(pf);
@@ -656,7 +651,7 @@ private void testFixLagGuts(String topic, int whichTm) throws InterruptedExcepti
 		pf.destroy();
 	}
 
-	@SuppressWarnings({ "unchecked"})
+	@SuppressWarnings("unchecked")
 	@Test
 	public void testMaxFailures() throws Exception {
 		String group = "groupInARBP";
@@ -682,7 +677,7 @@ public void testMaxFailures() throws Exception {
 
 		@SuppressWarnings({ "rawtypes" })
 		KafkaTransactionManager tm = new KafkaTransactionManager(pf);
-		containerProps.setTransactionManager(tm);
+		containerProps.setKafkaAwareTransactionManager(tm);
 		KafkaMessageListenerContainer container =
 				new KafkaMessageListenerContainer<>(cf, containerProps);
 		container.setBeanName("testMaxFailures");
@@ -787,9 +782,8 @@ public void testBatchListenerMaxFailuresOnRecover() throws Exception {
 			}
 		});
 
-		@SuppressWarnings({ "rawtypes" })
-		KafkaTransactionManager tm = new KafkaTransactionManager(pf);
-		containerProps.setTransactionManager(tm);
+		KafkaTransactionManager tm = new KafkaTransactionManager<>(pf);
+		containerProps.setKafkaAwareTransactionManager(tm);
 		KafkaMessageListenerContainer container =
 				new KafkaMessageListenerContainer<>(cf, containerProps);
 		container.setBeanName("testBatchListenerMaxFailures");
@@ -908,7 +902,7 @@ public void testRollbackProcessorCrash() throws Exception {
 
 		@SuppressWarnings({ "rawtypes" })
 		KafkaTransactionManager tm = new KafkaTransactionManager(pf);
-		containerProps.setTransactionManager(tm);
+		containerProps.setKafkaAwareTransactionManager(tm);
 		KafkaMessageListenerContainer container =
 				new KafkaMessageListenerContainer<>(cf, containerProps);
 		container.setBeanName("testRollbackNoRetries");
@@ -944,10 +938,10 @@ public void testRollbackProcessorCrash() throws Exception {
 		assertThat(stopLatch.await(10, TimeUnit.SECONDS)).isTrue();
 	}
 
-	@SuppressWarnings("unchecked")
 	@Test
 	public void testBatchListenerRecoverAfterRollbackProcessorCrash() throws Exception {
-		Map props = KafkaTestUtils.consumerProps("testBatchListenerRollbackNoRetries", "false", embeddedKafka);
+		String group = "testBatchListenerRollbackNoRetries";
+		Map props = KafkaTestUtils.consumerProps(group, "false", embeddedKafka);
 		props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
 		props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 2);
 		DefaultKafkaConsumerFactory cf = new DefaultKafkaConsumerFactory<>(props);
@@ -960,24 +954,23 @@ public void testBatchListenerRecoverAfterRollbackProcessorCrash() throws Excepti
 		pf.setTransactionIdPrefix("batchListener.noRetries.");
 		final KafkaTemplate template = new KafkaTemplate<>(pf);
 		final CountDownLatch latch = new CountDownLatch(1);
-		AtomicReference data = new AtomicReference<>();
+		AtomicReference<List<ConsumerRecord<Integer, String>>> data = new AtomicReference<>();
 		containerProps.setMessageListener((BatchMessageListener) recordList -> {
 			for (ConsumerRecord record : recordList) {
-				data.set(record.value());
 				if (record.offset() == 0) {
 					throw new BatchListenerFailedException("fail for no retry", record);
 				}
-				latch.countDown();
 			}
+			data.set(recordList);
+			latch.countDown();
 		});
 
-		@SuppressWarnings({ "rawtypes" })
-		KafkaTransactionManager tm = new KafkaTransactionManager(pf);
-		containerProps.setTransactionManager(tm);
+		KafkaTransactionManager tm = new KafkaTransactionManager<>(pf);
+		containerProps.setKafkaAwareTransactionManager(tm);
 		KafkaMessageListenerContainer container =
 				new KafkaMessageListenerContainer<>(cf, containerProps);
 		container.setBeanName("testBatchListenerRollbackNoRetries");
-		final KafkaOperations dlTemplate = spy(new KafkaTemplate<>(pf));
+		final KafkaOperations dlTemplate = new KafkaTemplate<>(pf);
 		AtomicBoolean recovererShouldFail = new AtomicBoolean(true);
 		DeadLetterPublishingRecoverer recoverer = new DeadLetterPublishingRecoverer(dlTemplate) {
 			@Override
@@ -989,7 +982,7 @@ public void accept(ConsumerRecord record, Consumer consumer, Excepti
 		};
 
 		DefaultAfterRollbackProcessor afterRollbackProcessor =
-				spy(new DefaultAfterRollbackProcessor<>(recoverer, new FixedBackOff(0L, 0L), dlTemplate, true));
+				new DefaultAfterRollbackProcessor<>(recoverer, new FixedBackOff(0L, 0L), dlTemplate, true);
 		container.setAfterRollbackProcessor(afterRollbackProcessor);
 		final CountDownLatch stopLatch = new CountDownLatch(1);
 		container.setApplicationEventPublisher(e -> {
@@ -1009,8 +1002,16 @@ public void accept(ConsumerRecord record, Consumer consumer, Excepti
 			template.sendDefault(0, 0, "qux");
 			return null;
 		});
+
 		assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
-		assertThat(data.get()).isEqualTo("qux");
+		assertThat(data.get()).isNotNull();
+		ConsumerRecord crBaz = data.get().get(0);
+		ConsumerRecord crQux = data.get().get(1);
+		assertThat(crBaz.offset()).isEqualTo(2L);
+		assertThat(crBaz.value()).isEqualTo("baz");
+		assertThat(crQux.offset()).isEqualTo(3L);
+		assertThat(crQux.value()).isEqualTo("qux");
+
 		container.stop();
 		pf.destroy();
 		assertThat(stopLatch.await(10, TimeUnit.SECONDS)).isTrue();
@@ -1049,7 +1050,7 @@ void testNoAfterRollbackWhenFenced() throws Exception {
 		ContainerProperties props = new ContainerProperties(new TopicPartitionOffset("foo", 0),
 				new TopicPartitionOffset("foo", 1));
 		props.setGroupId("group");
-		props.setTransactionManager(tm);
+		props.setKafkaAwareTransactionManager(tm);
 		DefaultTransactionDefinition def = new DefaultTransactionDefinition();
 		def.setTimeout(42);
 		def.setName("myTx");
@@ -1085,29 +1086,66 @@ void testNoAfterRollbackWhenFenced() throws Exception {
 		assertThatIllegalStateException().isThrownBy(container::start);
 	}
 
-	@SuppressWarnings("serial")
-	public static class SomeOtherTransactionManager extends AbstractPlatformTransactionManager {
+	@Test
+	void testArbpWithoutRecovery() throws InterruptedException {
+		// init producer
+		Map producerProperties = KafkaTestUtils.producerProps(embeddedKafka);
+		DefaultKafkaProducerFactory pf = new DefaultKafkaProducerFactory<>(producerProperties);
+		pf.setTransactionIdPrefix("testArbpResetWithoutRecover.batchListener");
+		final KafkaTemplate template = new KafkaTemplate<>(pf);
+		// init consumer
+		String group = "groupInARBP3";
+		Map consumerProperties = KafkaTestUtils.consumerProps(group, "false", embeddedKafka);
+		consumerProperties.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
+		DefaultKafkaConsumerFactory cf = new DefaultKafkaConsumerFactory<>(consumerProperties);
+		ContainerProperties containerProps = new ContainerProperties(topic10);
+		containerProps.setPollTimeout(10_000);
+		containerProps.setBatchRecoverAfterRollback(false); // we want to test the behavior if recovery is disabled
+		final var successLatch = new AtomicReference<>(new CountDownLatch(2));
+		containerProps.setMessageListener(new BatchMessageListener<Integer, String>() {
+
+			private int attempt = 0;
 
-		@Override
-		protected Object doGetTransaction() throws TransactionException {
-			return new Object();
-		}
+			@Override
+			public void onMessage(List<ConsumerRecord<Integer, String>> data) {
+				if (3 > attempt++) { // the first three attempts should fail
+					throw new BatchListenerFailedException("fail for test", data.get(0));
+				}
+				data.forEach(d -> successLatch.get().countDown());
+			}
+		});
+		// init container
+		KafkaTransactionManager tm = new KafkaTransactionManager<>(pf);
+		containerProps.setKafkaAwareTransactionManager(tm);
+		KafkaMessageListenerContainer container = new KafkaMessageListenerContainer<>(cf, containerProps);
+		container.setBeanName("testArbpWithoutRecover");
+		DefaultAfterRollbackProcessor afterRollbackProcessor = spy(
+				new DefaultAfterRollbackProcessor<>(new FixedBackOff(0L, FixedBackOff.UNLIMITED_ATTEMPTS))
+		);
+		container.setAfterRollbackProcessor(afterRollbackProcessor);
+		container.start();
 
-		@Override
-		protected void doBegin(Object transaction, TransactionDefinition definition) throws TransactionException {
-			//noop
-		}
+		// process first batch
+		template.executeInTransaction(t -> {
+			template.send(new ProducerRecord<>(topic10, 0, 0, "bar1"));
+			template.send(new ProducerRecord<>(topic10, 0, 0, "bar2"));
+			return null;
+		});
+		assertThat(successLatch.get().await(30, TimeUnit.SECONDS)).isTrue(); // wait for first batch
 
-		@Override
-		protected void doCommit(DefaultTransactionStatus status) throws TransactionException {
-			//noop
-		}
+		// process second batch
+		successLatch.set(new CountDownLatch(2)); // reset latch
+		template.executeInTransaction(t -> {
+			template.send(new ProducerRecord<>(topic10, 0, 0, "bar4"));
+			template.send(new ProducerRecord<>(topic10, 0, 0, "bar5"));
+			return null;
+		});
+		assertThat(successLatch.get().await(30, TimeUnit.SECONDS)).isTrue(); // wait for second batch
 
-		@Override
-		protected void doRollback(DefaultTransactionStatus status) throws TransactionException {
-			//noop
-		}
+		// assert three processBatch calls due to the failed attempts + one call to clearThreadState
+		verify(afterRollbackProcessor, times(3)).processBatch(any(), any(), any(), any(), any(), anyBoolean(), any());
+		verify(afterRollbackProcessor).clearThreadState();
+		container.stop();
 	}
 
 }
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/BatchAdapterConversionErrorsTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/BatchAdapterConversionErrorsTests.java
index 4ac12d81cd..177f97c8b4 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/BatchAdapterConversionErrorsTests.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/BatchAdapterConversionErrorsTests.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2021-2023 the original author or authors.
+ * Copyright 2021-2024 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,10 +16,6 @@
 package org.springframework.kafka.listener.adapter;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
-import static org.mockito.Mockito.mock;
-
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Objects;
@@ -47,6 +43,10 @@
 import org.springframework.test.annotation.DirtiesContext;
 import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
 
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
+import static org.mockito.Mockito.mock;
+
 /**
  * @author Gary Russell
 * @since 2.8
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/BatchListenerWithRecordAdapterTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/BatchListenerWithRecordAdapterTests.java
index 0276612d89..e401d52028 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/BatchListenerWithRecordAdapterTests.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/BatchListenerWithRecordAdapterTests.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2019 the original author or authors.
+ * Copyright 2019-2024 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
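The new testArbpWithoutRecovery above exercises ContainerProperties.setBatchRecoverAfterRollback(false): when a transactional batch listener fails, the whole batch is replayed instead of the failing record being recovered, and a DefaultAfterRollbackProcessor with an unlimited FixedBackOff keeps replaying until the listener eventually succeeds. A condensed configuration sketch; pf, cf, and the topic name are assumed inputs, not part of the patch:

    import org.springframework.kafka.core.ConsumerFactory;
    import org.springframework.kafka.core.ProducerFactory;
    import org.springframework.kafka.listener.ContainerProperties;
    import org.springframework.kafka.listener.DefaultAfterRollbackProcessor;
    import org.springframework.kafka.listener.KafkaMessageListenerContainer;
    import org.springframework.kafka.transaction.KafkaTransactionManager;
    import org.springframework.util.backoff.FixedBackOff;

    class ArbpWithoutRecoverySketch {

        static KafkaMessageListenerContainer<Integer, String> container(
                ConsumerFactory<Integer, String> cf, ProducerFactory<Integer, String> pf) {
            ContainerProperties props = new ContainerProperties("txTopic10");
            props.setBatchRecoverAfterRollback(false); // replay the whole batch after a rollback
            props.setKafkaAwareTransactionManager(new KafkaTransactionManager<>(pf));
            KafkaMessageListenerContainer<Integer, String> container =
                    new KafkaMessageListenerContainer<>(cf, props);
            // retry forever (no recovery) until the listener stops throwing
            container.setAfterRollbackProcessor(new DefaultAfterRollbackProcessor<>(
                    new FixedBackOff(0L, FixedBackOff.UNLIMITED_ATTEMPTS)));
            return container;
        }
    }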
@@ -16,9 +16,6 @@
 package org.springframework.kafka.listener.adapter;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.Mockito.mock;
-
 import java.util.ArrayList;
 import java.util.List;
 
@@ -36,8 +33,12 @@
 import org.springframework.test.annotation.DirtiesContext;
 import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
 
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+
 /**
  * @author Gary Russell
+ * @author Soby Chacko
 * @since 2.2.5
 *
 */
@@ -52,8 +53,8 @@ void test(@Autowired KafkaListenerEndpointRegistry registry, @Autowired TestList
 		(BatchMessagingMessageListenerAdapter) registry
 				.getListenerContainer("batchRecordAdapter").getContainerProperties().getMessageListener();
 		List<ConsumerRecord<Integer, String>> records = new ArrayList<>();
-		records.add(new ConsumerRecord("foo", 0, 0, null, "foo"));
-		ConsumerRecord barRecord = new ConsumerRecord("foo", 0, 1, null, "bar");
+		records.add(new ConsumerRecord<>("foo", 0, 0, null, "foo"));
+		ConsumerRecord barRecord = new ConsumerRecord<>("foo", 0, 1, null, "bar");
 		records.add(barRecord);
 		records.add(new ConsumerRecord("foo", 0, 2, null, "baz"));
 		adapter.onMessage(records, null, null);
@@ -71,10 +72,10 @@ void testFullRecord(@Autowired KafkaListenerEndpointRegistry registry, @Autowire
 		(BatchMessagingMessageListenerAdapter) registry
 				.getListenerContainer("batchRecordAdapterFullRecord").getContainerProperties().getMessageListener();
 		List<ConsumerRecord<Integer, String>> records = new ArrayList<>();
-		records.add(new ConsumerRecord("foo", 0, 0, null, "foo"));
+		records.add(new ConsumerRecord<>("foo", 0, 0, null, "foo"));
 		ConsumerRecord barRecord = new ConsumerRecord("foo", 0, 1, null, "bar");
 		records.add(barRecord);
-		records.add(new ConsumerRecord("foo", 0, 2, null, "baz"));
+		records.add(new ConsumerRecord<>("foo", 0, 2, null, "baz"));
 		adapter.onMessage(records, null, null);
 		assertThat(foo.values2).contains("foo", "bar", "baz");
 		assertThat(config.failed).isNull();
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/BatchMessagingMessageListenerAdapterTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/BatchMessagingMessageListenerAdapterTests.java
index 40fef84669..cc9a9eaa96 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/BatchMessagingMessageListenerAdapterTests.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/BatchMessagingMessageListenerAdapterTests.java
@@ -16,14 +16,6 @@
 package org.springframework.kafka.listener.adapter;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.ArgumentMatchers.anyBoolean;
-import static org.mockito.Mockito.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -31,6 +23,7 @@
 
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.junit.jupiter.api.Test;
+import reactor.core.publisher.Mono;
 
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.context.annotation.Bean;
@@ -46,7 +39,13 @@
 import org.springframework.test.annotation.DirtiesContext;
 import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
 
-import reactor.core.publisher.Mono;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
 
 /**
  * @author Gary Russell
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/ConvertingMessageListenerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/ConvertingMessageListenerTests.java
index 380248e63f..62afbbb050 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/ConvertingMessageListenerTests.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/ConvertingMessageListenerTests.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2016-2022 the original author or authors.
+ * Copyright 2016-2024 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,13 +16,8 @@
 package org.springframework.kafka.listener.adapter;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.assertThatThrownBy;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
 import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.common.header.internals.RecordHeader;
@@ -38,8 +33,12 @@
 import org.springframework.messaging.converter.MessageConversionException;
 import org.springframework.messaging.converter.MessageConverter;
 
-import com.fasterxml.jackson.core.JsonProcessingException;
-import com.fasterxml.jackson.databind.ObjectMapper;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
 
 /**
  * @author Adrian Chlebosz
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/FilteringAdapterTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/FilteringAdapterTests.java
index 2b04bc3b80..42c5832beb 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/FilteringAdapterTests.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/FilteringAdapterTests.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2017-2019 the original author or authors.
+ * Copyright 2017-2024 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
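The FilteringAdapterTests additions below cover the RecordFilterStrategy.ignoreEmptyBatch() hook: when filterBatch(...) leaves nothing and ignoreEmptyBatch() returns true, the filtering adapter acknowledges the batch immediately and never invokes the delegate listener. A sketch of such a strategy, with type parameters mirroring the records used in the tests:

    import java.util.List;

    import org.apache.kafka.clients.consumer.ConsumerRecord;

    import org.springframework.kafka.listener.adapter.RecordFilterStrategy;

    class DropAllAndAckEmpty implements RecordFilterStrategy<String, String> {

        @Override
        public boolean filter(ConsumerRecord<String, String> record) {
            return true; // true means "filter this record out"
        }

        @Override
        public List<ConsumerRecord<String, String>> filterBatch(List<ConsumerRecord<String, String>> records) {
            return List.of(); // everything filtered away
        }

        @Override
        public boolean ignoreEmptyBatch() {
            return true; // ack the now-empty batch and skip the listener
        }
    }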
@@ -16,26 +16,29 @@
 package org.springframework.kafka.listener.adapter;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.BDDMockito.willAnswer;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.verify;
-
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.junit.jupiter.api.Test;
 
 import org.springframework.kafka.listener.BatchAcknowledgingMessageListener;
 import org.springframework.kafka.support.Acknowledgment;
 
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.BDDMockito.willAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.only;
+import static org.mockito.Mockito.verify;
+
 /**
  * @author Gary Russell
+ * @author Sanghyeok An
 * @since 2.0
 *
 */
@@ -46,7 +49,7 @@ public class FilteringAdapterTests {
 	public void testBatchFilter() throws Exception {
 		BatchAcknowledgingMessageListener listener = mock(BatchAcknowledgingMessageListener.class);
 		FilteringBatchMessageListenerAdapter adapter =
-				new FilteringBatchMessageListenerAdapter(listener, r -> false);
+				new FilteringBatchMessageListenerAdapter<>(listener, r -> false);
 		List<ConsumerRecord<String, String>> consumerRecords = new ArrayList<>();
 		final CountDownLatch latch = new CountDownLatch(1);
 		willAnswer(i -> {
@@ -64,12 +67,174 @@
 	public void testBatchFilterAckDiscard() throws Exception {
 		BatchAcknowledgingMessageListener listener = mock(BatchAcknowledgingMessageListener.class);
 		FilteringBatchMessageListenerAdapter adapter =
-				new FilteringBatchMessageListenerAdapter(listener, r -> false, true);
+				new FilteringBatchMessageListenerAdapter<>(listener, r -> false, true);
 		List<ConsumerRecord<String, String>> consumerRecords = new ArrayList<>();
 		final CountDownLatch latch = new CountDownLatch(1);
-		adapter.onMessage(consumerRecords, () -> latch.countDown(), null);
+		adapter.onMessage(consumerRecords, latch::countDown, null);
 		assertThat(latch.await(10, TimeUnit.SECONDS)).isTrue();
 		verify(listener, never()).onMessage(any(List.class), any(Acknowledgment.class));
 	}
 
+	@SuppressWarnings("unchecked")
+	@Test
+	public void listener_should_not_be_invoked_on_emptyList_and_ignoreEmptyBatch_true() {
+		// Given :
+		RecordFilterStrategy<String, String> filter = new RecordFilterStrategy<>() {
+
+			@Override
+			public boolean filter(ConsumerRecord<String, String> consumerRecord) {
+				return true;
+			}
+
+			@Override
+			public List<ConsumerRecord<String, String>> filterBatch(
+					List<ConsumerRecord<String, String>> consumerRecords) {
+				return List.of();
+			}
+
+			@Override
+			public boolean ignoreEmptyBatch() {
+				return true;
+			}
+		};
+
+		BatchAcknowledgingMessageListener<String, String> listener = mock();
+		FilteringBatchMessageListenerAdapter<String, String> adapter =
+				new FilteringBatchMessageListenerAdapter<>(listener, filter);
+		List<ConsumerRecord<String, String>> consumerRecords = new ArrayList<>();
+		Acknowledgment ack = mock();
+
+		// When :
+		adapter.onMessage(consumerRecords, ack, null);
+
+		// Then
+		verify(ack, only()).acknowledge();
+		verify(listener, never()).onMessage(any(List.class), any(Acknowledgment.class), any(KafkaConsumer.class));
+		verify(listener, never()).onMessage(any(List.class), any(Acknowledgment.class));
+		verify(listener, never()).onMessage(any(List.class), any(KafkaConsumer.class));
+		verify(listener, never()).onMessage(any(List.class));
+	}
+
+	@SuppressWarnings("unchecked")
+	@Test
+	public void listener_should_be_invoked_on_notEmptyList_and_ignoreEmptyBatch_true() throws Exception {
+		// Given :
+		RecordFilterStrategy<String, String> filter = new RecordFilterStrategy<>() {
+
+			@Override
+			public boolean filter(ConsumerRecord<String, String> consumerRecord) {
+				return true;
+			}
+
+			@Override
+			public List<ConsumerRecord<String, String>> filterBatch(
+					List<ConsumerRecord<String, String>> consumerRecords) {
+				return consumerRecords;
+			}
+
+			@Override
+			public boolean ignoreEmptyBatch() {
+				return true;
+			}
+		};
+
+		BatchAcknowledgingMessageListener<String, String> listener = mock();
+		FilteringBatchMessageListenerAdapter<String, String> adapter =
+				new FilteringBatchMessageListenerAdapter<>(listener, filter);
+		List<ConsumerRecord<String, String>> consumerRecords =
+				List.of(new ConsumerRecord<>("hello-topic", 1, 1, "hello-key", "hello-value"));
+		Acknowledgment ack = mock();
+
+		CountDownLatch latch = new CountDownLatch(1);
+		willAnswer(i -> {
+			latch.countDown();
+			return null;
+		}).given(listener).onMessage(any(List.class), any(Acknowledgment.class));
+
+		// When :
+		adapter.onMessage(consumerRecords, ack, null);
+
+		// Then
+		assertThat(latch.await(10, TimeUnit.SECONDS)).isTrue();
+		verify(ack, never()).acknowledge();
+	}
+
+	@SuppressWarnings("unchecked")
+	@Test
+	public void listener_should_be_invoked_on_emptyList_and_ignoreEmptyBatch_false() throws Exception {
+		// Given :
+		RecordFilterStrategy<String, String> filter = new RecordFilterStrategy<>() {
+
+			@Override
+			public boolean filter(ConsumerRecord<String, String> consumerRecord) {
+				return true;
+			}
+
+			@Override
+			public List<ConsumerRecord<String, String>> filterBatch(
+					List<ConsumerRecord<String, String>> consumerRecords) {
+				return List.of();
+			}
+		};
+
+		BatchAcknowledgingMessageListener<String, String> listener = mock();
+		FilteringBatchMessageListenerAdapter<String, String> adapter =
+				new FilteringBatchMessageListenerAdapter<>(listener, filter);
+		List<ConsumerRecord<String, String>> consumerRecords = new ArrayList<>();
+		Acknowledgment ack = mock();
+
+		CountDownLatch latch = new CountDownLatch(1);
+		willAnswer(i -> {
+			latch.countDown();
+			return null;
+		}).given(listener).onMessage(any(List.class), any(Acknowledgment.class));
+
+		// When :
+		adapter.onMessage(consumerRecords, ack, null);
+
+		// Then
+		assertThat(latch.await(10, TimeUnit.SECONDS)).isTrue();
+		verify(ack, never()).acknowledge();
+	}
+
+	@SuppressWarnings("unchecked")
+	@Test
+	public void listener_should_be_invoked_on_notEmptyList_and_ignoreEmptyBatch_false() throws Exception {
+		// Given :
+		RecordFilterStrategy<String, String> filter = new RecordFilterStrategy<>() {
+
+			@Override
+			public boolean filter(ConsumerRecord<String, String> consumerRecord) {
+				return true;
+			}
+
+			@Override
+			public List<ConsumerRecord<String, String>> filterBatch(
+					// System Under Test
+					List<ConsumerRecord<String, String>> consumerRecords) {
+				return consumerRecords;
+			}
+		};
+
+		BatchAcknowledgingMessageListener<String, String> listener = mock();
+		FilteringBatchMessageListenerAdapter<String, String> adapter =
+				new FilteringBatchMessageListenerAdapter<>(listener, filter);
+		List<ConsumerRecord<String, String>> consumerRecords =
+				List.of(new ConsumerRecord<>("hello-topic", 1, 1, "hello-key", "hello-value"));
+		Acknowledgment ack = mock();
+
+		final CountDownLatch latch = new CountDownLatch(1);
+		willAnswer(i -> {
+			latch.countDown();
+			return null;
+		}).given(listener).onMessage(any(List.class), any(Acknowledgment.class));
+
+		// When :
+		adapter.onMessage(consumerRecords, ack, null);
+
+		// Then
+		assertThat(latch.await(10, TimeUnit.SECONDS)).isTrue();
+		verify(ack, never()).acknowledge();
+	}
+
 }
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/KafkaBackoffAwareMessageListenerAdapterTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/KafkaBackoffAwareMessageListenerAdapterTests.java
index d411dababa..e77b577340 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/KafkaBackoffAwareMessageListenerAdapterTests.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/KafkaBackoffAwareMessageListenerAdapterTests.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2019-2023 the original author or authors.
+ * Copyright 2019-2024 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,17 +16,6 @@
 package org.springframework.kafka.listener.adapter;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.assertThatThrownBy;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.ArgumentMatchers.isNull;
-import static org.mockito.BDDMockito.given;
-import static org.mockito.BDDMockito.then;
-import static org.mockito.BDDMockito.willThrow;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-
 import java.math.BigInteger;
 import java.time.Clock;
 import java.time.Instant;
@@ -50,6 +39,17 @@
 import org.springframework.kafka.retrytopic.RetryTopicHeaders;
 import org.springframework.kafka.support.Acknowledgment;
 
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.ArgumentMatchers.isNull;
+import static org.mockito.BDDMockito.given;
+import static org.mockito.BDDMockito.then;
+import static org.mockito.BDDMockito.willThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+
 /**
  * @author Tomaz Fernandes
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/MessagingMessageListenerAdapterTests.java b/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/MessagingMessageListenerAdapterTests.java
index 800c399cba..5b879781ad 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/MessagingMessageListenerAdapterTests.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/listener/adapter/MessagingMessageListenerAdapterTests.java
@@ -16,20 +16,12 @@
 package org.springframework.kafka.listener.adapter;
 
-import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyBoolean;
-import static org.mockito.BDDMockito.willReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
 import java.lang.reflect.Method;
 import java.util.concurrent.CompletableFuture;
 
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.junit.jupiter.api.Test;
+import reactor.core.publisher.Mono;
 
 import org.springframework.kafka.annotation.KafkaListenerAnnotationBeanPostProcessor;
 import org.springframework.kafka.listener.AcknowledgingMessageListener;
@@ -38,7 +30,14 @@
 import org.springframework.kafka.support.converter.RecordMessageConverter;
 import org.springframework.messaging.support.GenericMessage;
 
-import reactor.core.publisher.Mono;
+import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.BDDMockito.willReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
 
 /**
  * @author Gary Russell
@@ -62,6 +61,7 @@ public void onMessage(ConsumerRecord data, Acknowledgment acknow
 			}
 
 		}
+
 		MyAdapter adapter = new MyAdapter();
 		adapter.setFallbackType(String.class);
 		RecordMessageConverter converter = mock(RecordMessageConverter.class);
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/requestreply/CorrelationKeyTests.java b/spring-kafka/src/test/java/org/springframework/kafka/requestreply/CorrelationKeyTests.java
index b6ba59a105..f3598c55b6 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/requestreply/CorrelationKeyTests.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/requestreply/CorrelationKeyTests.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2022 the original author or authors.
+ * Copyright 2022-2024 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,10 +16,10 @@
 package org.springframework.kafka.requestreply;
 
-import static org.assertj.core.api.Assertions.assertThat;
-
 import org.junit.jupiter.api.Test;
 
+import static org.assertj.core.api.Assertions.assertThat;
+
 /**
  * @author Gary Russell
 * @since 2.8.10
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/requestreply/ReplyingKafkaTemplateTests.java b/spring-kafka/src/test/java/org/springframework/kafka/requestreply/ReplyingKafkaTemplateTests.java
index dd7e406701..c587ac53a2 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/requestreply/ReplyingKafkaTemplateTests.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/requestreply/ReplyingKafkaTemplateTests.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2018-2023 the original author or authors.
+ * Copyright 2018-2024 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
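The ReplyingKafkaTemplateTests change below gives the request record a non-null key (1) and asserts that the reply echoes that key back. A usage sketch of the request/reply round trip; the topic name and an already-started template are assumed:

    import java.time.Duration;
    import java.util.concurrent.TimeUnit;

    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.header.internals.RecordHeaders;

    import org.springframework.kafka.requestreply.ReplyingKafkaTemplate;
    import org.springframework.kafka.requestreply.RequestReplyFuture;

    class KeyedRequestReplySketch {

        static String roundTrip(ReplyingKafkaTemplate<Integer, String, String> template) throws Exception {
            template.setDefaultReplyTimeout(Duration.ofSeconds(30));
            // topic/partition/timestamp/key/value/headers constructor; key is 1, as in the test
            ProducerRecord<Integer, String> request =
                    new ProducerRecord<>("aRequest", null, null, 1, "foo", new RecordHeaders());
            RequestReplyFuture<Integer, String, String> future = template.sendAndReceive(request);
            future.getSendFuture().get(10, TimeUnit.SECONDS); // send ok
            ConsumerRecord<Integer, String> reply = future.get(30, TimeUnit.SECONDS);
            // the reply is expected to carry the request key back
            return reply.key() + ":" + reply.value();
        }
    }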
@@ -16,15 +16,6 @@
 package org.springframework.kafka.requestreply;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
-import static org.assertj.core.api.Assertions.fail;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.BDDMockito.given;
-import static org.mockito.BDDMockito.willAnswer;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-
 import java.nio.ByteBuffer;
 import java.time.Duration;
 import java.util.ArrayList;
@@ -98,9 +89,19 @@
 import org.springframework.test.annotation.DirtiesContext;
 import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
 
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
+import static org.assertj.core.api.Assertions.fail;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.BDDMockito.given;
+import static org.mockito.BDDMockito.willAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+
 /**
  * @author Gary Russell
  * @author Nathan Xu
+ * @author Soby Chacko
 * @since 2.1.3
 *
 */
@@ -196,11 +197,12 @@ public void testGood() throws Exception {
 		template.setDefaultReplyTimeout(Duration.ofSeconds(30));
 		Headers headers = new RecordHeaders();
 		headers.add("baz", "buz".getBytes());
-		ProducerRecord record = new ProducerRecord<>(A_REQUEST, null, null, null, "foo", headers);
+		ProducerRecord record = new ProducerRecord<>(A_REQUEST, null, null, 1, "foo", headers);
 		RequestReplyFuture future = template.sendAndReceive(record);
 		future.getSendFuture().get(10, TimeUnit.SECONDS); // send ok
 		ConsumerRecord consumerRecord = future.get(30, TimeUnit.SECONDS);
 		assertThat(consumerRecord.value()).isEqualTo("FOO");
+		assertThat(consumerRecord.key()).isEqualTo(1);
 		Map receivedHeaders = new HashMap<>();
 		new DefaultKafkaHeaderMapper().toHeaders(consumerRecord.headers(), receivedHeaders);
 		assertThat(receivedHeaders).containsKey("baz");
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/AsyncCompletableFutureRetryTopicClassLevelIntegrationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/AsyncCompletableFutureRetryTopicClassLevelIntegrationTests.java
new file mode 100644
index 0000000000..974434b567
--- /dev/null
+++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/AsyncCompletableFutureRetryTopicClassLevelIntegrationTests.java
@@ -0,0 +1,944 @@
+/*
+ * Copyright 2024 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.springframework.kafka.retrytopic;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.kafka.clients.admin.AdminClientConfig;
+import org.apache.kafka.clients.admin.NewTopic;
+import org.apache.kafka.clients.admin.TopicDescription;
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.OffsetAndMetadata;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.common.serialization.StringDeserializer;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.assertj.core.api.InstanceOfAssertFactories;
+import org.junit.jupiter.api.Test;
+
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.kafka.annotation.DltHandler;
+import org.springframework.kafka.annotation.EnableKafka;
+import org.springframework.kafka.annotation.KafkaHandler;
+import org.springframework.kafka.annotation.KafkaListener;
+import org.springframework.kafka.annotation.PartitionOffset;
+import org.springframework.kafka.annotation.RetryableTopic;
+import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
+import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
+import org.springframework.kafka.config.TopicBuilder;
+import org.springframework.kafka.core.ConsumerFactory;
+import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
+import org.springframework.kafka.core.DefaultKafkaProducerFactory;
+import org.springframework.kafka.core.KafkaAdmin;
+import org.springframework.kafka.core.KafkaAdmin.NewTopics;
+import org.springframework.kafka.core.KafkaTemplate;
+import org.springframework.kafka.core.ProducerFactory;
+import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
+import org.springframework.kafka.listener.ContainerProperties;
+import org.springframework.kafka.listener.ContainerProperties.AckMode;
+import org.springframework.kafka.listener.KafkaListenerErrorHandler;
+import org.springframework.kafka.support.Acknowledgment;
+import org.springframework.kafka.support.KafkaHeaders;
+import org.springframework.kafka.test.EmbeddedKafkaBroker;
+import org.springframework.kafka.test.context.EmbeddedKafka;
+import org.springframework.kafka.test.utils.KafkaTestUtils;
+import org.springframework.messaging.Message;
+import org.springframework.messaging.converter.CompositeMessageConverter;
+import org.springframework.messaging.converter.GenericMessageConverter;
+import org.springframework.messaging.converter.SmartMessageConverter;
+import org.springframework.messaging.handler.annotation.Header;
+import org.springframework.retry.annotation.Backoff;
+import org.springframework.scheduling.TaskScheduler;
+import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler;
+import org.springframework.test.annotation.DirtiesContext;
+import org.springframework.test.context.TestPropertySource;
+import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.fail;
+import static org.awaitility.Awaitility.await;
+
+/**
+ * @author Sanghyeok An
+ * @since 3.3.0
+ */
+
+@SpringJUnitConfig
+@DirtiesContext
+@EmbeddedKafka
+@TestPropertySource(properties = { "five.attempts=5", "kafka.template=customKafkaTemplate"})
+public class AsyncCompletableFutureRetryTopicClassLevelIntegrationTests {
+
+	public final static String FIRST_TOPIC = "myRetryTopic1";
+
+	public final static String SECOND_TOPIC = "myRetryTopic2";
+
+	public final static String THIRD_TOPIC = "myRetryTopic3";
+
+	public final static String FOURTH_TOPIC = "myRetryTopic4";
+
+	public final static String TWO_LISTENERS_TOPIC = "myRetryTopic5";
+
+	public final static String MANUAL_TOPIC = "myRetryTopic6";
+
+	public final static String NOT_RETRYABLE_EXCEPTION_TOPIC = "noRetryTopic";
+
+	public final static String FIRST_REUSE_RETRY_TOPIC = "reuseRetry1";
+
+	public final static String SECOND_REUSE_RETRY_TOPIC = "reuseRetry2";
+
+	public final static String THIRD_REUSE_RETRY_TOPIC = "reuseRetry3";
+
+	private final static String MAIN_TOPIC_CONTAINER_FACTORY = "kafkaListenerContainerFactory";
+
+	@Autowired
+	private KafkaTemplate kafkaTemplate;
+
+	@Autowired
+	private CountDownLatchContainer latchContainer;
+
+	@Autowired
+	DestinationTopicContainer topicContainer;
+
+	@Test
+	void shouldRetryFirstTopic(@Autowired KafkaListenerEndpointRegistry registry) {
+		kafkaTemplate.send(FIRST_TOPIC, "Testing topic 1");
+		assertThat(topicContainer.getNextDestinationTopicFor("firstTopicId", FIRST_TOPIC).getDestinationName())
+				.isEqualTo("myRetryTopic1-retry");
+		assertThat(awaitLatch(latchContainer.countDownLatch1)).isTrue();
+		assertThat(awaitLatch(latchContainer.customDltCountdownLatch)).isTrue();
+		assertThat(awaitLatch(latchContainer.customErrorHandlerCountdownLatch)).isTrue();
+		assertThat(awaitLatch(latchContainer.customMessageConverterCountdownLatch)).isTrue();
+		registry.getListenerContainerIds().stream()
+				.filter(id -> id.startsWith("first"))
+				.forEach(id -> {
+					ConcurrentMessageListenerContainer container = (ConcurrentMessageListenerContainer) registry
+							.getListenerContainer(id);
+					if (id.equals("firstTopicId")) {
+						assertThat(container.getConcurrency()).isEqualTo(2);
+					}
+					else {
+						assertThat(container.getConcurrency())
+								.describedAs("Expected %s to have concurrency", id)
+								.isEqualTo(1);
+					}
+				});
+	}
+
+	@Test
+	void shouldRetrySecondTopic() {
+		kafkaTemplate.send(SECOND_TOPIC, "Testing topic 2");
+		assertThat(awaitLatch(latchContainer.countDownLatch2)).isTrue();
+		assertThat(awaitLatch(latchContainer.customDltCountdownLatch)).isTrue();
+	}
+
+	@Test
+	void shouldRetryThirdTopicWithTimeout(
+			@Autowired KafkaAdmin admin,
+			@Autowired KafkaListenerEndpointRegistry registry) {
+
+		kafkaTemplate.send(THIRD_TOPIC, "Testing topic 3");
+		assertThat(awaitLatch(latchContainer.countDownLatch3)).isTrue();
+		assertThat(awaitLatch(latchContainer.countDownLatchDltOne)).isTrue();
+		Map topics = admin.describeTopics(THIRD_TOPIC, THIRD_TOPIC + "-dlt", FOURTH_TOPIC);
+		assertThat(topics.get(THIRD_TOPIC).partitions()).hasSize(2);
+		assertThat(topics.get(THIRD_TOPIC + "-dlt").partitions()).hasSize(3);
+		assertThat(topics.get(FOURTH_TOPIC).partitions()).hasSize(2);
+		registry.getListenerContainerIds().stream()
+				.filter(id -> id.startsWith("third"))
+				.forEach(id -> {
+					ConcurrentMessageListenerContainer container =
+							(ConcurrentMessageListenerContainer) registry.getListenerContainer(id);
+					if (id.equals("thirdTopicId")) {
+						assertThat(container.getConcurrency()).isEqualTo(2);
+					}
+					else {
+						assertThat(container.getConcurrency())
+								.describedAs("Expected %s to have concurrency", id)
+								.isEqualTo(1);
+					}
+				});
+	}
+
+	@Test
+	void shouldRetryFourthTopicWithNoDlt() {
+		kafkaTemplate.send(FOURTH_TOPIC, "Testing topic 4");
+		assertThat(awaitLatch(latchContainer.countDownLatch4)).isTrue();
+	}
+
+	@Test
+	void shouldRetryFifthTopicWithTwoListenersAndManualAssignment(
+			@Autowired FifthTopicListener1 listener1,
+			@Autowired FifthTopicListener2 listener2) {
+
+		kafkaTemplate.send(TWO_LISTENERS_TOPIC, 0, "0", "Testing topic 5 - 0");
+		kafkaTemplate.send(TWO_LISTENERS_TOPIC, 1, "0", "Testing topic 5 - 1");
+		assertThat(awaitLatch(latchContainer.countDownLatch51)).isTrue();
+		assertThat(awaitLatch(latchContainer.countDownLatch52)).isTrue();
+		assertThat(awaitLatch(latchContainer.countDownLatchDltThree)).isTrue();
+		assertThat(listener1.topics).containsExactly(
+				TWO_LISTENERS_TOPIC,
+				TWO_LISTENERS_TOPIC + "-listener1-0",
+				TWO_LISTENERS_TOPIC + "-listener1-1",
+				TWO_LISTENERS_TOPIC + "-listener1-2",
+				TWO_LISTENERS_TOPIC + "-listener1-dlt"
+		);
+		assertThat(listener2.topics).containsExactly(
+				TWO_LISTENERS_TOPIC,
+				TWO_LISTENERS_TOPIC + "-listener2-0",
+				TWO_LISTENERS_TOPIC + "-listener2-1",
+				TWO_LISTENERS_TOPIC + "-listener2-2",
+				TWO_LISTENERS_TOPIC + "-listener2-dlt");
+	}
+
+	@Test
+	void shouldRetryManualTopicWithDefaultDlt(
+			@Autowired KafkaListenerEndpointRegistry registry,
+			@Autowired ConsumerFactory cf) {
+
+		kafkaTemplate.send(MANUAL_TOPIC, "Testing topic 6");
+		assertThat(awaitLatch(latchContainer.countDownLatch6)).isTrue();
+		registry.getListenerContainerIds().stream()
+				.filter(id -> id.startsWith("manual"))
+				.forEach(id -> {
+					ConcurrentMessageListenerContainer container =
+							(ConcurrentMessageListenerContainer) registry.getListenerContainer(id);
+					assertThat(container)
+							.extracting("commonErrorHandler")
+							.extracting("seekAfterError", InstanceOfAssertFactories.BOOLEAN)
+							.isFalse();
+				});
+		Consumer consumer = cf.createConsumer("manual-dlt", "");
+		Set tp =
+				Set.of(new org.apache.kafka.common.TopicPartition(MANUAL_TOPIC + "-dlt", 0));
+		consumer.assign(tp);
+		try {
+			await().untilAsserted(() -> {
+				OffsetAndMetadata offsetAndMetadata = consumer.committed(tp).get(tp.iterator().next());
+				assertThat(offsetAndMetadata).isNotNull();
+				assertThat(offsetAndMetadata.offset()).isEqualTo(1L);
+			});
+		}
+		finally {
+			consumer.close();
+		}
+	}
+
+	@Test
+	void shouldFirstReuseRetryTopic(@Autowired FirstReuseRetryTopicListener listener1,
+			@Autowired SecondReuseRetryTopicListener listener2,
+			@Autowired ThirdReuseRetryTopicListener listener3) {
+
+		kafkaTemplate.send(FIRST_REUSE_RETRY_TOPIC, "Testing reuse topic 1");
+		kafkaTemplate.send(SECOND_REUSE_RETRY_TOPIC, "Testing reuse topic 2");
+		kafkaTemplate.send(THIRD_REUSE_RETRY_TOPIC, "Testing reuse topic 3");
+		assertThat(awaitLatch(latchContainer.countDownLatchReuseOne)).isTrue();
+		assertThat(awaitLatch(latchContainer.countDownLatchReuseTwo)).isTrue();
+		assertThat(awaitLatch(latchContainer.countDownLatchReuseThree)).isTrue();
+		assertThat(listener1.topics).containsExactly(
+				FIRST_REUSE_RETRY_TOPIC,
+				FIRST_REUSE_RETRY_TOPIC + "-retry");
+		assertThat(listener2.topics).containsExactly(
+				SECOND_REUSE_RETRY_TOPIC,
+				SECOND_REUSE_RETRY_TOPIC + "-retry-30",
+				SECOND_REUSE_RETRY_TOPIC + "-retry-60",
+				SECOND_REUSE_RETRY_TOPIC + "-retry-100",
+				SECOND_REUSE_RETRY_TOPIC + "-retry-100");
+		assertThat(listener3.topics).containsExactly(
+				THIRD_REUSE_RETRY_TOPIC,
+				THIRD_REUSE_RETRY_TOPIC + "-retry",
+				THIRD_REUSE_RETRY_TOPIC + "-retry",
+				THIRD_REUSE_RETRY_TOPIC + "-retry",
+				THIRD_REUSE_RETRY_TOPIC + "-retry");
+	}
+
+	@Test
+	void shouldGoStraightToDlt() {
+		kafkaTemplate.send(NOT_RETRYABLE_EXCEPTION_TOPIC, "Testing topic with annotation 1");
+		assertThat(awaitLatch(latchContainer.countDownLatchNoRetry)).isTrue();
+		assertThat(awaitLatch(latchContainer.countDownLatchDltTwo)).isTrue();
+	}
+
+	private boolean awaitLatch(CountDownLatch latch) {
+		try {
+			return latch.await(60, TimeUnit.SECONDS);
+		}
+		catch (Exception e) {
+			fail(e.getMessage());
+			throw new RuntimeException(e);
+		}
+	}
+
+	@KafkaListener(
+			id = "firstTopicId",
+			topics = FIRST_TOPIC,
+			containerFactory = MAIN_TOPIC_CONTAINER_FACTORY,
+			errorHandler = "myCustomErrorHandler",
+			contentTypeConverter = "myCustomMessageConverter",
+			concurrency = "2")
+	static class FirstTopicListener {
+
+		@Autowired
+		DestinationTopicContainer topicContainer;
+
+		@Autowired
+		CountDownLatchContainer container;
+
+		@KafkaHandler
+		public CompletableFuture listen(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) {
+			return CompletableFuture.supplyAsync(() -> {
+				try {
+					throw new RuntimeException("Woooops... in topic " + receivedTopic);
+				}
+				catch (RuntimeException e) {
+					throw e;
+				}
+				finally {
+					container.countDownLatch1.countDown();
+				}
+			});
+		}
+
+	}
+
+	@KafkaListener(topics = SECOND_TOPIC, containerFactory = MAIN_TOPIC_CONTAINER_FACTORY)
+	static class SecondTopicListener {
+
+		@Autowired
+		CountDownLatchContainer container;
+
+		@KafkaHandler
+		public CompletableFuture listenAgain(String message,
+				@Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) {
+
+			return CompletableFuture.supplyAsync(() -> {
+				try {
+					throw new IllegalStateException("Another woooops... " + receivedTopic);
+				}
+				finally {
+					container.countDownIfNotKnown(receivedTopic, container.countDownLatch2);
+				}
+			});
+		}
+	}
+
+	@RetryableTopic(
+			attempts = "${five.attempts}",
+			backoff = @Backoff(delay = 25, maxDelay = 1000, multiplier = 1.5),
+			numPartitions = "#{3}",
+			timeout = "${missing.property:100000}",
+			include = MyRetryException.class, kafkaTemplate = "${kafka.template}",
+			topicSuffixingStrategy = TopicSuffixingStrategy.SUFFIX_WITH_INDEX_VALUE,
+			concurrency = "1")
+	@KafkaListener(
+			id = "thirdTopicId",
+			topics = THIRD_TOPIC,
+			containerFactory = MAIN_TOPIC_CONTAINER_FACTORY,
+			concurrency = "2")
+	static class ThirdTopicListener {
+
+		@Autowired
+		CountDownLatchContainer container;
+
+		@KafkaHandler
+		public CompletableFuture listenWithAnnotation(String message,
+				@Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) {
+
+			return CompletableFuture.supplyAsync(() -> {
+				try {
+					throw new MyRetryException("Annotated woooops... " + receivedTopic);
+				}
+				finally {
+					container.countDownIfNotKnown(receivedTopic, container.countDownLatch3);
+				}
+			});
+		}
+
+		@DltHandler
+		public void annotatedDltMethod(Object message) {
+			container.countDownLatchDltOne.countDown();
+		}
+	}
+
+	@RetryableTopic(dltStrategy = DltStrategy.NO_DLT, attempts = "4", backoff = @Backoff(30),
+			sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.MULTIPLE_TOPICS,
+			kafkaTemplate = "${kafka.template}")
+	@KafkaListener(topics = FOURTH_TOPIC, containerFactory = MAIN_TOPIC_CONTAINER_FACTORY)
+	static class FourthTopicListener {
+
+		@Autowired
+		CountDownLatchContainer container;
+
+		@KafkaHandler
+		public CompletableFuture listenNoDlt(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) {
+			return CompletableFuture.supplyAsync(() -> {
+				try {
+					throw new IllegalStateException("Another woooops... " + receivedTopic);
+				}
+				finally {
+					container.countDownIfNotKnown(receivedTopic, container.countDownLatch4);
+				}
+			});
+		}
+
+		@DltHandler
+		public void shouldNotGetHere() {
+			fail("Dlt should not be processed!");
+		}
+	}
+
+	static class AbstractFifthTopicListener {
+
+		final List topics = Collections.synchronizedList(new ArrayList<>());
+
+		@Autowired
+		CountDownLatchContainer container;
+
+		@DltHandler
+		public void annotatedDltMethod(ConsumerRecord record) {
+			this.topics.add(record.topic());
+			container.countDownLatchDltThree.countDown();
+		}
+
+	}
+
+	@RetryableTopic(
+			attempts = "4",
+			backoff = @Backoff(25),
+			numPartitions = "2",
+			retryTopicSuffix = "-listener1",
+			dltTopicSuffix = "-listener1-dlt",
+			topicSuffixingStrategy = TopicSuffixingStrategy.SUFFIX_WITH_INDEX_VALUE,
+			sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.MULTIPLE_TOPICS,
+			kafkaTemplate = "${kafka.template}")
+	@KafkaListener(
+			id = "fifthTopicId1",
+			topicPartitions = {@org.springframework.kafka.annotation.TopicPartition(topic = TWO_LISTENERS_TOPIC,
+					partitionOffsets = @PartitionOffset(partition = "0", initialOffset = "0"))},
+			containerFactory = MAIN_TOPIC_CONTAINER_FACTORY)
+	static class FifthTopicListener1 extends AbstractFifthTopicListener {
+
+		@KafkaHandler
+		public CompletableFuture listenWithAnnotation(String message,
+				@Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) {
+
+			this.topics.add(receivedTopic);
+			return CompletableFuture.supplyAsync(() -> {
+				try {
+					throw new RuntimeException("Annotated woooops... " + receivedTopic);
+				}
+				finally {
+					container.countDownIfNotKnown(receivedTopic, container.countDownLatch51);
+				}
+			});
+		}
+
+	}
+
+	@RetryableTopic(
+			attempts = "4",
+			backoff = @Backoff(25),
+			numPartitions = "2",
+			retryTopicSuffix = "-listener2",
+			dltTopicSuffix = "-listener2-dlt",
+			topicSuffixingStrategy = TopicSuffixingStrategy.SUFFIX_WITH_INDEX_VALUE,
+			sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.MULTIPLE_TOPICS,
+			kafkaTemplate = "${kafka.template}")
+	@KafkaListener(
+			id = "fifthTopicId2",
+			topicPartitions = {@org.springframework.kafka.annotation.TopicPartition(topic = TWO_LISTENERS_TOPIC,
+					partitionOffsets = @PartitionOffset(partition = "1", initialOffset = "0"))},
+			containerFactory = MAIN_TOPIC_CONTAINER_FACTORY)
+	static class FifthTopicListener2 extends AbstractFifthTopicListener {
+
+		@KafkaHandler
+		public CompletableFuture listenWithAnnotation2(String message,
+				@Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) {
+
+			this.topics.add(receivedTopic);
+			return CompletableFuture.supplyAsync(() -> {
+				try {
+					throw new RuntimeException("Annotated woooops... " + receivedTopic);
+				}
+				finally {
+					container.countDownLatch52.countDown();
+				}
+			});
+		}
+
+	}
+
+	@RetryableTopic(
+			attempts = "4",
+			backoff = @Backoff(50),
+			sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.MULTIPLE_TOPICS)
+	@KafkaListener(
+			id = "manual",
+			topics = MANUAL_TOPIC,
+			containerFactory = MAIN_TOPIC_CONTAINER_FACTORY)
+	static class SixthTopicDefaultDLTListener {
+
+		@Autowired
+		CountDownLatchContainer container;
+
+		@KafkaHandler
+		public CompletableFuture listenNoDlt(
+				String message,
+				@Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic,
+				@SuppressWarnings("unused") Acknowledgment ack) {
+			return CompletableFuture.supplyAsync(() -> {
+				try {
+					throw new IllegalStateException("Another woooops... " + receivedTopic);
+				}
+				finally {
+					container.countDownIfNotKnown(receivedTopic, container.countDownLatch6);
+				}
+			});
+		}
+
+	}
+
+	@RetryableTopic(
+			attempts = "3",
+			numPartitions = "3",
+			exclude = MyDontRetryException.class,
+			backoff = @Backoff(delay = 50, maxDelay = 100, multiplier = 3),
+			traversingCauses = "true",
+			kafkaTemplate = "${kafka.template}")
+	@KafkaListener(topics = NOT_RETRYABLE_EXCEPTION_TOPIC, containerFactory = MAIN_TOPIC_CONTAINER_FACTORY)
+	static class NoRetryTopicListener {
+
+		@Autowired
+		CountDownLatchContainer container;
+
+		@KafkaHandler
+		public CompletableFuture listenWithAnnotation2(String message,
+				@Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) {
+
+			return CompletableFuture.supplyAsync(() -> {
+				try {
+					throw new MyDontRetryException("Annotated second woooops...
" + receivedTopic); + } + finally { + container.countDownIfNotKnown(receivedTopic, container.countDownLatchNoRetry); + } + }); + } + + @DltHandler + public void annotatedDltMethod(Object message) { + container.countDownLatchDltTwo.countDown(); + } + } + + @RetryableTopic( + attempts = "2", + backoff = @Backoff(50)) + @KafkaListener( + id = "reuseRetry1", + topics = FIRST_REUSE_RETRY_TOPIC, + containerFactory = "retryTopicListenerContainerFactory") + static class FirstReuseRetryTopicListener { + + final List topics = Collections.synchronizedList(new ArrayList<>()); + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public CompletableFuture listen1(String message, + @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + + this.topics.add(receivedTopic); + return CompletableFuture.supplyAsync(() -> { + try { + throw new RuntimeException("Another woooops... " + receivedTopic); + } + finally { + container.countDownLatchReuseOne.countDown(); + } + }); + } + + } + + @RetryableTopic( + attempts = "5", + backoff = @Backoff(delay = 30, maxDelay = 100, multiplier = 2)) + @KafkaListener( + id = "reuseRetry2", + topics = SECOND_REUSE_RETRY_TOPIC, + containerFactory = "retryTopicListenerContainerFactory") + static class SecondReuseRetryTopicListener { + + final List topics = Collections.synchronizedList(new ArrayList<>()); + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public CompletableFuture listen2(String message, + @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + + this.topics.add(receivedTopic); + return CompletableFuture.supplyAsync(() -> { + try { + throw new RuntimeException("Another woooops... " + receivedTopic); + } + finally { + container.countDownLatchReuseTwo.countDown(); + } + }); + } + + } + + @RetryableTopic(attempts = "5", backoff = @Backoff(delay = 1, maxDelay = 5, multiplier = 1.4)) + @KafkaListener(id = "reuseRetry3", topics = THIRD_REUSE_RETRY_TOPIC, + containerFactory = "retryTopicListenerContainerFactory") + static class ThirdReuseRetryTopicListener { + + final List topics = Collections.synchronizedList(new ArrayList<>()); + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public CompletableFuture listen3(String message, + @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + + this.topics.add(receivedTopic); + return CompletableFuture.supplyAsync(() -> { + try { + throw new RuntimeException("Another woooops... 
" + receivedTopic); + } + finally { + container.countDownLatchReuseThree.countDown(); + } + }); + + } + + } + + static class CountDownLatchContainer { + + CountDownLatch countDownLatch1 = new CountDownLatch(5); + + CountDownLatch countDownLatch2 = new CountDownLatch(3); + + CountDownLatch countDownLatch3 = new CountDownLatch(3); + + CountDownLatch countDownLatch4 = new CountDownLatch(4); + + CountDownLatch countDownLatch51 = new CountDownLatch(4); + + CountDownLatch countDownLatch52 = new CountDownLatch(4); + + CountDownLatch countDownLatch6 = new CountDownLatch(4); + + CountDownLatch countDownLatchNoRetry = new CountDownLatch(1); + + CountDownLatch countDownLatchDltOne = new CountDownLatch(1); + + CountDownLatch countDownLatchDltTwo = new CountDownLatch(1); + + CountDownLatch countDownLatchDltThree = new CountDownLatch(2); + + CountDownLatch countDownLatchReuseOne = new CountDownLatch(2); + + CountDownLatch countDownLatchReuseTwo = new CountDownLatch(5); + + CountDownLatch countDownLatchReuseThree = new CountDownLatch(5); + + CountDownLatch customDltCountdownLatch = new CountDownLatch(1); + + CountDownLatch customErrorHandlerCountdownLatch = new CountDownLatch(6); + + CountDownLatch customMessageConverterCountdownLatch = new CountDownLatch(6); + + final List knownTopics = new ArrayList<>(); + + private void countDownIfNotKnown(String receivedTopic, CountDownLatch countDownLatch) { + synchronized (knownTopics) { + if (!knownTopics.contains(receivedTopic)) { + knownTopics.add(receivedTopic); + countDownLatch.countDown(); + } + } + } + } + + static class MyCustomDltProcessor { + + @Autowired + CountDownLatchContainer container; + + public void processDltMessage(Object message) { + try { + throw new RuntimeException("Dlt Error!"); + } + finally { + container.customDltCountdownLatch.countDown(); + } + } + } + + @SuppressWarnings("serial") + static class MyRetryException extends RuntimeException { + MyRetryException(String msg) { + super(msg); + } + } + + @SuppressWarnings("serial") + static class MyDontRetryException extends RuntimeException { + MyDontRetryException(String msg) { + super(msg); + } + } + + @Configuration + static class RetryTopicConfigurations extends RetryTopicConfigurationSupport { + + private static final String DLT_METHOD_NAME = "processDltMessage"; + + @Bean + RetryTopicConfiguration firstRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .fixedBackOff(50) + .maxAttempts(5) + .concurrency(1) + .useSingleTopicForSameIntervals() + .includeTopic(FIRST_TOPIC) + .doNotRetryOnDltFailure() + .dltHandlerMethod("myCustomDltProcessor", DLT_METHOD_NAME) + .create(template); + } + + @Bean + RetryTopicConfiguration secondRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .exponentialBackoff(50, 2, 1000) + .retryOn(Arrays.asList(IllegalStateException.class, IllegalAccessException.class)) + .traversingCauses() + .includeTopic(SECOND_TOPIC) + .doNotRetryOnDltFailure() + .dltHandlerMethod("myCustomDltProcessor", DLT_METHOD_NAME) + .create(template); + } + + @Bean + FirstTopicListener firstTopicListener() { + return new FirstTopicListener(); + } + + @Bean + KafkaListenerErrorHandler myCustomErrorHandler( + CountDownLatchContainer container) { + return (message, exception) -> { + container.customErrorHandlerCountdownLatch.countDown(); + throw exception; + }; + } + + @Bean + SmartMessageConverter myCustomMessageConverter( + CountDownLatchContainer container) { + return new 
CompositeMessageConverter(Collections.singletonList(new GenericMessageConverter())) { + + @Override + public Object fromMessage(Message message, Class targetClass, Object conversionHint) { + container.customMessageConverterCountdownLatch.countDown(); + return super.fromMessage(message, targetClass, conversionHint); + } + }; + } + + @Bean + SecondTopicListener secondTopicListener() { + return new SecondTopicListener(); + } + + @Bean + ThirdTopicListener thirdTopicListener() { + return new ThirdTopicListener(); + } + + @Bean + FourthTopicListener fourthTopicListener() { + return new FourthTopicListener(); + } + + @Bean + FifthTopicListener1 fifthTopicListener1() { + return new FifthTopicListener1(); + } + + @Bean + FifthTopicListener2 fifthTopicListener2() { + return new FifthTopicListener2(); + } + + @Bean + SixthTopicDefaultDLTListener manualTopicListener() { + return new SixthTopicDefaultDLTListener(); + } + + @Bean + NoRetryTopicListener noRetryTopicListener() { + return new NoRetryTopicListener(); + } + + @Bean + FirstReuseRetryTopicListener firstReuseRetryTopicListener() { + return new FirstReuseRetryTopicListener(); + } + + @Bean + SecondReuseRetryTopicListener secondReuseRetryTopicListener() { + return new SecondReuseRetryTopicListener(); + } + + @Bean + ThirdReuseRetryTopicListener thirdReuseRetryTopicListener() { + return new ThirdReuseRetryTopicListener(); + } + + @Bean + CountDownLatchContainer latchContainer() { + return new CountDownLatchContainer(); + } + + @Bean + MyCustomDltProcessor myCustomDltProcessor() { + return new MyCustomDltProcessor(); + } + } + + @Configuration + static class KafkaProducerConfig { + + @Autowired + EmbeddedKafkaBroker broker; + + @Bean + ProducerFactory producerFactory() { + Map props = KafkaTestUtils.producerProps( + this.broker.getBrokersAsString()); + props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); + return new DefaultKafkaProducerFactory<>(props); + } + + @Bean("customKafkaTemplate") + KafkaTemplate kafkaTemplate() { + return new KafkaTemplate<>(producerFactory()); + } + } + + @EnableKafka + @Configuration + static class KafkaConsumerConfig { + + @Autowired + EmbeddedKafkaBroker broker; + + @Bean + KafkaAdmin kafkaAdmin() { + Map configs = new HashMap<>(); + configs.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, this.broker.getBrokersAsString()); + return new KafkaAdmin(configs); + } + + @Bean + NewTopic topic() { + return TopicBuilder.name(THIRD_TOPIC).partitions(2).replicas(1).build(); + } + + @Bean + NewTopics topics() { + return new NewTopics(TopicBuilder.name(FOURTH_TOPIC).partitions(2).replicas(1).build()); + } + + @Bean + ConsumerFactory consumerFactory() { + Map props = KafkaTestUtils.consumerProps( + this.broker.getBrokersAsString(), + "groupId", + "false"); + props.put( + ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, + StringDeserializer.class); + return new DefaultKafkaConsumerFactory<>(props); + } + + @Bean + ConcurrentKafkaListenerContainerFactory retryTopicListenerContainerFactory( + ConsumerFactory consumerFactory) { + + ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); + ContainerProperties props = factory.getContainerProperties(); + props.setIdleEventInterval(100L); + props.setPollTimeout(50L); + props.setIdlePartitionEventInterval(100L); + factory.setConsumerFactory(consumerFactory); + factory.setConcurrency(1); + factory.setContainerCustomizer( + container -> container.getContainerProperties().setIdlePartitionEventInterval(100L)); + return 
factory; + } + + @Bean + ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory( + ConsumerFactory consumerFactory) { + + ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); + factory.setConsumerFactory(consumerFactory); + factory.setConcurrency(1); + factory.setContainerCustomizer(container -> { + if (container.getListenerId().startsWith("manual")) { + container.getContainerProperties().setAckMode(AckMode.MANUAL); + container.getContainerProperties().setAsyncAcks(true); + } + }); + return factory; + } + + @Bean + TaskScheduler sched() { + return new ThreadPoolTaskScheduler(); + } + + } + +} diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/AsyncCompletableFutureRetryTopicScenarioTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/AsyncCompletableFutureRetryTopicScenarioTests.java new file mode 100644 index 0000000000..f1b939b638 --- /dev/null +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/AsyncCompletableFutureRetryTopicScenarioTests.java @@ -0,0 +1,1380 @@ +/* + * Copyright 2024 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.kafka.retrytopic; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.common.serialization.StringSerializer; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.condition.DisabledIfEnvironmentVariable; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.kafka.annotation.EnableKafka; +import org.springframework.kafka.annotation.KafkaHandler; +import org.springframework.kafka.annotation.KafkaListener; +import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; +import org.springframework.kafka.core.ConsumerFactory; +import org.springframework.kafka.core.DefaultKafkaConsumerFactory; +import org.springframework.kafka.core.DefaultKafkaProducerFactory; +import org.springframework.kafka.core.KafkaTemplate; +import org.springframework.kafka.core.ProducerFactory; +import org.springframework.kafka.listener.ContainerProperties; +import org.springframework.kafka.listener.ContainerProperties.AckMode; +import org.springframework.kafka.listener.KafkaListenerErrorHandler; +import org.springframework.kafka.support.KafkaHeaders; +import org.springframework.kafka.test.EmbeddedKafkaBroker; +import org.springframework.kafka.test.context.EmbeddedKafka; +import org.springframework.kafka.test.utils.KafkaTestUtils; +import 
org.springframework.messaging.Message; +import org.springframework.messaging.converter.CompositeMessageConverter; +import org.springframework.messaging.converter.GenericMessageConverter; +import org.springframework.messaging.converter.SmartMessageConverter; +import org.springframework.messaging.handler.annotation.Header; +import org.springframework.scheduling.TaskScheduler; +import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler; +import org.springframework.test.annotation.DirtiesContext; +import org.springframework.test.context.TestPropertySource; +import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +/** + * @author Sanghyeok An + * @author Artem Bilan + * + * @since 3.3.0 + */ + +@SpringJUnitConfig +@DirtiesContext +@EmbeddedKafka +@TestPropertySource(properties = { "five.attempts=5", "kafka.template=customKafkaTemplate" }) +@DisabledIfEnvironmentVariable(named = "GITHUB_ACTION", matches = "true", + disabledReason = "The test is too heavy and relies a lot on timing.") +public class AsyncCompletableFutureRetryTopicScenarioTests { + + private final static String MAIN_TOPIC_CONTAINER_FACTORY = "kafkaListenerContainerFactory"; + + public final static String TEST_TOPIC0 = "myRetryTopic0"; + + public final static String TEST_TOPIC1 = "myRetryTopic1"; + + public final static String TEST_TOPIC2 = "myRetryTopic2"; + + public final static String TEST_TOPIC3 = "myRetryTopic3"; + + public final static String TEST_TOPIC4 = "myRetryTopic4"; + + public final static String TEST_TOPIC5 = "myRetryTopic5"; + + public final static String TEST_TOPIC6 = "myRetryTopic6"; + + @Autowired + private KafkaTemplate kafkaTemplate; + + @Autowired + private CountDownLatchContainer latchContainer; + + @Autowired + DestinationTopicContainer topicContainer; + + @Test + void allFailCaseTest( + @Autowired TestTopicListener0 testTopicListener, + @Autowired MyCustomDltProcessor myCustomDltProcessor0) { + // All Fail case.
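+ // With maxAttempts(3) configured for TEST_TOPIC0 (see testRetryTopic0 below), each of the three failing messages is expected 3 times (main + 2 retries) before being handed to myCustomDltProcessor0.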
+ String shortFailedMsg1 = "0"; + String shortFailedMsg2 = "1"; + String shortFailedMsg3 = "2"; + DestinationTopic destinationTopic = topicContainer.getNextDestinationTopicFor("0-topicId", TEST_TOPIC0); + + String expectedRetryTopic = TEST_TOPIC0 + "-retry"; + String[] expectedReceivedMsgs = { + shortFailedMsg1, + shortFailedMsg2, + shortFailedMsg3, + shortFailedMsg1, + shortFailedMsg2, + shortFailedMsg3, + shortFailedMsg1, + shortFailedMsg2, + shortFailedMsg3, + }; + String[] expectedReceivedTopics = { + TEST_TOPIC0, + TEST_TOPIC0, + TEST_TOPIC0, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + }; + String[] expectedDltMsgs = { + shortFailedMsg1, + shortFailedMsg2, + shortFailedMsg3 + }; + + // When + kafkaTemplate.send(TEST_TOPIC0, shortFailedMsg1); + kafkaTemplate.send(TEST_TOPIC0, shortFailedMsg2); + kafkaTemplate.send(TEST_TOPIC0, shortFailedMsg3); + + // Then + assertThat(awaitLatch(latchContainer.countDownLatch0)).isTrue(); + assertThat(awaitLatch(latchContainer.dltCountdownLatch0)).isTrue(); + + assertThat(destinationTopic.getDestinationName()).isEqualTo(TEST_TOPIC0 + "-retry"); + + assertThat(testTopicListener.receivedMsgs).containsExactlyInAnyOrder(expectedReceivedMsgs); + assertThat(testTopicListener.receivedTopics).containsExactlyInAnyOrder(expectedReceivedTopics); + + assertThat(myCustomDltProcessor0.receivedMsg).containsExactlyInAnyOrder(expectedDltMsgs); + } + + @Test + void firstShortFailAndLastLongSuccessRetryTest( + @Autowired TestTopicListener1 testTopicListener1, + @Autowired MyCustomDltProcessor myCustomDltProcessor1) { + // Scenario. + // 1. Short Fail msg (offset 0) + // 2. Long success msg (offset 1) -> -ing (latch wait) + // 3. Short fail msg (Retry1 offset 0) -> (latch down) + // 4. Long success msg (offset 1) -> Success! + // 5. Short fail msg (Retry2 offset 0) + // 6. Short fail msg (Retry3 offset 0) + // 7. Short fail msg (Retry4 offset 0) + + // Given + String longSuccessMsg = testTopicListener1.LONG_SUCCESS_MSG; + String shortFailedMsg = testTopicListener1.SHORT_FAIL_MSG; + DestinationTopic destinationTopic = topicContainer.getNextDestinationTopicFor("1-topicId", TEST_TOPIC1); + + String expectedRetryTopic = TEST_TOPIC1 + "-retry"; + String[] expectedReceivedMsgs = { + shortFailedMsg, + longSuccessMsg, + shortFailedMsg, + shortFailedMsg, + shortFailedMsg, + shortFailedMsg + }; + + String[] expectedReceivedTopics = { + TEST_TOPIC1, + TEST_TOPIC1, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic + }; + + String[] expectedDltMsgs = { + shortFailedMsg + }; + + // When + kafkaTemplate.send(TEST_TOPIC1, shortFailedMsg); + kafkaTemplate.send(TEST_TOPIC1, longSuccessMsg); + + // Then + assertThat(awaitLatch(latchContainer.countDownLatch1)).isTrue(); + assertThat(awaitLatch(latchContainer.dltCountdownLatch1)).isTrue(); + + assertThat(destinationTopic.getDestinationName()).isEqualTo(expectedRetryTopic); + assertThat(testTopicListener1.receivedMsgs).containsExactlyInAnyOrder(expectedReceivedMsgs); + assertThat(testTopicListener1.receivedTopics).containsExactlyInAnyOrder(expectedReceivedTopics); + assertThat(testTopicListener1.latchWaitFailCount).isEqualTo(0); + + assertThat(myCustomDltProcessor1.receivedMsg).containsExactlyInAnyOrder(expectedDltMsgs); + } + + @Test + void firstLongSuccessAndLastShortFailed( + @Autowired TestTopicListener2 testTopicListener2, + @Autowired MyCustomDltProcessor myCustomDltProcessor2) { + // Scenario. + // 1. 
Long success msg (offset 0) -> going on... (latch await) + // 2. Short fail msg (offset 1) -> done. + // 3. Short fail msg (Retry1 offset 1) -> done (latch down) + // 4. Long success msg (offset 0) -> succeed. + // 5. Short fail msg (Retry2 offset 1) + // 6. Short fail msg (Retry3 offset 1) + // 7. Short fail msg (Retry4 offset 1) + // 8. Short fail msg (dlt offset 1) + + // Given + String shortFailedMsg = testTopicListener2.SHORT_FAIL_MSG; + String longSuccessMsg = testTopicListener2.LONG_SUCCESS_MSG; + DestinationTopic destinationTopic = topicContainer.getNextDestinationTopicFor("2-topicId", TEST_TOPIC2); + + String expectedRetryTopic = TEST_TOPIC2 + "-retry"; + String[] expectedReceivedMsgs = { + longSuccessMsg, + shortFailedMsg, + shortFailedMsg, + shortFailedMsg, + shortFailedMsg, + shortFailedMsg + }; + + String[] expectedReceivedTopics = { + TEST_TOPIC2, + TEST_TOPIC2, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic + }; + + String[] expectedDltMsgs = { + shortFailedMsg + }; + + // When + kafkaTemplate.send(TEST_TOPIC2, longSuccessMsg); + kafkaTemplate.send(TEST_TOPIC2, shortFailedMsg); + + // Then + + assertThat(awaitLatch(latchContainer.countDownLatch2)).isTrue(); + assertThat(awaitLatch(latchContainer.dltCountdownLatch2)).isTrue(); + + assertThat(destinationTopic.getDestinationName()).isEqualTo(expectedRetryTopic); + assertThat(testTopicListener2.receivedMsgs).containsExactlyInAnyOrder(expectedReceivedMsgs); + assertThat(testTopicListener2.receivedTopics).containsExactlyInAnyOrder(expectedReceivedTopics); + assertThat(testTopicListener2.latchWaitFailCount).isEqualTo(0); + + assertThat(myCustomDltProcessor2.receivedMsg).containsExactlyInAnyOrder(expectedDltMsgs); + } + + @Test + void longFailMsgTwiceThenShortSuccessMsgThird( + @Autowired TestTopicListener3 testTopicListener3, + @Autowired MyCustomDltProcessor myCustomDltProcessor3) { + // Scenario + // 1. Long fail msg arrived (offset 0) -> -ing (wait latch offset 4) + // 2. Long fail msg arrived (offset 1) -> -ing (wait latch offset 1) + // 3. Short success msg arrived (offset 2) -> done + // 4. Short success msg arrived (offset 3) -> done + // 5. Short success msg arrived (offset 4) -> done (latch offset 4 count down) + // 6. Long fail msg throws error (offset 0) -> done + // 7. Long fail msg throws error (offset 1) -> done + // 8. Long fail msg (retry 1 with offset 0) -> done + // 9. Long fail msg (retry 1 with offset 1) -> done + // 10. Long fail msg (retry 2 with offset 0) -> done + // 11. Long fail msg (retry 2 with offset 1) -> done + // 12. Long fail msg (retry 3 with offset 0) -> done + // 13. Long fail msg (retry 3 with offset 1) -> done + // 14. Long fail msg (retry 4 with offset 0) -> done + // 15. 
Long fail msg (retry 4 with offset 1) -> done + + // Given + DestinationTopic destinationTopic = topicContainer.getNextDestinationTopicFor("3-topicId", TEST_TOPIC3); + + String firstMsg = TestTopicListener3.FAIL_PREFIX + "0"; + String secondMsg = TestTopicListener3.FAIL_PREFIX + "1"; + String thirdMsg = TestTopicListener3.SUCCESS_PREFIX + "2"; + String fourthMsg = TestTopicListener3.SUCCESS_PREFIX + "3"; + String fifthMsg = TestTopicListener3.SUCCESS_PREFIX + "4"; + + String expectedRetryTopic = TEST_TOPIC3 + "-retry"; + + String[] expectedReceivedMsgs = { + firstMsg, + secondMsg, + thirdMsg, + fourthMsg, + fifthMsg, + firstMsg, + secondMsg, + firstMsg, + secondMsg, + firstMsg, + secondMsg, + firstMsg, + secondMsg, + }; + + String[] expectedReceivedTopics = { + TEST_TOPIC3, + TEST_TOPIC3, + TEST_TOPIC3, + TEST_TOPIC3, + TEST_TOPIC3, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic + }; + + String[] expectedDltMsgs = { + firstMsg, + secondMsg, + }; + + // When + kafkaTemplate.send(TEST_TOPIC3, firstMsg); + kafkaTemplate.send(TEST_TOPIC3, secondMsg); + kafkaTemplate.send(TEST_TOPIC3, thirdMsg); + kafkaTemplate.send(TEST_TOPIC3, fourthMsg); + kafkaTemplate.send(TEST_TOPIC3, fifthMsg); + + // Then + assertThat(awaitLatch(latchContainer.countDownLatch3)).isTrue(); + assertThat(awaitLatch(latchContainer.dltCountdownLatch3)).isTrue(); + + assertThat(destinationTopic.getDestinationName()).isEqualTo(expectedRetryTopic); + assertThat(testTopicListener3.receivedMsgs).containsExactlyInAnyOrder(expectedReceivedMsgs); + assertThat(testTopicListener3.receivedTopics).containsExactlyInAnyOrder(expectedReceivedTopics); + assertThat(testTopicListener3.latchWaitFailCount).isEqualTo(0); + + assertThat(myCustomDltProcessor3.receivedMsg).containsExactlyInAnyOrder(expectedDltMsgs); + } + + @Test + void longSuccessMsgTwiceThenShortFailMsgTwice( + @Autowired TestTopicListener4 topicListener4, + @Autowired MyCustomDltProcessor myCustomDltProcessor4) { + // Scenario + // 1. Msg arrived (offset 0) -> -ing + // 2. Msg arrived (offset 1) -> -ing + // 3. Msg arrived (offset 2) throws error -> done + // 4. Msg arrived (offset 3) throws error -> done + // 5. Msg arrived (offset 0) succeed -> done + // 6. Msg arrived (offset 1) succeed -> done + // 7. Msg arrived (retry 1, offset 2) -> done + // 8. Msg arrived (retry 1, offset 3) -> done + // 9. Msg arrived (retry 2, offset 2) -> done + // 10. Msg arrived (retry 2, offset 3) -> done + // 11. Msg arrived (retry 3, offset 2) -> done + // 12. Msg arrived (retry 3, offset 3) -> done + // 13. Msg arrived (retry 4, offset 2) -> done + // 14. 
Msg arrived (retry 4, offset 3) -> done + + // Given + DestinationTopic destinationTopic = topicContainer.getNextDestinationTopicFor("4-TopicId", TEST_TOPIC4); + + String expectedRetryTopic = TEST_TOPIC4 + "-retry"; + String[] expectedReceivedMsgs = { + TestTopicListener4.LONG_SUCCESS_MSG, + TestTopicListener4.LONG_SUCCESS_MSG, + TestTopicListener4.SHORT_FAIL_MSG, + TestTopicListener4.SHORT_FAIL_MSG, + TestTopicListener4.SHORT_FAIL_MSG, + TestTopicListener4.SHORT_FAIL_MSG, + TestTopicListener4.SHORT_FAIL_MSG, + TestTopicListener4.SHORT_FAIL_MSG, + TestTopicListener4.SHORT_FAIL_MSG, + TestTopicListener4.SHORT_FAIL_MSG, + TestTopicListener4.SHORT_FAIL_MSG, + TestTopicListener4.SHORT_FAIL_MSG, + }; + + String[] expectedReceivedTopics = { + TEST_TOPIC4, + TEST_TOPIC4, + TEST_TOPIC4, + TEST_TOPIC4, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + }; + + String[] expectedDltMsgs = { + TestTopicListener4.SHORT_FAIL_MSG, + TestTopicListener4.SHORT_FAIL_MSG, + }; + + // When + kafkaTemplate.send(TEST_TOPIC4, TestTopicListener4.LONG_SUCCESS_MSG); + kafkaTemplate.send(TEST_TOPIC4, TestTopicListener4.LONG_SUCCESS_MSG); + kafkaTemplate.send(TEST_TOPIC4, TestTopicListener4.SHORT_FAIL_MSG); + kafkaTemplate.send(TEST_TOPIC4, TestTopicListener4.SHORT_FAIL_MSG); + + // Then + assertThat(awaitLatch(latchContainer.countDownLatch4)).isTrue(); + assertThat(awaitLatch(latchContainer.dltCountdownLatch4)).isTrue(); + + assertThat(destinationTopic.getDestinationName()).isEqualTo(expectedRetryTopic); + assertThat(topicListener4.receivedMsgs).containsExactlyInAnyOrder(expectedReceivedMsgs); + assertThat(topicListener4.receivedTopics).containsExactlyInAnyOrder(expectedReceivedTopics); + assertThat(topicListener4.latchWaitFailCount).isEqualTo(0); + + assertThat(myCustomDltProcessor4.receivedMsg).containsExactlyInAnyOrder(expectedDltMsgs); + } + + @Test + void oneLongSuccessMsgBetween49ShortFailMsg( + @Autowired TestTopicListener5 topicListener5, + @Autowired MyCustomDltProcessor myCustomDltProcessor5) { + // Scenario. + // 1. msgs received (offsets 0 ~ 24) -> failed. + // 2. msgs received (offset 25) -> -ing + // 3. msgs received (offset 26 ~ 49) -> failed. + // 4. success msg completes (offset 25) -> done + // 5. msgs received (Retry1 offset 0 ~ 49 except 25) -> failed. + // 6. msgs received (Retry2 offset 0 ~ 49 except 25) -> failed, then forwarded to the DLT (maxAttempts = 3).
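+ // In total: 49 failing messages x 3 attempts = 147 deliveries, plus a single delivery of the successful message (148 in all); all 49 failures are expected at myCustomDltProcessor5.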
+ + // Given + DestinationTopic destinationTopic = topicContainer.getNextDestinationTopicFor("5-TopicId", TEST_TOPIC5); + + String expectedRetryTopic = TEST_TOPIC5 + "-retry"; + + String[] expectedReceivedMsgs = new String[148]; + for (int i = 0; i < 147; i++) { + expectedReceivedMsgs[i] = TestTopicListener5.SHORT_FAIL_MSG; + } + expectedReceivedMsgs[147] = TestTopicListener5.LONG_SUCCESS_MSG; + + + String[] expectedReceivedTopics = new String[148]; + for (int i = 0; i < 49; i++) { + expectedReceivedTopics[i] = TEST_TOPIC5; + } + for (int i = 49; i < 147; i++) { + expectedReceivedTopics[i] = expectedRetryTopic; + } + expectedReceivedTopics[147] = TEST_TOPIC5; + + + String[] expectedDltMsgs = new String[49]; + for (int i = 0; i < 49; i++) { + expectedDltMsgs[i] = TestTopicListener5.SHORT_FAIL_MSG; + } + + // When + for (int i = 0; i < 50; i++) { + if (i != 25) { + kafkaTemplate.send(TEST_TOPIC5, TestTopicListener5.SHORT_FAIL_MSG); + } + else { + kafkaTemplate.send(TEST_TOPIC5, TestTopicListener5.LONG_SUCCESS_MSG); + } + } + + // Then + assertThat(awaitLatch(latchContainer.countDownLatch5)).isTrue(); + assertThat(awaitLatch(latchContainer.dltCountdownLatch5)).isTrue(); + + assertThat(destinationTopic.getDestinationName()).isEqualTo(expectedRetryTopic); + assertThat(topicListener5.receivedMsgs).containsExactlyInAnyOrder(expectedReceivedMsgs); + assertThat(topicListener5.receivedTopics).containsExactlyInAnyOrder(expectedReceivedTopics); + + assertThat(myCustomDltProcessor5.receivedMsg).containsExactlyInAnyOrder(expectedDltMsgs); + } + + @Test + void moreComplexAsyncScenarioTest( + @Autowired TestTopicListener6 topicListener6, + @Autowired @Qualifier("myCustomDltProcessor6") MyCustomDltProcessor myCustomDltProcessor6) { + // Scenario. + // 1. Fail Msg (offset 0) -> -ing + // 2. Success Msg (offset 1) -> -ing + // 3. Success Msg (offset 2) -> -ing + // 4. Fail Msg (offset 3) -> done + // 5. Success Msg (offset 4) -> -ing + // 6. Success msg succeed (offset 2) - done + // 7. Success msg succeed (offset 4) -> done + // 8. Fail Msg (Retry1 offset 3) -> done + // 9. Fail Msg (Retry2 offset 3) -> done + // 10. Success msg succeed (offset 1) -> done + // 11. Fail Msg (offset 0) -> done + // 12. Fail Msg (Retry 1 offset 0) -> done + // 13. 
Fail Msg (Retry 2 offset 0) -> done + + // Given + String firstMsg = TestTopicListener6.FAIL_PREFIX + "0"; + String secondMsg = TestTopicListener6.SUCCESS_PREFIX + "1"; + String thirdMsg = TestTopicListener6.SUCCESS_PREFIX + "2"; + String fourthMsg = TestTopicListener6.FAIL_PREFIX + "3"; + String fifthMsg = TestTopicListener6.SUCCESS_PREFIX + "4"; + + DestinationTopic destinationTopic = topicContainer.getNextDestinationTopicFor("6-TopicId", TEST_TOPIC6); + String expectedRetryTopic = TEST_TOPIC6 + "-retry"; + + String[] expectedReceivedMsgs = { + firstMsg, + secondMsg, + thirdMsg, + fourthMsg, + fifthMsg, + fourthMsg, + fourthMsg, + firstMsg, + firstMsg + }; + + String[] expectedReceivedTopics = { + TEST_TOPIC6, + TEST_TOPIC6, + TEST_TOPIC6, + TEST_TOPIC6, + TEST_TOPIC6, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic + }; + + String[] expectedDltMsgs = { + TestTopicListener6.FAIL_PREFIX + "3", + TestTopicListener6.FAIL_PREFIX + "0" + }; + + // When + kafkaTemplate.send(TEST_TOPIC6, firstMsg); + kafkaTemplate.send(TEST_TOPIC6, secondMsg); + kafkaTemplate.send(TEST_TOPIC6, thirdMsg); + kafkaTemplate.send(TEST_TOPIC6, fourthMsg); + kafkaTemplate.send(TEST_TOPIC6, fifthMsg); + + // Then + assertThat(awaitLatch(latchContainer.countDownLatch6)).isTrue(); + assertThat(awaitLatch(latchContainer.dltCountdownLatch6)).isTrue(); + + assertThat(destinationTopic.getDestinationName()).isEqualTo(expectedRetryTopic); + assertThat(topicListener6.receivedMsgs).containsExactlyInAnyOrder(expectedReceivedMsgs); + assertThat(topicListener6.receivedTopics).containsExactlyInAnyOrder(expectedReceivedTopics); + assertThat(topicListener6.latchWaitFailCount).isEqualTo(0); + + assertThat(myCustomDltProcessor6.receivedMsg).containsExactlyInAnyOrder(expectedDltMsgs); + } + + private boolean awaitLatch(CountDownLatch latch) { + try { + return latch.await(60, TimeUnit.SECONDS); + } + catch (Exception e) { + fail(e.getMessage()); + throw new RuntimeException(e); + } + + } + + @KafkaListener( + id = "0-topicId", + topics = TEST_TOPIC0, + containerFactory = MAIN_TOPIC_CONTAINER_FACTORY, + errorHandler = "myCustomErrorHandler", + contentTypeConverter = "myCustomMessageConverter", + concurrency = "2") + static class TestTopicListener0 { + + @Autowired + CountDownLatchContainer container; + + private final List receivedMsgs = new ArrayList<>(); + + private final List receivedTopics = new ArrayList<>(); + + @KafkaHandler + public CompletableFuture listen(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + this.receivedMsgs.add(message); + this.receivedTopics.add(receivedTopic); + return CompletableFuture.supplyAsync(() -> { + try { + throw new RuntimeException("Woooops... 
in topic " + receivedTopic); + } + finally { + container.countDownLatch0.countDown(); + } + }); + } + + } + + @KafkaListener( + id = "1-topicId", + topics = TEST_TOPIC1, + containerFactory = MAIN_TOPIC_CONTAINER_FACTORY, + errorHandler = "myCustomErrorHandler", + contentTypeConverter = "myCustomMessageConverter", + concurrency = "2") + static class TestTopicListener1 { + + @Autowired + CountDownLatchContainer container; + + private final List receivedMsgs = new ArrayList<>(); + + private final List receivedTopics = new ArrayList<>(); + + private CountDownLatch firstRetryFailMsgLatch = new CountDownLatch(1); + + protected final String LONG_SUCCESS_MSG = "success"; + + protected final String SHORT_FAIL_MSG = "fail"; + + protected int latchWaitFailCount = 0; + + @KafkaHandler + public CompletableFuture listen( + String message, + @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic, + @Header(KafkaHeaders.OFFSET) String offset) { + this.receivedTopics.add(receivedTopic); + this.receivedMsgs.add(message); + return CompletableFuture.supplyAsync(() -> { + try { + if (message.equals(SHORT_FAIL_MSG)) { + throw new RuntimeException("Woooops... in topic " + receivedTopic); + } + else { + firstRetryFailMsgLatch.await(10, TimeUnit.SECONDS); + } + } + catch (InterruptedException e) { + latchWaitFailCount += 1; + throw new RuntimeException(e); + } + finally { + if (receivedTopic.equals(TEST_TOPIC1 + "-retry") && + offset.equals("0")) { + firstRetryFailMsgLatch.countDown(); + } + container.countDownLatch1.countDown(); + } + return "Task Completed"; + }); + } + + } + + @KafkaListener( + id = "2-topicId", + topics = TEST_TOPIC2, + containerFactory = MAIN_TOPIC_CONTAINER_FACTORY, + errorHandler = "myCustomErrorHandler", + contentTypeConverter = "myCustomMessageConverter", + concurrency = "2") + static class TestTopicListener2 { + + @Autowired + CountDownLatchContainer container; + + protected final List receivedMsgs = new ArrayList<>(); + + private final List receivedTopics = new ArrayList<>(); + + private CountDownLatch firstRetryFailMsgLatch = new CountDownLatch(1); + + protected final String LONG_SUCCESS_MSG = "success"; + + protected final String SHORT_FAIL_MSG = "fail"; + + protected int latchWaitFailCount = 0; + + @KafkaHandler + public CompletableFuture listen( + String message, + @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic, + @Header(KafkaHeaders.OFFSET) String offset) { + this.receivedMsgs.add(message); + this.receivedTopics.add(receivedTopic); + + return CompletableFuture.supplyAsync(() -> { + try { + if (message.equals(SHORT_FAIL_MSG)) { + throw new RuntimeException("Woooops... 
in topic " + receivedTopic); + } + else { + firstRetryFailMsgLatch.await(10, TimeUnit.SECONDS); + } + } + catch (InterruptedException e) { + latchWaitFailCount += 1; + throw new RuntimeException(e); + } + finally { + if (receivedTopic.equals(TEST_TOPIC2 + "-retry") && + offset.equals("1")) { + firstRetryFailMsgLatch.countDown(); + } + container.countDownLatch2.countDown(); + } + + return "Task Completed"; + }); + } + + } + + @KafkaListener( + id = "3-topicId", + topics = TEST_TOPIC3, + containerFactory = MAIN_TOPIC_CONTAINER_FACTORY, + errorHandler = "myCustomErrorHandler", + contentTypeConverter = "myCustomMessageConverter", + concurrency = "2") + static class TestTopicListener3 { + + @Autowired + CountDownLatchContainer container; + + protected final List receivedMsgs = new ArrayList<>(); + + private final List receivedTopics = new ArrayList<>(); + + public static final String FAIL_PREFIX = "fail"; + + public static final String SUCCESS_PREFIX = "success"; + + private CountDownLatch successLatchCount = new CountDownLatch(3); + + private CountDownLatch offset0Latch = new CountDownLatch(1); + + protected int latchWaitFailCount = 0; + + @KafkaHandler + public CompletableFuture listen( + String message, + @Header(KafkaHeaders.RECEIVED_TOPIC)String receivedTopic, + @Header(KafkaHeaders.OFFSET) String offset) { + this.receivedMsgs.add(message); + this.receivedTopics.add(receivedTopic); + + return CompletableFuture.supplyAsync(() -> { + try { + if (message.startsWith(FAIL_PREFIX)) { + if (receivedTopic.equals(TEST_TOPIC3)) { + if (offset.equals("0")) { + successLatchCount.await(10, TimeUnit.SECONDS); + offset0Latch.countDown(); + } + if (offset.equals("1")) { + offset0Latch.await(10, TimeUnit.SECONDS); + } + } + throw new RuntimeException("Woooops... in topic " + receivedTopic); + } + else { + successLatchCount.countDown(); + } + } + catch (InterruptedException e) { + latchWaitFailCount += 1; + throw new RuntimeException(e); + } + finally { + container.countDownLatch3.countDown(); + } + return "Task Completed"; + }); + } + + } + + @KafkaListener( + id = "4-TopicId", + topics = TEST_TOPIC4, + containerFactory = MAIN_TOPIC_CONTAINER_FACTORY, + errorHandler = "myCustomErrorHandler", + contentTypeConverter = "myCustomMessageConverter", + concurrency = "2") + static class TestTopicListener4 { + + @Autowired + CountDownLatchContainer container; + + protected final List receivedMsgs = new ArrayList<>(); + + private final List receivedTopics = new ArrayList<>(); + + public static final String LONG_SUCCESS_MSG = "success"; + + public static final String SHORT_FAIL_MSG = "fail"; + + private CountDownLatch failLatchCount = new CountDownLatch(2); + + private CountDownLatch offset0Latch = new CountDownLatch(1); + + protected int latchWaitFailCount = 0; + + @KafkaHandler + public CompletableFuture listen(String message, + @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic, + @Header(KafkaHeaders.OFFSET) String offset) { + this.receivedMsgs.add(message); + this.receivedTopics.add(receivedTopic); + + return CompletableFuture.supplyAsync(() -> { + try { + if (message.equals(SHORT_FAIL_MSG)) { + throw new RuntimeException("Woooops... 
in topic " + receivedTopic); + } + else { + failLatchCount.await(10, TimeUnit.SECONDS); + if (offset.equals("1")) { + offset0Latch.await(10, TimeUnit.SECONDS); + } + } + } + catch (InterruptedException e) { + latchWaitFailCount += 1; + throw new RuntimeException(e); + } + finally { + if (message.equals(SHORT_FAIL_MSG) || + receivedTopic.equals(TEST_TOPIC4)) { + failLatchCount.countDown(); + } + if (offset.equals("0") && + receivedTopic.equals(TEST_TOPIC4)) { + offset0Latch.countDown(); + } + container.countDownLatch4.countDown(); + } + return "Task Completed"; + }); + } + + } + + @KafkaListener( + id = "5-TopicId", + topics = TEST_TOPIC5, + containerFactory = MAIN_TOPIC_CONTAINER_FACTORY, + errorHandler = "myCustomErrorHandler", + contentTypeConverter = "myCustomMessageConverter", + concurrency = "2") + static class TestTopicListener5 { + + @Autowired + CountDownLatchContainer container; + + protected final List receivedMsgs = new ArrayList<>(); + + private final List receivedTopics = new ArrayList<>(); + + public static final String LONG_SUCCESS_MSG = "success"; + + public static final String SHORT_FAIL_MSG = "fail"; + + private CountDownLatch failLatchCount = new CountDownLatch(24 + 49); + + protected int latchWaitFailCount = 0; + + @KafkaHandler + public CompletableFuture listen( + String message, + @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic, + @Header(KafkaHeaders.OFFSET) String offset) { + this.receivedMsgs.add(message); + this.receivedTopics.add(receivedTopic); + + return CompletableFuture.supplyAsync(() -> { + try { + if (message.equals(SHORT_FAIL_MSG)) { + throw new RuntimeException("Woooops... in topic " + receivedTopic); + } + else { + failLatchCount.await(10, TimeUnit.SECONDS); + } + } + catch (InterruptedException e) { + latchWaitFailCount += 1; + throw new RuntimeException(e); + } + finally { + if (message.equals(SHORT_FAIL_MSG)) { + if (receivedTopic.equals(TEST_TOPIC5) && + Integer.valueOf(offset) > 25) { + failLatchCount.countDown(); + } + else { + if (failLatchCount.getCount() > 0) { + failLatchCount.countDown(); + } + } + } + container.countDownLatch5.countDown(); + } + + return "Task Completed"; + }); + } + + } + + @KafkaListener( + id = "6-TopicId", + topics = TEST_TOPIC6, + containerFactory = MAIN_TOPIC_CONTAINER_FACTORY, + errorHandler = "myCustomErrorHandler", + contentTypeConverter = "myCustomMessageConverter", + concurrency = "2") + static class TestTopicListener6 { + + @Autowired + CountDownLatchContainer container; + + protected final List receivedMsgs = new ArrayList<>(); + + private final List receivedTopics = new ArrayList<>(); + + public static final String SUCCESS_PREFIX = "success"; + + public static final String FAIL_PREFIX = "fail"; + + protected CountDownLatch offset1CompletedLatch = new CountDownLatch(1); + + protected CountDownLatch offset2CompletedLatch = new CountDownLatch(1); + + protected CountDownLatch offset3RetryCompletedLatch = new CountDownLatch(3); + + protected CountDownLatch offset4ReceivedLatch = new CountDownLatch(1); + + protected int latchWaitFailCount = 0; + + @KafkaHandler + public CompletableFuture listen( + String message, + @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic, + @Header(KafkaHeaders.OFFSET) String offset) { + this.receivedMsgs.add(message); + this.receivedTopics.add(receivedTopic); + + return CompletableFuture.supplyAsync(() -> { + try { + if (message.startsWith(FAIL_PREFIX)) { + if (offset.equals("0")) { + if (receivedTopic.equals(TEST_TOPIC6)) { + offset1CompletedLatch.await(10, 
TimeUnit.SECONDS); + } + } + + if (offset.equals("3")) { + offset3RetryCompletedLatch.countDown(); + } + + throw new RuntimeException("Woooops... in topic " + receivedTopic + ", msg: " + message); + } + else { + if (offset.equals("1")) { + offset3RetryCompletedLatch.await(10, TimeUnit.SECONDS); + offset1CompletedLatch.countDown(); + } + + if (offset.equals("2")) { + offset4ReceivedLatch.await(10, TimeUnit.SECONDS); + offset2CompletedLatch.countDown(); + } + + if (offset.equals("4")) { + offset4ReceivedLatch.countDown(); + offset2CompletedLatch.await(10, TimeUnit.SECONDS); + } + } + } + catch (InterruptedException ex) { + latchWaitFailCount += 1; + throw new RuntimeException(ex); + } + finally { + container.countDownLatch6.countDown(); + } + + return "Task Completed"; + }); + } + } + + static class CountDownLatchContainer { + + static int COUNT0 = 9; + + static int DLT_COUNT0 = 3; + + CountDownLatch countDownLatch0 = new CountDownLatch(COUNT0); + + CountDownLatch dltCountdownLatch0 = new CountDownLatch(DLT_COUNT0); + + static int COUNT1 = 6; + + static int DLT_COUNT1 = 1; + + CountDownLatch countDownLatch1 = new CountDownLatch(COUNT1); + + CountDownLatch dltCountdownLatch1 = new CountDownLatch(DLT_COUNT1); + + static int COUNT2 = 6; + + static int DLT_COUNT2 = 1; + + CountDownLatch countDownLatch2 = new CountDownLatch(COUNT2); + + CountDownLatch dltCountdownLatch2 = new CountDownLatch(DLT_COUNT2); + + static int COUNT3 = 13; + + static int DLT_COUNT3 = 2; + + CountDownLatch countDownLatch3 = new CountDownLatch(COUNT3); + + CountDownLatch dltCountdownLatch3 = new CountDownLatch(DLT_COUNT3); + + static int COUNT4 = 12; + + static int DLT_COUNT4 = 2; + + CountDownLatch countDownLatch4 = new CountDownLatch(COUNT4); + + CountDownLatch dltCountdownLatch4 = new CountDownLatch(DLT_COUNT4); + + static int COUNT5 = 24 + 73; + + static int DLT_COUNT5 = 49; + + CountDownLatch countDownLatch5 = new CountDownLatch(COUNT5); + + CountDownLatch dltCountdownLatch5 = new CountDownLatch(DLT_COUNT5); + + static int COUNT6 = 9; + + static int DLT_COUNT6 = 2; + + CountDownLatch countDownLatch6 = new CountDownLatch(COUNT6); + + CountDownLatch dltCountdownLatch6 = new CountDownLatch(DLT_COUNT6); + + } + + static class MyCustomDltProcessor { + + final List receivedMsg = new ArrayList<>(); + + private final KafkaTemplate kafkaTemplate; + + private final CountDownLatch latch; + + MyCustomDltProcessor(KafkaTemplate kafkaTemplate, + CountDownLatch latch) { + this.kafkaTemplate = kafkaTemplate; + this.latch = latch; + } + + public void processDltMessage(String message) { + this.receivedMsg.add(message); + latch.countDown(); + } + } + + @Configuration + static class RetryTopicConfigurations extends RetryTopicConfigurationSupport { + + private static final String DLT_METHOD_NAME = "processDltMessage"; + + static RetryTopicConfiguration createRetryTopicConfiguration( + KafkaTemplate template, + String topicName, + String dltBeanName, + int maxAttempts) { + return RetryTopicConfigurationBuilder + .newInstance() + .fixedBackOff(50) + .maxAttempts(maxAttempts) + .concurrency(1) + .useSingleTopicForSameIntervals() + .includeTopic(topicName) + .doNotRetryOnDltFailure() + .dltHandlerMethod(dltBeanName, DLT_METHOD_NAME) + .create(template); + } + + @Bean + RetryTopicConfiguration testRetryTopic0(KafkaTemplate template) { + return createRetryTopicConfiguration( + template, + TEST_TOPIC0, + "myCustomDltProcessor0", + 3); + } + + @Bean + RetryTopicConfiguration testRetryTopic1(KafkaTemplate template) { + return
createRetryTopicConfiguration( + template, + TEST_TOPIC1, + "myCustomDltProcessor1", + 5); + } + + @Bean + RetryTopicConfiguration testRetryTopic2(KafkaTemplate template) { + return createRetryTopicConfiguration( + template, + TEST_TOPIC2, + "myCustomDltProcessor2", + 5); + } + + @Bean + RetryTopicConfiguration testRetryTopic3(KafkaTemplate template) { + return createRetryTopicConfiguration( + template, + TEST_TOPIC3, + "myCustomDltProcessor3", + 5); + } + + @Bean + RetryTopicConfiguration testRetryTopic4(KafkaTemplate template) { + return createRetryTopicConfiguration( + template, + TEST_TOPIC4, + "myCustomDltProcessor4", + 5); + } + + @Bean + RetryTopicConfiguration testRetryTopic5(KafkaTemplate template) { + return createRetryTopicConfiguration( + template, + TEST_TOPIC5, + "myCustomDltProcessor5", + 3); + } + + @Bean + RetryTopicConfiguration testRetryTopic6(KafkaTemplate template) { + return createRetryTopicConfiguration( + template, + TEST_TOPIC6, + "myCustomDltProcessor6", + 3); + } + + @Bean + KafkaListenerErrorHandler myCustomErrorHandler( + CountDownLatchContainer container) { + return (message, exception) -> { + throw exception; + }; + } + + @Bean + SmartMessageConverter myCustomMessageConverter( + CountDownLatchContainer container) { + return new CompositeMessageConverter(Collections.singletonList(new GenericMessageConverter())) { + + @Override + public Object fromMessage(Message message, Class targetClass, Object conversionHint) { + return super.fromMessage(message, targetClass, conversionHint); + } + }; + } + + @Bean + CountDownLatchContainer latchContainer() { + return new CountDownLatchContainer(); + } + + @Bean + TestTopicListener0 testTopicListener0() { + return new TestTopicListener0(); + } + + @Bean + TestTopicListener1 testTopicListener1() { + return new TestTopicListener1(); + } + + @Bean + TestTopicListener2 testTopicListener2() { + return new TestTopicListener2(); + } + + @Bean + TestTopicListener3 testTopicListener3() { + return new TestTopicListener3(); + } + + @Bean + TestTopicListener4 testTopicListener4() { + return new TestTopicListener4(); + } + + @Bean + TestTopicListener5 testTopicListener5() { + return new TestTopicListener5(); + } + + @Bean + TestTopicListener6 testTopicListener6() { + return new TestTopicListener6(); + } + + @Bean + MyCustomDltProcessor myCustomDltProcessor0( + KafkaTemplate kafkaTemplate, + CountDownLatchContainer latchContainer) { + return new MyCustomDltProcessor(kafkaTemplate, + latchContainer.dltCountdownLatch0); + } + + @Bean + MyCustomDltProcessor myCustomDltProcessor1( + KafkaTemplate kafkaTemplate, + CountDownLatchContainer latchContainer) { + return new MyCustomDltProcessor(kafkaTemplate, + latchContainer.dltCountdownLatch1); + } + + @Bean + MyCustomDltProcessor myCustomDltProcessor2( + KafkaTemplate kafkaTemplate, + CountDownLatchContainer latchContainer) { + return new MyCustomDltProcessor(kafkaTemplate, + latchContainer.dltCountdownLatch2); + } + + @Bean + MyCustomDltProcessor myCustomDltProcessor3( + KafkaTemplate kafkaTemplate, + CountDownLatchContainer latchContainer) { + return new MyCustomDltProcessor(kafkaTemplate, + latchContainer.dltCountdownLatch3); + } + + @Bean + MyCustomDltProcessor myCustomDltProcessor4( + KafkaTemplate kafkaTemplate, + CountDownLatchContainer latchContainer) { + return new MyCustomDltProcessor(kafkaTemplate, + latchContainer.dltCountdownLatch4); + } + + @Bean + MyCustomDltProcessor myCustomDltProcessor5( + KafkaTemplate kafkaTemplate, + CountDownLatchContainer latchContainer) { + return new 
MyCustomDltProcessor(kafkaTemplate, + latchContainer.dltCountdownLatch5); + } + + @Bean + MyCustomDltProcessor myCustomDltProcessor6( + KafkaTemplate kafkaTemplate, + CountDownLatchContainer latchContainer) { + return new MyCustomDltProcessor(kafkaTemplate, + latchContainer.dltCountdownLatch6); + } + + } + + @Configuration + static class KafkaProducerConfig { + + @Autowired + EmbeddedKafkaBroker broker; + + @Bean + ProducerFactory producerFactory() { + Map props = KafkaTestUtils.producerProps( + this.broker.getBrokersAsString()); + props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); + return new DefaultKafkaProducerFactory<>(props); + } + + @Bean("customKafkaTemplate") + KafkaTemplate kafkaTemplate() { + return new KafkaTemplate<>(producerFactory()); + } + + } + + @EnableKafka + @Configuration + static class KafkaConsumerConfig { + + @Autowired + EmbeddedKafkaBroker broker; + + @Bean + ConsumerFactory consumerFactory() { + Map props = KafkaTestUtils.consumerProps( + this.broker.getBrokersAsString(), + "groupId", + "false"); + return new DefaultKafkaConsumerFactory<>(props); + } + + @Bean + ConcurrentKafkaListenerContainerFactory retryTopicListenerContainerFactory( + ConsumerFactory consumerFactory) { + + ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); + ContainerProperties props = factory.getContainerProperties(); + props.setIdleEventInterval(100L); + props.setPollTimeout(50L); + props.setIdlePartitionEventInterval(100L); + factory.setConsumerFactory(consumerFactory); + factory.setConcurrency(1); + factory.setContainerCustomizer( + container -> container.getContainerProperties().setIdlePartitionEventInterval(100L)); + return factory; + } + + @Bean + ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory( + ConsumerFactory consumerFactory) { + + ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); + factory.setConsumerFactory(consumerFactory); + factory.setConcurrency(1); + factory.setContainerCustomizer(container -> { + if (container.getListenerId().startsWith("manual")) { + container.getContainerProperties().setAckMode(AckMode.MANUAL); + container.getContainerProperties().setAsyncAcks(true); + } + }); + return factory; + } + + @Bean + TaskScheduler sched() { + return new ThreadPoolTaskScheduler(); + } + + } + +} diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/AsyncMonoFutureRetryTopicClassLevelIntegrationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/AsyncMonoFutureRetryTopicClassLevelIntegrationTests.java new file mode 100644 index 0000000000..64ef40cded --- /dev/null +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/AsyncMonoFutureRetryTopicClassLevelIntegrationTests.java @@ -0,0 +1,943 @@ +/* + * Copyright 2024 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.springframework.kafka.retrytopic; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import org.apache.kafka.clients.admin.AdminClientConfig; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.clients.admin.TopicDescription; +import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.common.serialization.StringDeserializer; +import org.apache.kafka.common.serialization.StringSerializer; +import org.assertj.core.api.InstanceOfAssertFactories; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Mono; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.kafka.annotation.DltHandler; +import org.springframework.kafka.annotation.EnableKafka; +import org.springframework.kafka.annotation.KafkaHandler; +import org.springframework.kafka.annotation.KafkaListener; +import org.springframework.kafka.annotation.PartitionOffset; +import org.springframework.kafka.annotation.RetryableTopic; +import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; +import org.springframework.kafka.config.KafkaListenerEndpointRegistry; +import org.springframework.kafka.config.TopicBuilder; +import org.springframework.kafka.core.ConsumerFactory; +import org.springframework.kafka.core.DefaultKafkaConsumerFactory; +import org.springframework.kafka.core.DefaultKafkaProducerFactory; +import org.springframework.kafka.core.KafkaAdmin; +import org.springframework.kafka.core.KafkaAdmin.NewTopics; +import org.springframework.kafka.core.KafkaTemplate; +import org.springframework.kafka.core.ProducerFactory; +import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; +import org.springframework.kafka.listener.ContainerProperties; +import org.springframework.kafka.listener.ContainerProperties.AckMode; +import org.springframework.kafka.listener.KafkaListenerErrorHandler; +import org.springframework.kafka.support.Acknowledgment; +import org.springframework.kafka.support.KafkaHeaders; +import org.springframework.kafka.test.EmbeddedKafkaBroker; +import org.springframework.kafka.test.context.EmbeddedKafka; +import org.springframework.kafka.test.utils.KafkaTestUtils; +import org.springframework.messaging.Message; +import org.springframework.messaging.converter.CompositeMessageConverter; +import org.springframework.messaging.converter.GenericMessageConverter; +import org.springframework.messaging.converter.SmartMessageConverter; +import org.springframework.messaging.handler.annotation.Header; +import org.springframework.retry.annotation.Backoff; +import org.springframework.scheduling.TaskScheduler; +import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler; +import org.springframework.test.annotation.DirtiesContext; +import org.springframework.test.context.TestPropertySource; +import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; + +import static org.assertj.core.api.Assertions.assertThat; 
+import static org.assertj.core.api.Assertions.fail;
+import static org.awaitility.Awaitility.await;
+
+/**
+ * @author Sanghyeok An
+ * @since 3.3.0
+ */
+
+@SpringJUnitConfig
+@DirtiesContext
+@EmbeddedKafka
+@TestPropertySource(properties = {"five.attempts=5", "kafka.template=customKafkaTemplate"})
+public class AsyncMonoFutureRetryTopicClassLevelIntegrationTests {
+
+	public final static String FIRST_TOPIC = "myRetryTopic1";
+
+	public final static String SECOND_TOPIC = "myRetryTopic2";
+
+	public final static String THIRD_TOPIC = "myRetryTopic3";
+
+	public final static String FOURTH_TOPIC = "myRetryTopic4";
+
+	public final static String TWO_LISTENERS_TOPIC = "myRetryTopic5";
+
+	public final static String MANUAL_TOPIC = "myRetryTopic6";
+
+	public final static String NOT_RETRYABLE_EXCEPTION_TOPIC = "noRetryTopic";
+
+	public final static String FIRST_REUSE_RETRY_TOPIC = "reuseRetry1";
+
+	public final static String SECOND_REUSE_RETRY_TOPIC = "reuseRetry2";
+
+	public final static String THIRD_REUSE_RETRY_TOPIC = "reuseRetry3";
+
+	private final static String MAIN_TOPIC_CONTAINER_FACTORY = "kafkaListenerContainerFactory";
+
+	@Autowired
+	private KafkaTemplate<String, String> kafkaTemplate;
+
+	@Autowired
+	private CountDownLatchContainer latchContainer;
+
+	@Autowired
+	DestinationTopicContainer topicContainer;
+
+	@Test
+	void shouldRetryFirstTopic(@Autowired KafkaListenerEndpointRegistry registry) {
+		kafkaTemplate.send(FIRST_TOPIC, "Testing topic 1");
+		assertThat(topicContainer.getNextDestinationTopicFor("firstTopicId", FIRST_TOPIC).getDestinationName())
+				.isEqualTo("myRetryTopic1-retry");
+		assertThat(awaitLatch(latchContainer.countDownLatch1)).isTrue();
+		assertThat(awaitLatch(latchContainer.customDltCountdownLatch)).isTrue();
+		assertThat(awaitLatch(latchContainer.customErrorHandlerCountdownLatch)).isTrue();
+		assertThat(awaitLatch(latchContainer.customMessageConverterCountdownLatch)).isTrue();
+		registry.getListenerContainerIds().stream()
+				.filter(id -> id.startsWith("first"))
+				.forEach(id -> {
+					ConcurrentMessageListenerContainer<?, ?> container = (ConcurrentMessageListenerContainer<?, ?>) registry
+							.getListenerContainer(id);
+					if (id.equals("firstTopicId")) {
+						assertThat(container.getConcurrency()).isEqualTo(2);
+					}
+					else {
+						assertThat(container.getConcurrency())
+								.describedAs("Expected %s to have concurrency", id)
+								.isEqualTo(1);
+					}
+				});
+	}
+
+	@Test
+	void shouldRetrySecondTopic() {
+		kafkaTemplate.send(SECOND_TOPIC, "Testing topic 2");
+		assertThat(awaitLatch(latchContainer.countDownLatch2)).isTrue();
+		assertThat(awaitLatch(latchContainer.customDltCountdownLatch)).isTrue();
+	}
+
+	@Test
+	void shouldRetryThirdTopicWithTimeout(
+			@Autowired KafkaAdmin admin,
+			@Autowired KafkaListenerEndpointRegistry registry) {
+
+		kafkaTemplate.send(THIRD_TOPIC, "Testing topic 3");
+		assertThat(awaitLatch(latchContainer.countDownLatch3)).isTrue();
+		assertThat(awaitLatch(latchContainer.countDownLatchDltOne)).isTrue();
+		Map<String, TopicDescription> topics = admin.describeTopics(THIRD_TOPIC, THIRD_TOPIC + "-dlt", FOURTH_TOPIC);
+		assertThat(topics.get(THIRD_TOPIC).partitions()).hasSize(2);
+		assertThat(topics.get(THIRD_TOPIC + "-dlt").partitions()).hasSize(3);
+		assertThat(topics.get(FOURTH_TOPIC).partitions()).hasSize(2);
+		registry.getListenerContainerIds().stream()
+				.filter(id -> id.startsWith("third"))
+				.forEach(id -> {
+					ConcurrentMessageListenerContainer<?, ?> container =
+							(ConcurrentMessageListenerContainer<?, ?>) registry.getListenerContainer(id);
+					if (id.equals("thirdTopicId")) {
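+						// thirdTopicId declares concurrency = "2" on @KafkaListener, while the
+						// auto-created retry containers take concurrency = "1" from @RetryableTopic;
+						// the two branches below assert exactly that split.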
assertThat(container.getConcurrency()).isEqualTo(2); + } + else { + assertThat(container.getConcurrency()) + .describedAs("Expected %s to have concurrency", id) + .isEqualTo(1); + } + }); + } + + @Test + void shouldRetryFourthTopicWithNoDlt() { + kafkaTemplate.send(FOURTH_TOPIC, "Testing topic 4"); + assertThat(awaitLatch(latchContainer.countDownLatch4)).isTrue(); + } + + @Test + void shouldRetryFifthTopicWithTwoListenersAndManualAssignment( + @Autowired FifthTopicListener1 listener1, + @Autowired FifthTopicListener2 listener2) { + + kafkaTemplate.send(TWO_LISTENERS_TOPIC, 0, "0", "Testing topic 5 - 0"); + kafkaTemplate.send(TWO_LISTENERS_TOPIC, 1, "0", "Testing topic 5 - 1"); + assertThat(awaitLatch(latchContainer.countDownLatch51)).isTrue(); + assertThat(awaitLatch(latchContainer.countDownLatch52)).isTrue(); + assertThat(awaitLatch(latchContainer.countDownLatchDltThree)).isTrue(); + assertThat(listener1.topics).containsExactly( + TWO_LISTENERS_TOPIC, + TWO_LISTENERS_TOPIC + "-listener1-0", + TWO_LISTENERS_TOPIC + "-listener1-1", + TWO_LISTENERS_TOPIC + "-listener1-2", + TWO_LISTENERS_TOPIC + "-listener1-dlt"); + assertThat(listener2.topics).containsExactly( + TWO_LISTENERS_TOPIC, + TWO_LISTENERS_TOPIC + "-listener2-0", + TWO_LISTENERS_TOPIC + "-listener2-1", + TWO_LISTENERS_TOPIC + "-listener2-2", + TWO_LISTENERS_TOPIC + "-listener2-dlt"); + } + + @Test + void shouldRetryManualTopicWithDefaultDlt( + @Autowired KafkaListenerEndpointRegistry registry, + @Autowired ConsumerFactory cf) { + + kafkaTemplate.send(MANUAL_TOPIC, "Testing topic 6"); + assertThat(awaitLatch(latchContainer.countDownLatch6)).isTrue(); + registry.getListenerContainerIds().stream() + .filter(id -> id.startsWith("manual")) + .forEach(id -> { + ConcurrentMessageListenerContainer container = + (ConcurrentMessageListenerContainer) registry.getListenerContainer(id); + assertThat(container) + .extracting("commonErrorHandler") + .extracting("seekAfterError", InstanceOfAssertFactories.BOOLEAN) + .isFalse(); + }); + Consumer consumer = cf.createConsumer("manual-dlt", ""); + Set tp = + Set.of(new org.apache.kafka.common.TopicPartition(MANUAL_TOPIC + "-dlt", 0)); + consumer.assign(tp); + try { + await().untilAsserted(() -> { + OffsetAndMetadata offsetAndMetadata = consumer.committed(tp).get(tp.iterator().next()); + assertThat(offsetAndMetadata).isNotNull(); + assertThat(offsetAndMetadata.offset()).isEqualTo(1L); + }); + } + finally { + consumer.close(); + } + } + + @Test + void shouldFirstReuseRetryTopic( + @Autowired FirstReuseRetryTopicListener listener1, + @Autowired SecondReuseRetryTopicListener listener2, + @Autowired ThirdReuseRetryTopicListener listener3) { + + kafkaTemplate.send(FIRST_REUSE_RETRY_TOPIC, "Testing reuse topic 1"); + kafkaTemplate.send(SECOND_REUSE_RETRY_TOPIC, "Testing reuse topic 2"); + kafkaTemplate.send(THIRD_REUSE_RETRY_TOPIC, "Testing reuse topic 3"); + assertThat(awaitLatch(latchContainer.countDownLatchReuseOne)).isTrue(); + assertThat(awaitLatch(latchContainer.countDownLatchReuseTwo)).isTrue(); + assertThat(awaitLatch(latchContainer.countDownLatchReuseThree)).isTrue(); + assertThat(listener1.topics).containsExactly( + FIRST_REUSE_RETRY_TOPIC, + FIRST_REUSE_RETRY_TOPIC + "-retry"); + assertThat(listener2.topics).containsExactly( + SECOND_REUSE_RETRY_TOPIC, + SECOND_REUSE_RETRY_TOPIC + "-retry-30", + SECOND_REUSE_RETRY_TOPIC + "-retry-60", + SECOND_REUSE_RETRY_TOPIC + "-retry-100", + SECOND_REUSE_RETRY_TOPIC + "-retry-100"); + assertThat(listener3.topics).containsExactly( + THIRD_REUSE_RETRY_TOPIC, + 
THIRD_REUSE_RETRY_TOPIC + "-retry", + THIRD_REUSE_RETRY_TOPIC + "-retry", + THIRD_REUSE_RETRY_TOPIC + "-retry", + THIRD_REUSE_RETRY_TOPIC + "-retry"); + } + + @Test + void shouldGoStraightToDlt() { + kafkaTemplate.send(NOT_RETRYABLE_EXCEPTION_TOPIC, "Testing topic with annotation 1"); + assertThat(awaitLatch(latchContainer.countDownLatchNoRetry)).isTrue(); + assertThat(awaitLatch(latchContainer.countDownLatchDltTwo)).isTrue(); + } + + private boolean awaitLatch(CountDownLatch latch) { + try { + return latch.await(60, TimeUnit.SECONDS); + } + catch (Exception e) { + throw new RuntimeException(e); + } + } + + @KafkaListener( + id = "firstTopicId", + topics = FIRST_TOPIC, + containerFactory = MAIN_TOPIC_CONTAINER_FACTORY, + errorHandler = "myCustomErrorHandler", + contentTypeConverter = "myCustomMessageConverter", + concurrency = "2") + static class FirstTopicListener { + + @Autowired + DestinationTopicContainer topicContainer; + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public Mono listen( + String message, + @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + return Mono.fromCallable(() -> { + try { + throw new RuntimeException("Woooops... in topic " + receivedTopic); + } + finally { + container.countDownLatch1.countDown(); + } + }).then(); + } + + } + + @KafkaListener(topics = SECOND_TOPIC, containerFactory = MAIN_TOPIC_CONTAINER_FACTORY) + static class SecondTopicListener { + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public Mono listenAgain( + String message, + @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + return Mono.fromCallable(() -> { + try { + throw new IllegalStateException("Another woooops... " + receivedTopic); + } + finally { + container.countDownIfNotKnown(receivedTopic, container.countDownLatch2); + } + }).then(); + } + + } + + @RetryableTopic( + attempts = "${five.attempts}", + backoff = @Backoff(delay = 250, maxDelay = 1000, multiplier = 1.5), + numPartitions = "#{3}", + timeout = "${missing.property:100000}", + include = MyRetryException.class, kafkaTemplate = "${kafka.template}", + topicSuffixingStrategy = TopicSuffixingStrategy.SUFFIX_WITH_INDEX_VALUE, + concurrency = "1") + @KafkaListener( + id = "thirdTopicId", + topics = THIRD_TOPIC, + containerFactory = MAIN_TOPIC_CONTAINER_FACTORY, + concurrency = "2") + static class ThirdTopicListener { + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public Mono listenWithAnnotation( + String message, + @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + return Mono.fromCallable(() -> { + try { + throw new MyRetryException("Annotated woooops... " + receivedTopic); + } + finally { + container.countDownIfNotKnown(receivedTopic, container.countDownLatch3); + } + }).then(); + } + + @DltHandler + public void annotatedDltMethod(Object message) { + container.countDownLatchDltOne.countDown(); + } + + } + + @RetryableTopic(dltStrategy = DltStrategy.NO_DLT, attempts = "4", backoff = @Backoff(300), + sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.MULTIPLE_TOPICS, + kafkaTemplate = "${kafka.template}") + @KafkaListener(topics = FOURTH_TOPIC, containerFactory = MAIN_TOPIC_CONTAINER_FACTORY) + static class FourthTopicListener { + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public Mono listenNoDlt( + String message, + @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + return Mono.fromCallable(() -> { + try { + throw new IllegalStateException("Another woooops... 
" + receivedTopic); + } + finally { + container.countDownIfNotKnown(receivedTopic, container.countDownLatch4); + } + }).then(); + } + + @DltHandler + public void shouldNotGetHere() { + fail("Dlt should not be processed!"); + } + + } + + static class AbstractFifthTopicListener { + + final List topics = Collections.synchronizedList(new ArrayList<>()); + + @Autowired + CountDownLatchContainer container; + + @DltHandler + public void annotatedDltMethod(ConsumerRecord record) { + this.topics.add(record.topic()); + container.countDownLatchDltThree.countDown(); + } + + } + + @RetryableTopic( + attempts = "4", + backoff = @Backoff(250), + numPartitions = "2", + retryTopicSuffix = "-listener1", + dltTopicSuffix = "-listener1-dlt", + topicSuffixingStrategy = TopicSuffixingStrategy.SUFFIX_WITH_INDEX_VALUE, + sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.MULTIPLE_TOPICS, + kafkaTemplate = "${kafka.template}") + @KafkaListener( + id = "fifthTopicId1", + topicPartitions = {@org.springframework.kafka.annotation.TopicPartition(topic = TWO_LISTENERS_TOPIC, + partitionOffsets = @PartitionOffset(partition = "0", initialOffset = "0"))}, + containerFactory = MAIN_TOPIC_CONTAINER_FACTORY) + static class FifthTopicListener1 extends AbstractFifthTopicListener { + + @KafkaHandler + public Mono listenWithAnnotation(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + this.topics.add(receivedTopic); + return Mono.fromCallable(() -> { + try { + throw new RuntimeException("Annotated woooops... " + receivedTopic); + } + finally { + container.countDownIfNotKnown(receivedTopic, container.countDownLatch51); + } + }).then(); + } + + } + + @RetryableTopic( + attempts = "4", + backoff = @Backoff(250), + numPartitions = "2", + retryTopicSuffix = "-listener2", + dltTopicSuffix = "-listener2-dlt", + topicSuffixingStrategy = TopicSuffixingStrategy.SUFFIX_WITH_INDEX_VALUE, + sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.MULTIPLE_TOPICS, + kafkaTemplate = "${kafka.template}") + @KafkaListener( + id = "fifthTopicId2", + topicPartitions = {@org.springframework.kafka.annotation.TopicPartition(topic = TWO_LISTENERS_TOPIC, + partitionOffsets = @PartitionOffset(partition = "1", initialOffset = "0"))}, + containerFactory = MAIN_TOPIC_CONTAINER_FACTORY) + static class FifthTopicListener2 extends AbstractFifthTopicListener { + + @KafkaHandler + public Mono listenWithAnnotation2(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + this.topics.add(receivedTopic); + return Mono.fromCallable(() -> { + try { + throw new RuntimeException("Annotated woooops... " + receivedTopic); + } + finally { + container.countDownLatch52.countDown(); + } + }).then(); + } + + } + + @RetryableTopic( + attempts = "4", + backoff = @Backoff(50), + sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.MULTIPLE_TOPICS) + @KafkaListener( + id = "manual", + topics = MANUAL_TOPIC, + containerFactory = MAIN_TOPIC_CONTAINER_FACTORY) + static class SixthTopicDefaultDLTListener { + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public Mono listenNoDlt( + String message, + @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic, + @SuppressWarnings("unused") Acknowledgment ack) { + return Mono.error(new IllegalStateException("Another woooops... 
" + receivedTopic)) + .doAfterTerminate(() -> container.countDownIfNotKnown(receivedTopic, container.countDownLatch6)) + .then(); + } + + } + + @RetryableTopic( + attempts = "3", + numPartitions = "3", + exclude = MyDontRetryException.class, + backoff = @Backoff(delay = 50, maxDelay = 100, multiplier = 3), + traversingCauses = "true", + kafkaTemplate = "${kafka.template}") + @KafkaListener(topics = NOT_RETRYABLE_EXCEPTION_TOPIC, containerFactory = MAIN_TOPIC_CONTAINER_FACTORY) + static class NoRetryTopicListener { + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public Mono listenWithAnnotation2(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + return Mono.fromCallable(() -> { + try { + throw new MyDontRetryException("Annotated second woooops... " + receivedTopic); + } + finally { + container.countDownIfNotKnown(receivedTopic, container.countDownLatchNoRetry); + } + }).then(); + } + + @DltHandler + public void annotatedDltMethod(Object message) { + container.countDownLatchDltTwo.countDown(); + } + + } + + @RetryableTopic( + attempts = "2", + backoff = @Backoff(50)) + @KafkaListener( + id = "reuseRetry1", + topics = FIRST_REUSE_RETRY_TOPIC, + containerFactory = "retryTopicListenerContainerFactory") + static class FirstReuseRetryTopicListener { + + final List topics = Collections.synchronizedList(new ArrayList<>()); + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public Mono listen1(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + this.topics.add(receivedTopic); + return Mono.fromCallable(() -> { + try { + throw new RuntimeException("Another woooops... " + receivedTopic); + } + finally { + container.countDownLatchReuseOne.countDown(); + } + }).then(); + } + + } + + @RetryableTopic( + attempts = "5", + backoff = @Backoff(delay = 30, maxDelay = 100, multiplier = 2)) + @KafkaListener( + id = "reuseRetry2", + topics = SECOND_REUSE_RETRY_TOPIC, + containerFactory = "retryTopicListenerContainerFactory") + static class SecondReuseRetryTopicListener { + + final List topics = Collections.synchronizedList(new ArrayList<>()); + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public Mono listen2(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + this.topics.add(receivedTopic); + return Mono.fromCallable(() -> { + try { + throw new RuntimeException("Another woooops... " + receivedTopic); + } + finally { + container.countDownLatchReuseTwo.countDown(); + } + }).then(); + } + + } + + @RetryableTopic(attempts = "5", backoff = @Backoff(delay = 1, maxDelay = 5, multiplier = 1.4)) + @KafkaListener(id = "reuseRetry3", topics = THIRD_REUSE_RETRY_TOPIC, + containerFactory = "retryTopicListenerContainerFactory") + static class ThirdReuseRetryTopicListener { + + final List topics = Collections.synchronizedList(new ArrayList<>()); + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public Mono listen3(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + this.topics.add(receivedTopic); + return Mono.fromCallable(() -> { + try { + throw new RuntimeException("Another woooops... 
" + receivedTopic); + } + finally { + container.countDownLatchReuseThree.countDown(); + } + }).then(); + } + + } + + static class CountDownLatchContainer { + + CountDownLatch countDownLatch1 = new CountDownLatch(5); + + CountDownLatch countDownLatch2 = new CountDownLatch(3); + + CountDownLatch countDownLatch3 = new CountDownLatch(3); + + CountDownLatch countDownLatch4 = new CountDownLatch(4); + + CountDownLatch countDownLatch51 = new CountDownLatch(4); + + CountDownLatch countDownLatch52 = new CountDownLatch(4); + + CountDownLatch countDownLatch6 = new CountDownLatch(4); + + CountDownLatch countDownLatchNoRetry = new CountDownLatch(1); + + CountDownLatch countDownLatchDltOne = new CountDownLatch(1); + + CountDownLatch countDownLatchDltTwo = new CountDownLatch(1); + + CountDownLatch countDownLatchDltThree = new CountDownLatch(2); + + CountDownLatch countDownLatchReuseOne = new CountDownLatch(2); + + CountDownLatch countDownLatchReuseTwo = new CountDownLatch(5); + + CountDownLatch countDownLatchReuseThree = new CountDownLatch(5); + + CountDownLatch customDltCountdownLatch = new CountDownLatch(1); + + CountDownLatch customErrorHandlerCountdownLatch = new CountDownLatch(6); + + CountDownLatch customMessageConverterCountdownLatch = new CountDownLatch(6); + + final List knownTopics = new ArrayList<>(); + + private void countDownIfNotKnown(String receivedTopic, CountDownLatch countDownLatch) { + synchronized (knownTopics) { + if (!knownTopics.contains(receivedTopic)) { + knownTopics.add(receivedTopic); + countDownLatch.countDown(); + } + } + } + + } + + static class MyCustomDltProcessor { + + @Autowired + KafkaTemplate kafkaTemplate; + + @Autowired + CountDownLatchContainer container; + + public void processDltMessage(Object message) { + try { + throw new RuntimeException("Dlt Error!"); + } + finally { + container.customDltCountdownLatch.countDown(); + } + } + + } + + @SuppressWarnings("serial") + static class MyRetryException extends RuntimeException { + + MyRetryException(String msg) { + super(msg); + } + + } + + @SuppressWarnings("serial") + static class MyDontRetryException extends RuntimeException { + + MyDontRetryException(String msg) { + super(msg); + } + + } + + @Configuration + static class RetryTopicConfigurations extends RetryTopicConfigurationSupport { + + private static final String DLT_METHOD_NAME = "processDltMessage"; + + @Bean + RetryTopicConfiguration firstRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .fixedBackOff(50) + .maxAttempts(5) + .concurrency(1) + .useSingleTopicForSameIntervals() + .includeTopic(FIRST_TOPIC) + .doNotRetryOnDltFailure() + .dltHandlerMethod("myCustomDltProcessor", DLT_METHOD_NAME) + .create(template); + } + + @Bean + RetryTopicConfiguration secondRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .exponentialBackoff(500, 2, 10000) + .retryOn(Arrays.asList(IllegalStateException.class, IllegalAccessException.class)) + .traversingCauses() + .includeTopic(SECOND_TOPIC) + .doNotRetryOnDltFailure() + .dltHandlerMethod("myCustomDltProcessor", DLT_METHOD_NAME) + .create(template); + } + + @Bean + FirstTopicListener firstTopicListener() { + return new FirstTopicListener(); + } + + @Bean + KafkaListenerErrorHandler myCustomErrorHandler( + CountDownLatchContainer container) { + return (message, exception) -> { + try { + throw exception; + } + finally { + container.customErrorHandlerCountdownLatch.countDown(); + } + }; + } + + @Bean + SmartMessageConverter 
myCustomMessageConverter( + CountDownLatchContainer container) { + return new CompositeMessageConverter(Collections.singletonList(new GenericMessageConverter())) { + + @Override + public Object fromMessage(Message message, Class targetClass, Object conversionHint) { + container.customMessageConverterCountdownLatch.countDown(); + return super.fromMessage(message, targetClass, conversionHint); + } + }; + } + + @Bean + SecondTopicListener secondTopicListener() { + return new SecondTopicListener(); + } + + @Bean + ThirdTopicListener thirdTopicListener() { + return new ThirdTopicListener(); + } + + @Bean + FourthTopicListener fourthTopicListener() { + return new FourthTopicListener(); + } + + @Bean + FifthTopicListener1 fifthTopicListener1() { + return new FifthTopicListener1(); + } + + @Bean + FifthTopicListener2 fifthTopicListener2() { + return new FifthTopicListener2(); + } + + @Bean + SixthTopicDefaultDLTListener manualTopicListener() { + return new SixthTopicDefaultDLTListener(); + } + + @Bean + NoRetryTopicListener noRetryTopicListener() { + return new NoRetryTopicListener(); + } + + @Bean + FirstReuseRetryTopicListener firstReuseRetryTopicListener() { + return new FirstReuseRetryTopicListener(); + } + + @Bean + SecondReuseRetryTopicListener secondReuseRetryTopicListener() { + return new SecondReuseRetryTopicListener(); + } + + @Bean + ThirdReuseRetryTopicListener thirdReuseRetryTopicListener() { + return new ThirdReuseRetryTopicListener(); + } + + @Bean + CountDownLatchContainer latchContainer() { + return new CountDownLatchContainer(); + } + + @Bean + MyCustomDltProcessor myCustomDltProcessor() { + return new MyCustomDltProcessor(); + } + + } + + @Configuration + static class KafkaProducerConfig { + + @Autowired + EmbeddedKafkaBroker broker; + + @Bean + ProducerFactory producerFactory() { + Map props = KafkaTestUtils.producerProps( + this.broker.getBrokersAsString()); + props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); + return new DefaultKafkaProducerFactory<>(props); + } + + @Bean("customKafkaTemplate") + KafkaTemplate kafkaTemplate() { + return new KafkaTemplate<>(producerFactory()); + } + + } + + @EnableKafka + @Configuration + static class KafkaConsumerConfig { + + @Autowired + EmbeddedKafkaBroker broker; + + @Bean + KafkaAdmin kafkaAdmin() { + Map configs = new HashMap<>(); + configs.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, this.broker.getBrokersAsString()); + return new KafkaAdmin(configs); + } + + @Bean + NewTopic topic() { + return TopicBuilder.name(THIRD_TOPIC).partitions(2).replicas(1).build(); + } + + @Bean + NewTopics topics() { + return new NewTopics(TopicBuilder.name(FOURTH_TOPIC).partitions(2).replicas(1).build()); + } + + @Bean + ConsumerFactory consumerFactory() { + Map props = KafkaTestUtils.consumerProps( + this.broker.getBrokersAsString(), + "groupId", + "false"); + props.put( + ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, + StringDeserializer.class); + return new DefaultKafkaConsumerFactory<>(props); + } + + @Bean + ConcurrentKafkaListenerContainerFactory retryTopicListenerContainerFactory( + ConsumerFactory consumerFactory) { + + ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); + ContainerProperties props = factory.getContainerProperties(); + props.setIdleEventInterval(100L); + props.setPollTimeout(50L); + props.setIdlePartitionEventInterval(100L); + factory.setConsumerFactory(consumerFactory); + factory.setConcurrency(1); + factory.setContainerCustomizer( + container -> 
container.getContainerProperties().setIdlePartitionEventInterval(100L)); + return factory; + } + + @Bean + ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory( + ConsumerFactory consumerFactory) { + + ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); + factory.setConsumerFactory(consumerFactory); + factory.setConcurrency(1); + factory.setContainerCustomizer(container -> { + if (container.getListenerId().startsWith("manual")) { + container.getContainerProperties().setAckMode(AckMode.MANUAL); + container.getContainerProperties().setAsyncAcks(true); + } + }); + return factory; + } + + @Bean + TaskScheduler sched() { + return new ThreadPoolTaskScheduler(); + } + + } + +} diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/AsyncMonoRetryTopicScenarioTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/AsyncMonoRetryTopicScenarioTests.java new file mode 100644 index 0000000000..d3c80bf2cd --- /dev/null +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/AsyncMonoRetryTopicScenarioTests.java @@ -0,0 +1,1375 @@ +/* + * Copyright 2024 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.kafka.retrytopic; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.common.serialization.StringSerializer; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Mono; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.kafka.annotation.EnableKafka; +import org.springframework.kafka.annotation.KafkaHandler; +import org.springframework.kafka.annotation.KafkaListener; +import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; +import org.springframework.kafka.core.ConsumerFactory; +import org.springframework.kafka.core.DefaultKafkaConsumerFactory; +import org.springframework.kafka.core.DefaultKafkaProducerFactory; +import org.springframework.kafka.core.KafkaTemplate; +import org.springframework.kafka.core.ProducerFactory; +import org.springframework.kafka.listener.ContainerProperties; +import org.springframework.kafka.listener.ContainerProperties.AckMode; +import org.springframework.kafka.listener.KafkaListenerErrorHandler; +import org.springframework.kafka.support.KafkaHeaders; +import org.springframework.kafka.test.EmbeddedKafkaBroker; +import org.springframework.kafka.test.context.EmbeddedKafka; +import org.springframework.kafka.test.utils.KafkaTestUtils; +import org.springframework.messaging.Message; +import 
org.springframework.messaging.converter.CompositeMessageConverter; +import org.springframework.messaging.converter.GenericMessageConverter; +import org.springframework.messaging.converter.SmartMessageConverter; +import org.springframework.messaging.handler.annotation.Header; +import org.springframework.scheduling.TaskScheduler; +import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler; +import org.springframework.test.annotation.DirtiesContext; +import org.springframework.test.context.TestPropertySource; +import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +/** + * @author Sanghyeok An + * @since 3.3.0 + */ + +@SpringJUnitConfig +@DirtiesContext +@EmbeddedKafka +@TestPropertySource(properties = { "five.attempts=5", "kafka.template=customKafkaTemplate"}) +public class AsyncMonoRetryTopicScenarioTests { + + private final static String MAIN_TOPIC_CONTAINER_FACTORY = "kafkaListenerContainerFactory"; + + public final static String TEST_TOPIC0 = "myRetryTopic0"; + + public final static String TEST_TOPIC1 = "myRetryTopic1"; + + public final static String TEST_TOPIC2 = "myRetryTopic2"; + + public final static String TEST_TOPIC3 = "myRetryTopic3"; + + public final static String TEST_TOPIC4 = "myRetryTopic4"; + + public final static String TEST_TOPIC5 = "myRetryTopic5"; + + public final static String TEST_TOPIC6 = "myRetryTopic6"; + + @Autowired + private KafkaTemplate kafkaTemplate; + + @Autowired + private CountDownLatchContainer latchContainer; + + @Autowired + DestinationTopicContainer topicContainer; + + @Test + void allFailCaseTest( + @Autowired TestTopicListener0 testTopicListener, + @Autowired MyCustomDltProcessor myCustomDltProcessor0) { + // All Fail case. + String shortFailedMsg1 = "0"; + String shortFailedMsg2 = "1"; + String shortFailedMsg3 = "2"; + DestinationTopic destinationTopic = topicContainer.getNextDestinationTopicFor("0-topicId", TEST_TOPIC0); + + String expectedRetryTopic = TEST_TOPIC0 + "-retry"; + String[] expectedReceivedMsgs = { + shortFailedMsg1, + shortFailedMsg2, + shortFailedMsg3, + shortFailedMsg1, + shortFailedMsg2, + shortFailedMsg3, + shortFailedMsg1, + shortFailedMsg2, + shortFailedMsg3, + }; + String[] expectedReceivedTopics = { + TEST_TOPIC0, + TEST_TOPIC0, + TEST_TOPIC0, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + }; + String[] expectedDltMsgs = { + shortFailedMsg1, + shortFailedMsg2, + shortFailedMsg3 + }; + + // When + kafkaTemplate.send(TEST_TOPIC0, shortFailedMsg1); + kafkaTemplate.send(TEST_TOPIC0, shortFailedMsg2); + kafkaTemplate.send(TEST_TOPIC0, shortFailedMsg3); + + // Then + assertThat(awaitLatch(latchContainer.countDownLatch0)).isTrue(); + assertThat(awaitLatch(latchContainer.dltCountdownLatch0)).isTrue(); + + assertThat(destinationTopic.getDestinationName()).isEqualTo(TEST_TOPIC0 + "-retry"); + + assertThat(testTopicListener.receivedMsgs).containsExactlyInAnyOrder(expectedReceivedMsgs); + assertThat(testTopicListener.receivedTopics).containsExactlyInAnyOrder(expectedReceivedTopics); + + assertThat(myCustomDltProcessor0.receivedMsg).containsExactlyInAnyOrder(expectedDltMsgs); + } + + @Test + void firstShortFailAndLastLongSuccessRetryTest( + @Autowired TestTopicListener1 testTopicListener1, + @Autowired MyCustomDltProcessor myCustomDltProcessor1) { + // Scenario. + // 1. Short Fail msg (offset 0) + // 2. 
Long success msg (offset 1) -> -ing (latch wait) + // 3. Short fail msg (Retry1 offset 0) -> (latch down) + // 4. Long success msg (offset 1) -> Success! + // 5. Short fail msg (Retry2 offset 0) + // 6. Short fail msg (Retry3 offset 0) + // 7. Short fail msg (Retry4 offset 0) + + // Given + String longSuccessMsg = testTopicListener1.LONG_SUCCESS_MSG; + String shortFailedMsg = testTopicListener1.SHORT_FAIL_MSG; + DestinationTopic destinationTopic = topicContainer.getNextDestinationTopicFor("1-topicId", TEST_TOPIC1); + + String expectedRetryTopic = TEST_TOPIC1 + "-retry"; + String[] expectedReceivedMsgs = { + shortFailedMsg, + longSuccessMsg, + shortFailedMsg, + shortFailedMsg, + shortFailedMsg, + shortFailedMsg + }; + + String[] expectedReceivedTopics = { + TEST_TOPIC1, + TEST_TOPIC1, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic + }; + + String[] expectedDltMsgs = { + shortFailedMsg + }; + + // When + kafkaTemplate.send(TEST_TOPIC1, shortFailedMsg); + kafkaTemplate.send(TEST_TOPIC1, longSuccessMsg); + + // Then + assertThat(awaitLatch(latchContainer.countDownLatch1)).isTrue(); + assertThat(awaitLatch(latchContainer.dltCountdownLatch1)).isTrue(); + + assertThat(destinationTopic.getDestinationName()).isEqualTo(expectedRetryTopic); + assertThat(testTopicListener1.receivedMsgs).containsExactlyInAnyOrder(expectedReceivedMsgs); + assertThat(testTopicListener1.receivedTopics).containsExactlyInAnyOrder(expectedReceivedTopics); + assertThat(testTopicListener1.latchWaitFailCount).isEqualTo(0); + + assertThat(myCustomDltProcessor1.receivedMsg).containsExactlyInAnyOrder(expectedDltMsgs); + } + + @Test + void firstLongSuccessAndLastShortFailed( + @Autowired TestTopicListener2 testTopicListener2, + @Autowired MyCustomDltProcessor myCustomDltProcessor2) { + // Scenario. + // 1. Long success msg (offset 0) -> going on... (latch await) + // 2. Short fail msg (offset 1) -> done. + // 3. Short fail msg (Retry1 offset 1) -> done (latch down) + // 4. Long success msg (offset 0) -> succeed. + // 5. Short fail msg (Retry2 offset 1) + // 6. Short fail msg (Retry3 offset 1) + // 7. Short fail msg (Retry4 offset 1) + // 8. 
Short fail msg (dlt offset 1) + + // Given + String shortFailedMsg = testTopicListener2.SHORT_FAIL_MSG; + String longSuccessMsg = testTopicListener2.LONG_SUCCESS_MSG; + DestinationTopic destinationTopic = topicContainer.getNextDestinationTopicFor("2-topicId", TEST_TOPIC2); + + String expectedRetryTopic = TEST_TOPIC2 + "-retry"; + String[] expectedReceivedMsgs = { + longSuccessMsg, + shortFailedMsg, + shortFailedMsg, + shortFailedMsg, + shortFailedMsg, + shortFailedMsg + }; + + String[] expectedReceivedTopics = { + TEST_TOPIC2, + TEST_TOPIC2, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic + }; + + String[] expectedDltMsgs = { + shortFailedMsg + }; + + // When + kafkaTemplate.send(TEST_TOPIC2, longSuccessMsg); + kafkaTemplate.send(TEST_TOPIC2, shortFailedMsg); + + // Then + + assertThat(awaitLatch(latchContainer.countDownLatch2)).isTrue(); + assertThat(awaitLatch(latchContainer.dltCountdownLatch2)).isTrue(); + + assertThat(destinationTopic.getDestinationName()).isEqualTo(expectedRetryTopic); + assertThat(testTopicListener2.receivedMsgs).containsExactlyInAnyOrder(expectedReceivedMsgs); + assertThat(testTopicListener2.receivedTopics).containsExactlyInAnyOrder(expectedReceivedTopics); + assertThat(testTopicListener2.latchWaitFailCount).isEqualTo(0); + + assertThat(myCustomDltProcessor2.receivedMsg).containsExactlyInAnyOrder(expectedDltMsgs); + } + + @Test + void longFailMsgTwiceThenShortSuccessMsgThird( + @Autowired TestTopicListener3 testTopicListener3, + @Autowired MyCustomDltProcessor myCustomDltProcessor3) { + // Scenario + // 1. Long fail msg arrived (offset 0) -> -ing (wait latch offset 4) + // 2. Long fail msg arrived (offset 1) -> -ing (wait latch offset 1) + // 3. Short success msg arrived (offset 2) -> done + // 4. Short success msg arrived (offset 3) -> done + // 5. Short success msg arrived (offset 4) -> done (latch offset 4 count down) + // 6. Long fail msg throws error (offset 0) -> done + // 7. Long fail msg throws error (offset 1) -> done + // 8. Long fail msg (retry 1 with offset 0) -> done + // 9. Long fail msg (retry 1 with offset 1) -> done + // 10. Long fail msg (retry 2 with offset 0) -> done + // 11. Long fail msg (retry 2 with offset 1) -> done + // 12. Long fail msg (retry 3 with offset 0) -> done + // 13. Long fail msg (retry 3 with offset 1) -> done + // 14. Long fail msg (retry 4 with offset 0) -> done + // 15. 
Long fail msg (retry 4 with offset 1) -> done + + // Given + DestinationTopic destinationTopic = topicContainer.getNextDestinationTopicFor("3-topicId", TEST_TOPIC3); + + String firstMsg = TestTopicListener3.FAIL_PREFIX + "0"; + String secondMsg = TestTopicListener3.FAIL_PREFIX + "1"; + String thirdMsg = TestTopicListener3.SUCCESS_PREFIX + "2"; + String fourthMsg = TestTopicListener3.SUCCESS_PREFIX + "3"; + String fifthMsg = TestTopicListener3.SUCCESS_PREFIX + "4"; + + String expectedRetryTopic = TEST_TOPIC3 + "-retry"; + + String[] expectedReceivedMsgs = { + firstMsg, + secondMsg, + thirdMsg, + fourthMsg, + fifthMsg, + firstMsg, + secondMsg, + firstMsg, + secondMsg, + firstMsg, + secondMsg, + firstMsg, + secondMsg, + }; + + String[] expectedReceivedTopics = { + TEST_TOPIC3, + TEST_TOPIC3, + TEST_TOPIC3, + TEST_TOPIC3, + TEST_TOPIC3, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic + }; + + String[] expectedDltMsgs = { + firstMsg, + secondMsg, + }; + + // When + kafkaTemplate.send(TEST_TOPIC3, firstMsg); + kafkaTemplate.send(TEST_TOPIC3, secondMsg); + kafkaTemplate.send(TEST_TOPIC3, thirdMsg); + kafkaTemplate.send(TEST_TOPIC3, fourthMsg); + kafkaTemplate.send(TEST_TOPIC3, fifthMsg); + + // Then + assertThat(awaitLatch(latchContainer.countDownLatch3)).isTrue(); + assertThat(awaitLatch(latchContainer.dltCountdownLatch3)).isTrue(); + + assertThat(destinationTopic.getDestinationName()).isEqualTo(expectedRetryTopic); + assertThat(testTopicListener3.receivedMsgs).containsExactlyInAnyOrder(expectedReceivedMsgs); + assertThat(testTopicListener3.receivedTopics).containsExactlyInAnyOrder(expectedReceivedTopics); + assertThat(testTopicListener3.latchWaitFailCount).isEqualTo(0); + + assertThat(myCustomDltProcessor3.receivedMsg).containsExactlyInAnyOrder(expectedDltMsgs); + } + + @Test + void longSuccessMsgTwiceThenShortFailMsgTwice( + @Autowired TestTopicListener4 topicListener4, + @Autowired MyCustomDltProcessor myCustomDltProcessor4) { + // Scenario + // 1. Msg arrived (offset 0) -> -ing + // 2. Msg arrived (offset 1) -> -ing + // 3. Msg arrived (offset 2) throws error -> done + // 4. Msg arrived (offset 3) throws error -> done + // 5. Msg arrived (offset 0) succeed -> done + // 6. Msg arrived (offset 1) succeed -> done + // 7. Msg arrived (retry 1, offset 2) -> done + // 8. Msg arrived (retry 1, offset 3) -> done + // 9. Msg arrived (retry 2, offset 2) -> done + // 10. Msg arrived (retry 2, offset 3) -> done + // 11. Msg arrived (retry 3, offset 2) -> done + // 12. Msg arrived (retry 3, offset 3) -> done + // 13. Msg arrived (retry 4, offset 2) -> done + // 14. 
Msg arrived (retry 4, offset 3) -> done
+
+		// Given
+		DestinationTopic destinationTopic = topicContainer.getNextDestinationTopicFor("4-TopicId", TEST_TOPIC4);
+
+		String expectedRetryTopic = TEST_TOPIC4 + "-retry";
+		String[] expectedReceivedMsgs = {
+				TestTopicListener4.LONG_SUCCESS_MSG,
+				TestTopicListener4.LONG_SUCCESS_MSG,
+				TestTopicListener4.SHORT_FAIL_MSG,
+				TestTopicListener4.SHORT_FAIL_MSG,
+				TestTopicListener4.SHORT_FAIL_MSG,
+				TestTopicListener4.SHORT_FAIL_MSG,
+				TestTopicListener4.SHORT_FAIL_MSG,
+				TestTopicListener4.SHORT_FAIL_MSG,
+				TestTopicListener4.SHORT_FAIL_MSG,
+				TestTopicListener4.SHORT_FAIL_MSG,
+				TestTopicListener4.SHORT_FAIL_MSG,
+				TestTopicListener4.SHORT_FAIL_MSG,
+		};
+
+		String[] expectedReceivedTopics = {
+				TEST_TOPIC4,
+				TEST_TOPIC4,
+				TEST_TOPIC4,
+				TEST_TOPIC4,
+				expectedRetryTopic,
+				expectedRetryTopic,
+				expectedRetryTopic,
+				expectedRetryTopic,
+				expectedRetryTopic,
+				expectedRetryTopic,
+				expectedRetryTopic,
+				expectedRetryTopic,
+		};
+
+		String[] expectedDltMsgs = {
+				TestTopicListener4.SHORT_FAIL_MSG,
+				TestTopicListener4.SHORT_FAIL_MSG,
+		};
+
+		// When
+		kafkaTemplate.send(TEST_TOPIC4, TestTopicListener4.LONG_SUCCESS_MSG);
+		kafkaTemplate.send(TEST_TOPIC4, TestTopicListener4.LONG_SUCCESS_MSG);
+		kafkaTemplate.send(TEST_TOPIC4, TestTopicListener4.SHORT_FAIL_MSG);
+		kafkaTemplate.send(TEST_TOPIC4, TestTopicListener4.SHORT_FAIL_MSG);
+
+		// Then
+		assertThat(awaitLatch(latchContainer.countDownLatch4)).isTrue();
+		assertThat(awaitLatch(latchContainer.dltCountdownLatch4)).isTrue();
+
+		assertThat(destinationTopic.getDestinationName()).isEqualTo(expectedRetryTopic);
+		assertThat(topicListener4.receivedMsgs).containsExactlyInAnyOrder(expectedReceivedMsgs);
+		assertThat(topicListener4.receivedTopics).containsExactlyInAnyOrder(expectedReceivedTopics);
+		assertThat(topicListener4.latchWaitFailCount).isEqualTo(0);
+
+		assertThat(myCustomDltProcessor4.receivedMsg).containsExactlyInAnyOrder(expectedDltMsgs);
+	}
+
+	@Test
+	void oneLongSuccessMsgBetween49ShortFailMsg(
+			@Autowired TestTopicListener5 topicListener5,
+			@Autowired MyCustomDltProcessor myCustomDltProcessor5) {
+		// Scenario.
+		// 1. msgs received (offsets 0 ~ 24) -> failed.
+		// 2. msgs received (offset 25) -> -ing
+		// 3. msgs received (offset 26 ~ 49) -> failed.
+		// 4. msgs succeed (offset 25) -> done
+		// 5. msgs received (Retry1 offset 0 ~ 49 except 25) -> failed.
+		// 6. msgs received (Retry2 offset 0 ~ 49 except 25) -> failed.
+		// 7. the 49 failed msgs are sent to the dlt (maxAttempts(3) allows only two retries).
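+		// Receipt arithmetic for this scenario (a sketch derived from the maxAttempts(3)
+		// used by testRetryTopic5 below): each of the 49 failing records is delivered
+		// three times (main topic + two retries) and the success once, so
+		// 49 * 3 + 1 = 148 receipts in total, while all 49 failures reach the dlt.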
+ + // Given + DestinationTopic destinationTopic = topicContainer.getNextDestinationTopicFor("5-TopicId", TEST_TOPIC5); + + String expectedRetryTopic = TEST_TOPIC5 + "-retry"; + + String[] expectedReceivedMsgs = new String[148]; + for (int i = 0; i < 147; i++) { + expectedReceivedMsgs[i] = TestTopicListener5.SHORT_FAIL_MSG; + } + expectedReceivedMsgs[147] = TestTopicListener5.LONG_SUCCESS_MSG; + + + String[] expectedReceivedTopics = new String[148]; + for (int i = 0; i < 49; i++) { + expectedReceivedTopics[i] = TEST_TOPIC5; + } + for (int i = 49; i < 147; i++) { + expectedReceivedTopics[i] = expectedRetryTopic; + } + expectedReceivedTopics[147] = TEST_TOPIC5; + + + String[] expectedDltMsgs = new String[49]; + for (int i = 0; i < 49; i++) { + expectedDltMsgs[i] = TestTopicListener5.SHORT_FAIL_MSG; + } + + // When + for (int i = 0; i < 50; i++) { + if (i != 25) { + kafkaTemplate.send(TEST_TOPIC5, TestTopicListener5.SHORT_FAIL_MSG); + } + else { + kafkaTemplate.send(TEST_TOPIC5, TestTopicListener5.LONG_SUCCESS_MSG); + } + } + + // Then + assertThat(awaitLatch(latchContainer.countDownLatch5)).isTrue(); + assertThat(awaitLatch(latchContainer.dltCountdownLatch5)).isTrue(); + + assertThat(destinationTopic.getDestinationName()).isEqualTo(expectedRetryTopic); + assertThat(topicListener5.receivedMsgs).containsExactlyInAnyOrder(expectedReceivedMsgs); + assertThat(topicListener5.receivedTopics).containsExactlyInAnyOrder(expectedReceivedTopics); + + assertThat(myCustomDltProcessor5.receivedMsg).containsExactlyInAnyOrder(expectedDltMsgs); + } + + @Test + void moreComplexAsyncScenarioTest( + @Autowired TestTopicListener6 topicListener6, + @Autowired @Qualifier("myCustomDltProcessor6") + MyCustomDltProcessor myCustomDltProcessor6) { + // Scenario. + // 1. Fail Msg (offset 0) -> -ing + // 2. Success Msg (offset 1) -> -ing + // 3. Success Msg (offset 2) -> -ing + // 4. Fail Msg (offset 3) -> done + // 5. Success Msg (offset 4) -> -ing + // 6. Success msg succeed (offset 2) - done + // 7. Success msg succeed (offset 4) -> done + // 8. Fail Msg (Retry1 offset 3) -> done + // 9. Fail Msg (Retry2 offset 3) -> done + // 10. Success msg succeed (offset 1) -> done + // 11. Fail Msg (offset 0) -> done + // 12. Fail Msg (Retry 1 offset 0) -> done + // 13. 
Fail Msg (Retry 2 offset 0) -> done + + // Given + String firstMsg = TestTopicListener6.FAIL_PREFIX + "0"; + String secondMsg = TestTopicListener6.SUCCESS_PREFIX + "1"; + String thirdMsg = TestTopicListener6.SUCCESS_PREFIX + "2"; + String fourthMsg = TestTopicListener6.FAIL_PREFIX + "3"; + String fifthMsg = TestTopicListener6.SUCCESS_PREFIX + "4"; + + DestinationTopic destinationTopic = topicContainer.getNextDestinationTopicFor("6-TopicId", TEST_TOPIC6); + String expectedRetryTopic = TEST_TOPIC6 + "-retry"; + + String[] expectedReceivedMsgs = { + firstMsg, + secondMsg, + thirdMsg, + fourthMsg, + fifthMsg, + fourthMsg, + fourthMsg, + firstMsg, + firstMsg + }; + + String[] expectedReceivedTopics = { + TEST_TOPIC6, + TEST_TOPIC6, + TEST_TOPIC6, + TEST_TOPIC6, + TEST_TOPIC6, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic, + expectedRetryTopic + }; + + String[] expectedDltMsgs = { + TestTopicListener6.FAIL_PREFIX + "3", + TestTopicListener6.FAIL_PREFIX + "0" + }; + + // When + kafkaTemplate.send(TEST_TOPIC6, firstMsg); + kafkaTemplate.send(TEST_TOPIC6, secondMsg); + kafkaTemplate.send(TEST_TOPIC6, thirdMsg); + kafkaTemplate.send(TEST_TOPIC6, fourthMsg); + kafkaTemplate.send(TEST_TOPIC6, fifthMsg); + + // Then + assertThat(awaitLatch(latchContainer.countDownLatch6)).isTrue(); + assertThat(awaitLatch(latchContainer.dltCountdownLatch6)).isTrue(); + + assertThat(destinationTopic.getDestinationName()).isEqualTo(expectedRetryTopic); + assertThat(topicListener6.receivedMsgs).containsExactlyInAnyOrder(expectedReceivedMsgs); + assertThat(topicListener6.receivedTopics).containsExactlyInAnyOrder(expectedReceivedTopics); + assertThat(topicListener6.latchWaitFailCount).isEqualTo(0); + + assertThat(myCustomDltProcessor6.receivedMsg).containsExactlyInAnyOrder(expectedDltMsgs); + } + + private boolean awaitLatch(CountDownLatch latch) { + try { + return latch.await(60, TimeUnit.SECONDS); + } + catch (Exception e) { + fail(e.getMessage()); + throw new RuntimeException(e); + } + + } + + @KafkaListener( + id = "0-topicId", + topics = TEST_TOPIC0, + containerFactory = MAIN_TOPIC_CONTAINER_FACTORY, + errorHandler = "myCustomErrorHandler", + contentTypeConverter = "myCustomMessageConverter", + concurrency = "2") + static class TestTopicListener0 { + + @Autowired + CountDownLatchContainer container; + + private final List receivedMsgs = new ArrayList<>(); + + private final List receivedTopics = new ArrayList<>(); + + @KafkaHandler + public Mono listen(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + this.receivedMsgs.add(message); + this.receivedTopics.add(receivedTopic); + return Mono.fromCallable(() -> { + try { + throw new RuntimeException("Woooops... 
in topic " + receivedTopic); + } + finally { + container.countDownLatch0.countDown(); + } + }); + } + + } + + @KafkaListener( + id = "1-topicId", + topics = TEST_TOPIC1, + containerFactory = MAIN_TOPIC_CONTAINER_FACTORY, + errorHandler = "myCustomErrorHandler", + contentTypeConverter = "myCustomMessageConverter", + concurrency = "2") + static class TestTopicListener1 { + + @Autowired + CountDownLatchContainer container; + + private final List receivedMsgs = new ArrayList<>(); + + private final List receivedTopics = new ArrayList<>(); + + private CountDownLatch firstRetryFailMsgLatch = new CountDownLatch(1); + + protected final String LONG_SUCCESS_MSG = "success"; + + protected final String SHORT_FAIL_MSG = "fail"; + + protected int latchWaitFailCount = 0; + + @KafkaHandler + public Mono listen( + String message, + @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic, + @Header(KafkaHeaders.OFFSET) String offset) { + this.receivedTopics.add(receivedTopic); + this.receivedMsgs.add(message); + return Mono.fromCallable(() -> { + try { + if (message.equals(SHORT_FAIL_MSG)) { + throw new RuntimeException("Woooops... in topic " + receivedTopic); + } + else { + firstRetryFailMsgLatch.await(10, TimeUnit.SECONDS); + } + } + catch (InterruptedException e) { + latchWaitFailCount += 1; + throw new RuntimeException(e); + } + finally { + if (receivedTopic.equals(TEST_TOPIC1 + "-retry") && + offset.equals("0")) { + firstRetryFailMsgLatch.countDown(); + } + container.countDownLatch1.countDown(); + } + return "Task Completed"; + }); + } + + } + + @KafkaListener( + id = "2-topicId", + topics = TEST_TOPIC2, + containerFactory = MAIN_TOPIC_CONTAINER_FACTORY, + errorHandler = "myCustomErrorHandler", + contentTypeConverter = "myCustomMessageConverter", + concurrency = "2") + static class TestTopicListener2 { + + @Autowired + CountDownLatchContainer container; + + protected final List receivedMsgs = new ArrayList<>(); + + private final List receivedTopics = new ArrayList<>(); + + private CountDownLatch firstRetryFailMsgLatch = new CountDownLatch(1); + + protected final String LONG_SUCCESS_MSG = "success"; + + protected final String SHORT_FAIL_MSG = "fail"; + + protected int latchWaitFailCount = 0; + + @KafkaHandler + public Mono listen( + String message, + @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic, + @Header(KafkaHeaders.OFFSET) String offset) { + this.receivedMsgs.add(message); + this.receivedTopics.add(receivedTopic); + + return Mono.fromCallable(() -> { + try { + if (message.equals(SHORT_FAIL_MSG)) { + throw new RuntimeException("Woooops... 
in topic " + receivedTopic); + } + else { + firstRetryFailMsgLatch.await(10, TimeUnit.SECONDS); + } + } + catch (InterruptedException e) { + latchWaitFailCount += 1; + throw new RuntimeException(e); + } + finally { + if (receivedTopic.equals(TEST_TOPIC2 + "-retry") && + offset.equals("1")) { + firstRetryFailMsgLatch.countDown(); + } + container.countDownLatch2.countDown(); + } + + return "Task Completed"; + }); + } + + } + + @KafkaListener( + id = "3-topicId", + topics = TEST_TOPIC3, + containerFactory = MAIN_TOPIC_CONTAINER_FACTORY, + errorHandler = "myCustomErrorHandler", + contentTypeConverter = "myCustomMessageConverter", + concurrency = "2") + static class TestTopicListener3 { + + @Autowired + CountDownLatchContainer container; + + protected final List receivedMsgs = new ArrayList<>(); + + private final List receivedTopics = new ArrayList<>(); + + public static final String FAIL_PREFIX = "fail"; + + public static final String SUCCESS_PREFIX = "success"; + + private CountDownLatch successLatchCount = new CountDownLatch(3); + + private CountDownLatch offset0Latch = new CountDownLatch(1); + + protected int latchWaitFailCount = 0; + + @KafkaHandler + public Mono listen( + String message, + @Header(KafkaHeaders.RECEIVED_TOPIC)String receivedTopic, + @Header(KafkaHeaders.OFFSET) String offset) { + this.receivedMsgs.add(message); + this.receivedTopics.add(receivedTopic); + + return Mono.fromCallable(() -> { + try { + if (message.startsWith(FAIL_PREFIX)) { + if (receivedTopic.equals(TEST_TOPIC3)) { + if (offset.equals("0")) { + successLatchCount.await(10, TimeUnit.SECONDS); + offset0Latch.countDown(); + } + if (offset.equals("1")) { + offset0Latch.await(10, TimeUnit.SECONDS); + } + } + throw new RuntimeException("Woooops... in topic " + receivedTopic); + } + else { + successLatchCount.countDown(); + } + } + catch (InterruptedException e) { + latchWaitFailCount += 1; + throw new RuntimeException(e); + } + finally { + container.countDownLatch3.countDown(); + } + return "Task Completed"; + }); + } + + } + + @KafkaListener( + id = "4-TopicId", + topics = TEST_TOPIC4, + containerFactory = MAIN_TOPIC_CONTAINER_FACTORY, + errorHandler = "myCustomErrorHandler", + contentTypeConverter = "myCustomMessageConverter", + concurrency = "2") + static class TestTopicListener4 { + + @Autowired + CountDownLatchContainer container; + + protected final List receivedMsgs = new ArrayList<>(); + + private final List receivedTopics = new ArrayList<>(); + + public static final String LONG_SUCCESS_MSG = "success"; + + public static final String SHORT_FAIL_MSG = "fail"; + + private CountDownLatch failLatchCount = new CountDownLatch(2); + + private CountDownLatch offset0Latch = new CountDownLatch(1); + + protected int latchWaitFailCount = 0; + + @KafkaHandler + public Mono listen(String message, + @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic, + @Header(KafkaHeaders.OFFSET) String offset) { + this.receivedMsgs.add(message); + this.receivedTopics.add(receivedTopic); + + return Mono.fromCallable(() -> { + try { + if (message.equals(SHORT_FAIL_MSG)) { + throw new RuntimeException("Woooops... 
in topic " + receivedTopic); + } + else { + failLatchCount.await(10, TimeUnit.SECONDS); + if (offset.equals("1")) { + offset0Latch.await(10, TimeUnit.SECONDS); + } + } + } + catch (InterruptedException e) { + latchWaitFailCount += 1; + throw new RuntimeException(e); + } + finally { + if (message.equals(SHORT_FAIL_MSG) || + receivedTopic.equals(TEST_TOPIC4)) { + failLatchCount.countDown(); + } + if (offset.equals("0") && + receivedTopic.equals(TEST_TOPIC4)) { + offset0Latch.countDown(); + } + container.countDownLatch4.countDown(); + } + return "Task Completed"; + }); + } + + } + + @KafkaListener( + id = "5-TopicId", + topics = TEST_TOPIC5, + containerFactory = MAIN_TOPIC_CONTAINER_FACTORY, + errorHandler = "myCustomErrorHandler", + contentTypeConverter = "myCustomMessageConverter", + concurrency = "2") + static class TestTopicListener5 { + + @Autowired + CountDownLatchContainer container; + + protected final List receivedMsgs = new ArrayList<>(); + + private final List receivedTopics = new ArrayList<>(); + + public static final String LONG_SUCCESS_MSG = "success"; + + public static final String SHORT_FAIL_MSG = "fail"; + + private CountDownLatch failLatchCount = new CountDownLatch(24 + 49); + + protected int latchWaitFailCount = 0; + + @KafkaHandler + public Mono listen( + String message, + @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic, + @Header(KafkaHeaders.OFFSET) String offset) { + this.receivedMsgs.add(message); + this.receivedTopics.add(receivedTopic); + + return Mono.fromCallable(() -> { + try { + if (message.equals(SHORT_FAIL_MSG)) { + throw new RuntimeException("Woooops... in topic " + receivedTopic); + } + else { + failLatchCount.await(10, TimeUnit.SECONDS); + } + } + catch (InterruptedException e) { + latchWaitFailCount += 1; + throw new RuntimeException(e); + } + finally { + if (message.equals(SHORT_FAIL_MSG)) { + if (receivedTopic.equals(TEST_TOPIC5) && + Integer.valueOf(offset) > 25) { + failLatchCount.countDown(); + } + else { + if (failLatchCount.getCount() > 0) { + failLatchCount.countDown(); + } + } + } + container.countDownLatch5.countDown(); + } + + return "Task Completed"; + }); + } + + } + + @KafkaListener( + id = "6-TopicId", + topics = TEST_TOPIC6, + containerFactory = MAIN_TOPIC_CONTAINER_FACTORY, + errorHandler = "myCustomErrorHandler", + contentTypeConverter = "myCustomMessageConverter", + concurrency = "2") + static class TestTopicListener6 { + + @Autowired + CountDownLatchContainer container; + + protected final List receivedMsgs = new ArrayList<>(); + + private final List receivedTopics = new ArrayList<>(); + + public static final String SUCCESS_PREFIX = "success"; + + public static final String FAIL_PREFIX = "fail"; + + protected CountDownLatch offset1CompletedLatch = new CountDownLatch(1); + + protected CountDownLatch offset2CompletedLatch = new CountDownLatch(1); + + protected CountDownLatch offset3RetryCompletedLatch = new CountDownLatch(3); + + protected CountDownLatch offset4ReceivedLatch = new CountDownLatch(1); + + protected int latchWaitFailCount = 0; + + @KafkaHandler + public Mono listen( + String message, + @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic, + @Header(KafkaHeaders.OFFSET) String offset) { + this.receivedMsgs.add(message); + this.receivedTopics.add(receivedTopic); + + return Mono.fromCallable(() -> { + try { + if (message.startsWith(FAIL_PREFIX)) { + if (offset.equals("0")) { + if (receivedTopic.equals(TEST_TOPIC6)) { + offset1CompletedLatch.await(10, TimeUnit.SECONDS); + } + } + + if (offset.equals("3")) { + 
offset3RetryCompletedLatch.countDown(); + } + + throw new RuntimeException("Woooops... in topic " + receivedTopic + "msg : " + message); + } + else { + if (offset.equals("1")) { + offset3RetryCompletedLatch.await(10, TimeUnit.SECONDS); + offset1CompletedLatch.countDown(); + } + + if (offset.equals("2")) { + offset4ReceivedLatch.await(10, TimeUnit.SECONDS); + offset2CompletedLatch.countDown(); + } + + if (offset.equals("4")) { + offset4ReceivedLatch.countDown(); + offset2CompletedLatch.await(10, TimeUnit.SECONDS); + } + } + } + catch (InterruptedException ex) { + latchWaitFailCount += 1; + throw new RuntimeException(ex); + } + finally { + container.countDownLatch6.countDown(); + } + + return "Task Completed"; + }); + } + } + + static class CountDownLatchContainer { + + static int COUNT0 = 9; + + static int DLT_COUNT0 = 3; + + CountDownLatch countDownLatch0 = new CountDownLatch(COUNT0); + + CountDownLatch dltCountdownLatch0 = new CountDownLatch(DLT_COUNT0); + + static int COUNT1 = 6; + + static int DLT_COUNT1 = 1; + + CountDownLatch countDownLatch1 = new CountDownLatch(COUNT1); + + CountDownLatch dltCountdownLatch1 = new CountDownLatch(DLT_COUNT1); + + static int COUNT2 = 6; + + static int DLT_COUNT2 = 1; + + CountDownLatch countDownLatch2 = new CountDownLatch(COUNT2); + + CountDownLatch dltCountdownLatch2 = new CountDownLatch(DLT_COUNT2); + + static int COUNT3 = 13; + + static int DLT_COUNT3 = 2; + + CountDownLatch countDownLatch3 = new CountDownLatch(COUNT3); + + CountDownLatch dltCountdownLatch3 = new CountDownLatch(DLT_COUNT3); + + static int COUNT4 = 12; + + static int DLT_COUNT4 = 2; + + CountDownLatch countDownLatch4 = new CountDownLatch(COUNT4); + + CountDownLatch dltCountdownLatch4 = new CountDownLatch(DLT_COUNT4); + + static int COUNT5 = 24 + 73; + + static int DLT_COUNT5 = 49; + + CountDownLatch countDownLatch5 = new CountDownLatch(COUNT5); + + CountDownLatch dltCountdownLatch5 = new CountDownLatch(DLT_COUNT5); + + static int COUNT6 = 9; + + static int DLT_COUNT6 = 2; + + CountDownLatch countDownLatch6 = new CountDownLatch(COUNT6); + + CountDownLatch dltCountdownLatch6 = new CountDownLatch(DLT_COUNT6); + + } + + static class MyCustomDltProcessor { + + final List receivedMsg = new ArrayList<>(); + + MyCustomDltProcessor(KafkaTemplate kafkaTemplate, CountDownLatch latch) { + this.kafkaTemplate = kafkaTemplate; + this.latch = latch; + } + + private final KafkaTemplate kafkaTemplate; + + private final CountDownLatch latch; + + public void processDltMessage(String message) { + this.receivedMsg.add(message); + latch.countDown(); + } + } + + @Configuration + static class RetryTopicConfigurations extends RetryTopicConfigurationSupport { + + private static final String DLT_METHOD_NAME = "processDltMessage"; + + static RetryTopicConfiguration createRetryTopicConfiguration( + KafkaTemplate template, + String topicName, + String dltBeanName, + int maxAttempts) { + return RetryTopicConfigurationBuilder + .newInstance() + .fixedBackOff(50) + .maxAttempts(maxAttempts) + .concurrency(1) + .useSingleTopicForSameIntervals() + .includeTopic(topicName) + .doNotRetryOnDltFailure() + .dltHandlerMethod(dltBeanName, DLT_METHOD_NAME) + .create(template); + } + + @Bean + RetryTopicConfiguration testRetryTopic0(KafkaTemplate template) { + return createRetryTopicConfiguration( + template, + TEST_TOPIC0, + "myCustomDltProcessor0", + 3); + } + + @Bean + RetryTopicConfiguration testRetryTopic1(KafkaTemplate template) { + return createRetryTopicConfiguration( + template, + TEST_TOPIC1, + "myCustomDltProcessor1", 
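+				// 5 = maxAttempts (total deliveries including the first attempt, i.e. four retries)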
+ 5); + } + + @Bean + RetryTopicConfiguration testRetryTopic2(KafkaTemplate template) { + return createRetryTopicConfiguration( + template, + TEST_TOPIC2, + "myCustomDltProcessor2", + 5); + } + + @Bean + RetryTopicConfiguration testRetryTopic3(KafkaTemplate template) { + return createRetryTopicConfiguration( + template, + TEST_TOPIC3, + "myCustomDltProcessor3", + 5); + } + + @Bean + RetryTopicConfiguration testRetryTopic4(KafkaTemplate template) { + return createRetryTopicConfiguration( + template, + TEST_TOPIC4, + "myCustomDltProcessor4", + 5); + } + + @Bean + RetryTopicConfiguration testRetryTopic5(KafkaTemplate template) { + return createRetryTopicConfiguration( + template, + TEST_TOPIC5, + "myCustomDltProcessor5", + 3); + } + + @Bean + RetryTopicConfiguration testRetryTopic6(KafkaTemplate template) { + return createRetryTopicConfiguration( + template, + TEST_TOPIC6, + "myCustomDltProcessor6", + 3); + } + + @Bean + KafkaListenerErrorHandler myCustomErrorHandler( + CountDownLatchContainer container) { + return (message, exception) -> { + throw exception; + }; + } + + @Bean + SmartMessageConverter myCustomMessageConverter( + CountDownLatchContainer container) { + return new CompositeMessageConverter(Collections.singletonList(new GenericMessageConverter())) { + + @Override + public Object fromMessage(Message message, Class targetClass, Object conversionHint) { + return super.fromMessage(message, targetClass, conversionHint); + } + }; + } + + @Bean + CountDownLatchContainer latchContainer() { + return new CountDownLatchContainer(); + } + + @Bean + TestTopicListener0 testTopicListener0() { + return new TestTopicListener0(); + } + + @Bean + TestTopicListener1 testTopicListener1() { + return new TestTopicListener1(); + } + + @Bean + TestTopicListener2 testTopicListener2() { + return new TestTopicListener2(); + } + + @Bean + TestTopicListener3 testTopicListener3() { + return new TestTopicListener3(); + } + + @Bean + TestTopicListener4 testTopicListener4() { + return new TestTopicListener4(); + } + + @Bean + TestTopicListener5 testTopicListener5() { + return new TestTopicListener5(); + } + + @Bean + TestTopicListener6 testTopicListener6() { + return new TestTopicListener6(); + } + + @Bean + MyCustomDltProcessor myCustomDltProcessor0( + KafkaTemplate kafkaTemplate, + CountDownLatchContainer latchContainer) { + return new MyCustomDltProcessor(kafkaTemplate, + latchContainer.dltCountdownLatch0); + } + + @Bean + MyCustomDltProcessor myCustomDltProcessor1( + KafkaTemplate kafkaTemplate, + CountDownLatchContainer latchContainer) { + return new MyCustomDltProcessor(kafkaTemplate, + latchContainer.dltCountdownLatch1); + } + + @Bean + MyCustomDltProcessor myCustomDltProcessor2( + KafkaTemplate kafkaTemplate, + CountDownLatchContainer latchContainer) { + return new MyCustomDltProcessor(kafkaTemplate, + latchContainer.dltCountdownLatch2); + } + + @Bean + MyCustomDltProcessor myCustomDltProcessor3( + KafkaTemplate kafkaTemplate, + CountDownLatchContainer latchContainer) { + return new MyCustomDltProcessor(kafkaTemplate, + latchContainer.dltCountdownLatch3); + } + + @Bean + MyCustomDltProcessor myCustomDltProcessor4( + KafkaTemplate kafkaTemplate, + CountDownLatchContainer latchContainer) { + return new MyCustomDltProcessor(kafkaTemplate, + latchContainer.dltCountdownLatch4); + } + + @Bean + MyCustomDltProcessor myCustomDltProcessor5( + KafkaTemplate kafkaTemplate, + CountDownLatchContainer latchContainer) { + return new MyCustomDltProcessor(kafkaTemplate, + latchContainer.dltCountdownLatch5); + } + + @Bean 
+ MyCustomDltProcessor myCustomDltProcessor6( + KafkaTemplate kafkaTemplate, + CountDownLatchContainer latchContainer) { + return new MyCustomDltProcessor(kafkaTemplate, + latchContainer.dltCountdownLatch6); + } + + } + + @Configuration + static class KafkaProducerConfig { + + @Autowired + EmbeddedKafkaBroker broker; + + @Bean + ProducerFactory producerFactory() { + Map props = KafkaTestUtils.producerProps( + this.broker.getBrokersAsString()); + props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); + return new DefaultKafkaProducerFactory<>(props); + } + + @Bean("customKafkaTemplate") + KafkaTemplate kafkaTemplate() { + return new KafkaTemplate<>(producerFactory()); + } + + } + + @EnableKafka + @Configuration + static class KafkaConsumerConfig { + + @Autowired + EmbeddedKafkaBroker broker; + + @Bean + ConsumerFactory consumerFactory() { + Map props = KafkaTestUtils.consumerProps( + this.broker.getBrokersAsString(), + "groupId", + "false"); + return new DefaultKafkaConsumerFactory<>(props); + } + + @Bean + ConcurrentKafkaListenerContainerFactory retryTopicListenerContainerFactory( + ConsumerFactory consumerFactory) { + + ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); + ContainerProperties props = factory.getContainerProperties(); + props.setIdleEventInterval(100L); + props.setPollTimeout(50L); + props.setIdlePartitionEventInterval(100L); + factory.setConsumerFactory(consumerFactory); + factory.setConcurrency(1); + factory.setContainerCustomizer( + container -> container.getContainerProperties().setIdlePartitionEventInterval(100L)); + return factory; + } + + @Bean + ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory( + ConsumerFactory consumerFactory) { + + ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); + factory.setConsumerFactory(consumerFactory); + factory.setConcurrency(1); + factory.setContainerCustomizer(container -> { + if (container.getListenerId().startsWith("manual")) { + container.getContainerProperties().setAckMode(AckMode.MANUAL); + container.getContainerProperties().setAsyncAcks(true); + } + }); + return factory; + } + + @Bean + TaskScheduler sched() { + return new ThreadPoolTaskScheduler(); + } + + } + +} diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/BackOffValuesGeneratorTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/BackOffValuesGeneratorTests.java index 2e3b7abb67..2fd2257653 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/BackOffValuesGeneratorTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/BackOffValuesGeneratorTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2021 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,8 +16,6 @@ package org.springframework.kafka.retrytopic; -import static org.assertj.core.api.Assertions.assertThat; - import java.util.Arrays; import java.util.List; @@ -27,6 +25,8 @@ import org.springframework.retry.backoff.ExponentialBackOffPolicy; import org.springframework.retry.backoff.NoBackOffPolicy; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Tomaz Fernandes * @since 2.7 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/CircularDltHandlerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/CircularDltHandlerTests.java index 4cb6820080..6921b22991 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/CircularDltHandlerTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/CircularDltHandlerTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2021-2022 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,8 +16,6 @@ package org.springframework.kafka.retrytopic; -import static org.mockito.Mockito.mock; - import org.junit.jupiter.api.Test; import org.springframework.context.annotation.AnnotationConfigApplicationContext; @@ -31,6 +29,8 @@ import org.springframework.scheduling.TaskScheduler; import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler; +import static org.mockito.Mockito.mock; + /** * @author Gary Russell * @since 2.8 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DeadLetterPublishingRecovererFactoryTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DeadLetterPublishingRecovererFactoryTests.java index e059f8e091..954b024afb 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DeadLetterPublishingRecovererFactoryTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DeadLetterPublishingRecovererFactoryTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2023 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,20 +16,6 @@ package org.springframework.kafka.retrytopic; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatExceptionOfType; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.then; -import static org.mockito.BDDMockito.willReturn; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; - import java.math.BigInteger; import java.nio.ByteBuffer; import java.time.Clock; @@ -64,6 +50,20 @@ import org.springframework.kafka.support.KafkaHeaders; import org.springframework.kafka.test.condition.LogLevels; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.then; +import static org.mockito.BDDMockito.willReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; + /** * @author Tomaz Fernandes * @author Gary Russell diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DefaultDestinationTopicProcessorTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DefaultDestinationTopicProcessorTests.java index 8efea11c1a..f08fd60faf 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DefaultDestinationTopicProcessorTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DefaultDestinationTopicProcessorTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2023 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,11 +16,6 @@ package org.springframework.kafka.retrytopic; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.BDDMockito.then; -import static org.mockito.Mockito.times; - import java.util.ArrayList; import java.util.List; import java.util.stream.Collectors; @@ -32,6 +27,11 @@ import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.BDDMockito.then; +import static org.mockito.Mockito.times; + /** * @author Tomaz Fernandes * @author Gary Russell diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DefaultDestinationTopicResolverTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DefaultDestinationTopicResolverTests.java index 13e8bd16f9..cd584a3c8e 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DefaultDestinationTopicResolverTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DefaultDestinationTopicResolverTests.java @@ -16,11 +16,6 @@ package org.springframework.kafka.retrytopic; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; -import static org.assertj.core.api.Assertions.assertThatIllegalStateException; -import static org.assertj.core.api.Assertions.assertThatNullPointerException; - import java.time.Clock; import java.time.Instant; import java.util.Arrays; @@ -41,6 +36,11 @@ import org.springframework.kafka.support.converter.ConversionException; import org.springframework.kafka.support.serializer.DeserializationException; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; +import static org.assertj.core.api.Assertions.assertThatIllegalStateException; +import static org.assertj.core.api.Assertions.assertThatNullPointerException; + /** * @author Tomaz Fernandes * @author Yvette Quinby diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DeliveryHeaderTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DeliveryHeaderTests.java index 4d9ba1d794..0c4bac4498 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DeliveryHeaderTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DeliveryHeaderTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022-2023 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,8 +16,6 @@ package org.springframework.kafka.retrytopic;
 
-import static org.assertj.core.api.Assertions.assertThat;
-
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
@@ -29,7 +27,9 @@ import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.Primary;
 import org.springframework.kafka.annotation.EnableKafka;
+import org.springframework.kafka.annotation.KafkaHandler;
 import org.springframework.kafka.annotation.KafkaListener;
 import org.springframework.kafka.annotation.RetryableTopic;
 import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
@@ -51,26 +51,42 @@ import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
 import org.springframework.util.backoff.FixedBackOff;
 
+import static org.assertj.core.api.Assertions.assertThat;
+
 /**
+ * Test delivery headers; since 3.2, class-level {@link RetryableTopic} is supported.
  * @author Gary Russell
+ * @author Wang Zhiyang
+ *
  * @since 2.8.11
  *
  */
 @SpringJUnitConfig
 @DirtiesContext
-@EmbeddedKafka(topics = "dh1")
+@EmbeddedKafka(topics = { "dh1", DeliveryHeaderTests.DH_CLASS_LEVEL_1 })
 public class DeliveryHeaderTests {
 
+    static final String DH_CLASS_LEVEL_1 = "dhClassLevel1";
+
     @Test
-    void deliveryAttempts(@Autowired Config config, @Autowired KafkaTemplate template)
+    void deliveryAttempts(@Autowired Config config, @Autowired KafkaTemplate template,
+            @Autowired RetryTopicClassLevel retryTopicClassLevel)
             throws InterruptedException {
 
         template.send("dh1", "test");
+        template.send(DH_CLASS_LEVEL_1, "test");
         assertThat(config.latch.await(10, TimeUnit.SECONDS)).isTrue();
+        assertThat(retryTopicClassLevel.latchClassLevel.await(10, TimeUnit.SECONDS)).isTrue();
+
         assertThat(config.attempts.toString())
                 .isEqualTo("[[1, 1], [2, 1], [3, 1], [1, 2], [2, 2], [3, 2], [1, 3], [2, 3], [3, 3]]");
         assertThat(config.accessorAttempts.toString())
                 .isEqualTo("[[1, 1], [2, 1], [3, 1], [1, 2], [2, 2], [3, 2], [1, 3], [2, 3], [3, 3]]");
+
+        assertThat(retryTopicClassLevel.attemptsClassLevel.toString())
+                .isEqualTo("[[1, 1], [2, 1], [3, 1], [1, 2], [2, 2], [3, 2], [1, 3], [2, 3], [3, 3]]");
+        assertThat(retryTopicClassLevel.accessorAttemptsClassLevel.toString())
+                .isEqualTo("[[1, 1], [2, 1], [3, 1], [1, 2], [2, 2], [3, 2], [1, 3], [2, 3], [3, 3]]");
     }
 
     @Configuration
@@ -94,7 +110,7 @@ protected void configureBlockingRetries(BlockingRetriesConfigurer blockingRetrie
         @Override
         protected Consumer configureDeadLetterPublishingContainerFactory() {
-            return factory -> factory.neverLogListenerException();
+            return DeadLetterPublishingRecovererFactory::neverLogListenerException;
         }
 
         @RetryableTopic(backoff = @Backoff(maxDelay = 0))
@@ -110,6 +126,11 @@ void listen(String in, @Header(KafkaHeaders.DELIVERY_ATTEMPT) int blockingAttemp
             throw new RuntimeException("test");
         }
 
+        @Bean
+        RetryTopicClassLevel retryTopicClassLevel() {
+            return new RetryTopicClassLevel();
+        }
+
         @Bean
         KafkaTemplate template(ProducerFactory pf) {
             return new KafkaTemplate<>(pf);
@@ -121,6 +142,7 @@ ProducerFactory pf() {
         }
 
         @Bean
+        @Primary
         ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory(
                 ConsumerFactory cf) {
@@ -132,6 +154,7 @@ ConcurrentKafkaListenerContainerFactory kafkaListenerContainerF
         }
 
         @Bean
+        @Primary
         ConsumerFactory cf() {
             return new DefaultKafkaConsumerFactory<>(
                     KafkaTestUtils.consumerProps("dh1", "false", this.broker));
@@ -144,4 +167,28 @@ TaskScheduler sched() {
     }
 
+    @RetryableTopic(backoff = @Backoff(maxDelay = 0))
+    @KafkaListener(id = "dhClassLevel1", topics = DH_CLASS_LEVEL_1)
+    static class RetryTopicClassLevel {
+
+        List<List<Integer>> attemptsClassLevel = new ArrayList<>();
+
+        List<List<Integer>> accessorAttemptsClassLevel = new ArrayList<>();
+
+        CountDownLatch latchClassLevel = new CountDownLatch(9);
+
+        @KafkaHandler
+        void listen(String in, @Header(KafkaHeaders.DELIVERY_ATTEMPT) int blockingAttempts,
+                @Header(name = RetryTopicHeaders.DEFAULT_HEADER_ATTEMPTS, required = false) Integer nonBlockingAttempts,
+                KafkaMessageHeaderAccessor accessor) {
+
+            this.attemptsClassLevel.add(List.of(blockingAttempts, nonBlockingAttempts == null ? 1 : nonBlockingAttempts));
+            this.accessorAttemptsClassLevel.add(List.of(accessor.getBlockingRetryDeliveryAttempt(),
+                    accessor.getNonBlockingRetryDeliveryAttempt()));
+            this.latchClassLevel.countDown();
+            throw new RuntimeException("test");
+        }
+
+    }
+
 }
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DestinationTopicPropertiesFactoryTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DestinationTopicPropertiesFactoryTests.java
index a93322460c..a2d9e5122e 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DestinationTopicPropertiesFactoryTests.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DestinationTopicPropertiesFactoryTests.java
@@ -16,8 +16,6 @@ package org.springframework.kafka.retrytopic;
 
-import static org.assertj.core.api.Assertions.assertThat;
-
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
@@ -38,6 +36,8 @@ import org.springframework.retry.backoff.ExponentialBackOffPolicy;
 import org.springframework.retry.backoff.FixedBackOffPolicy;
 
+import static org.assertj.core.api.Assertions.assertThat;
+
 /**
  * @author Tomaz Fernandes
  * @author Wang Zhiyang
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DestinationTopicTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DestinationTopicTests.java
index d90786900b..e3da1a8b67 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DestinationTopicTests.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DestinationTopicTests.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2018-2023 the original author or authors.
+ * Copyright 2018-2024 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -303,7 +303,9 @@ private BiPredicate getShouldRetryOnDenyList() {
     }
 
     class PropsHolder {
+
         final String topicName;
+
         final DestinationTopic.Properties props;
 
         PropsHolder(String topicName, DestinationTopic.Properties props) {
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DltStartupTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DltStartupTests.java
index b8bfe6fb5d..e1e91a85ea 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DltStartupTests.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/DltStartupTests.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2021-2022 the original author or authors.
+ * Copyright 2021-2024 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,9 +16,6 @@ package org.springframework.kafka.retrytopic;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.Mockito.mock;
-
 import java.util.Map;
 
 import org.apache.kafka.clients.admin.AdminClientConfig;
@@ -28,6 +25,7 @@ import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
 import org.springframework.kafka.annotation.EnableKafka;
+import org.springframework.kafka.annotation.KafkaHandler;
 import org.springframework.kafka.annotation.KafkaListener;
 import org.springframework.kafka.annotation.RetryableTopic;
 import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
@@ -43,11 +41,16 @@ import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler;
 import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
 
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+
 /**
  * Verify that autoStartDltHandler overrides factory autoStartup (for both factory
  * settings).
  *
  * @author Gary Russell
+ * @author Wang Zhiyang
+ *
  * @since 2.8
  *
  */
@@ -57,7 +60,7 @@ public class DltStartupTests {
     @Test
     void dltStartOverridesCorrect(@Autowired KafkaListenerEndpointRegistry registry) {
-        // using RetryTopicConfiguration
+        // using RetryTopicConfiguration with a method-level @KafkaListener
         // factory with autostartup = true
         assertThat(registry.getListenerContainer("shouldStartDlq1").isRunning()).isTrue();
         assertThat(registry.getListenerContainer("shouldStartDlq1-dlt").isRunning()).isTrue();
@@ -69,7 +72,7 @@ void dltStartOverridesCorrect(@Autowired KafkaListenerEndpointRegistry registry)
         assertThat(registry.getListenerContainer("shouldStartDlq4").isRunning()).isFalse();
         assertThat(registry.getListenerContainer("shouldStartDlq4-dlt").isRunning()).isTrue();
 
-        // using @RetryableTopic
+        // using a method-level @RetryableTopic
         // factory with autostartup = true
         assertThat(registry.getListenerContainer("shouldStartDlq5").isRunning()).isTrue();
         assertThat(registry.getListenerContainer("shouldStartDlq5-dlt").isRunning()).isTrue();
@@ -80,6 +83,30 @@ void dltStartOverridesCorrect(@Autowired KafkaListenerEndpointRegistry registry)
         assertThat(registry.getListenerContainer("shouldNotStartDlq7-dlt").isRunning()).isFalse();
         assertThat(registry.getListenerContainer("shouldStartDlq8").isRunning()).isFalse();
         assertThat(registry.getListenerContainer("shouldStartDlq8-dlt").isRunning()).isTrue();
+
+        // using RetryTopicConfiguration with a class-level @KafkaListener
+        // factory with autoStartup = true
+        assertThat(registry.getListenerContainer("shouldStartClassLevelDlq1").isRunning()).isTrue();
+        assertThat(registry.getListenerContainer("shouldStartClassLevelDlq1-dlt").isRunning()).isTrue();
+        assertThat(registry.getListenerContainer("shouldNotStartClassLevelDlq2").isRunning()).isTrue();
+        assertThat(registry.getListenerContainer("shouldNotStartClassLevelDlq2-dlt").isRunning()).isFalse();
+        // factory with autoStartup = false
+        assertThat(registry.getListenerContainer("shouldNotStartClassLevelDlq3").isRunning()).isFalse();
+        assertThat(registry.getListenerContainer("shouldNotStartClassLevelDlq3-dlt").isRunning()).isFalse();
+        assertThat(registry.getListenerContainer("shouldStartClassLevelDlq4").isRunning()).isFalse();
+        assertThat(registry.getListenerContainer("shouldStartClassLevelDlq4-dlt").isRunning()).isTrue();
+
+        // using a class-level @RetryableTopic
+        // factory with autoStartup = true
+
assertThat(registry.getListenerContainer("shouldStartClassLevelDlq5").isRunning()).isTrue(); + assertThat(registry.getListenerContainer("shouldStartClassLevelDlq5-dlt").isRunning()).isTrue(); + assertThat(registry.getListenerContainer("shouldNotStartClassLevelDlq6").isRunning()).isTrue(); + assertThat(registry.getListenerContainer("shouldNotStartClassLevelDlq6-dlt").isRunning()).isFalse(); + // factory with autoStartup = false + assertThat(registry.getListenerContainer("shouldNotStartClassLevelDlq7").isRunning()).isFalse(); + assertThat(registry.getListenerContainer("shouldNotStartClassLevelDlq7-dlt").isRunning()).isFalse(); + assertThat(registry.getListenerContainer("shouldStartClassLevelDlq8").isRunning()).isFalse(); + assertThat(registry.getListenerContainer("shouldStartClassLevelDlq8-dlt").isRunning()).isTrue(); } @Configuration @@ -122,12 +149,53 @@ void shouldNotStartDlq7(String in) { void shoulStartDlq8(String in) { } + @Bean + ShouldStartClassLevelDlq1 shouldStartClassLevelDlq1() { + return new ShouldStartClassLevelDlq1(); + } + + @Bean + ShouldNotStartClassLevelDlq2 shouldNotStartClassLevelDlq2() { + return new ShouldNotStartClassLevelDlq2(); + } + + @Bean + ShouldNotStartClassLevelDlq3 shouldNotStartClassLevelDlq3() { + return new ShouldNotStartClassLevelDlq3(); + } + + @Bean + ShouldStartClassLevelDlq4 shouldStartClassLevelDlq4() { + return new ShouldStartClassLevelDlq4(); + } + + @Bean + ShouldStartClassLevelDlq5 shouldStartClassLevelDlq5() { + return new ShouldStartClassLevelDlq5(); + } + + @Bean + ShouldNotStartClassLevelDlq6 shouldNotStartClassLevelDlq6() { + return new ShouldNotStartClassLevelDlq6(); + } + + @Bean + ShouldNotStartClassLevelDlq7 shouldNotStartClassLevelDlq7() { + return new ShouldNotStartClassLevelDlq7(); + } + + @Bean + ShouldStartClassLevelDlq8 shouldStartClassLevelDlq8() { + return new ShouldStartClassLevelDlq8(); + } + @Bean RetryTopicConfiguration rtc1(KafkaOperations template) { return RetryTopicConfigurationBuilder .newInstance() .maxAttempts(1) .includeTopic("DltStartupTests.1") + .includeTopic("ClassLevel.DltStartupTests.1") .create(template); } @@ -137,6 +205,7 @@ RetryTopicConfiguration rtc2(KafkaOperations template) { .newInstance() .maxAttempts(1) .includeTopic("DltStartupTests.2") + .includeTopic("ClassLevel.DltStartupTests.2") .autoStartDltHandler(false) // override factory for DLT container .create(template); } @@ -147,6 +216,7 @@ RetryTopicConfiguration rtc3(KafkaOperations template) { .newInstance() .maxAttempts(1) .includeTopic("DltStartupTests.3") + .includeTopic("ClassLevel.DltStartupTests.3") .create(template); } @@ -156,6 +226,7 @@ RetryTopicConfiguration rtc4(KafkaOperations template) { .newInstance() .maxAttempts(1) .includeTopic("DltStartupTests.4") + .includeTopic("ClassLevel.DltStartupTests.4") .autoStartDltHandler(true) // override factory for DLT container .create(template); } @@ -200,4 +271,80 @@ TaskScheduler sched() { } + @KafkaListener(id = "shouldStartClassLevelDlq1", topics = "ClassLevel.DltStartupTests.1", containerFactory = "cf1") + static class ShouldStartClassLevelDlq1 { + + @KafkaHandler + void shouldStartClassLevelDlq1(String in) { + } + + } + + @KafkaListener(id = "shouldNotStartClassLevelDlq2", topics = "ClassLevel.DltStartupTests.2", containerFactory = "cf1") + static class ShouldNotStartClassLevelDlq2 { + + @KafkaHandler + void shouldNotStartClassLevelDlq2(String in) { + } + + } + + @KafkaListener(id = "shouldNotStartClassLevelDlq3", topics = "ClassLevel.DltStartupTests.3", containerFactory = "cf2") + static 
class ShouldNotStartClassLevelDlq3 { + + @KafkaHandler + void shouldNotStartClassLevelDlq3(String in) { + } + + } + + @KafkaListener(id = "shouldStartClassLevelDlq4", topics = "ClassLevel.DltStartupTests.4", containerFactory = "cf2") + static class ShouldStartClassLevelDlq4 { + + @KafkaHandler + void shouldStartClassLevelDlq4(String in) { + } + + } + + @KafkaListener(id = "shouldStartClassLevelDlq5", topics = "ClassLevel.DltStartupTests.5", containerFactory = "cf1") + @RetryableTopic(attempts = "1", kafkaTemplate = "template") + static class ShouldStartClassLevelDlq5 { + + @KafkaHandler + void shouldStartClassLevelDlq5(String in) { + } + + } + + @KafkaListener(id = "shouldNotStartClassLevelDlq6", topics = "ClassLevel.DltStartupTests.6", containerFactory = "cf1") + @RetryableTopic(attempts = "1", kafkaTemplate = "template", autoStartDltHandler = "false") + static class ShouldNotStartClassLevelDlq6 { + + @KafkaHandler + void shouldNotStartClassLevelDlq6(String in) { + } + + } + + @KafkaListener(id = "shouldNotStartClassLevelDlq7", topics = "ClassLevel.DltStartupTests.7", containerFactory = "cf2") + @RetryableTopic(attempts = "1", kafkaTemplate = "template") + static class ShouldNotStartClassLevelDlq7 { + + @KafkaHandler + void shouldNotStartClassLevelDlq7(String in) { + } + + } + + @KafkaListener(id = "shouldStartClassLevelDlq8", topics = "ClassLevel.DltStartupTests.8", containerFactory = "cf2") + @RetryableTopic(attempts = "1", kafkaTemplate = "template", autoStartDltHandler = "true") + static class ShouldStartClassLevelDlq8 { + + @KafkaHandler + void shouldStartClassLevelDlq8(String in) { + } + + } + } diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/EndpointCustomizerFactoryTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/EndpointCustomizerFactoryTests.java index 7a8c93039d..f4c83c9fd3 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/EndpointCustomizerFactoryTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/EndpointCustomizerFactoryTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,26 +16,34 @@ package org.springframework.kafka.retrytopic; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.BDDMockito.given; - import java.lang.reflect.Method; import java.util.Arrays; import java.util.List; import java.util.function.Predicate; +import java.util.stream.Stream; -import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; import org.springframework.beans.factory.BeanFactory; import org.springframework.kafka.config.MethodKafkaListenerEndpoint; +import org.springframework.kafka.config.MultiMethodKafkaListenerEndpoint; import org.springframework.kafka.support.EndpointHandlerMethod; +import org.springframework.kafka.support.EndpointHandlerMultiMethod; import org.springframework.kafka.support.TopicPartitionOffset; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.mock; + /** * @author Tomaz Fernandes + * @author Wang Zhiyang + * * @since 2.8.5 */ @ExtendWith(MockitoExtension.class) @@ -44,8 +52,9 @@ class EndpointCustomizerFactoryTests { @Mock private DestinationTopic.Properties properties; - @Mock - private EndpointHandlerMethod beanMethod; + private static final EndpointHandlerMethod beanMethod = mock(EndpointHandlerMethod.class); + + private static final EndpointHandlerMultiMethod beanMultiMethod = mock(EndpointHandlerMultiMethod.class); @Mock private BeanFactory beanFactory; @@ -53,15 +62,24 @@ class EndpointCustomizerFactoryTests { @Mock private RetryTopicNamesProviderFactory retryTopicNamesProviderFactory; - @Mock - private MethodKafkaListenerEndpoint endpoint; + private static final MethodKafkaListenerEndpoint endpoint = mock(MethodKafkaListenerEndpoint.class); - private final String[] topics = {"myTopic1", "myTopic2"}; + private static final MultiMethodKafkaListenerEndpoint multiEndpoint = mock(MultiMethodKafkaListenerEndpoint.class); - private final Method method = EndpointCustomizerFactory.class.getDeclaredMethods()[0]; + private static final String[] topics = {"myTopic1", "myTopic2"}; - @Test - void shouldNotCustomizeEndpointForMainTopicWithTopics() { + private static final Method method = EndpointCustomizerFactory.class.getDeclaredMethods()[0]; + + private static Stream paramsForEndpointCustomizerFactory() { + return Stream.of( + Arguments.of(beanMethod, endpoint), + Arguments.of(beanMultiMethod, multiEndpoint)); + } + + @ParameterizedTest(name = "{index} shouldNotCustomizeEndpointForMainTopicWithTopics beanMethod is {0}, endpoint is {1}") + @MethodSource("paramsForEndpointCustomizerFactory") + void shouldNotCustomizeEndpointForMainTopicWithTopics(EndpointHandlerMethod beanMethod, + MethodKafkaListenerEndpoint endpoint) { given(beanMethod.resolveBean(this.beanFactory)).willReturn(method); given(endpoint.getTopics()).willReturn(Arrays.asList(topics)); @@ -70,8 +88,8 @@ void shouldNotCustomizeEndpointForMainTopicWithTopics() { new SuffixingRetryTopicNamesProviderFactory().createRetryTopicNamesProvider(properties); given(retryTopicNamesProviderFactory.createRetryTopicNamesProvider(properties)).willReturn(provider); - EndpointCustomizer endpointCustomizer = new EndpointCustomizerFactory(properties, beanMethod, - beanFactory, 
retryTopicNamesProviderFactory).createEndpointCustomizer();
+        EndpointCustomizer<MethodKafkaListenerEndpoint<?, ?>> endpointCustomizer = new EndpointCustomizerFactory(
+                properties, beanMethod, beanFactory, retryTopicNamesProviderFactory).createEndpointCustomizer();
 
         List holders = (List) endpointCustomizer.customizeEndpointAndCollectTopics(endpoint);
 
@@ -83,8 +101,15 @@ void shouldNotCustomizeEndpointForMainTopicWithTopics() {
 
     }
 
-    @Test
-    void shouldNotCustomizeEndpointForMainTopicWithTPO() {
+    private static Stream paramsCustomizeEndpointForMainTopic() {
+        return Stream.of(
+                Arguments.of(beanMethod, false),
+                Arguments.of(beanMultiMethod, true));
+    }
+
+    @ParameterizedTest(name = "{index} shouldNotCustomizeEndpointForMainTopicWithTPO beanMethod is {0}, is multi {1}")
+    @MethodSource("paramsCustomizeEndpointForMainTopic")
+    void shouldNotCustomizeEndpointForMainTopicWithTPO(EndpointHandlerMethod beanMethod, boolean isMulti) {
 
         given(beanMethod.resolveBean(this.beanFactory)).willReturn(method);
         given(properties.isMainEndpoint()).willReturn(true);
@@ -94,27 +119,20 @@ void shouldNotCustomizeEndpointForMainTopicWithTPO() {
         given(retryTopicNamesProviderFactory.createRetryTopicNamesProvider(properties)).willReturn(provider);
 
         String testString = "testString";
-        MethodKafkaListenerEndpoint endpointTPO = new MethodKafkaListenerEndpoint<>();
+        MethodKafkaListenerEndpoint endpointTPO = getEndpoint(isMulti, testString);
         endpointTPO.setTopicPartitions(new TopicPartitionOffset(topics[0], 0, 0L),
                 new TopicPartitionOffset(topics[1], 1, 1L));
-        endpointTPO.setMethod(this.method);
-        endpointTPO.setId(testString);
-        endpointTPO.setClientIdPrefix(testString);
-        endpointTPO.setGroup(testString);
 
-        EndpointCustomizer endpointCustomizer = new EndpointCustomizerFactory(properties, beanMethod,
-                beanFactory, retryTopicNamesProviderFactory).createEndpointCustomizer();
+        EndpointCustomizer<MethodKafkaListenerEndpoint<?, ?>> endpointCustomizer = new EndpointCustomizerFactory(
+                properties, beanMethod, beanFactory, retryTopicNamesProviderFactory).createEndpointCustomizer();
 
         List holders = (List) endpointCustomizer.customizeEndpointAndCollectTopics(endpointTPO);
 
-        assertThat(holders).hasSize(2).element(0)
-                .matches(assertMainTopic(0));
-        assertThat(holders).element(1)
-                .matches(assertMainTopic(1));
+        assertThat(holders).hasSize(2).element(0).matches(assertMainTopic(0));
+        assertThat(holders).element(1).matches(assertMainTopic(1));
 
-        assertThat(endpointTPO.getTopics())
-                .isEmpty();
+        assertThat(endpointTPO.getTopics()).isEmpty();
 
         TopicPartitionOffset[] topicPartitionsToAssign = endpointTPO.getTopicPartitionsToAssign();
         assertThat(topicPartitionsToAssign).hasSize(2);
@@ -125,29 +143,13 @@ void shouldNotCustomizeEndpointForMainTopicWithTPO() {
 
     }
 
-    private Predicate assertMainTopic(int index) {
-        return holder -> holder.getCustomizedTopic().equals(topics[index])
-                && holder.getMainTopic().equals(topics[index]);
-    }
+    @ParameterizedTest(name = "{index} shouldCustomizeEndpointForRetryTopicWithTopic beanMethod is {0}, endpoint is {1}")
+    @MethodSource("paramsCustomizeEndpointForMainTopic")
+    void shouldCustomizeEndpointForRetryTopicWithTopic(EndpointHandlerMethod beanMethod, boolean isMulti) {
 
-    @Test
-    void shouldCustomizeEndpointForRetryTopic() {
-
-        MethodKafkaListenerEndpoint endpoint = new MethodKafkaListenerEndpoint<>();
         String testString = "testString";
-        endpoint.setTopics(this.topics);
-        endpoint.setMethod(this.method);
-        endpoint.setId(testString);
-        endpoint.setClientIdPrefix(testString);
-        endpoint.setGroup(testString);
-
-        MethodKafkaListenerEndpoint endpointTPO = new MethodKafkaListenerEndpoint<>();
-        endpointTPO.setTopicPartitions(new TopicPartitionOffset(topics[0], 0, 0L),
-                new TopicPartitionOffset(topics[1], 1, 1L));
-        endpointTPO.setMethod(this.method);
-        endpointTPO.setId(testString);
-        endpointTPO.setClientIdPrefix(testString);
-        endpointTPO.setGroup(testString);
+        MethodKafkaListenerEndpoint endpoint = getEndpoint(isMulti, testString);
+        endpoint.setTopics(topics);
 
         String suffix = "-retry";
         given(beanMethod.resolveBean(this.beanFactory)).willReturn(method);
@@ -159,8 +161,8 @@ void shouldCustomizeEndpointForRetryTopic() {
                 new SuffixingRetryTopicNamesProviderFactory().createRetryTopicNamesProvider(properties);
         given(retryTopicNamesProviderFactory.createRetryTopicNamesProvider(properties)).willReturn(provider);
 
-        EndpointCustomizer endpointCustomizer = new EndpointCustomizerFactory(properties, beanMethod,
-                beanFactory, retryTopicNamesProviderFactory).createEndpointCustomizer();
+        EndpointCustomizer<MethodKafkaListenerEndpoint<?, ?>> endpointCustomizer = new EndpointCustomizerFactory(
+                properties, beanMethod, beanFactory, retryTopicNamesProviderFactory).createEndpointCustomizer();
 
         List holders = (List) endpointCustomizer.customizeEndpointAndCollectTopics(endpoint);
 
@@ -175,17 +177,16 @@ void shouldCustomizeEndpointForRetryTopic() {
                 && holder.getCustomizedTopic().equals(topic2WithSuffix));
 
         String testStringSuffix = testString + suffix;
-
-        assertThat(endpoint.getTopics())
-                .contains(topic1WithSuffix, topic2WithSuffix);
-        assertThat(endpoint.getId())
-                .isEqualTo(testStringSuffix);
-        assertThat(endpoint.getClientIdPrefix())
-                .isEqualTo(testStringSuffix);
-        assertThat(endpoint.getGroup())
-                .isEqualTo(testStringSuffix);
+        assertThat(endpoint.getTopics()).contains(topic1WithSuffix, topic2WithSuffix);
+        assertThat(endpoint.getId()).isEqualTo(testStringSuffix);
+        assertThat(endpoint.getClientIdPrefix()).isEqualTo(testStringSuffix);
+        assertThat(endpoint.getGroup()).isEqualTo(testStringSuffix);
         assertThat(endpoint.getTopicPartitionsToAssign()).isEmpty();
 
+        MethodKafkaListenerEndpoint endpointTPO = getEndpoint(isMulti, testString);
+        endpointTPO.setTopicPartitions(new TopicPartitionOffset(topics[0], 0, 0L),
+                new TopicPartitionOffset(topics[1], 1, 1L));
+
         List holdersTPO = (List) endpointCustomizer.customizeEndpointAndCollectTopics(endpointTPO);
 
@@ -196,9 +197,7 @@ void shouldCustomizeEndpointForRetryTopic() {
                 .matches(holder -> holder.getMainTopic().equals(topics[1])
                         && holder.getCustomizedTopic().equals(topic2WithSuffix));
 
-        assertThat(endpointTPO.getTopics())
-                .isEmpty();
-
+        assertThat(endpointTPO.getTopics()).isEmpty();
         TopicPartitionOffset[] topicPartitionsToAssign = endpointTPO.getTopicPartitionsToAssign();
         assertThat(topicPartitionsToAssign).hasSize(2);
         assertThat(equalsTopicPartitionOffset(topicPartitionsToAssign[0],
@@ -206,12 +205,29 @@ void shouldCustomizeEndpointForRetryTopic() {
         assertThat(equalsTopicPartitionOffset(topicPartitionsToAssign[1],
                 new TopicPartitionOffset(topic2WithSuffix, 1, (Long) null))).isTrue();
 
-        assertThat(endpointTPO.getId())
-                .isEqualTo(testStringSuffix);
-        assertThat(endpointTPO.getClientIdPrefix())
-                .isEqualTo(testStringSuffix);
-        assertThat(endpointTPO.getGroup())
-                .isEqualTo(testStringSuffix);
+        assertThat(endpointTPO.getId()).isEqualTo(testStringSuffix);
+        assertThat(endpointTPO.getClientIdPrefix()).isEqualTo(testStringSuffix);
+        assertThat(endpointTPO.getGroup()).isEqualTo(testStringSuffix);
+    }
+
+    private MethodKafkaListenerEndpoint getEndpoint(boolean isMulti, String testString) {
+        MethodKafkaListenerEndpoint methodEndpoint;
+        if
(isMulti) { + methodEndpoint = new MultiMethodKafkaListenerEndpoint<>(List.of(method), method, null); + } + else { + methodEndpoint = new MethodKafkaListenerEndpoint<>(); + methodEndpoint.setMethod(method); + } + methodEndpoint.setId(testString); + methodEndpoint.setClientIdPrefix(testString); + methodEndpoint.setGroup(testString); + return methodEndpoint; + } + + private Predicate assertMainTopic(int index) { + return holder -> holder.getCustomizedTopic().equals(topics[index]) + && holder.getMainTopic().equals(topics[index]); } private boolean equalsTopicPartitionOffset(TopicPartitionOffset tpo1, TopicPartitionOffset tpo2) { diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/ExistingRetryTopicClassLevelIntegrationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/ExistingRetryTopicClassLevelIntegrationTests.java new file mode 100644 index 0000000000..fd37a1232d --- /dev/null +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/ExistingRetryTopicClassLevelIntegrationTests.java @@ -0,0 +1,370 @@ +/* + * Copyright 2024 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.kafka.retrytopic; + +import java.time.Clock; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.stream.IntStream; + +import org.apache.kafka.clients.admin.AdminClientConfig; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.common.serialization.StringDeserializer; +import org.apache.kafka.common.serialization.StringSerializer; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.core.task.TaskExecutor; +import org.springframework.kafka.annotation.EnableKafka; +import org.springframework.kafka.annotation.KafkaHandler; +import org.springframework.kafka.annotation.KafkaListener; +import org.springframework.kafka.annotation.RetryableTopic; +import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; +import org.springframework.kafka.core.ConsumerFactory; +import org.springframework.kafka.core.DefaultKafkaConsumerFactory; +import org.springframework.kafka.core.DefaultKafkaProducerFactory; +import org.springframework.kafka.core.KafkaAdmin; +import org.springframework.kafka.core.KafkaTemplate; +import org.springframework.kafka.core.ProducerFactory; +import org.springframework.kafka.listener.ContainerProperties; +import org.springframework.kafka.support.KafkaHeaders; +import org.springframework.kafka.test.EmbeddedKafkaBroker; +import org.springframework.kafka.test.context.EmbeddedKafka; +import org.springframework.kafka.test.utils.KafkaTestUtils; +import 
org.springframework.messaging.handler.annotation.Header; +import org.springframework.retry.annotation.Backoff; +import org.springframework.scheduling.TaskScheduler; +import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor; +import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler; +import org.springframework.test.annotation.DirtiesContext; +import org.springframework.test.context.TestPropertySource; +import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + + +/** + * Tests for ... + * + * @author Wang Zhiyang + * @author Sanghyeok An + * + * @since 3.2 + */ +@SpringJUnitConfig +@DirtiesContext +@EmbeddedKafka(topics = {ExistingRetryTopicClassLevelIntegrationTests.MAIN_TOPIC_WITH_NO_PARTITION_INFO, + ExistingRetryTopicClassLevelIntegrationTests.RETRY_TOPIC_WITH_NO_PARTITION_INFO, + ExistingRetryTopicClassLevelIntegrationTests.MAIN_TOPIC_WITH_PARTITION_INFO, + ExistingRetryTopicClassLevelIntegrationTests.RETRY_TOPIC_WITH_PARTITION_INFO}, partitions = 4) +@TestPropertySource(properties = "two.attempts=2") +class ExistingRetryTopicClassLevelIntegrationTests { + + public final static String MAIN_TOPIC_WITH_NO_PARTITION_INFO = "main-topic-1"; + + public final static String RETRY_TOPIC_WITH_NO_PARTITION_INFO = "main-topic-1-retry-1"; + + public final static String MAIN_TOPIC_WITH_PARTITION_INFO = "main-topic-2"; + + public final static String RETRY_TOPIC_WITH_PARTITION_INFO = "main-topic-2-retry-1"; + + private final static String MAIN_TOPIC_CONTAINER_FACTORY = "kafkaListenerContainerFactory"; + + @Autowired + private KafkaTemplate kafkaTemplate; + + @Autowired + CountByPartitionContainer countByPartitionContainerWithoutPartition; + + @Autowired + CountByPartitionContainer countByPartitionContainerWithPartition; + + @Autowired + private CountDownLatchContainer latchContainer; + + @Test + @DisplayName("When a @RetryableTopic listener class, with autoCreateTopic=false and NO PARTITION info called, " + + "should send messages to be retried across partitions for a retry topic") + void whenNoPartitionInfoProvided_shouldRetryMainTopicCoveringAllPartitionOfRetryTopic() { + + send3MessagesToPartitionWithKey(0, "foo", MAIN_TOPIC_WITH_NO_PARTITION_INFO, this.countByPartitionContainerWithoutPartition); + send3MessagesToPartitionWithKey(1, "bar", MAIN_TOPIC_WITH_NO_PARTITION_INFO, this.countByPartitionContainerWithoutPartition); + send3MessagesToPartitionWithKey(2, "buzz", MAIN_TOPIC_WITH_NO_PARTITION_INFO, this.countByPartitionContainerWithoutPartition); + send3MessagesToPartitionWithKey(3, "fizz", MAIN_TOPIC_WITH_NO_PARTITION_INFO, this.countByPartitionContainerWithoutPartition); + + assertThat(awaitLatch(latchContainer.countDownLatch1)).isTrue(); + assertThat(countByPartitionContainerWithoutPartition.mainTopicMessageCountByPartition) + .isEqualTo(countByPartitionContainerWithoutPartition.retryTopicMessageCountByPartition); + } + + @Test + @DisplayName("When a @RetryableTopic listener class, with autoCreateTopic=false and WITH PARTITION info called, " + + "should send messages to be retried across partitions for a retry topic") + void whenPartitionInfoProvided_shouldRetryMainTopicCoveringAllPartitionOfRetryTopic() { + + send3MessagesToPartitionWithKey(0, "foo", MAIN_TOPIC_WITH_PARTITION_INFO, this.countByPartitionContainerWithPartition); + send3MessagesToPartitionWithKey(1, "bar", MAIN_TOPIC_WITH_PARTITION_INFO, 
this.countByPartitionContainerWithPartition); + send3MessagesToPartitionWithKey(2, "buzz", MAIN_TOPIC_WITH_PARTITION_INFO, this.countByPartitionContainerWithPartition); + send3MessagesToPartitionWithKey(3, "fizz", MAIN_TOPIC_WITH_PARTITION_INFO, this.countByPartitionContainerWithPartition); + + assertThat(awaitLatch(latchContainer.countDownLatch2)).isTrue(); + assertThat(countByPartitionContainerWithPartition.mainTopicMessageCountByPartition) + .isEqualTo(countByPartitionContainerWithPartition.retryTopicMessageCountByPartition); + } + + private void send3MessagesToPartitionWithKey(int partition, String messageKey, String mainTopic, CountByPartitionContainer countByPartitionContainer) { + IntStream.range(0, 3).forEach(messageNumber -> { + String data = "Test-partition-" + partition + "-messages-" + messageNumber; + kafkaTemplate.send(mainTopic, partition, messageKey, data); + countByPartitionContainer.mainTopicMessageCountByPartition.merge(String.valueOf(partition), 1, Integer::sum); + }); + } + + private boolean awaitLatch(CountDownLatch latch) { + try { + return latch.await(60, TimeUnit.SECONDS); + } + catch (Exception e) { + fail(e.getMessage()); + throw new RuntimeException(e); + } + } + + @RetryableTopic(autoCreateTopics = "false", dltStrategy = DltStrategy.NO_DLT, + attempts = "${two.attempts}", backoff = @Backoff(0), kafkaTemplate = "kafkaTemplate") + @KafkaListener(id = "firstTopicId", topics = MAIN_TOPIC_WITH_NO_PARTITION_INFO, containerFactory = MAIN_TOPIC_CONTAINER_FACTORY) + static class MainTopicListenerWithoutPartition { + + @Autowired + CountDownLatchContainer container; + + @Autowired + CountByPartitionContainer countByPartitionContainerWithoutPartition; + + @KafkaHandler + public void listenFirst(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic, + @Header(KafkaHeaders.ORIGINAL_PARTITION) String originalPartition, + @Header(KafkaHeaders.RECEIVED_PARTITION) String receivedPartition) { + + if (receivedTopic.contains("-retry")) { + countByPartitionContainerWithoutPartition.retryTopicMessageCountByPartition.merge(receivedPartition, 1, Integer::sum); + container.countDownLatch1.countDown(); + } + + throw new RuntimeException("Woooops... in topic " + receivedTopic); + } + + } + + @RetryableTopic(autoCreateTopics = "false", numPartitions = "4", dltStrategy = DltStrategy.NO_DLT, + attempts = "${two.attempts}", backoff = @Backoff(0), kafkaTemplate = "kafkaTemplate") + @KafkaListener(id = "secondTopicId", topics = MAIN_TOPIC_WITH_PARTITION_INFO, containerFactory = MAIN_TOPIC_CONTAINER_FACTORY) + static class MainTopicListenerWithPartition { + + @Autowired + CountDownLatchContainer container; + + @Autowired + CountByPartitionContainer countByPartitionContainerWithPartition; + + @KafkaHandler + public void listenSecond(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic, + @Header(KafkaHeaders.ORIGINAL_PARTITION) String originalPartition, + @Header(KafkaHeaders.RECEIVED_PARTITION) String receivedPartition) { + + if (receivedTopic.contains("-retry")) { + countByPartitionContainerWithPartition.retryTopicMessageCountByPartition.merge(receivedPartition, 1, Integer::sum); + container.countDownLatch2.countDown(); + } + + throw new RuntimeException("Woooops... 
in topic " + receivedTopic); + } + } + + static class CountDownLatchContainer { + + CountDownLatch countDownLatch1 = new CountDownLatch(12); + + CountDownLatch countDownLatch2 = new CountDownLatch(12); + + } + + static class CountByPartitionContainer { + + Map mainTopicMessageCountByPartition = new HashMap<>(); + + Map retryTopicMessageCountByPartition = new HashMap<>(); + + } + + @Configuration + static class RetryTopicConfigurations { + + @Bean + MainTopicListenerWithPartition mainTopicListenerWithPartition() { + return new MainTopicListenerWithPartition(); + } + + @Bean + MainTopicListenerWithoutPartition mainTopicListenerWithoutPartition() { + return new MainTopicListenerWithoutPartition(); + } + + @Bean + CountDownLatchContainer latchContainer() { + return new CountDownLatchContainer(); + } + + @Bean + CountByPartitionContainer countByPartitionContainerWithoutPartition() { + return new CountByPartitionContainer(); + } + + @Bean + CountByPartitionContainer countByPartitionContainerWithPartition() { + return new CountByPartitionContainer(); + } + } + + @Configuration + static class RuntimeConfig { + + @Bean(name = "internalBackOffClock") + Clock clock() { + return Clock.systemUTC(); + } + + @Bean + TaskExecutor taskExecutor() { + return new ThreadPoolTaskExecutor(); + } + + @Bean(destroyMethod = "destroy") + TaskExecutorManager taskExecutorManager(ThreadPoolTaskExecutor taskExecutor) { + return new TaskExecutorManager(taskExecutor); + } + } + + static class TaskExecutorManager { + private final ThreadPoolTaskExecutor taskExecutor; + + TaskExecutorManager(ThreadPoolTaskExecutor taskExecutor) { + this.taskExecutor = taskExecutor; + } + + void destroy() { + this.taskExecutor.shutdown(); + } + } + + @Configuration + static class KafkaProducerConfig { + + @Autowired + EmbeddedKafkaBroker broker; + + @Bean + ProducerFactory producerFactory() { + Map configProps = KafkaTestUtils.producerProps( + this.broker.getBrokersAsString()); + configProps.put( + ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, + StringSerializer.class); + configProps.put( + ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, + StringSerializer.class); + return new DefaultKafkaProducerFactory<>(configProps); + } + + @Bean + KafkaTemplate kafkaTemplate() { + return new KafkaTemplate<>(producerFactory()); + } + } + + @EnableKafka + @Configuration + static class KafkaConsumerConfig extends RetryTopicConfigurationSupport { + + @Autowired + EmbeddedKafkaBroker broker; + + @Bean + KafkaAdmin kafkaAdmin() { + Map configs = new HashMap<>(); + configs.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, this.broker.getBrokersAsString()); + return new KafkaAdmin(configs); + } + + @Bean + ConsumerFactory consumerFactory() { + Map props = KafkaTestUtils.consumerProps( + this.broker.getBrokersAsString(), "groupId"); + props.put( + ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, + StringDeserializer.class); + props.put( + ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, + StringDeserializer.class); + props.put( + ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, false); + props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); + + return new DefaultKafkaConsumerFactory<>(props); + } + + @Bean + ConcurrentKafkaListenerContainerFactory retryTopicListenerContainerFactory( + ConsumerFactory consumerFactory) { + + ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); + ContainerProperties props = factory.getContainerProperties(); + props.setIdleEventInterval(100L); + props.setPollTimeout(50L); + 
props.setIdlePartitionEventInterval(100L); + factory.setConsumerFactory(consumerFactory); + factory.setConcurrency(1); + factory.setContainerCustomizer( + container -> container.getContainerProperties().setIdlePartitionEventInterval(100L)); + return factory; + } + + @Bean + ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory( + ConsumerFactory consumerFactory) { + + ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); + factory.setConsumerFactory(consumerFactory); + factory.setConcurrency(1); + return factory; + } + + @Bean + TaskScheduler sched() { + return new ThreadPoolTaskScheduler(); + } + + } + +} diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/ExistingRetryTopicIntegrationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/ExistingRetryTopicIntegrationTests.java index 327ab60f71..12d0764fea 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/ExistingRetryTopicIntegrationTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/ExistingRetryTopicIntegrationTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,9 +16,6 @@ package org.springframework.kafka.retrytopic; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - import java.time.Clock; import java.util.HashMap; import java.util.Map; @@ -54,6 +51,7 @@ import org.springframework.kafka.support.KafkaHeaders; import org.springframework.kafka.test.EmbeddedKafkaBroker; import org.springframework.kafka.test.context.EmbeddedKafka; +import org.springframework.kafka.test.utils.KafkaTestUtils; import org.springframework.messaging.handler.annotation.Header; import org.springframework.retry.annotation.Backoff; import org.springframework.scheduling.TaskScheduler; @@ -63,10 +61,14 @@ import org.springframework.test.context.TestPropertySource; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + /** * Tests for https://github.com/spring-projects/spring-kafka/issues/1828 * @author Deepesh Verma + * @author Sanghyeok An * @since 2.7 */ @SpringJUnitConfig @@ -200,13 +202,17 @@ public void listenSecond(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) St static class CountDownLatchContainer { CountDownLatch countDownLatch1 = new CountDownLatch(40); + CountDownLatch countDownLatch2 = new CountDownLatch(40); + } static class CountByPartitionContainer { Map mainTopicMessageCountByPartition = new HashMap<>(); + Map retryTopicMessageCountByPartition = new HashMap<>(); + } @Configuration @@ -272,9 +278,7 @@ public static class KafkaProducerConfig { @Bean public ProducerFactory producerFactory() { - Map configProps = new HashMap<>(); - configProps.put( - ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, + Map configProps = KafkaTestUtils.producerProps( this.broker.getBrokersAsString()); configProps.put( ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, @@ -307,13 +311,8 @@ public KafkaAdmin kafkaAdmin() { @Bean public ConsumerFactory consumerFactory() { - Map props = new HashMap<>(); - props.put( - ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, - this.broker.getBrokersAsString()); - props.put( 
- ConsumerConfig.GROUP_ID_CONFIG, - "groupId"); + Map props = KafkaTestUtils.consumerProps( + this.broker.getBrokersAsString(), "groupId"); props.put( ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/ListenerContainerFactoryConfigurerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/ListenerContainerFactoryConfigurerTests.java index 672d0b64ef..419f2fe7ab 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/ListenerContainerFactoryConfigurerTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/ListenerContainerFactoryConfigurerTests.java @@ -16,17 +16,6 @@ package org.springframework.kafka.retrytopic; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.then; -import static org.mockito.BDDMockito.willReturn; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; - import java.math.BigInteger; import java.time.Clock; import java.time.Instant; @@ -65,6 +54,17 @@ import org.springframework.util.backoff.BackOffExecution; import org.springframework.util.backoff.FixedBackOff; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.then; +import static org.mockito.BDDMockito.willReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; + /** * @author Tomaz Fernandes * @author Gary Russell @@ -139,7 +139,6 @@ class ListenerContainerFactoryConfigurerTests { @Mock private KafkaListenerEndpoint endpoint; - @Test void shouldSetupErrorHandling() { diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/ListenerContainerFactoryResolverTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/ListenerContainerFactoryResolverTests.java index 8179ac2db8..b7a1633636 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/ListenerContainerFactoryResolverTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/ListenerContainerFactoryResolverTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2022 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
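The first hunks above migrate ExistingRetryTopicIntegrationTests from hand-built HashMap configs to the KafkaTestUtils helpers, which pre-populate bootstrap.servers, the group id and common test defaults. A minimal sketch of the resulting pattern, assuming an autowired EmbeddedKafkaBroker named broker (the group id "groupId" and the String (de)serializer overrides come from the test itself):

// Consumer side: consumerProps replaces the manual BOOTSTRAP_SERVERS/GROUP_ID puts.
Map<String, Object> props = KafkaTestUtils.consumerProps(broker.getBrokersAsString(), "groupId");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
ConsumerFactory<String, String> consumerFactory = new DefaultKafkaConsumerFactory<>(props);

// Producer side: producerProps replaces the manual BOOTSTRAP_SERVERS put.
Map<String, Object> configProps = KafkaTestUtils.producerProps(broker.getBrokersAsString());
configProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
configProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
ProducerFactory<String, String> producerFactory = new DefaultKafkaProducerFactory<>(configProps);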
@@ -16,12 +16,6 @@ package org.springframework.kafka.retrytopic; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatExceptionOfType; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.then; -import static org.mockito.Mockito.times; - import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.Mock; @@ -30,6 +24,12 @@ import org.springframework.beans.factory.BeanFactory; import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.then; +import static org.mockito.Mockito.times; + /** * @author Tomaz Fernandes * @since 2.7 @@ -56,7 +56,9 @@ class ListenerContainerFactoryResolverTests { private ConcurrentKafkaListenerContainerFactory factoryFromDefaultBeanName; private final static String factoryName = "testListenerContainerFactory"; + private final static String otherFactoryName = "otherTestListenerContainerFactory"; + private final static String defaultFactoryBeanName = "defaultTestListenerContainerFactory"; @Test diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/PartitionResolverTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/PartitionResolverTests.java index 4aa43135cf..94c752fb01 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/PartitionResolverTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/PartitionResolverTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,12 +16,6 @@ package org.springframework.kafka.retrytopic; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; - import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; @@ -52,6 +46,12 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + /** * @author Gary Russell * @since 2.9.2 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicClassLevelExceptionRoutingIntegrationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicClassLevelExceptionRoutingIntegrationTests.java new file mode 100644 index 0000000000..ddc40f494b --- /dev/null +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicClassLevelExceptionRoutingIntegrationTests.java @@ -0,0 +1,497 @@ +/* + * Copyright 2024 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
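The ListenerContainerFactoryConfigurerTests, ListenerContainerFactoryResolverTests and PartitionResolverTests hunks around here only bump copyright years and move static imports below the regular imports; no behavior changes. The next new file, RetryTopicClassLevelExceptionRoutingIntegrationTests, pairs non-blocking retry topics with blocking retries configured through RetryTopicConfigurationSupport. The override it relies on, extracted as a standalone sketch (the exception types are the test's own fixtures):

@Configuration
static class ExampleRoutingSupport extends RetryTopicConfigurationSupport {

    @Override
    protected void configureBlockingRetries(BlockingRetriesConfigurer blockingRetries) {
        // These exceptions are retried in-memory first: 3 blocking retries, 50ms apart,
        // before (or instead of) being handed to the retry-topic chain.
        blockingRetries
                .retryOn(ShouldRetryOnlyBlockingException.class, ShouldRetryViaBothException.class)
                .backOff(new FixedBackOff(50, 3));
    }

    @Override
    protected void manageNonBlockingFatalExceptions(List<Class<? extends Throwable>> fatal) {
        // Fatal for the non-blocking path: such failures skip the retry topics
        // entirely and go straight to the DLT.
        fatal.add(ShouldSkipBothRetriesException.class);
    }
}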
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.kafka.retrytopic; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.kafka.clients.admin.AdminClientConfig; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.common.serialization.StringDeserializer; +import org.apache.kafka.common.serialization.StringSerializer; +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.kafka.annotation.DltHandler; +import org.springframework.kafka.annotation.EnableKafka; +import org.springframework.kafka.annotation.KafkaHandler; +import org.springframework.kafka.annotation.KafkaListener; +import org.springframework.kafka.annotation.RetryableTopic; +import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; +import org.springframework.kafka.core.ConsumerFactory; +import org.springframework.kafka.core.DefaultKafkaConsumerFactory; +import org.springframework.kafka.core.DefaultKafkaProducerFactory; +import org.springframework.kafka.core.KafkaAdmin; +import org.springframework.kafka.core.KafkaTemplate; +import org.springframework.kafka.core.ProducerFactory; +import org.springframework.kafka.listener.ContainerProperties; +import org.springframework.kafka.support.KafkaHeaders; +import org.springframework.kafka.support.converter.ConversionException; +import org.springframework.kafka.test.EmbeddedKafkaBroker; +import org.springframework.kafka.test.context.EmbeddedKafka; +import org.springframework.kafka.test.utils.KafkaTestUtils; +import org.springframework.messaging.handler.annotation.Header; +import org.springframework.retry.annotation.Backoff; +import org.springframework.scheduling.TaskScheduler; +import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler; +import org.springframework.test.annotation.DirtiesContext; +import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import org.springframework.util.backoff.FixedBackOff; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + + +/** + * Test class level non-blocking retries. 
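The test sends one message per scenario and asserts latch counts that encode the expected interplay: blockingAndTopicsLatch, for instance, waits for 12 invocations, consistent with 3 topic-level deliveries times 4 blocking attempts each under FixedBackOff(50, 3), while the fatal-exception latches expect exactly one invocation before the DLT. The listeners use the class-level annotation placement this test exists to cover; reduced to a sketch (topic name and exception are illustrative):

@RetryableTopic(backoff = @Backoff(50), kafkaTemplate = "kafkaTemplate")
@KafkaListener(topics = "example-topic") // annotations on the class, not the method
static class ExampleClassLevelListener {

    @KafkaHandler
    void listen(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) {
        // every delivery fails, driving the record through the retry chain
        throw new IllegalStateException("retry: " + receivedTopic);
    }

    @DltHandler
    void dlt(Object message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) {
        // terminal handling once retries are exhausted
    }
}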
+ * + * @author Wang Zhiyang + * @author Sanghyeok An + * + * @since 3.2 + */ +@SpringJUnitConfig +@DirtiesContext +@EmbeddedKafka +class RetryTopicClassLevelExceptionRoutingIntegrationTests { + + final static String BLOCKING_AND_TOPIC_RETRY = "blocking-and-topic-retry"; + + final static String ONLY_RETRY_VIA_BLOCKING = "only-retry-blocking-topic"; + + final static String ONLY_RETRY_VIA_TOPIC = "only-retry-topic"; + + final static String USER_FATAL_EXCEPTION_TOPIC = "user-fatal-topic"; + + final static String FRAMEWORK_FATAL_EXCEPTION_TOPIC = "framework-fatal-topic"; + + @Autowired + private KafkaTemplate kafkaTemplate; + + @Autowired + private CountDownLatchContainer latchContainer; + + @Test + void shouldRetryViaBlockingAndTopics() { + kafkaTemplate.send(BLOCKING_AND_TOPIC_RETRY, "Test message to " + BLOCKING_AND_TOPIC_RETRY); + assertThat(awaitLatch(latchContainer.blockingAndTopicsLatch)).isTrue(); + assertThat(awaitLatch(latchContainer.dltProcessorLatch)).isTrue(); + } + + @Test + void shouldRetryOnlyViaBlocking() { + kafkaTemplate.send(ONLY_RETRY_VIA_BLOCKING, "Test message to "); + assertThat(awaitLatch(latchContainer.onlyRetryViaBlockingLatch)).isTrue(); + assertThat(awaitLatch(latchContainer.annotatedDltOnlyBlockingLatch)).isTrue(); + } + + @Test + void shouldRetryOnlyViaTopic() { + kafkaTemplate.send(ONLY_RETRY_VIA_TOPIC, "Test message to " + ONLY_RETRY_VIA_TOPIC); + assertThat(awaitLatch(latchContainer.onlyRetryViaTopicLatch)).isTrue(); + assertThat(awaitLatch(latchContainer.dltProcessorWithErrorLatch)).isTrue(); + } + + @Test + void shouldGoStraightToDltIfUserProvidedFatal() { + kafkaTemplate.send(USER_FATAL_EXCEPTION_TOPIC, "Test message to " + USER_FATAL_EXCEPTION_TOPIC); + assertThat(awaitLatch(latchContainer.fatalUserLatch)).isTrue(); + assertThat(awaitLatch(latchContainer.annotatedDltUserFatalLatch)).isTrue(); + } + + @Test + void shouldGoStraightToDltIfFrameworkProvidedFatal() { + kafkaTemplate.send(FRAMEWORK_FATAL_EXCEPTION_TOPIC, "Testing topic with annotation 1"); + assertThat(awaitLatch(latchContainer.fatalFrameworkLatch)).isTrue(); + assertThat(awaitLatch(latchContainer.annotatedDltFrameworkFatalLatch)).isTrue(); + } + + private static void countdownIfCorrectInvocations(AtomicInteger invocations, int expected, CountDownLatch latch) { + int actual = invocations.get(); + if (actual == expected) { + latch.countDown(); + } + } + + private boolean awaitLatch(CountDownLatch latch) { + try { + return latch.await(30, TimeUnit.SECONDS); + } + catch (Exception e) { + fail(e.getMessage()); + throw new RuntimeException(e); + } + } + + static class BlockingAndTopicRetriesListener { + + @Autowired + CountDownLatchContainer container; + + @KafkaListener(id = "firstTopicId", topics = BLOCKING_AND_TOPIC_RETRY) + public void listen(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + container.blockingAndTopicsLatch.countDown(); + container.blockingAndTopicsListenerInvocations.incrementAndGet(); + throw new ShouldRetryViaBothException("Woooops... 
in topic " + receivedTopic); + } + } + + static class DltProcessor { + + @Autowired + CountDownLatchContainer container; + + public void processDltMessage(Object message) { + countdownIfCorrectInvocations(container.blockingAndTopicsListenerInvocations, 12, + container.dltProcessorLatch); + } + } + + @KafkaListener(topics = ONLY_RETRY_VIA_TOPIC) + static class OnlyRetryViaTopicListener { + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public void listenAgain(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + container.onlyRetryViaTopicLatch.countDown(); + container.onlyRetryViaTopicListenerInvocations.incrementAndGet(); + throw new ShouldRetryOnlyByTopicException("Another woooops... " + receivedTopic); + } + } + + static class DltProcessorWithError { + + @Autowired + CountDownLatchContainer container; + + public void processDltMessage(Object message) { + countdownIfCorrectInvocations(container.onlyRetryViaTopicListenerInvocations, + 3, container.dltProcessorWithErrorLatch); + throw new RuntimeException("Dlt Error!"); + } + } + + @RetryableTopic(exclude = ShouldRetryOnlyBlockingException.class, traversingCauses = "true", + backoff = @Backoff(50), kafkaTemplate = "kafkaTemplate") + @KafkaListener(topics = ONLY_RETRY_VIA_BLOCKING) + static class OnlyRetryBlockingListener { + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public void listenWithAnnotation(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + container.onlyRetryViaBlockingLatch.countDown(); + container.onlyRetryViaBlockingListenerInvocations.incrementAndGet(); + throw new ShouldRetryOnlyBlockingException("User provided fatal exception!" + receivedTopic); + } + + @DltHandler + public void annotatedDltMethod(Object message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + countdownIfCorrectInvocations(container.onlyRetryViaBlockingListenerInvocations, 4, + container.annotatedDltOnlyBlockingLatch); + } + } + + @RetryableTopic(backoff = @Backoff(50), kafkaTemplate = "kafkaTemplate") + @KafkaListener(topics = USER_FATAL_EXCEPTION_TOPIC) + static class UserFatalTopicListener { + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public void listenWithAnnotation(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + container.fatalUserLatch.countDown(); + container.userFatalListenerInvocations.incrementAndGet(); + throw new ShouldSkipBothRetriesException("User provided fatal exception!" + receivedTopic); + } + + @DltHandler + public void annotatedDltMethod(Object message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + countdownIfCorrectInvocations(container.userFatalListenerInvocations, 1, + container.annotatedDltUserFatalLatch); + } + } + + @RetryableTopic(backoff = @Backoff(50)) + @KafkaListener(topics = FRAMEWORK_FATAL_EXCEPTION_TOPIC) + static class FrameworkFatalTopicListener { + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public void listenWithAnnotation(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + container.fatalFrameworkLatch.countDown(); + container.fatalFrameworkListenerInvocations.incrementAndGet(); + throw new ConversionException("Woooops... 
in topic " + receivedTopic, new RuntimeException("Test RTE")); + } + + @DltHandler + public void annotatedDltMethod(Object message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + countdownIfCorrectInvocations(container.fatalFrameworkListenerInvocations, 1, + container.annotatedDltFrameworkFatalLatch); + throw new ConversionException("Woooops... in topic " + receivedTopic, new RuntimeException("Test RTE")); + } + } + + static class CountDownLatchContainer { + + CountDownLatch blockingAndTopicsLatch = new CountDownLatch(12); + + CountDownLatch onlyRetryViaBlockingLatch = new CountDownLatch(4); + + CountDownLatch onlyRetryViaTopicLatch = new CountDownLatch(3); + + CountDownLatch fatalUserLatch = new CountDownLatch(1); + + CountDownLatch fatalFrameworkLatch = new CountDownLatch(1); + + CountDownLatch annotatedDltOnlyBlockingLatch = new CountDownLatch(1); + + CountDownLatch annotatedDltUserFatalLatch = new CountDownLatch(1); + + CountDownLatch annotatedDltFrameworkFatalLatch = new CountDownLatch(1); + + CountDownLatch dltProcessorLatch = new CountDownLatch(1); + + CountDownLatch dltProcessorWithErrorLatch = new CountDownLatch(1); + + AtomicInteger blockingAndTopicsListenerInvocations = new AtomicInteger(); + + AtomicInteger onlyRetryViaTopicListenerInvocations = new AtomicInteger(); + + AtomicInteger onlyRetryViaBlockingListenerInvocations = new AtomicInteger(); + + AtomicInteger userFatalListenerInvocations = new AtomicInteger(); + + AtomicInteger fatalFrameworkListenerInvocations = new AtomicInteger(); + + } + + @SuppressWarnings("serial") + static class ShouldRetryOnlyByTopicException extends RuntimeException { + ShouldRetryOnlyByTopicException(String msg) { + super(msg); + } + } + + @SuppressWarnings("serial") + static class ShouldSkipBothRetriesException extends RuntimeException { + ShouldSkipBothRetriesException(String msg) { + super(msg); + } + } + + @SuppressWarnings("serial") + static class ShouldRetryOnlyBlockingException extends RuntimeException { + ShouldRetryOnlyBlockingException(String msg) { + super(msg); + } + } + + @SuppressWarnings("serial") + static class ShouldRetryViaBothException extends RuntimeException { + ShouldRetryViaBothException(String msg) { + super(msg); + } + } + + @Configuration + static class RetryTopicConfigurations { + + private static final String DLT_METHOD_NAME = "processDltMessage"; + + @Bean + RetryTopicConfiguration blockingAndTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .fixedBackOff(50) + .includeTopic(BLOCKING_AND_TOPIC_RETRY) + .dltHandlerMethod("dltProcessor", DLT_METHOD_NAME) + .create(template); + } + + @Bean + RetryTopicConfiguration onlyTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .fixedBackOff(50) + .includeTopic(ONLY_RETRY_VIA_TOPIC) + .sameIntervalTopicReuseStrategy(SameIntervalTopicReuseStrategy.SINGLE_TOPIC) + .doNotRetryOnDltFailure() + .dltHandlerMethod("dltProcessorWithError", DLT_METHOD_NAME) + .create(template); + } + + @Bean + BlockingAndTopicRetriesListener blockingAndTopicRetriesListener() { + return new BlockingAndTopicRetriesListener(); + } + + @Bean + OnlyRetryViaTopicListener onlyRetryViaTopicListener() { + return new OnlyRetryViaTopicListener(); + } + + @Bean + UserFatalTopicListener userFatalTopicListener() { + return new UserFatalTopicListener(); + } + + @Bean + OnlyRetryBlockingListener onlyRetryBlockingListener() { + return new OnlyRetryBlockingListener(); + } + + @Bean + FrameworkFatalTopicListener 
frameworkFatalTopicListener() { + return new FrameworkFatalTopicListener(); + } + + @Bean + CountDownLatchContainer latchContainer() { + return new CountDownLatchContainer(); + } + + @Bean + DltProcessor dltProcessor() { + return new DltProcessor(); + } + + @Bean + DltProcessorWithError dltProcessorWithError() { + return new DltProcessorWithError(); + } + + @Bean + TaskScheduler sched() { + return new ThreadPoolTaskScheduler(); + } + + } + + @Configuration + static class RoutingTestsConfigurationSupport extends RetryTopicConfigurationSupport { + + @Override + protected void configureBlockingRetries(BlockingRetriesConfigurer blockingRetries) { + blockingRetries + .retryOn(ShouldRetryOnlyBlockingException.class, ShouldRetryViaBothException.class) + .backOff(new FixedBackOff(50, 3)); + } + + @Override + protected void manageNonBlockingFatalExceptions(List> nonBlockingFatalExceptions) { + nonBlockingFatalExceptions.add(ShouldSkipBothRetriesException.class); + } + } + + @Configuration + static class KafkaProducerConfig { + + @Autowired + EmbeddedKafkaBroker broker; + + @Bean + ProducerFactory producerFactory() { + Map configProps = KafkaTestUtils.producerProps( + this.broker.getBrokersAsString()); + configProps.put( + ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, + StringSerializer.class); + configProps.put( + ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, + StringSerializer.class); + return new DefaultKafkaProducerFactory<>(configProps); + } + + @Bean + KafkaTemplate kafkaTemplate() { + return new KafkaTemplate<>(producerFactory()); + } + } + + @EnableKafka + @Configuration + static class KafkaConsumerConfig { + + @Autowired + EmbeddedKafkaBroker broker; + + @Bean + KafkaAdmin kafkaAdmin() { + Map configs = new HashMap<>(); + configs.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, this.broker.getBrokersAsString()); + return new KafkaAdmin(configs); + } + + @Bean + ConsumerFactory consumerFactory() { + Map props = KafkaTestUtils.consumerProps( + this.broker.getBrokersAsString(), "groupId"); + props.put( + ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, + StringDeserializer.class); + props.put( + ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, + StringDeserializer.class); + props.put( + ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, false); + props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); + + return new DefaultKafkaConsumerFactory<>(props); + } + + @Bean + ConcurrentKafkaListenerContainerFactory retryTopicListenerContainerFactory( + ConsumerFactory consumerFactory) { + + ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); + ContainerProperties props = factory.getContainerProperties(); + props.setIdleEventInterval(100L); + props.setPollTimeout(50L); + props.setIdlePartitionEventInterval(100L); + factory.setConsumerFactory(consumerFactory); + factory.setConcurrency(1); + factory.setContainerCustomizer( + container -> container.getContainerProperties().setIdlePartitionEventInterval(100L)); + return factory; + } + + @Bean + ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory( + ConsumerFactory consumerFactory) { + + ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); + factory.setConsumerFactory(consumerFactory); + factory.setConcurrency(1); + return factory; + } + + } +} diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicClassLevelIntegrationTests.java 
b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicClassLevelIntegrationTests.java new file mode 100644 index 0000000000..910ea0f48e --- /dev/null +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicClassLevelIntegrationTests.java @@ -0,0 +1,839 @@ +/* + * Copyright 2021-2024 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.kafka.retrytopic; + +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import org.apache.kafka.clients.admin.AdminClientConfig; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.clients.admin.TopicDescription; +import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.common.serialization.StringDeserializer; +import org.apache.kafka.common.serialization.StringSerializer; +import org.assertj.core.api.InstanceOfAssertFactories; +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.kafka.annotation.DltHandler; +import org.springframework.kafka.annotation.EnableKafka; +import org.springframework.kafka.annotation.KafkaHandler; +import org.springframework.kafka.annotation.KafkaListener; +import org.springframework.kafka.annotation.PartitionOffset; +import org.springframework.kafka.annotation.RetryableTopic; +import org.springframework.kafka.annotation.TopicPartition; +import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; +import org.springframework.kafka.config.KafkaListenerEndpointRegistry; +import org.springframework.kafka.config.TopicBuilder; +import org.springframework.kafka.core.ConsumerFactory; +import org.springframework.kafka.core.DefaultKafkaConsumerFactory; +import org.springframework.kafka.core.DefaultKafkaProducerFactory; +import org.springframework.kafka.core.KafkaAdmin; +import org.springframework.kafka.core.KafkaAdmin.NewTopics; +import org.springframework.kafka.core.KafkaTemplate; +import org.springframework.kafka.core.ProducerFactory; +import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; +import org.springframework.kafka.listener.ContainerProperties; +import org.springframework.kafka.listener.ContainerProperties.AckMode; +import 
org.springframework.kafka.listener.KafkaListenerErrorHandler; +import org.springframework.kafka.support.Acknowledgment; +import org.springframework.kafka.support.KafkaHeaders; +import org.springframework.kafka.test.EmbeddedKafkaBroker; +import org.springframework.kafka.test.context.EmbeddedKafka; +import org.springframework.kafka.test.utils.KafkaTestUtils; +import org.springframework.messaging.Message; +import org.springframework.messaging.converter.CompositeMessageConverter; +import org.springframework.messaging.converter.GenericMessageConverter; +import org.springframework.messaging.converter.SmartMessageConverter; +import org.springframework.messaging.handler.annotation.Header; +import org.springframework.retry.annotation.Backoff; +import org.springframework.scheduling.TaskScheduler; +import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler; +import org.springframework.test.annotation.DirtiesContext; +import org.springframework.test.context.TestPropertySource; +import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; +import static org.awaitility.Awaitility.await; + + +/** + * Test class level non-blocking retries. + * + * @author Wang Zhiyang + * @author Artem Bilan + * @author Sanghyeok An + * + * @since 3.2 + */ +@SpringJUnitConfig +@DirtiesContext +@EmbeddedKafka(topics = { RetryTopicClassLevelIntegrationTests.FIRST_TOPIC, + RetryTopicClassLevelIntegrationTests.SECOND_TOPIC, + RetryTopicClassLevelIntegrationTests.THIRD_TOPIC, + RetryTopicClassLevelIntegrationTests.FOURTH_TOPIC, + RetryTopicClassLevelIntegrationTests.TWO_LISTENERS_TOPIC, + RetryTopicClassLevelIntegrationTests.MANUAL_TOPIC }) +@TestPropertySource(properties = { "five.attempts=5", "kafka.template=customKafkaTemplate"}) +class RetryTopicClassLevelIntegrationTests { + + public final static String FIRST_TOPIC = "myRetryTopic1"; + + public final static String SECOND_TOPIC = "myRetryTopic2"; + + public final static String THIRD_TOPIC = "myRetryTopic3"; + + public final static String FOURTH_TOPIC = "myRetryTopic4"; + + public final static String TWO_LISTENERS_TOPIC = "myRetryTopic5"; + + public final static String MANUAL_TOPIC = "myRetryTopic6"; + + public final static String NOT_RETRYABLE_EXCEPTION_TOPIC = "noRetryTopic"; + + public final static String FIRST_REUSE_RETRY_TOPIC = "reuseRetry1"; + + public final static String SECOND_REUSE_RETRY_TOPIC = "reuseRetry2"; + + public final static String THIRD_REUSE_RETRY_TOPIC = "reuseRetry3"; + + private final static String MAIN_TOPIC_CONTAINER_FACTORY = "kafkaListenerContainerFactory"; + + @Autowired + private KafkaTemplate kafkaTemplate; + + @Autowired + private CountDownLatchContainer latchContainer; + + @Autowired + DestinationTopicContainer topicContainer; + + @Test + void shouldRetryFirstTopic(@Autowired KafkaListenerEndpointRegistry registry) { + kafkaTemplate.send(FIRST_TOPIC, "Testing topic 1"); + assertThat(topicContainer.getNextDestinationTopicFor("firstTopicId", FIRST_TOPIC).getDestinationName()) + .isEqualTo("myRetryTopic1-retry"); + assertThat(awaitLatch(latchContainer.countDownLatch1)).isTrue(); + assertThat(awaitLatch(latchContainer.customDltCountdownLatch)).isTrue(); + assertThat(awaitLatch(latchContainer.customErrorHandlerCountdownLatch)).isTrue(); + assertThat(awaitLatch(latchContainer.customMessageConverterCountdownLatch)).isTrue(); + registry.getListenerContainerIds().stream() + .filter(id -> id.startsWith("first")) + 
.forEach(id -> { + ConcurrentMessageListenerContainer container = (ConcurrentMessageListenerContainer) registry + .getListenerContainer(id); + if (id.equals("firstTopicId")) { + assertThat(container.getConcurrency()).isEqualTo(2); + } + else { + assertThat(container.getConcurrency()) + .describedAs("Expected %s to have concurrency", id) + .isEqualTo(1); + } + }); + } + + @Test + void shouldRetrySecondTopic() { + kafkaTemplate.send(SECOND_TOPIC, "Testing topic 2"); + assertThat(awaitLatch(latchContainer.countDownLatch2)).isTrue(); + assertThat(awaitLatch(latchContainer.customDltCountdownLatch)).isTrue(); + } + + @Test + void shouldRetryThirdTopicWithTimeout(@Autowired KafkaAdmin admin, + @Autowired KafkaListenerEndpointRegistry registry) throws Exception { + + kafkaTemplate.send(THIRD_TOPIC, "Testing topic 3"); + assertThat(awaitLatch(latchContainer.countDownLatch3)).isTrue(); + assertThat(awaitLatch(latchContainer.countDownLatchDltOne)).isTrue(); + Map topics = admin.describeTopics(THIRD_TOPIC, THIRD_TOPIC + "-dlt", FOURTH_TOPIC); + assertThat(topics.get(THIRD_TOPIC).partitions()).hasSize(2); + assertThat(topics.get(THIRD_TOPIC + "-dlt").partitions()).hasSize(3); + assertThat(topics.get(FOURTH_TOPIC).partitions()).hasSize(2); + AtomicReference method = new AtomicReference<>(); + org.springframework.util.ReflectionUtils.doWithMethods(KafkaAdmin.class, m -> { + m.setAccessible(true); + method.set(m); + }, m -> m.getName().equals("newTopics")); + @SuppressWarnings("unchecked") + Collection weededTopics = (Collection) method.get().invoke(admin); + AtomicInteger weeded = new AtomicInteger(); + weededTopics.forEach(topic -> { + if (topic.name().equals(THIRD_TOPIC) || topic.name().equals(FOURTH_TOPIC)) { + assertThat(topic).isExactlyInstanceOf(NewTopic.class); + weeded.incrementAndGet(); + } + }); + assertThat(weeded.get()).isEqualTo(2); + registry.getListenerContainerIds().stream() + .filter(id -> id.startsWith("third")) + .forEach(id -> { + ConcurrentMessageListenerContainer container = + (ConcurrentMessageListenerContainer) registry.getListenerContainer(id); + if (id.equals("thirdTopicId")) { + assertThat(container.getConcurrency()).isEqualTo(2); + } + else { + assertThat(container.getConcurrency()) + .describedAs("Expected %s to have concurrency", id) + .isEqualTo(1); + } + }); + } + + @Test + void shouldRetryFourthTopicWithNoDlt() { + kafkaTemplate.send(FOURTH_TOPIC, "Testing topic 4"); + assertThat(awaitLatch(latchContainer.countDownLatch4)).isTrue(); + } + + @Test + void shouldRetryFifthTopicWithTwoListenersAndManualAssignment(@Autowired FifthTopicListener1 listener1, + @Autowired FifthTopicListener2 listener2) { + + kafkaTemplate.send(TWO_LISTENERS_TOPIC, 0, "0", "Testing topic 5 - 0"); + kafkaTemplate.send(TWO_LISTENERS_TOPIC, 1, "0", "Testing topic 5 - 1"); + assertThat(awaitLatch(latchContainer.countDownLatch51)).isTrue(); + assertThat(awaitLatch(latchContainer.countDownLatch52)).isTrue(); + assertThat(awaitLatch(latchContainer.countDownLatchDltThree)).isTrue(); + assertThat(listener1.topics).containsExactly(TWO_LISTENERS_TOPIC, TWO_LISTENERS_TOPIC + + "-listener1-0", TWO_LISTENERS_TOPIC + "-listener1-1", TWO_LISTENERS_TOPIC + "-listener1-2", + TWO_LISTENERS_TOPIC + "-listener1-dlt"); + assertThat(listener2.topics).containsExactly(TWO_LISTENERS_TOPIC, TWO_LISTENERS_TOPIC + + "-listener2-0", TWO_LISTENERS_TOPIC + "-listener2-1", TWO_LISTENERS_TOPIC + "-listener2-2", + TWO_LISTENERS_TOPIC + "-listener2-dlt"); + } + + @Test + void shouldRetryManualTopicWithDefaultDlt(@Autowired 
KafkaListenerEndpointRegistry registry, + @Autowired ConsumerFactory cf) { + + kafkaTemplate.send(MANUAL_TOPIC, "Testing topic 6"); + assertThat(awaitLatch(latchContainer.countDownLatch6)).isTrue(); + registry.getListenerContainerIds().stream() + .filter(id -> id.startsWith("manual")) + .forEach(id -> { + ConcurrentMessageListenerContainer container = + (ConcurrentMessageListenerContainer) registry.getListenerContainer(id); + assertThat(container).extracting("commonErrorHandler") + .extracting("seekAfterError", InstanceOfAssertFactories.BOOLEAN) + .isFalse(); + }); + Consumer consumer = cf.createConsumer("manual-dlt", ""); + Set tp = + Set.of(new org.apache.kafka.common.TopicPartition(MANUAL_TOPIC + "-dlt", 0)); + consumer.assign(tp); + try { + await().untilAsserted(() -> { + OffsetAndMetadata offsetAndMetadata = consumer.committed(tp).get(tp.iterator().next()); + assertThat(offsetAndMetadata).isNotNull(); + assertThat(offsetAndMetadata.offset()).isEqualTo(1L); + }); + } + finally { + consumer.close(); + } + } + + @Test + void shouldFirstReuseRetryTopic(@Autowired FirstReuseRetryTopicListener listener1, + @Autowired SecondReuseRetryTopicListener listener2, @Autowired ThirdReuseRetryTopicListener listener3) { + + kafkaTemplate.send(FIRST_REUSE_RETRY_TOPIC, "Testing reuse topic 1"); + kafkaTemplate.send(SECOND_REUSE_RETRY_TOPIC, "Testing reuse topic 2"); + kafkaTemplate.send(THIRD_REUSE_RETRY_TOPIC, "Testing reuse topic 3"); + assertThat(awaitLatch(latchContainer.countDownLatchReuseOne)).isTrue(); + assertThat(awaitLatch(latchContainer.countDownLatchReuseTwo)).isTrue(); + assertThat(awaitLatch(latchContainer.countDownLatchReuseThree)).isTrue(); + assertThat(listener1.topics).containsExactly(FIRST_REUSE_RETRY_TOPIC, + FIRST_REUSE_RETRY_TOPIC + "-retry"); + assertThat(listener2.topics).containsExactly(SECOND_REUSE_RETRY_TOPIC, + SECOND_REUSE_RETRY_TOPIC + "-retry-30", SECOND_REUSE_RETRY_TOPIC + "-retry-60", + SECOND_REUSE_RETRY_TOPIC + "-retry-100", SECOND_REUSE_RETRY_TOPIC + "-retry-100"); + assertThat(listener3.topics).containsExactly(THIRD_REUSE_RETRY_TOPIC, + THIRD_REUSE_RETRY_TOPIC + "-retry", THIRD_REUSE_RETRY_TOPIC + "-retry", + THIRD_REUSE_RETRY_TOPIC + "-retry", THIRD_REUSE_RETRY_TOPIC + "-retry"); + } + + @Test + void shouldGoStraightToDlt() { + kafkaTemplate.send(NOT_RETRYABLE_EXCEPTION_TOPIC, "Testing topic with annotation 1"); + assertThat(awaitLatch(latchContainer.countDownLatchNoRetry)).isTrue(); + assertThat(awaitLatch(latchContainer.countDownLatchDltTwo)).isTrue(); + } + + private boolean awaitLatch(CountDownLatch latch) { + try { + return latch.await(60, TimeUnit.SECONDS); + } + catch (Exception e) { + fail(e.getMessage()); + throw new RuntimeException(e); + } + } + + @KafkaListener(id = "firstTopicId", topics = FIRST_TOPIC, containerFactory = MAIN_TOPIC_CONTAINER_FACTORY, + errorHandler = "myCustomErrorHandler", contentTypeConverter = "myCustomMessageConverter", + concurrency = "2") + static class FirstTopicListener { + + @Autowired + DestinationTopicContainer topicContainer; + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public void listen(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + container.countDownLatch1.countDown(); + throw new RuntimeException("Woooops... 
in topic " + receivedTopic); + } + + } + + @KafkaListener(topics = SECOND_TOPIC, containerFactory = MAIN_TOPIC_CONTAINER_FACTORY) + static class SecondTopicListener { + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public void listenAgain(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + container.countDownIfNotKnown(receivedTopic, container.countDownLatch2); + throw new IllegalStateException("Another woooops... " + receivedTopic); + } + } + + @RetryableTopic(attempts = "${five.attempts}", + backoff = @Backoff(delay = 250, maxDelay = 1000, multiplier = 1.5), + numPartitions = "#{3}", + timeout = "${missing.property:2000}", + include = MyRetryException.class, kafkaTemplate = "${kafka.template}", + topicSuffixingStrategy = TopicSuffixingStrategy.SUFFIX_WITH_INDEX_VALUE, + concurrency = "1") + @KafkaListener(id = "thirdTopicId", topics = THIRD_TOPIC, containerFactory = MAIN_TOPIC_CONTAINER_FACTORY, + concurrency = "2") + static class ThirdTopicListener { + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public void listenWithAnnotation(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + container.countDownIfNotKnown(receivedTopic, container.countDownLatch3); + throw new MyRetryException("Annotated woooops... " + receivedTopic); + } + + @DltHandler + public void annotatedDltMethod(Object message) { + container.countDownLatchDltOne.countDown(); + } + } + + @RetryableTopic(dltStrategy = DltStrategy.NO_DLT, attempts = "4", backoff = @Backoff(300), + sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.MULTIPLE_TOPICS, + kafkaTemplate = "${kafka.template}") + @KafkaListener(topics = FOURTH_TOPIC, containerFactory = MAIN_TOPIC_CONTAINER_FACTORY) + static class FourthTopicListener { + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public void listenNoDlt(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + container.countDownIfNotKnown(receivedTopic, container.countDownLatch4); + throw new IllegalStateException("Another woooops... " + receivedTopic); + } + + @DltHandler + public void shouldNotGetHere() { + fail("Dlt should not be processed!"); + } + } + + static class AbstractFifthTopicListener { + + final List topics = Collections.synchronizedList(new ArrayList<>()); + + @Autowired + CountDownLatchContainer container; + + @DltHandler + public void annotatedDltMethod(ConsumerRecord record) { + this.topics.add(record.topic()); + container.countDownLatchDltThree.countDown(); + } + + } + + @RetryableTopic(attempts = "4", + backoff = @Backoff(250), + numPartitions = "2", + retryTopicSuffix = "-listener1", dltTopicSuffix = "-listener1-dlt", + topicSuffixingStrategy = TopicSuffixingStrategy.SUFFIX_WITH_INDEX_VALUE, + sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.MULTIPLE_TOPICS, + kafkaTemplate = "${kafka.template}") + @KafkaListener(id = "fifthTopicId1", topicPartitions = {@TopicPartition(topic = TWO_LISTENERS_TOPIC, + partitionOffsets = @PartitionOffset(partition = "0", initialOffset = "0"))}, + containerFactory = MAIN_TOPIC_CONTAINER_FACTORY) + static class FifthTopicListener1 extends AbstractFifthTopicListener { + + @KafkaHandler + public void listenWithAnnotation(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + this.topics.add(receivedTopic); + container.countDownIfNotKnown(receivedTopic, container.countDownLatch51); + throw new RuntimeException("Annotated woooops... 
" + receivedTopic); + } + + } + + @RetryableTopic(attempts = "4", + backoff = @Backoff(250), + numPartitions = "2", + retryTopicSuffix = "-listener2", dltTopicSuffix = "-listener2-dlt", + topicSuffixingStrategy = TopicSuffixingStrategy.SUFFIX_WITH_INDEX_VALUE, + sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.MULTIPLE_TOPICS, + kafkaTemplate = "${kafka.template}") + @KafkaListener(id = "fifthTopicId2", topicPartitions = {@TopicPartition(topic = TWO_LISTENERS_TOPIC, + partitionOffsets = @PartitionOffset(partition = "1", initialOffset = "0"))}, + containerFactory = MAIN_TOPIC_CONTAINER_FACTORY) + static class FifthTopicListener2 extends AbstractFifthTopicListener { + + @KafkaHandler + public void listenWithAnnotation2(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + this.topics.add(receivedTopic); + container.countDownLatch52.countDown(); + throw new RuntimeException("Annotated woooops... " + receivedTopic); + } + + } + + @RetryableTopic(attempts = "4", backoff = @Backoff(50), + sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.MULTIPLE_TOPICS) + @KafkaListener(id = "manual", topics = MANUAL_TOPIC, containerFactory = MAIN_TOPIC_CONTAINER_FACTORY) + static class SixthTopicDefaultDLTListener { + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public void listenNoDlt(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic, + @SuppressWarnings("unused") Acknowledgment ack) { + + container.countDownIfNotKnown(receivedTopic, container.countDownLatch6); + throw new IllegalStateException("Another woooops... " + receivedTopic); + } + + } + + @RetryableTopic(attempts = "3", numPartitions = "3", exclude = MyDontRetryException.class, + backoff = @Backoff(delay = 50, maxDelay = 100, multiplier = 3), + traversingCauses = "true", kafkaTemplate = "${kafka.template}") + @KafkaListener(topics = NOT_RETRYABLE_EXCEPTION_TOPIC, containerFactory = MAIN_TOPIC_CONTAINER_FACTORY) + static class NoRetryTopicListener { + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public void listenWithAnnotation2(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + container.countDownIfNotKnown(receivedTopic, container.countDownLatchNoRetry); + throw new MyDontRetryException("Annotated second woooops... " + receivedTopic); + } + + @DltHandler + public void annotatedDltMethod(Object message) { + container.countDownLatchDltTwo.countDown(); + } + } + + @RetryableTopic(attempts = "2", backoff = @Backoff(50)) + @KafkaListener(id = "reuseRetry1", topics = FIRST_REUSE_RETRY_TOPIC, + containerFactory = "retryTopicListenerContainerFactory") + static class FirstReuseRetryTopicListener { + + final List topics = Collections.synchronizedList(new ArrayList<>()); + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public void listen1(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + this.topics.add(receivedTopic); + container.countDownLatchReuseOne.countDown(); + throw new RuntimeException("Another woooops... 
" + receivedTopic); + } + + } + + @RetryableTopic(attempts = "5", backoff = @Backoff(delay = 30, maxDelay = 100, multiplier = 2)) + @KafkaListener(id = "reuseRetry2", topics = SECOND_REUSE_RETRY_TOPIC, + containerFactory = "retryTopicListenerContainerFactory") + static class SecondReuseRetryTopicListener { + + final List topics = Collections.synchronizedList(new ArrayList<>()); + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public void listen2(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + this.topics.add(receivedTopic); + container.countDownLatchReuseTwo.countDown(); + throw new RuntimeException("Another woooops... " + receivedTopic); + } + + } + + @RetryableTopic(attempts = "5", backoff = @Backoff(delay = 1, maxDelay = 5, multiplier = 1.4)) + @KafkaListener(id = "reuseRetry3", topics = THIRD_REUSE_RETRY_TOPIC, + containerFactory = "retryTopicListenerContainerFactory") + static class ThirdReuseRetryTopicListener { + + final List topics = Collections.synchronizedList(new ArrayList<>()); + + @Autowired + CountDownLatchContainer container; + + @KafkaHandler + public void listen3(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { + this.topics.add(receivedTopic); + container.countDownLatchReuseThree.countDown(); + throw new RuntimeException("Another woooops... " + receivedTopic); + } + + } + + static class CountDownLatchContainer { + + CountDownLatch countDownLatch1 = new CountDownLatch(5); + + CountDownLatch countDownLatch2 = new CountDownLatch(3); + + CountDownLatch countDownLatch3 = new CountDownLatch(3); + + CountDownLatch countDownLatch4 = new CountDownLatch(4); + + CountDownLatch countDownLatch51 = new CountDownLatch(4); + + CountDownLatch countDownLatch52 = new CountDownLatch(4); + + CountDownLatch countDownLatch6 = new CountDownLatch(4); + + CountDownLatch countDownLatchNoRetry = new CountDownLatch(1); + + CountDownLatch countDownLatchDltOne = new CountDownLatch(1); + + CountDownLatch countDownLatchDltTwo = new CountDownLatch(1); + + CountDownLatch countDownLatchDltThree = new CountDownLatch(2); + + CountDownLatch countDownLatchReuseOne = new CountDownLatch(2); + + CountDownLatch countDownLatchReuseTwo = new CountDownLatch(5); + + CountDownLatch countDownLatchReuseThree = new CountDownLatch(5); + + CountDownLatch customDltCountdownLatch = new CountDownLatch(1); + + CountDownLatch customErrorHandlerCountdownLatch = new CountDownLatch(6); + + CountDownLatch customMessageConverterCountdownLatch = new CountDownLatch(6); + + final List knownTopics = new ArrayList<>(); + + private void countDownIfNotKnown(String receivedTopic, CountDownLatch countDownLatch) { + synchronized (knownTopics) { + if (!knownTopics.contains(receivedTopic)) { + knownTopics.add(receivedTopic); + countDownLatch.countDown(); + } + } + } + } + + static class MyCustomDltProcessor { + + @Autowired + KafkaTemplate kafkaTemplate; + + @Autowired + CountDownLatchContainer container; + + public void processDltMessage(Object message) { + container.customDltCountdownLatch.countDown(); + throw new RuntimeException("Dlt Error!"); + } + } + + @SuppressWarnings("serial") + static class MyRetryException extends RuntimeException { + MyRetryException(String msg) { + super(msg); + } + } + + @SuppressWarnings("serial") + static class MyDontRetryException extends RuntimeException { + MyDontRetryException(String msg) { + super(msg); + } + } + + @Configuration + static class RetryTopicConfigurations extends RetryTopicConfigurationSupport { + + private static 
final String DLT_METHOD_NAME = "processDltMessage"; + + @Bean + RetryTopicConfiguration firstRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .fixedBackOff(50) + .maxAttempts(5) + .concurrency(1) + .useSingleTopicForSameIntervals() + .includeTopic(FIRST_TOPIC) + .doNotRetryOnDltFailure() + .dltHandlerMethod("myCustomDltProcessor", DLT_METHOD_NAME) + .create(template); + } + + @Bean + RetryTopicConfiguration secondRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .exponentialBackoff(500, 2, 10000) + .retryOn(Arrays.asList(IllegalStateException.class, IllegalAccessException.class)) + .traversingCauses() + .includeTopic(SECOND_TOPIC) + .doNotRetryOnDltFailure() + .dltHandlerMethod("myCustomDltProcessor", DLT_METHOD_NAME) + .create(template); + } + + @Bean + FirstTopicListener firstTopicListener() { + return new FirstTopicListener(); + } + + @Bean + KafkaListenerErrorHandler myCustomErrorHandler(CountDownLatchContainer container) { + return (message, exception) -> { + container.customErrorHandlerCountdownLatch.countDown(); + throw exception; + }; + } + + @Bean + SmartMessageConverter myCustomMessageConverter(CountDownLatchContainer container) { + return new CompositeMessageConverter(Collections.singletonList(new GenericMessageConverter())) { + + @Override + public Object fromMessage(Message message, Class targetClass, Object conversionHint) { + container.customMessageConverterCountdownLatch.countDown(); + return super.fromMessage(message, targetClass, conversionHint); + } + }; + } + + @Bean + SecondTopicListener secondTopicListener() { + return new SecondTopicListener(); + } + + @Bean + ThirdTopicListener thirdTopicListener() { + return new ThirdTopicListener(); + } + + @Bean + FourthTopicListener fourthTopicListener() { + return new FourthTopicListener(); + } + + @Bean + FifthTopicListener1 fifthTopicListener1() { + return new FifthTopicListener1(); + } + + @Bean + FifthTopicListener2 fifthTopicListener2() { + return new FifthTopicListener2(); + } + + @Bean + SixthTopicDefaultDLTListener manualTopicListener() { + return new SixthTopicDefaultDLTListener(); + } + + @Bean + NoRetryTopicListener noRetryTopicListener() { + return new NoRetryTopicListener(); + } + + @Bean + FirstReuseRetryTopicListener firstReuseRetryTopicListener() { + return new FirstReuseRetryTopicListener(); + } + + @Bean + SecondReuseRetryTopicListener secondReuseRetryTopicListener() { + return new SecondReuseRetryTopicListener(); + } + + @Bean + ThirdReuseRetryTopicListener thirdReuseRetryTopicListener() { + return new ThirdReuseRetryTopicListener(); + } + + @Bean + CountDownLatchContainer latchContainer() { + return new CountDownLatchContainer(); + } + + @Bean + MyCustomDltProcessor myCustomDltProcessor() { + return new MyCustomDltProcessor(); + } + } + + @Configuration + static class KafkaProducerConfig { + + @Autowired + EmbeddedKafkaBroker broker; + + @Bean + ProducerFactory producerFactory() { + Map configProps = KafkaTestUtils.producerProps( + this.broker.getBrokersAsString()); + configProps.put( + ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, + StringSerializer.class); + configProps.put( + ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, + StringSerializer.class); + return new DefaultKafkaProducerFactory<>(configProps); + } + + @Bean("customKafkaTemplate") + KafkaTemplate kafkaTemplate() { + return new KafkaTemplate<>(producerFactory()); + } + } + + @EnableKafka + @Configuration + static class KafkaConsumerConfig { + + @Autowired + 
EmbeddedKafkaBroker broker; + + @Bean + KafkaAdmin kafkaAdmin() { + Map configs = new HashMap<>(); + configs.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, this.broker.getBrokersAsString()); + return new KafkaAdmin(configs); + } + + @Bean + NewTopic topic() { + return TopicBuilder.name(THIRD_TOPIC).partitions(2).replicas(1).build(); + } + + @Bean + NewTopics topics() { + return new NewTopics(TopicBuilder.name(FOURTH_TOPIC).partitions(2).replicas(1).build()); + } + + @Bean + ConsumerFactory consumerFactory() { + Map props = KafkaTestUtils.consumerProps( + this.broker.getBrokersAsString(), "groupId"); + props.put( + ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, + StringDeserializer.class); + props.put( + ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, + StringDeserializer.class); + props.put( + ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, false); + props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); + + return new DefaultKafkaConsumerFactory<>(props); + } + + @Bean + ConcurrentKafkaListenerContainerFactory retryTopicListenerContainerFactory( + ConsumerFactory consumerFactory) { + + ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); + ContainerProperties props = factory.getContainerProperties(); + props.setIdleEventInterval(100L); + props.setPollTimeout(50L); + props.setIdlePartitionEventInterval(100L); + factory.setConsumerFactory(consumerFactory); + factory.setConcurrency(1); + factory.setContainerCustomizer( + container -> container.getContainerProperties().setIdlePartitionEventInterval(100L)); + return factory; + } + + @Bean + ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory( + ConsumerFactory consumerFactory) { + + ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); + factory.setConsumerFactory(consumerFactory); + factory.setConcurrency(1); + factory.setContainerCustomizer(container -> { + if (container.getListenerId().startsWith("manual")) { + container.getContainerProperties().setAckMode(AckMode.MANUAL); + container.getContainerProperties().setAsyncAcks(true); + } + }); + return factory; + } + + @Bean + TaskScheduler sched() { + return new ThreadPoolTaskScheduler(); + } + + } + +} diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationBuilderTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationBuilderTests.java index 990fe99e34..b6cfc0fc0f 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationBuilderTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationBuilderTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2023 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
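The new RetryTopicClassLevelIntegrationTests above configures retry topics both via @RetryableTopic attributes (including ${...} placeholders and #{...} SpEL) and via the fluent builder in its RetryTopicConfigurations class. The builder path, condensed; every call below appears in the test, with only the topic and handler bean names swapped for placeholders:

@Bean
RetryTopicConfiguration exampleRetryTopic(KafkaTemplate<String, String> template) {
    return RetryTopicConfigurationBuilder
            .newInstance()
            .fixedBackOff(50)                        // 50ms between non-blocking attempts
            .maxAttempts(5)
            .useSingleTopicForSameIntervals()        // one shared retry topic for equal delays
            .includeTopic("example-topic")           // placeholder topic
            .doNotRetryOnDltFailure()
            .dltHandlerMethod("exampleDltBean", "processDltMessage") // placeholder bean name
            .create(template);
}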
@@ -16,8 +16,6 @@ package org.springframework.kafka.retrytopic; -import static org.assertj.core.api.Assertions.assertThat; - import java.util.Arrays; import java.util.List; import java.util.Map; @@ -34,6 +32,8 @@ import org.springframework.kafka.test.utils.KafkaTestUtils; import org.springframework.test.util.ReflectionTestUtils; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Tomaz Fernandes * @author Adrian Chlebosz diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationIntegrationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationIntegrationTests.java index 2ad2ab8c35..3abd651735 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationIntegrationTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationIntegrationTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,10 +16,6 @@ package org.springframework.kafka.retrytopic; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; - import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -59,6 +55,10 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; + /** * @author Gary Russell * @since 2.7.7 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationManualAssignmentIntegrationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationManualAssignmentIntegrationTests.java index 1c282bbdbf..87618b70cf 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationManualAssignmentIntegrationTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationManualAssignmentIntegrationTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
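One detail of that test's consumer config is worth isolating: the main kafkaListenerContainerFactory installs a ContainerCustomizer that switches only the listeners whose id starts with "manual" to manual, out-of-order acknowledgments, which shouldRetryManualTopicWithDefaultDlt then verifies through the committed offset on the -dlt partition. The customizer, verbatim from the factory bean:

factory.setContainerCustomizer(container -> {
    if (container.getListenerId().startsWith("manual")) {
        container.getContainerProperties().setAckMode(AckMode.MANUAL);
        container.getContainerProperties().setAsyncAcks(true);
    }
});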
@@ -16,9 +16,6 @@ package org.springframework.kafka.retrytopic; -import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; - import java.time.Duration; import java.util.List; import java.util.Map; @@ -53,6 +50,9 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; + /** * @author Gary Russell * @since 2.7.7 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationProviderTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationProviderTests.java index 8d60c46e8d..6d12e5f8c9 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationProviderTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationProviderTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2023 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,19 +16,8 @@ package org.springframework.kafka.retrytopic; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.then; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.BDDMockito.willReturn; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; - -import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; import java.lang.reflect.Method; import java.util.Collections; @@ -43,10 +32,21 @@ import org.springframework.kafka.annotation.RetryableTopic; import org.springframework.kafka.core.KafkaOperations; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.then; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.BDDMockito.willReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; + /** * @author Tomaz Fernandes * @author Gary Russell * @author Fabio da Silva Jr. 
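In the RetryTopicConfigurationProviderTests hunks that follow, two mechanical cleanups accompany the new assertions: the willAnswer block collapses to a lambda, and the default-template stubbing resolves the bean through the public RetryTopicBeanNames constant instead of the hard-coded "retryTopicDefaultKafkaTemplate" literal:

// before: willAnswer(invoc -> { return invoc.getArgument(0); })...
willAnswer(invoc -> invoc.getArgument(0)).given(this.beanFactory).resolveEmbeddedValue(anyString());

// before: getBean("retryTopicDefaultKafkaTemplate", KafkaOperations.class)
willReturn(kafkaOperations)
        .given(beanFactory)
        .getBean(RetryTopicBeanNames.DEFAULT_KAFKA_TEMPLATE_BEAN_NAME, KafkaOperations.class);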
+ * @author Wang Zhiyang + * * @since 2.7 */ @ExtendWith(MockitoExtension.class) @@ -56,9 +56,7 @@ class RetryTopicConfigurationProviderTests { { this.beanFactory = mock(ConfigurableListableBeanFactory.class); - willAnswer(invoc -> { - return invoc.getArgument(0); - }).given(this.beanFactory).resolveEmbeddedValue(anyString()); + willAnswer(invoc -> invoc.getArgument(0)).given(this.beanFactory).resolveEmbeddedValue(anyString()); } private final String[] topics = {"topic1", "topic2"}; @@ -81,31 +79,28 @@ private Method getAnnotatedMethod(String methodName) { @Mock Object bean; - @Mock - RetryableTopic annotation; - @Mock KafkaOperations kafkaOperations; @Mock RetryTopicConfiguration retryTopicConfiguration; - @Mock - RetryTopicConfiguration retryTopicConfiguration2; - @Test void shouldProvideFromAnnotation() { // setup - willReturn(kafkaOperations).given(beanFactory).getBean("retryTopicDefaultKafkaTemplate", KafkaOperations.class); + willReturn(kafkaOperations).given(beanFactory).getBean(RetryTopicBeanNames.DEFAULT_KAFKA_TEMPLATE_BEAN_NAME, KafkaOperations.class); // given RetryTopicConfigurationProvider provider = new RetryTopicConfigurationProvider(beanFactory); RetryTopicConfiguration configuration = provider.findRetryConfigurationFor(topics, annotatedMethod, bean); + RetryTopicConfiguration configurationFromClass = provider + .findRetryConfigurationFor(topics, null, AnnotatedClass.class, bean); // then then(this.beanFactory).should(times(0)).getBeansOfType(RetryTopicConfiguration.class); - + assertThat(configuration).isNotNull(); + assertThat(configurationFromClass).isNotNull(); } @Test @@ -119,10 +114,13 @@ void shouldProvideFromBeanFactory() { // given RetryTopicConfigurationProvider provider = new RetryTopicConfigurationProvider(beanFactory); RetryTopicConfiguration configuration = provider.findRetryConfigurationFor(topics, nonAnnotatedMethod, bean); + RetryTopicConfiguration configurationFromClass = provider + .findRetryConfigurationFor(topics, null, NonAnnotatedClass.class, bean); // then - then(this.beanFactory).should(times(1)).getBeansOfType(RetryTopicConfiguration.class); + then(this.beanFactory).should(times(2)).getBeansOfType(RetryTopicConfiguration.class); assertThat(configuration).isEqualTo(retryTopicConfiguration); + assertThat(configurationFromClass).isEqualTo(retryTopicConfiguration); } @@ -137,10 +135,13 @@ void shouldFindNone() { // given RetryTopicConfigurationProvider provider = new RetryTopicConfigurationProvider(beanFactory); RetryTopicConfiguration configuration = provider.findRetryConfigurationFor(topics, nonAnnotatedMethod, bean); + RetryTopicConfiguration configurationFromClass = provider + .findRetryConfigurationFor(topics, null, NonAnnotatedClass.class, bean); // then - then(this.beanFactory).should(times(1)).getBeansOfType(RetryTopicConfiguration.class); + then(this.beanFactory).should(times(2)).getBeansOfType(RetryTopicConfiguration.class); assertThat(configuration).isNull(); + assertThat(configurationFromClass).isNull(); } @@ -148,15 +149,20 @@ void shouldFindNone() { void shouldProvideFromMetaAnnotation() { // setup - willReturn(kafkaOperations).given(beanFactory).getBean("retryTopicDefaultKafkaTemplate", KafkaOperations.class); + willReturn(kafkaOperations).given(beanFactory).getBean(RetryTopicBeanNames.DEFAULT_KAFKA_TEMPLATE_BEAN_NAME, KafkaOperations.class); // given RetryTopicConfigurationProvider provider = new RetryTopicConfigurationProvider(beanFactory); RetryTopicConfiguration configuration = provider.findRetryConfigurationFor(topics, 
metaAnnotatedMethod, bean); + RetryTopicConfiguration configurationFromClass = provider + .findRetryConfigurationFor(topics, null, MetaAnnotatedClass.class, bean); // then then(this.beanFactory).should(times(0)).getBeansOfType(RetryTopicConfiguration.class); + assertThat(configuration).isNotNull(); assertThat(configuration.getConcurrency()).isEqualTo(3); + assertThat(configurationFromClass).isNotNull(); + assertThat(configurationFromClass.getConcurrency()).isEqualTo(3); } @@ -166,9 +172,12 @@ void shouldNotConfigureIfBeanFactoryNull() { // given RetryTopicConfigurationProvider provider = new RetryTopicConfigurationProvider(null); RetryTopicConfiguration configuration = provider.findRetryConfigurationFor(topics, nonAnnotatedMethod, bean); + RetryTopicConfiguration configurationFromClass + = provider.findRetryConfigurationFor(topics, null, NonAnnotatedClass.class, bean); // then assertThat(configuration).isNull(); + assertThat(configurationFromClass).isNull(); } @@ -181,7 +190,6 @@ public void nonAnnotatedMethod() { // NoOps } - @Target({ElementType.METHOD}) @Retention(RetentionPolicy.RUNTIME) @RetryableTopic @interface MetaAnnotatedRetryableTopic { @@ -193,4 +201,19 @@ public void nonAnnotatedMethod() { public void metaAnnotatedMethod() { // NoOps } + + @RetryableTopic + public static class AnnotatedClass { + // NoOps + } + + public static class NonAnnotatedClass { + // NoOps + } + + @MetaAnnotatedRetryableTopic + public static class MetaAnnotatedClass { + // NoOps + } + } diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationSupportTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationSupportTests.java index cbfe355bbe..a066566a37 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationSupportTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurationSupportTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
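The provider tests above now also call provider.findRetryConfigurationFor(topics, null, AnnotatedClass.class, bean), covering @RetryableTopic declared at class level rather than on the listener method. A hedged sketch of a listener that would take this path (hypothetical class and topic name):

import org.springframework.kafka.annotation.DltHandler;
import org.springframework.kafka.annotation.KafkaHandler;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.RetryableTopic;
import org.springframework.retry.annotation.Backoff;

// the annotation is resolved from the class, not from a listener method
@RetryableTopic(attempts = "3", backoff = @Backoff(delay = 500))
@KafkaListener(topics = "orders")
class ClassLevelRetryListener {

	@KafkaHandler
	void onMessage(String order) {
		// a throw here routes the record through the retry topics
	}

	@DltHandler
	void onDlt(String order) {
		// terminal handling once retries are exhausted
	}

}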
@@ -16,17 +16,6 @@ package org.springframework.kafka.retrytopic; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.then; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; - import java.time.Clock; import java.util.LinkedHashMap; import java.util.List; @@ -53,9 +42,22 @@ import org.springframework.scheduling.TaskScheduler; import org.springframework.util.backoff.BackOff; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.then; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; + /** * @author Tomaz Fernandes * @author Gary Russell + * @author Wang Zhiyang + * * @since 2.9 */ class RetryTopicConfigurationSupportTests { @@ -185,7 +187,6 @@ void testCreateBackOffManager() { ContainerPartitionPausingBackOffManagerFactory.class); KafkaConsumerBackoffManager backoffManagerMock = mock(KafkaConsumerBackoffManager.class); TaskScheduler taskSchedulerMock = mock(TaskScheduler.class); - Clock clock = mock(Clock.class); ApplicationContext ctx = mock(ApplicationContext.class); given(componentFactory.kafkaBackOffManagerFactory(registry, ctx)).willReturn(factory); given(factory.create()).willReturn(backoffManagerMock); diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurerTests.java index a481a82069..3f5536798f 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurerTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConfigurerTests.java @@ -16,27 +16,21 @@ package org.springframework.kafka.retrytopic; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.then; -import static org.mockito.BDDMockito.willReturn; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; - import java.lang.reflect.Method; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.function.Consumer; +import java.util.stream.Stream; import org.apache.kafka.clients.admin.NewTopic; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; import org.mockito.ArgumentCaptor; import org.mockito.Captor; import org.mockito.Mock; @@ -54,6 +48,15 @@ import 
org.springframework.kafka.test.condition.LogLevels; import org.springframework.test.util.ReflectionTestUtils; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.then; +import static org.mockito.BDDMockito.willReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; + /** * @author Tomaz Fernandes * @author Wang Zhiyang @@ -75,16 +78,12 @@ class RetryTopicConfigurerTests { @Mock private BeanFactory beanFactory; - private DefaultListableBeanFactory defaultListableBeanFactory = new DefaultListableBeanFactory(); + private final DefaultListableBeanFactory defaultListableBeanFactory = new DefaultListableBeanFactory(); - @Mock - private RetryTopicConfigurer.EndpointProcessor endpointProcessor; + private static final MethodKafkaListenerEndpoint mainEndpoint = mock(MethodKafkaListenerEndpoint.class); - @Mock - private MethodKafkaListenerEndpoint mainEndpoint; - - @Mock - private MultiMethodKafkaListenerEndpoint multiMethodEndpoint; + private static final MultiMethodKafkaListenerEndpoint mainMultiEndpoint = + mock(MultiMethodKafkaListenerEndpoint.class); @Mock private RetryTopicConfiguration configuration; @@ -167,22 +166,15 @@ private Method getMethod(String methodName) { } } - @Test - void shouldThrowIfMultiMethodEndpoint() { - - // setup - RetryTopicConfigurer configurer = new RetryTopicConfigurer(destinationTopicProcessor, containerFactoryResolver, - listenerContainerFactoryConfigurer, new SuffixingRetryTopicNamesProviderFactory()); - configurer.setBeanFactory(beanFactory); - - // when - then - assertThatIllegalArgumentException().isThrownBy( - () -> configurer.processMainAndRetryListeners(endpointProcessor, multiMethodEndpoint, configuration, - registrar, containerFactory, defaultFactoryBeanName)); + private static Stream paramsRetryEndpoints() { + return Stream.of( + Arguments.of(mainEndpoint), + Arguments.of(mainMultiEndpoint)); } - @Test - void shouldConfigureRetryEndpoints() { + @ParameterizedTest(name = "{index} shouldConfigureRetryEndpoints endpoint is {0}") + @MethodSource("paramsRetryEndpoints") + void shouldConfigureRetryEndpoints(MethodKafkaListenerEndpoint mainEndpoint) { // given @@ -204,7 +196,15 @@ void shouldConfigureRetryEndpoints() { given(configuration.getDestinationTopicProperties()).willReturn(destinationPropertiesList); given(mainEndpoint.getBean()).willReturn(bean); - given(mainEndpoint.getMethod()).willReturn(endpointMethod); + if (mainEndpoint instanceof MultiMethodKafkaListenerEndpoint multiEndpoint) { + given(multiEndpoint.getDefaultMethod()).willReturn(endpointMethod); + given(multiEndpoint.getMethods()).willReturn(List.of(endpointMethod)); + } + else { + given(mainEndpoint.getMethod()).willReturn(endpointMethod); + } + given(endpointHandlerMethod.resolveBean(any())).willReturn(bean); + given(endpointHandlerMethod.getMethod()).willReturn(noOpsDltMethod); given(configuration.getDltHandlerMethod()).willReturn(endpointHandlerMethod); given(configuration.forKafkaTopicAutoCreation()).willReturn(topicCreationConfig); given(topicCreationConfig.shouldCreateTopics()).willReturn(true); @@ -217,6 +217,7 @@ void shouldConfigureRetryEndpoints() { given(firstRetryDestinationProperties.suffix()).willReturn(firstRetrySuffix); given(secondRetryDestinationProperties.suffix()).willReturn(secondRetrySuffix);
given(dltDestinationProperties.suffix()).willReturn(dltSuffix); + given(dltDestinationProperties.isDltTopic()).willReturn(true); given(mainDestinationProperties.isMainEndpoint()).willReturn(true); given(mainEndpoint.getTopics()).willReturn(topics); @@ -294,15 +295,6 @@ private void assertTopicNames(String retrySuffix, DestinationTopic.Properties de assertThat(retryTopicName.get(index + 1)).isEqualTo(secondTopicName); } - private void thenAssertEndpointProcessing(MethodKafkaListenerEndpoint endpoint) { - then(endpoint).should(times(1)).setTopics(topics.toArray(new String[]{})); - then(endpoint).should(times(1)).setId("testId"); - then(endpoint).should(times(1)).setGroup("testGroup"); - then(endpoint).should(times(1)).setGroupId("testGroupId"); - then(endpoint).should(times(1)).setClientIdPrefix("testClientPrefix"); - then(endpoint).should(times(1)).setBeanFactory(defaultListableBeanFactory); - } - public void noOpsMethod() { // noOps } @@ -349,7 +341,6 @@ void shouldInstantiateIfNotInContainer() { @LogLevels(classes = RetryTopicConfigurer.class, level = "info") @Test - @SuppressWarnings("deprecation") void shouldLogConsumerRecordMessage() { RetryTopicConfigurer.LoggingDltListenerHandlerMethod method = new RetryTopicConfigurer.LoggingDltListenerHandlerMethod(); @@ -366,6 +357,7 @@ void shouldNotLogObjectMessage() { } static class NoOpsClass { - void noOpsMethod() { }; + void noOpsMethod() { } } + } diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConstantsTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConstantsTests.java index 6181124420..841572bd27 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConstantsTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicConstantsTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2021 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,10 +16,10 @@ package org.springframework.kafka.retrytopic; -import static org.assertj.core.api.Assertions.assertThat; - import org.junit.jupiter.api.Test; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Tomaz Fernandes * @since 2.7 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicExceptionRoutingIntegrationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicExceptionRoutingIntegrationTests.java index d8b7157252..93b0a34b1d 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicExceptionRoutingIntegrationTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicExceptionRoutingIntegrationTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
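The exception-routing integration test below, like the other integration tests in this patch, verifies delivery counts through CountDownLatch fields that the listeners count down on every attempt. A sketch of the shared await-helper pattern (hedged; the real helpers fail the test rather than throwing):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

final class LatchAwaitSketch {

	private LatchAwaitSketch() {
	}

	static boolean awaitLatch(CountDownLatch latch) {
		try {
			// generous timeout so retry/back-off schedules can finish on slow CI
			return latch.await(150, TimeUnit.SECONDS);
		}
		catch (InterruptedException ex) {
			Thread.currentThread().interrupt();
			throw new IllegalStateException("interrupted while waiting for latch", ex);
		}
	}

}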
@@ -16,9 +16,6 @@ package org.springframework.kafka.retrytopic; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - import java.util.HashMap; import java.util.List; import java.util.Map; @@ -54,6 +51,7 @@ import org.springframework.kafka.support.converter.ConversionException; import org.springframework.kafka.test.EmbeddedKafkaBroker; import org.springframework.kafka.test.context.EmbeddedKafka; +import org.springframework.kafka.test.utils.KafkaTestUtils; import org.springframework.messaging.handler.annotation.Header; import org.springframework.retry.annotation.Backoff; import org.springframework.scheduling.TaskScheduler; @@ -62,9 +60,15 @@ import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; import org.springframework.util.backoff.FixedBackOff; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + /** * @author Tomaz Fernandes + * @author Wang Zhiyang + * @author Sanghyeok An + * * @since 2.8.4 */ @SpringJUnitConfig @@ -75,9 +79,13 @@ public class RetryTopicExceptionRoutingIntegrationTests { private static final Logger logger = LoggerFactory.getLogger(RetryTopicExceptionRoutingIntegrationTests.class); public final static String BLOCKING_AND_TOPIC_RETRY = "blocking-and-topic-retry"; + public final static String ONLY_RETRY_VIA_BLOCKING = "only-retry-blocking-topic"; + public final static String ONLY_RETRY_VIA_TOPIC = "only-retry-topic"; + public final static String USER_FATAL_EXCEPTION_TOPIC = "user-fatal-topic"; + public final static String FRAMEWORK_FATAL_EXCEPTION_TOPIC = "framework-fatal-topic"; @Autowired @@ -247,9 +255,7 @@ static class FrameworkFatalTopicListener { @Autowired CountDownLatchContainer container; - @SuppressWarnings("deprecation") - @RetryableTopic(sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.SINGLE_TOPIC, - backoff = @Backoff(50)) + @RetryableTopic(backoff = @Backoff(50)) @KafkaListener(topics = FRAMEWORK_FATAL_EXCEPTION_TOPIC) public void listenWithAnnotation(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { container.fatalFrameworkLatch.countDown(); @@ -270,20 +276,33 @@ public void annotatedDltMethod(Object message, @Header(KafkaHeaders.RECEIVED_TOP static class CountDownLatchContainer { CountDownLatch blockingAndTopicsLatch = new CountDownLatch(12); + CountDownLatch onlyRetryViaBlockingLatch = new CountDownLatch(4); + CountDownLatch onlyRetryViaTopicLatch = new CountDownLatch(3); + CountDownLatch fatalUserLatch = new CountDownLatch(1); + CountDownLatch fatalFrameworkLatch = new CountDownLatch(1); + CountDownLatch annotatedDltOnlyBlockingLatch = new CountDownLatch(1); + CountDownLatch annotatedDltUserFatalLatch = new CountDownLatch(1); + CountDownLatch annotatedDltFrameworkFatalLatch = new CountDownLatch(1); + CountDownLatch dltProcessorLatch = new CountDownLatch(1); + CountDownLatch dltProcessorWithErrorLatch = new CountDownLatch(1); AtomicInteger blockingAndTopicsListenerInvocations = new AtomicInteger(); + AtomicInteger onlyRetryViaTopicListenerInvocations = new AtomicInteger(); + AtomicInteger onlyRetryViaBlockingListenerInvocations = new AtomicInteger(); + AtomicInteger userFatalListenerInvocations = new AtomicInteger(); + AtomicInteger fatalFrameworkListenerInvocations = new AtomicInteger(); } @@ -332,7 +351,6 @@ public RetryTopicConfiguration blockingAndTopic(KafkaTemplate te } @Bean - @SuppressWarnings("deprecation") public RetryTopicConfiguration onlyTopic(KafkaTemplate 
template) { return RetryTopicConfigurationBuilder .newInstance() @@ -415,9 +433,7 @@ public static class KafkaProducerConfig { @Bean public ProducerFactory producerFactory() { - Map configProps = new HashMap<>(); - configProps.put( - ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, + Map configProps = KafkaTestUtils.producerProps( this.broker.getBrokersAsString()); configProps.put( ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, @@ -450,13 +466,8 @@ public KafkaAdmin kafkaAdmin() { @Bean public ConsumerFactory consumerFactory() { - Map props = new HashMap<>(); - props.put( - ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, - this.broker.getBrokersAsString()); - props.put( - ConsumerConfig.GROUP_ID_CONFIG, - "groupId"); + Map props = KafkaTestUtils.consumerProps( + this.broker.getBrokersAsString(), "groupId"); props.put( ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicHeadersTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicHeadersTests.java index c07e3e84cb..89045bbde3 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicHeadersTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicHeadersTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2021 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,10 +16,10 @@ package org.springframework.kafka.retrytopic; -import static org.assertj.core.api.Assertions.assertThat; - import org.junit.jupiter.api.Test; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Tomaz Fernandes * @since 2.7 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicIntegrationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicIntegrationTests.java index 1f32c923c9..15104b0829 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicIntegrationTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicIntegrationTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
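Several configuration classes in this patch replace hand-built property maps with the KafkaTestUtils helpers, which pre-fill the bootstrap servers (and, for consumers, the group id). A sketch of the consumer side, assuming the two-argument consumerProps(brokers, group) overload these hunks use:

import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;

import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.test.EmbeddedKafkaBroker;
import org.springframework.kafka.test.utils.KafkaTestUtils;

class ConsumerFactorySketch {

	ConsumerFactory<String, String> consumerFactory(EmbeddedKafkaBroker broker) {
		// bootstrap servers and group id are populated by the helper
		Map<String, Object> props = KafkaTestUtils.consumerProps(broker.getBrokersAsString(), "groupId");
		props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
		props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
		return new DefaultKafkaConsumerFactory<>(props);
	}

}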
@@ -16,10 +16,6 @@ package org.springframework.kafka.retrytopic; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.awaitility.Awaitility.await; - import java.lang.reflect.Method; import java.util.ArrayList; import java.util.Arrays; @@ -76,6 +72,7 @@ import org.springframework.kafka.support.KafkaHeaders; import org.springframework.kafka.test.EmbeddedKafkaBroker; import org.springframework.kafka.test.context.EmbeddedKafka; +import org.springframework.kafka.test.utils.KafkaTestUtils; import org.springframework.messaging.Message; import org.springframework.messaging.converter.CompositeMessageConverter; import org.springframework.messaging.converter.GenericMessageConverter; @@ -84,16 +81,20 @@ import org.springframework.retry.annotation.Backoff; import org.springframework.scheduling.TaskScheduler; import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler; -import org.springframework.stereotype.Component; import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.TestPropertySource; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; +import static org.awaitility.Awaitility.await; + /** * @author Tomaz Fernandes * @author Gary Russell * @author Wang Zhiyang + * @author Sanghyeok An * @since 2.7 */ @SpringJUnitConfig @@ -315,7 +316,6 @@ private boolean awaitLatch(CountDownLatch latch) { } } - @Component static class FirstTopicListener { @Autowired @@ -335,7 +335,6 @@ public void listen(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String r } - @Component static class SecondTopicListener { @Autowired @@ -349,7 +348,6 @@ public void listenAgain(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) Str } } - @Component static class ThirdTopicListener { @Autowired @@ -377,13 +375,13 @@ public void annotatedDltMethod(Object message) { } } - @Component static class FourthTopicListener { @Autowired CountDownLatchContainer container; @RetryableTopic(dltStrategy = DltStrategy.NO_DLT, attempts = "4", backoff = @Backoff(300), + sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.MULTIPLE_TOPICS, kafkaTemplate = "${kafka.template}") @KafkaListener(topics = FOURTH_TOPIC, containerFactory = MAIN_TOPIC_CONTAINER_FACTORY) public void listenNoDlt(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { @@ -410,6 +408,7 @@ static class FifthTopicListener1 { numPartitions = "2", retryTopicSuffix = "-listener1", dltTopicSuffix = "-listener1-dlt", topicSuffixingStrategy = TopicSuffixingStrategy.SUFFIX_WITH_INDEX_VALUE, + sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.MULTIPLE_TOPICS, kafkaTemplate = "${kafka.template}") @KafkaListener(id = "fifthTopicId1", topicPartitions = {@TopicPartition(topic = TWO_LISTENERS_TOPIC, partitionOffsets = @PartitionOffset(partition = "0", initialOffset = "0"))}, @@ -442,6 +441,7 @@ static class FifthTopicListener2 { numPartitions = "2", retryTopicSuffix = "-listener2", dltTopicSuffix = "-listener2-dlt", topicSuffixingStrategy = TopicSuffixingStrategy.SUFFIX_WITH_INDEX_VALUE, + sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.MULTIPLE_TOPICS, kafkaTemplate = "${kafka.template}") @KafkaListener(id = "fifthTopicId2", topicPartitions = {@TopicPartition(topic = TWO_LISTENERS_TOPIC, partitionOffsets = @PartitionOffset(partition = "1", initialOffset = "0"))}, 
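The sameIntervalTopicReuseStrategy churn in this file tracks the current default: with a fixed back-off, same-interval retries share a single reused retry topic, so listeners that still need one topic per attempt opt in with MULTIPLE_TOPICS. A sketch (hypothetical topic name):

import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.RetryableTopic;
import org.springframework.kafka.retrytopic.SameIntervalTopicReuseStrategy;
import org.springframework.retry.annotation.Backoff;

class ReuseStrategySketchListener {

	// fixed 300ms back-off; MULTIPLE_TOPICS keeps one retry topic per attempt,
	// whereas the default folds the same-interval attempts into one reused topic
	@RetryableTopic(attempts = "4", backoff = @Backoff(300),
			sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.MULTIPLE_TOPICS)
	@KafkaListener(topics = "sketch-topic")
	void listen(String in) {
	}

}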
@@ -462,13 +462,13 @@ public void annotatedDltMethod(ConsumerRecord record) { } - @Component static class SixthTopicDefaultDLTListener { @Autowired CountDownLatchContainer container; - @RetryableTopic(attempts = "4", backoff = @Backoff(50)) + @RetryableTopic(attempts = "4", backoff = @Backoff(50), + sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.MULTIPLE_TOPICS) @KafkaListener(id = "manual", topics = MANUAL_TOPIC, containerFactory = MAIN_TOPIC_CONTAINER_FACTORY) public void listenNoDlt(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic, @SuppressWarnings("unused") Acknowledgment ack) { @@ -480,7 +480,6 @@ public void listenNoDlt(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) Str } - @Component static class NoRetryTopicListener { @Autowired @@ -503,7 +502,6 @@ public void annotatedDltMethod(Object message) { } } - @Component static class FirstReuseRetryTopicListener { final List topics = Collections.synchronizedList(new ArrayList<>()); @@ -511,8 +509,7 @@ static class FirstReuseRetryTopicListener { @Autowired CountDownLatchContainer container; - @RetryableTopic(attempts = "2", backoff = @Backoff(50), - sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.SINGLE_TOPIC) + @RetryableTopic(attempts = "2", backoff = @Backoff(50)) @KafkaListener(id = "reuseRetry1", topics = FIRST_REUSE_RETRY_TOPIC, containerFactory = "retryTopicListenerContainerFactory") public void listen1(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { @@ -524,7 +521,6 @@ public void listen1(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String } - @Component static class SecondReuseRetryTopicListener { final List topics = Collections.synchronizedList(new ArrayList<>()); @@ -532,8 +528,7 @@ static class SecondReuseRetryTopicListener { @Autowired CountDownLatchContainer container; - @RetryableTopic(attempts = "5", backoff = @Backoff(delay = 30, maxDelay = 100, multiplier = 2), - sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.SINGLE_TOPIC) + @RetryableTopic(attempts = "5", backoff = @Backoff(delay = 30, maxDelay = 100, multiplier = 2)) @KafkaListener(id = "reuseRetry2", topics = SECOND_REUSE_RETRY_TOPIC, containerFactory = "retryTopicListenerContainerFactory") public void listen2(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { @@ -545,7 +540,6 @@ public void listen2(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String } - @Component static class ThirdReuseRetryTopicListener { final List topics = Collections.synchronizedList(new ArrayList<>()); @@ -553,8 +547,7 @@ static class ThirdReuseRetryTopicListener { @Autowired CountDownLatchContainer container; - @RetryableTopic(attempts = "5", backoff = @Backoff(delay = 1, maxDelay = 5, multiplier = 1.4), - sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.SINGLE_TOPIC) + @RetryableTopic(attempts = "5", backoff = @Backoff(delay = 1, maxDelay = 5, multiplier = 1.4)) @KafkaListener(id = "reuseRetry3", topics = THIRD_REUSE_RETRY_TOPIC, containerFactory = "retryTopicListenerContainerFactory") public void listen3(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String receivedTopic) { @@ -566,26 +559,42 @@ public void listen3(String message, @Header(KafkaHeaders.RECEIVED_TOPIC) String } - @Component static class CountDownLatchContainer { CountDownLatch countDownLatch1 = new CountDownLatch(5); + CountDownLatch countDownLatch2 = new CountDownLatch(3); + CountDownLatch countDownLatch3 = new CountDownLatch(3); + CountDownLatch 
countDownLatch4 = new CountDownLatch(4); + CountDownLatch countDownLatch51 = new CountDownLatch(4); + CountDownLatch countDownLatch52 = new CountDownLatch(4); + CountDownLatch countDownLatch6 = new CountDownLatch(4); + CountDownLatch countDownLatchNoRetry = new CountDownLatch(1); + CountDownLatch countDownLatchDltOne = new CountDownLatch(1); + CountDownLatch countDownLatchDltTwo = new CountDownLatch(1); + CountDownLatch countDownLatchDltThree = new CountDownLatch(1); + CountDownLatch countDownLatchDltFour = new CountDownLatch(1); + CountDownLatch countDownLatchReuseOne = new CountDownLatch(2); + CountDownLatch countDownLatchReuseTwo = new CountDownLatch(5); + CountDownLatch countDownLatchReuseThree = new CountDownLatch(5); + CountDownLatch customDltCountdownLatch = new CountDownLatch(1); + CountDownLatch customErrorHandlerCountdownLatch = new CountDownLatch(6); + CountDownLatch customMessageConverterCountdownLatch = new CountDownLatch(6); final List knownTopics = new ArrayList<>(); @@ -600,7 +609,6 @@ private void countDownIfNotKnown(String receivedTopic, CountDownLatch countDownL } } - @Component static class MyCustomDltProcessor { @Autowired @@ -758,9 +766,7 @@ public static class KafkaProducerConfig { @Bean public ProducerFactory producerFactory() { - Map configProps = new HashMap<>(); - configProps.put( - ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, + Map configProps = KafkaTestUtils.producerProps( this.broker.getBrokersAsString()); configProps.put( ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, @@ -803,13 +809,8 @@ public NewTopics topics() { @Bean public ConsumerFactory consumerFactory() { - Map props = new HashMap<>(); - props.put( - ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, - this.broker.getBrokersAsString()); - props.put( - ConsumerConfig.GROUP_ID_CONFIG, - "groupId"); + Map props = KafkaTestUtils.consumerProps( + this.broker.getBrokersAsString(), "groupId"); props.put( ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicSameContainerFactoryIntegrationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicSameContainerFactoryIntegrationTests.java index 3473c8a6c7..73257f8dd1 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicSameContainerFactoryIntegrationTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryTopicSameContainerFactoryIntegrationTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
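The same-container-factory test below gains a second, class-level container factory; the container-factory hunks further down mark the original bean @Primary so unqualified by-type injection stays unambiguous while listeners select the new factory by name. A minimal sketch of that bean arrangement:

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;

@Configuration
class TwoFactoriesSketch {

	// default factory: @Primary wins when a listener does not name one
	@Bean
	@Primary
	ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory(
			ConsumerFactory<String, String> consumerFactory) {
		ConcurrentKafkaListenerContainerFactory<String, String> factory =
				new ConcurrentKafkaListenerContainerFactory<>();
		factory.setConsumerFactory(consumerFactory);
		return factory;
	}

	// selected explicitly via containerFactory = "classLevelKafkaListenerContainerFactory"
	@Bean
	ConcurrentKafkaListenerContainerFactory<String, String> classLevelKafkaListenerContainerFactory(
			ConsumerFactory<String, String> consumerFactory) {
		ConcurrentKafkaListenerContainerFactory<String, String> factory =
				new ConcurrentKafkaListenerContainerFactory<>();
		factory.setConsumerFactory(consumerFactory);
		return factory;
	}

}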
@@ -16,12 +16,6 @@ package org.springframework.kafka.retrytopic; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; - -import java.util.HashMap; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -31,14 +25,14 @@ import org.apache.kafka.common.serialization.StringDeserializer; import org.apache.kafka.common.serialization.StringSerializer; import org.junit.jupiter.api.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.Primary; import org.springframework.kafka.annotation.DltHandler; import org.springframework.kafka.annotation.EnableKafka; +import org.springframework.kafka.annotation.KafkaHandler; import org.springframework.kafka.annotation.KafkaListener; import org.springframework.kafka.annotation.RetryableTopic; import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; @@ -52,6 +46,7 @@ import org.springframework.kafka.support.KafkaHeaders; import org.springframework.kafka.test.EmbeddedKafkaBroker; import org.springframework.kafka.test.context.EmbeddedKafka; +import org.springframework.kafka.test.utils.KafkaTestUtils; import org.springframework.messaging.handler.annotation.Header; import org.springframework.retry.annotation.Backoff; import org.springframework.stereotype.Component; @@ -59,25 +54,41 @@ import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; import org.springframework.util.backoff.FixedBackOff; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; + /** * @author Tomaz Fernandes * @author Cenk Akin + * @author Wang Zhiyang + * @author Sanghyeok An + * * @since 2.8.3 */ @SpringJUnitConfig @DirtiesContext @EmbeddedKafka(topics = { RetryTopicSameContainerFactoryIntegrationTests.FIRST_TOPIC, - RetryTopicSameContainerFactoryIntegrationTests.SECOND_TOPIC, RetryTopicSameContainerFactoryIntegrationTests.THIRD_TOPIC}, partitions = 1) + RetryTopicSameContainerFactoryIntegrationTests.SECOND_TOPIC, + RetryTopicSameContainerFactoryIntegrationTests.THIRD_TOPIC, + RetryTopicSameContainerFactoryIntegrationTests.CLASS_LEVEL_FIRST_TOPIC, + RetryTopicSameContainerFactoryIntegrationTests.CLASS_LEVEL_SECOND_TOPIC, + RetryTopicSameContainerFactoryIntegrationTests.CLASS_LEVEL_THIRD_TOPIC}, partitions = 1) public class RetryTopicSameContainerFactoryIntegrationTests { - private static final Logger logger = LoggerFactory.getLogger(RetryTopicSameContainerFactoryIntegrationTests.class); - public final static String FIRST_TOPIC = "myRetryTopic1"; public final static String SECOND_TOPIC = "myRetryTopic2"; public final static String THIRD_TOPIC = "myRetryTopic3"; + public final static String CLASS_LEVEL_FIRST_TOPIC = "classLevelRetryTopic1"; + + public final static String CLASS_LEVEL_SECOND_TOPIC = "classLevelRetryTopic2"; + + public final static String CLASS_LEVEL_THIRD_TOPIC = "classLevelRetryTopic3"; + @Autowired private KafkaTemplate sendKafkaTemplate; @@ -86,11 +97,8 @@ public class RetryTopicSameContainerFactoryIntegrationTests { @Test void shouldRetryFirstAndSecondTopics(@Autowired 
RetryTopicComponentFactory componentFactory) { - logger.debug("Sending message to topic " + FIRST_TOPIC); sendKafkaTemplate.send(FIRST_TOPIC, "Testing topic 1"); - logger.debug("Sending message to topic " + SECOND_TOPIC); sendKafkaTemplate.send(SECOND_TOPIC, "Testing topic 2"); - logger.debug("Sending message to topic " + THIRD_TOPIC); sendKafkaTemplate.send(THIRD_TOPIC, "Testing topic 3"); assertThat(awaitLatch(latchContainer.countDownLatchFirstRetryable)).isTrue(); assertThat(awaitLatch(latchContainer.countDownLatchDltOne)).isTrue(); @@ -101,6 +109,20 @@ void shouldRetryFirstAndSecondTopics(@Autowired RetryTopicComponentFactory compo verify(componentFactory).destinationTopicResolver(); } + @Test + void shouldRetryClassLevelFirstAndSecondTopics(@Autowired RetryTopicComponentFactory componentFactory) { + sendKafkaTemplate.send(CLASS_LEVEL_FIRST_TOPIC, "Testing topic 1"); + sendKafkaTemplate.send(CLASS_LEVEL_SECOND_TOPIC, "Testing topic 2"); + sendKafkaTemplate.send(CLASS_LEVEL_THIRD_TOPIC, "Testing topic 3"); + assertThat(awaitLatch(latchContainer.countDownLatchClassLevelFirstRetryable)).isTrue(); + assertThat(awaitLatch(latchContainer.countDownLatchClassLevelDltOne)).isTrue(); + assertThat(awaitLatch(latchContainer.countDownLatchClassLevelSecondRetryable)).isTrue(); + assertThat(awaitLatch(latchContainer.countDownLatchClassLevelDltSecond)).isTrue(); + assertThat(awaitLatch(latchContainer.countDownLatchClassLevelBasic)).isTrue(); + assertThat(awaitLatch(latchContainer.customizerClassLevelLatch)).isTrue(); + verify(componentFactory).destinationTopicResolver(); + } + private boolean awaitLatch(CountDownLatch latch) { try { return latch.await(150, TimeUnit.SECONDS); @@ -121,18 +143,43 @@ static class FirstRetryableKafkaListener { attempts = "4", backoff = @Backoff(delay = 1000, multiplier = 2.0), autoCreateTopics = "false", - topicSuffixingStrategy = TopicSuffixingStrategy.SUFFIX_WITH_INDEX_VALUE) + topicSuffixingStrategy = TopicSuffixingStrategy.SUFFIX_WITH_INDEX_VALUE, + sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.MULTIPLE_TOPICS) @KafkaListener(topics = RetryTopicSameContainerFactoryIntegrationTests.FIRST_TOPIC) public void listen(String in, @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) { countDownLatchContainer.countDownLatchFirstRetryable.countDown(); - logger.warn(in + " from " + topic); throw new RuntimeException("from FirstRetryableKafkaListener"); } @DltHandler public void dlt(String in, @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) { countDownLatchContainer.countDownLatchDltOne.countDown(); - logger.warn(in + " from " + topic); + } + } + + @Component + @RetryableTopic( + attempts = "4", + backoff = @Backoff(delay = 1000, multiplier = 2.0), + autoCreateTopics = "false", + topicSuffixingStrategy = TopicSuffixingStrategy.SUFFIX_WITH_INDEX_VALUE, + sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.MULTIPLE_TOPICS) + @KafkaListener(topics = RetryTopicSameContainerFactoryIntegrationTests.CLASS_LEVEL_FIRST_TOPIC, + containerFactory = "classLevelKafkaListenerContainerFactory") + static class FirstClassLevelRetryableKafkaListener { + + @Autowired + CountDownLatchContainer countDownLatchContainer; + + @KafkaHandler + public void listen(String in, @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) { + countDownLatchContainer.countDownLatchClassLevelFirstRetryable.countDown(); + throw new RuntimeException("from FirstClassLevelRetryableKafkaListener"); + } + + @DltHandler + public void dlt(String in, @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) { 
+ countDownLatchContainer.countDownLatchClassLevelDltOne.countDown(); } } @@ -142,42 +189,87 @@ static class SecondRetryableKafkaListener { @Autowired CountDownLatchContainer countDownLatchContainer; - @RetryableTopic + @RetryableTopic(sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.MULTIPLE_TOPICS) @KafkaListener(topics = RetryTopicSameContainerFactoryIntegrationTests.SECOND_TOPIC) public void listen(String in, @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) { countDownLatchContainer.countDownLatchSecondRetryable.countDown(); - logger.info(in + " from " + topic); throw new RuntimeException("from SecondRetryableKafkaListener"); } @DltHandler public void dlt(String in, @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) { countDownLatchContainer.countDownLatchDltSecond.countDown(); - logger.warn(in + " from " + topic); } } + @Component + @RetryableTopic(sameIntervalTopicReuseStrategy = SameIntervalTopicReuseStrategy.MULTIPLE_TOPICS) + @KafkaListener(topics = RetryTopicSameContainerFactoryIntegrationTests.CLASS_LEVEL_SECOND_TOPIC, + containerFactory = "classLevelKafkaListenerContainerFactory") + static class SecondClassLevelRetryableKafkaListener { + + @Autowired + CountDownLatchContainer countDownLatchContainer; + + @KafkaHandler + public void listen(String in, @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) { + countDownLatchContainer.countDownLatchClassLevelSecondRetryable.countDown(); + throw new RuntimeException("from ClassLevelSecondRetryableKafkaListener"); + } + + @DltHandler + public void dlt(String in, @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) { + countDownLatchContainer.countDownLatchClassLevelDltSecond.countDown(); + } + } @Component static class BasicKafkaListener { @KafkaListener(topics = RetryTopicSameContainerFactoryIntegrationTests.THIRD_TOPIC) public void listen(String in, @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) { - logger.info(in + " from " + topic); throw new RuntimeException("from BasicKafkaListener"); } } + @Component + @KafkaListener(topics = RetryTopicSameContainerFactoryIntegrationTests.CLASS_LEVEL_THIRD_TOPIC, + containerFactory = "classLevelKafkaListenerContainerFactory") + static class BasicClassLevelKafkaListener { + + @KafkaHandler + public void listen(String in, @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) { + throw new RuntimeException("from BasicClassLevelKafkaListener"); + } + } + @Component static class CountDownLatchContainer { CountDownLatch countDownLatchFirstRetryable = new CountDownLatch(4); + CountDownLatch countDownLatchSecondRetryable = new CountDownLatch(3); + CountDownLatch countDownLatchDltOne = new CountDownLatch(1); + CountDownLatch countDownLatchDltSecond = new CountDownLatch(1); CountDownLatch countDownLatchBasic = new CountDownLatch(1); + CountDownLatch customizerLatch = new CountDownLatch(10); + + CountDownLatch countDownLatchClassLevelFirstRetryable = new CountDownLatch(4); + + CountDownLatch countDownLatchClassLevelSecondRetryable = new CountDownLatch(3); + + CountDownLatch countDownLatchClassLevelDltOne = new CountDownLatch(1); + + CountDownLatch countDownLatchClassLevelDltSecond = new CountDownLatch(1); + + CountDownLatch countDownLatchClassLevelBasic = new CountDownLatch(1); + + CountDownLatch customizerClassLevelLatch = new CountDownLatch(10); + } @EnableKafka @@ -208,9 +300,41 @@ BasicKafkaListener basicKafkaListener() { } @Bean + FirstClassLevelRetryableKafkaListener firstClassLevelRetryableKafkaListener() { + return new FirstClassLevelRetryableKafkaListener(); + } + + @Bean + 
SecondClassLevelRetryableKafkaListener secondClassLevelRetryableKafkaListener() { + return new SecondClassLevelRetryableKafkaListener(); + } + + @Bean + BasicClassLevelKafkaListener basicClassLevelKafkaListener() { + return new BasicClassLevelKafkaListener(); + } + + @Bean + @Primary public ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory( ConsumerFactory consumerFactory, CountDownLatchContainer latchContainer) { + return createKafkaListenerContainerFactory(consumerFactory, latchContainer.countDownLatchBasic, + latchContainer.customizerLatch); + } + + @Bean + public ConcurrentKafkaListenerContainerFactory classLevelKafkaListenerContainerFactory( + ConsumerFactory consumerFactory, CountDownLatchContainer latchContainer) { + + return createKafkaListenerContainerFactory(consumerFactory, latchContainer.countDownLatchClassLevelBasic, + latchContainer.customizerClassLevelLatch); + } + + private ConcurrentKafkaListenerContainerFactory createKafkaListenerContainerFactory( + ConsumerFactory consumerFactory, CountDownLatch countDownLatchBasic, + CountDownLatch customizerLatch) { + ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); factory.setConsumerFactory(consumerFactory); ContainerProperties props = factory.getContainerProperties(); @@ -219,20 +343,18 @@ public ConcurrentKafkaListenerContainerFactory kafkaListenerCont props.setIdlePartitionEventInterval(100L); factory.setConsumerFactory(consumerFactory); DefaultErrorHandler errorHandler = new DefaultErrorHandler( - (cr, ex) -> latchContainer.countDownLatchBasic.countDown(), + (cr, ex) -> countDownLatchBasic.countDown(), new FixedBackOff(0, 2)); factory.setCommonErrorHandler(errorHandler); factory.setConcurrency(1); factory.setContainerCustomizer( - container -> latchContainer.customizerLatch.countDown()); + container -> customizerLatch.countDown()); return factory; } @Bean public ProducerFactory producerFactory() { - Map configProps = new HashMap<>(); - configProps.put( - ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, + Map configProps = KafkaTestUtils.producerProps( this.broker.getBrokersAsString()); configProps.put( ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, @@ -250,13 +372,8 @@ public KafkaTemplate kafkaTemplate() { @Bean public ConsumerFactory consumerFactory() { - Map props = new HashMap<>(); - props.put( - ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, - this.broker.getBrokersAsString()); - props.put( - ConsumerConfig.GROUP_ID_CONFIG, - "groupId"); + Map props = KafkaTestUtils.consumerProps( + this.broker.getBrokersAsString(), "groupId"); props.put( ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); diff --git a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryableTopicAnnotationProcessorTests.java b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryableTopicAnnotationProcessorTests.java index 2c887cf8d1..817f332fff 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryableTopicAnnotationProcessorTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/retrytopic/RetryableTopicAnnotationProcessorTests.java @@ -16,19 +16,15 @@ package org.springframework.kafka.retrytopic; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatExceptionOfType; -import static org.assertj.core.api.Assertions.assertThatIllegalStateException; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.BDDMockito.given; 
-import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.mock; - import java.lang.reflect.Method; import java.util.List; +import java.util.stream.Stream; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; @@ -48,13 +44,22 @@ import org.springframework.test.util.ReflectionTestUtils; import org.springframework.util.ReflectionUtils; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assertions.assertThatIllegalStateException; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.mock; + /** * @author Tomaz Fernandes * @author Gary Russell * @author Adrian Chlebosz + * @author Wang Zhiyang + * * @since 2.7 */ -@SuppressWarnings("deprecation") @ExtendWith(MockitoExtension.class) class RetryableTopicAnnotationProcessorTests { @@ -78,9 +83,7 @@ class RetryableTopicAnnotationProcessorTests { { this.beanFactory = mock(ConfigurableBeanFactory.class); - willAnswer(invoc -> { - return invoc.getArgument(0); - }).given(this.beanFactory).resolveEmbeddedValue(anyString()); + willAnswer(invoc -> invoc.getArgument(0)).given(this.beanFactory).resolveEmbeddedValue(anyString()); } // Retry with DLT @@ -118,9 +121,15 @@ private Object createBean() { } } + private static Stream paramsForRetryTopic() { + return Stream.of( + Arguments.of(true), + Arguments.of(false)); + } - @Test - void shouldGetDltHandlerMethod() { + @ParameterizedTest(name = "{index} shouldGetDltHandlerMethod is method {0}") + @MethodSource("paramsForRetryTopic") + void shouldGetDltHandlerMethod(boolean isMethod) { // setup given(beanFactory.getBean(RetryTopicBeanNames.DEFAULT_KAFKA_TEMPLATE_BEAN_NAME, KafkaOperations.class)) @@ -128,8 +137,8 @@ void shouldGetDltHandlerMethod() { RetryableTopicAnnotationProcessor processor = new RetryableTopicAnnotationProcessor(beanFactory); // given - RetryTopicConfiguration configuration = processor - .processAnnotation(topics, listenWithRetryAndDlt, annotationWithDlt, beanWithDlt); + RetryTopicConfiguration configuration = getRetryTopicConfiguration(processor, topics, listenWithRetryAndDlt, + RetryableTopicClassLevelAnnotationFactoryWithDlt.class, isMethod, annotationWithDlt, beanWithDlt); // then EndpointHandlerMethod dltHandlerMethod = configuration.getDltHandlerMethod(); @@ -140,15 +149,27 @@ void shouldGetDltHandlerMethod() { configuration.getDestinationTopicProperties().get(0)).isAlwaysRetryOnDltFailure()).isFalse(); } - @Test - void shouldGetLoggingDltHandlerMethod() { + private RetryTopicConfiguration getRetryTopicConfiguration(RetryableTopicAnnotationProcessor processor, + String[] topics, Method method, Class clazz, boolean isMethod, RetryableTopic annotation, Object bean) { + if (isMethod) { + return processor.processAnnotation(topics, method, annotation, bean); + } + else { + return processor.processAnnotation(topics, clazz, annotation, bean); + } + } + + @ParameterizedTest(name = "{index} shouldGetLoggingDltHandlerMethod is method {0}") + @MethodSource("paramsForRetryTopic") + void shouldGetLoggingDltHandlerMethod(boolean isMethod) { // setup 
given(beanFactory.getBean(kafkaTemplateName, KafkaOperations.class)).willReturn(kafkaOperationsFromTemplateName); RetryableTopicAnnotationProcessor processor = new RetryableTopicAnnotationProcessor(beanFactory); // given - RetryTopicConfiguration configuration = processor.processAnnotation(topics, listenWithRetry, annotation, bean); + RetryTopicConfiguration configuration = getRetryTopicConfiguration(processor, topics, listenWithRetry, + RetryableTopicClassLevelAnnotationFactory.class, isMethod, annotation, bean); // then EndpointHandlerMethod dltHandlerMethod = configuration.getDltHandlerMethod(); @@ -171,6 +192,9 @@ void shouldThrowIfProvidedKafkaTemplateNotFound() { // given - then assertThatExceptionOfType(BeanInitializationException.class) .isThrownBy(() -> processor.processAnnotation(topics, listenWithRetry, annotation, bean)); + assertThatExceptionOfType(BeanInitializationException.class) + .isThrownBy(() -> processor.processAnnotation(topics, RetryableTopicClassLevelAnnotationFactory.class, + annotation, bean)); } @Test @@ -191,10 +215,13 @@ void shouldThrowIfNoKafkaTemplateFound() { // given - then assertThatIllegalStateException().isThrownBy(() -> processor.processAnnotation(topics, listenWithRetryAndDlt, annotationWithDlt, beanWithDlt)); + assertThatIllegalStateException().isThrownBy(() -> processor.processAnnotation( + topics, RetryableTopicClassLevelAnnotationFactoryWithDlt.class, annotationWithDlt, beanWithDlt)); } - @Test - void shouldTrySpringBootDefaultKafkaTemplate() { + @ParameterizedTest(name = "{index} shouldTrySpringBootDefaultKafkaTemplate is method {0}") + @MethodSource("paramsForRetryTopic") + void shouldTrySpringBootDefaultKafkaTemplate(boolean isMethod) { // setup given(this.beanFactory.getBean(RetryTopicBeanNames.DEFAULT_KAFKA_TEMPLATE_BEAN_NAME, KafkaOperations.class)) @@ -207,15 +234,16 @@ void shouldTrySpringBootDefaultKafkaTemplate() { RetryableTopicAnnotationProcessor processor = new RetryableTopicAnnotationProcessor(beanFactory); // given - then - RetryTopicConfiguration configuration = processor.processAnnotation(topics, listenWithRetry, annotationWithDlt, - bean); + RetryTopicConfiguration configuration = getRetryTopicConfiguration(processor, topics, listenWithRetry, + RetryableTopicClassLevelAnnotationFactory.class, isMethod, annotationWithDlt, bean); DestinationTopic.Properties properties = configuration.getDestinationTopicProperties().get(0); DestinationTopic destinationTopic = new DestinationTopic("", properties); assertThat(destinationTopic.getKafkaOperations()).isEqualTo(kafkaOperationsFromDefaultName); } - @Test - void shouldGetKafkaTemplateFromBeanName() { + @ParameterizedTest(name = "{index} shouldGetKafkaTemplateFromBeanName is method {0}") + @MethodSource("paramsForRetryTopic") + void shouldGetKafkaTemplateFromBeanName(boolean isMethod) { // setup given(this.beanFactory.getBean(kafkaTemplateName, KafkaOperations.class)) @@ -223,15 +251,16 @@ void shouldGetKafkaTemplateFromBeanName() { RetryableTopicAnnotationProcessor processor = new RetryableTopicAnnotationProcessor(beanFactory); // given - then - RetryTopicConfiguration configuration = processor - .processAnnotation(topics, listenWithRetry, annotation, bean); + RetryTopicConfiguration configuration = getRetryTopicConfiguration(processor, topics, listenWithRetry, + RetryableTopicClassLevelAnnotationFactory.class, isMethod, annotation, bean); DestinationTopic.Properties properties = configuration.getDestinationTopicProperties().get(0); DestinationTopic destinationTopic = new 
DestinationTopic("", properties); assertThat(destinationTopic.getKafkaOperations()).isEqualTo(kafkaOperationsFromTemplateName); } - @Test - void shouldGetKafkaTemplateFromDefaultBeanName() { + @ParameterizedTest(name = "{index} shouldGetKafkaTemplateFromDefaultBeanName is method {0}") + @MethodSource("paramsForRetryTopic") + void shouldGetKafkaTemplateFromDefaultBeanName(boolean isMethod) { // setup given(this.beanFactory.getBean(RetryTopicBeanNames.DEFAULT_KAFKA_TEMPLATE_BEAN_NAME, KafkaOperations.class)) @@ -239,8 +268,8 @@ void shouldGetKafkaTemplateFromDefaultBeanName() { RetryableTopicAnnotationProcessor processor = new RetryableTopicAnnotationProcessor(beanFactory); // given - RetryTopicConfiguration configuration = processor - .processAnnotation(topics, listenWithRetryAndDlt, annotationWithDlt, beanWithDlt); + RetryTopicConfiguration configuration = getRetryTopicConfiguration(processor, topics, listenWithRetryAndDlt, + RetryableTopicClassLevelAnnotationFactoryWithDlt.class, isMethod, annotationWithDlt, beanWithDlt); // then DestinationTopic.Properties properties = configuration.getDestinationTopicProperties().get(0); @@ -248,8 +277,9 @@ void shouldGetKafkaTemplateFromDefaultBeanName() { assertThat(destinationTopic.getKafkaOperations()).isEqualTo(kafkaOperationsFromDefaultName); } - @Test - void shouldCreateExponentialBackoff() { + @ParameterizedTest(name = "{index} shouldCreateExponentialBackoff is method {0}") + @MethodSource("paramsForRetryTopic") + void shouldCreateExponentialBackoff(boolean isMethod) { // setup given(this.beanFactory.getBean(RetryTopicBeanNames.DEFAULT_KAFKA_TEMPLATE_BEAN_NAME, KafkaOperations.class)) @@ -257,8 +287,8 @@ void shouldCreateExponentialBackoff() { RetryableTopicAnnotationProcessor processor = new RetryableTopicAnnotationProcessor(beanFactory); // given - RetryTopicConfiguration configuration = processor - .processAnnotation(topics, listenWithRetryAndDlt, annotationWithDlt, beanWithDlt); + RetryTopicConfiguration configuration = getRetryTopicConfiguration(processor, topics, listenWithRetryAndDlt, + RetryableTopicClassLevelAnnotationFactoryWithDlt.class, isMethod, annotationWithDlt, beanWithDlt); // then List destinationTopicProperties = configuration.getDestinationTopicProperties(); @@ -275,8 +305,9 @@ void shouldCreateExponentialBackoff() { assertThat(destinationTopic.shouldRetryOn(1, new IllegalArgumentException())).isTrue(); } - @Test - void shouldSetAbort() { + @ParameterizedTest(name = "{index} shouldSetAbort is method {0}") + @MethodSource("paramsForRetryTopic") + void shouldSetAbort(boolean isMethod) { // setup given(this.beanFactory.getBean(RetryTopicBeanNames.DEFAULT_KAFKA_TEMPLATE_BEAN_NAME, KafkaOperations.class)) @@ -284,8 +315,8 @@ void shouldSetAbort() { RetryableTopicAnnotationProcessor processor = new RetryableTopicAnnotationProcessor(beanFactory); // given - RetryTopicConfiguration configuration = processor - .processAnnotation(topics, listenWithRetryAndDlt, annotationWithDlt, beanWithDlt); + RetryTopicConfiguration configuration = getRetryTopicConfiguration(processor, topics, listenWithRetryAndDlt, + RetryableTopicClassLevelAnnotationFactoryWithDlt.class, isMethod, annotationWithDlt, beanWithDlt); // then List destinationTopicProperties = configuration.getDestinationTopicProperties(); @@ -300,8 +331,9 @@ void shouldSetAbort() { } - @Test - void shouldCreateFixedBackoff() { + @ParameterizedTest(name = "{index} shouldCreateFixedBackoff is method {0}") + @MethodSource("paramsForRetryTopic") + void shouldCreateFixedBackoff(boolean 
isMethod) { // setup given(this.beanFactory.getBean(kafkaTemplateName, KafkaOperations.class)) @@ -309,33 +341,32 @@ void shouldCreateFixedBackoff() { RetryableTopicAnnotationProcessor processor = new RetryableTopicAnnotationProcessor(beanFactory); // given - RetryTopicConfiguration configuration = processor - .processAnnotation(topics, listenWithRetry, annotation, bean); + RetryTopicConfiguration configuration = getRetryTopicConfiguration(processor, topics, listenWithRetry, + RetryableTopicClassLevelAnnotationFactory.class, isMethod, annotation, bean); // then List destinationTopicProperties = configuration.getDestinationTopicProperties(); + assertThat(destinationTopicProperties.size()).isEqualTo(3); DestinationTopic destinationTopic = new DestinationTopic("", destinationTopicProperties.get(0)); assertThat(destinationTopic.getDestinationDelay()).isEqualTo(0); DestinationTopic destinationTopic2 = new DestinationTopic("", destinationTopicProperties.get(1)); assertThat(destinationTopic2.getDestinationDelay()).isEqualTo(1000); DestinationTopic destinationTopic3 = new DestinationTopic("", destinationTopicProperties.get(2)); - assertThat(destinationTopic3.getDestinationDelay()).isEqualTo(1000); - DestinationTopic destinationTopic4 = new DestinationTopic("", destinationTopicProperties.get(3)); - assertThat(destinationTopic4.getDestinationDelay()).isEqualTo(0); + assertThat(destinationTopic3.getDestinationDelay()).isEqualTo(0); } - @Test - void shouldCreateExceptionBasedDltRoutingSpec() { + @ParameterizedTest(name = "{index} shouldCreateExceptionBasedDltRoutingSpec is method {0}") + @MethodSource("paramsForRetryTopic") + void shouldCreateExceptionBasedDltRoutingSpec(boolean isMethod) { // setup given(this.beanFactory.getBean(RetryTopicBeanNames.DEFAULT_KAFKA_TEMPLATE_BEAN_NAME, KafkaOperations.class)) .willReturn(kafkaOperationsFromDefaultName); RetryableTopicAnnotationProcessor processor = new RetryableTopicAnnotationProcessor(beanFactory); // given - RetryTopicConfiguration configuration = processor - .processAnnotation( - topics, listenWithCustomDltRouting, annotationWithCustomDltRouting, beanWithCustomDltRouting); + RetryTopicConfiguration configuration = getRetryTopicConfiguration(processor, topics, + listenWithCustomDltRouting, RetryableTopicClassLevelAnnotationFactoryWithCustomDltRouting.class, + isMethod, annotationWithCustomDltRouting, beanWithCustomDltRouting); // then List destinationTopicProperties = configuration.getDestinationTopicProperties(); @@ -355,6 +386,12 @@ void listenWithRetry() { } } + @KafkaListener + @RetryableTopic(kafkaTemplate = RetryableTopicAnnotationProcessorTests.kafkaTemplateName) + static class RetryableTopicClassLevelAnnotationFactory { + + } + static class RetryableTopicAnnotationFactoryWithDlt { @KafkaListener @@ -370,6 +407,18 @@ void handleDlt() { } } + @KafkaListener + @RetryableTopic(attempts = "3", backoff = @Backoff(multiplier = 2, value = 1000), + dltStrategy = DltStrategy.FAIL_ON_ERROR, excludeNames = "java.lang.IllegalStateException") + static class RetryableTopicClassLevelAnnotationFactoryWithDlt { + + @DltHandler + void handleDlt() { + // NoOps + } + + } + static class RetryableTopicAnnotationFactoryWithCustomDltRouting { @KafkaListener @RetryableTopic( @@ -384,4 +433,17 @@ void listenWithRetry() { // NoOps } } + + @KafkaListener + @RetryableTopic( + attempts = "1", + exceptionBasedDltRouting = { + @ExceptionBasedDltDestination( + suffix = "-deserialization", exceptions = {DeserializationException.class} + ) + } + ) + static class 
RetryableTopicClassLevelAnnotationFactoryWithCustomDltRouting { + } + } diff --git a/spring-kafka/src/test/java/org/springframework/kafka/security/jaas/KafkaJaasLoginModuleInitializerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/security/jaas/KafkaJaasLoginModuleInitializerTests.java index c42867e089..a649cddbe8 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/security/jaas/KafkaJaasLoginModuleInitializerTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/security/jaas/KafkaJaasLoginModuleInitializerTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2023 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,8 +16,6 @@ package org.springframework.kafka.security.jaas; -import static org.assertj.core.api.Assertions.assertThat; - import java.io.IOException; import java.util.Collections; import java.util.HashMap; @@ -26,6 +24,7 @@ import javax.security.auth.login.AppConfigurationEntry; +import com.sun.security.auth.login.ConfigFile; import org.apache.kafka.common.security.JaasContext; import org.jetbrains.annotations.NotNull; import org.junit.jupiter.api.Test; @@ -38,7 +37,7 @@ import org.springframework.kafka.security.jaas.KafkaJaasLoginModuleInitializer.ControlFlag; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; -import com.sun.security.auth.login.ConfigFile; +import static org.assertj.core.api.Assertions.assertThat; /** * @author Marius Bogoevici diff --git a/spring-kafka/src/test/java/org/springframework/kafka/streams/Configurer1Tests.java b/spring-kafka/src/test/java/org/springframework/kafka/streams/Configurer1Tests.java index a473fed795..16faaec2f3 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/streams/Configurer1Tests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/streams/Configurer1Tests.java @@ -1,5 +1,5 @@ /* - * Copyright 2021 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
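[Editor's note] The new ClassLevelAnnotationFactory fixtures above, together with the isMethod flag threaded through getRetryTopicConfiguration(...) and the paramsForRetryTopic method source, exercise class-level @RetryableTopic support: the annotation can now sit on the class, alongside a class-level @KafkaListener, instead of on an individual listener method. A minimal sketch of what that looks like in application code (topic name, payload type, and class name are illustrative, not taken from the patch):

import org.springframework.kafka.annotation.DltHandler;
import org.springframework.kafka.annotation.KafkaHandler;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.RetryableTopic;
import org.springframework.kafka.retrytopic.DltStrategy;
import org.springframework.retry.annotation.Backoff;

// Sketch only: a class-level @RetryableTopic listener, mirroring the test fixtures above.
@KafkaListener(topics = "orders")
@RetryableTopic(attempts = "3", backoff = @Backoff(multiplier = 2, value = 1000),
        dltStrategy = DltStrategy.FAIL_ON_ERROR)
class OrderListener {

    @KafkaHandler
    void listen(String order) {
        // normal processing; failures are redelivered via the generated retry topics
    }

    @DltHandler
    void handleDlt(String order) {
        // invoked once all attempts are exhausted
    }

}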
@@ -16,10 +16,7 @@ package org.springframework.kafka.streams; -import static org.assertj.core.api.Assertions.assertThat; - import java.util.ArrayList; -import java.util.HashMap; import java.util.List; import java.util.Map; @@ -41,11 +38,15 @@ import org.springframework.kafka.config.StreamsBuilderFactoryBeanConfigurer; import org.springframework.kafka.test.EmbeddedKafkaBroker; import org.springframework.kafka.test.context.EmbeddedKafka; +import org.springframework.kafka.test.utils.KafkaTestUtils; import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Gary Russell + * @author Sanghyeok An * @since 2.7 * */ @@ -75,9 +76,7 @@ public static class Config { @Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME) public KafkaStreamsConfiguration kStreamsConfigs() { - Map props = new HashMap<>(); - props.put(StreamsConfig.APPLICATION_ID_CONFIG, "configurer1"); - props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, this.brokerAddresses); + Map props = KafkaTestUtils.streamsProps("configurer1", this.brokerAddresses); props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Integer().getClass().getName()); props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, diff --git a/spring-kafka/src/test/java/org/springframework/kafka/streams/Configurer2Tests.java b/spring-kafka/src/test/java/org/springframework/kafka/streams/Configurer2Tests.java index b1e7ffe71a..27fc7a9ed9 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/streams/Configurer2Tests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/streams/Configurer2Tests.java @@ -1,5 +1,5 @@ /* - * Copyright 2021 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
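[Editor's note] In Configurer1Tests, Configurer2Tests, and several more Streams tests below, the hand-rolled properties map is replaced by KafkaTestUtils.streamsProps(applicationId, bootstrapServers). Judging from the two lines it replaces in each hunk, the helper presumably just seeds the two mandatory Streams settings, making the refactor behavior-preserving:

// What the removed lines did, and what streamsProps(...) appears to encapsulate
// (inferred from the hunks, not the helper's actual source):
Map<String, Object> props = new HashMap<>();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "configurer1");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, this.brokerAddresses);
// ...which appears equivalent to:
Map<String, Object> props2 = KafkaTestUtils.streamsProps("configurer1", this.brokerAddresses);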
@@ -16,10 +16,7 @@ package org.springframework.kafka.streams; -import static org.assertj.core.api.Assertions.assertThat; - import java.util.ArrayList; -import java.util.HashMap; import java.util.List; import java.util.Map; @@ -41,11 +38,15 @@ import org.springframework.kafka.config.StreamsBuilderFactoryBeanConfigurer; import org.springframework.kafka.test.EmbeddedKafkaBroker; import org.springframework.kafka.test.context.EmbeddedKafka; +import org.springframework.kafka.test.utils.KafkaTestUtils; import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Gary Russell + * @author Sanghyeok An * @since 2.7 * */ @@ -75,9 +76,7 @@ public static class Config { @Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME) public KafkaStreamsConfiguration kStreamsConfigs() { - Map props = new HashMap<>(); - props.put(StreamsConfig.APPLICATION_ID_CONFIG, "configurer2"); - props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, this.brokerAddresses); + Map props = KafkaTestUtils.streamsProps("configurer2", this.brokerAddresses); props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Integer().getClass().getName()); props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, diff --git a/spring-kafka/src/test/java/org/springframework/kafka/streams/HeaderEnricherProcessorTests.java b/spring-kafka/src/test/java/org/springframework/kafka/streams/HeaderEnricherProcessorTests.java index 7f5c2f92cc..1823e2defe 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/streams/HeaderEnricherProcessorTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/streams/HeaderEnricherProcessorTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2019-2021 the original author or authors. + * Copyright 2019-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,8 +16,6 @@ package org.springframework.kafka.streams; -import static org.assertj.core.api.Assertions.assertThat; - import java.util.HashMap; import java.util.Map; import java.util.Properties; @@ -38,6 +36,8 @@ import org.springframework.expression.common.LiteralExpression; import org.springframework.expression.spel.standard.SpelExpressionParser; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Gary Russell * @since 2.3 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/streams/KafkaStreamsBranchTests.java b/spring-kafka/src/test/java/org/springframework/kafka/streams/KafkaStreamsBranchTests.java index c145d29cd7..6dcb606c38 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/streams/KafkaStreamsBranchTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/streams/KafkaStreamsBranchTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2019 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
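[Editor's note] One housekeeping pattern repeats across nearly every file in this patch: static imports move from the head of the file to a block after all regular imports, and non-Spring third-party imports (com.sun, com.fasterxml, io.micrometer, kafka.*) move ahead of the org.springframework block. The layout the hunks converge on, sketched for a hypothetical file:

import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.kafka.streams.StreamsConfig;

import org.springframework.kafka.test.utils.KafkaTestUtils;

import static org.assertj.core.api.Assertions.assertThat;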
@@ -16,10 +16,7 @@ package org.springframework.kafka.streams; -import static org.assertj.core.api.Assertions.assertThat; - import java.util.ArrayList; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.UUID; @@ -54,10 +51,13 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Elliot Kennedy * @author Artem Bilan * @author Ivan Ponomarev + * @author Sanghyeok An * * @since 1.3.3 */ @@ -146,9 +146,7 @@ public Map producerConfigs() { @Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME) public KafkaStreamsConfiguration kStreamsConfigs() { - Map props = new HashMap<>(); - props.put(StreamsConfig.APPLICATION_ID_CONFIG, "testStreams"); - props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, this.brokerAddresses); + Map props = KafkaTestUtils.streamsProps("testStreams", this.brokerAddresses); props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); return new KafkaStreamsConfiguration(props); diff --git a/spring-kafka/src/test/java/org/springframework/kafka/streams/KafkaStreamsInteractiveQueryServiceTests.java b/spring-kafka/src/test/java/org/springframework/kafka/streams/KafkaStreamsInteractiveQueryServiceTests.java index e59ddcc89c..67c0def9d9 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/streams/KafkaStreamsInteractiveQueryServiceTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/streams/KafkaStreamsInteractiveQueryServiceTests.java @@ -16,20 +16,15 @@ package org.springframework.kafka.streams; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatExceptionOfType; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - import java.lang.reflect.Field; -import java.util.HashMap; import java.util.Map; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.common.serialization.IntegerSerializer; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.streams.KafkaStreams; import org.apache.kafka.streams.KeyValue; @@ -40,6 +35,7 @@ import org.apache.kafka.streams.kstream.KStream; import org.apache.kafka.streams.kstream.Materialized; import org.apache.kafka.streams.processor.WallclockTimestampExtractor; +import org.apache.kafka.streams.state.HostInfo; import org.apache.kafka.streams.state.QueryableStoreTypes; import org.apache.kafka.streams.state.ReadOnlyKeyValueStore; import org.junit.jupiter.api.Test; @@ -63,7 +59,7 @@ import org.springframework.kafka.core.ProducerFactory; import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; import org.springframework.kafka.test.EmbeddedKafkaBroker; -import org.springframework.kafka.test.EmbeddedKafkaKraftBroker; +import org.springframework.kafka.test.EmbeddedKafkaZKBroker; import org.springframework.kafka.test.context.EmbeddedKafka; import org.springframework.kafka.test.utils.KafkaTestUtils; import org.springframework.retry.RetryPolicy; @@ -73,8 
+69,16 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + /** * @author Soby Chacko + * @author Sanghyeok An * @since 3.2.0 */ @SpringJUnitConfig @@ -92,7 +96,7 @@ class KafkaStreamsInteractiveQueryServiceTests { public static final String NON_EXISTENT_STORE = "my-non-existent-store"; @Autowired - private EmbeddedKafkaKraftBroker embeddedKafka; + private EmbeddedKafkaZKBroker embeddedKafka; @Autowired private StreamsBuilderFactoryBean streamsBuilderFactoryBean; @@ -108,11 +112,7 @@ class KafkaStreamsInteractiveQueryServiceTests { @Test void retrieveQueryableStore() throws Exception { - this.kafkaTemplate.sendDefault(123, "123"); - this.kafkaTemplate.flush(); - - ConsumerRecord result = resultFuture.get(600, TimeUnit.SECONDS); - assertThat(result).isNotNull(); + ensureKafkaStreamsProcessorIsUpAndRunning(); ReadOnlyKeyValueStore objectObjectReadOnlyKeyValueStore = this.interactiveQueryService .retrieveQueryableStore(STATE_STORE, @@ -121,14 +121,18 @@ void retrieveQueryableStore() throws Exception { assertThat((Long) objectObjectReadOnlyKeyValueStore.get(123)).isGreaterThanOrEqualTo(1); } - @SuppressWarnings("unchecked") - @Test - void retrieveNonExistentStateStoreAndVerifyRetries() throws Exception { + private void ensureKafkaStreamsProcessorIsUpAndRunning() throws InterruptedException, ExecutionException, TimeoutException { this.kafkaTemplate.sendDefault(123, "123"); this.kafkaTemplate.flush(); ConsumerRecord result = resultFuture.get(600, TimeUnit.SECONDS); assertThat(result).isNotNull(); + } + + @SuppressWarnings("unchecked") + @Test + void retrieveNonExistentStateStoreAndVerifyRetries() throws Exception { + ensureKafkaStreamsProcessorIsUpAndRunning(); assertThat(this.streamsBuilderFactoryBean.getKafkaStreams()).isNotNull(); KafkaStreams kafkaStreams = spy(this.streamsBuilderFactoryBean.getKafkaStreams()); @@ -148,6 +152,54 @@ void retrieveNonExistentStateStoreAndVerifyRetries() throws Exception { verify(kafkaStreams, times(3)).store(any(StoreQueryParameters.class)); } + @Test + void currentHostInfo() { + HostInfo currentKafkaStreamsApplicationHostInfo = + this.interactiveQueryService.getCurrentKafkaStreamsApplicationHostInfo(); + assertThat(currentKafkaStreamsApplicationHostInfo.host()).isEqualTo("localhost"); + assertThat(currentKafkaStreamsApplicationHostInfo.port()).isEqualTo(8080); + } + + @Test + void hostInfoForKeyAndStore() throws Exception { + ensureKafkaStreamsProcessorIsUpAndRunning(); + + HostInfo kafkaStreamsApplicationHostInfo = + this.interactiveQueryService.getKafkaStreamsApplicationHostInfo(STATE_STORE, 123, + new IntegerSerializer()); + // In real applications, the above call may return a different server than what is configured + // via application.server on the Kafka Streams where the call was invoked. However, in the case + // of this test, we only have a single Kafka Streams instance and even there, we provide a mock + // value for application.server (localhost:8080). Because of that, that is what we are verifying against. 
+ assertThat(kafkaStreamsApplicationHostInfo.host()).isEqualTo("localhost"); + assertThat(kafkaStreamsApplicationHostInfo.port()).isEqualTo(8080); + } + + @Test + void hostInfoForNonExistentKeyAndStateStore() throws Exception { + ensureKafkaStreamsProcessorIsUpAndRunning(); + + assertThat(this.streamsBuilderFactoryBean.getKafkaStreams()).isNotNull(); + KafkaStreams kafkaStreams = spy(this.streamsBuilderFactoryBean.getKafkaStreams()); + assertThat(kafkaStreams).isNotNull(); + + Field kafkaStreamsField = KafkaStreamsInteractiveQueryService.class.getDeclaredField("kafkaStreams"); + kafkaStreamsField.setAccessible(true); + kafkaStreamsField.set(interactiveQueryService, kafkaStreams); + + IntegerSerializer serializer = new IntegerSerializer(); + + assertThatExceptionOfType(IllegalStateException.class) + .isThrownBy(() -> { + this.interactiveQueryService.getKafkaStreamsApplicationHostInfo(NON_EXISTENT_STORE, 12345, + serializer); + }) + .withMessageContaining("Error when retrieving state store."); + + verify(kafkaStreams, times(3)).queryMetadataForKey(NON_EXISTENT_STORE, 12345, + serializer); + } + @Configuration @EnableKafka @EnableKafkaStreams @@ -196,14 +248,14 @@ public ConsumerFactory consumerFactory() { @Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME) public KafkaStreamsConfiguration kStreamsConfigs() { - Map props = new HashMap<>(); - props.put(StreamsConfig.APPLICATION_ID_CONFIG, "iqs-testStreams"); - props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, this.brokerAddresses); + Map props = KafkaTestUtils.streamsProps( + "iqs-testStreams", this.brokerAddresses); props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Integer().getClass().getName()); props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, WallclockTimestampExtractor.class.getName()); props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, "100"); + props.put(StreamsConfig.APPLICATION_SERVER_CONFIG, "localhost:8080"); return new KafkaStreamsConfiguration(props); } diff --git a/spring-kafka/src/test/java/org/springframework/kafka/streams/KafkaStreamsJsonSerializationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/streams/KafkaStreamsJsonSerializationTests.java index 46112bfed4..3e208f08dd 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/streams/KafkaStreamsJsonSerializationTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/streams/KafkaStreamsJsonSerializationTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2019 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
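[Editor's note] Looking back at the KafkaStreamsInteractiveQueryServiceTests additions above: besides switching the broker to EmbeddedKafkaZKBroker and extracting ensureKafkaStreamsProcessorIsUpAndRunning(), they pin down two host-lookup methods, getCurrentKafkaStreamsApplicationHostInfo(), which reports this instance's application.server value, and getKafkaStreamsApplicationHostInfo(store, key, keySerializer), which asks Kafka Streams which instance hosts a given key. A sketch of the intended multi-instance usage, assuming an injected KafkaStreamsInteractiveQueryService named interactiveQueryService (store name and key are illustrative):

// Sketch: route an interactive query to the instance that owns the key.
HostInfo self = interactiveQueryService.getCurrentKafkaStreamsApplicationHostInfo();
HostInfo owner = interactiveQueryService.getKafkaStreamsApplicationHostInfo(
        "my-state-store", 123, new IntegerSerializer());
if (owner.equals(self)) {
    // query locally via retrieveQueryableStore(...)
}
else {
    // forward the request to owner.host() + ":" + owner.port(), e.g. over HTTP
}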
@@ -16,12 +16,11 @@ package org.springframework.kafka.streams; -import static org.assertj.core.api.Assertions.assertThat; - -import java.util.HashMap; import java.util.Map; import java.util.UUID; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; @@ -29,7 +28,6 @@ import org.apache.kafka.clients.producer.ProducerConfig; import org.apache.kafka.common.serialization.Serde; import org.apache.kafka.streams.StreamsBuilder; -import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.kstream.Consumed; import org.apache.kafka.streams.kstream.KStream; import org.apache.kafka.streams.kstream.Printed; @@ -57,12 +55,12 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; +import static org.assertj.core.api.Assertions.assertThat; /** * @author Elliot Kennedy * @author Artem Bilan + * @author Sanghyeok An */ @SpringJUnitConfig @DirtiesContext @@ -204,9 +202,7 @@ public Map producerConfigs() { @Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME) public KafkaStreamsConfiguration kStreamsConfigs() { - Map props = new HashMap<>(); - props.put(StreamsConfig.APPLICATION_ID_CONFIG, "testStreams"); - props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, this.brokerAddresses); + Map props = KafkaTestUtils.streamsProps("testStreams", this.brokerAddresses); return new KafkaStreamsConfiguration(props); } diff --git a/spring-kafka/src/test/java/org/springframework/kafka/streams/KafkaStreamsTests.java b/spring-kafka/src/test/java/org/springframework/kafka/streams/KafkaStreamsTests.java index d28d506cfc..11e9a66d20 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/streams/KafkaStreamsTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/streams/KafkaStreamsTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2023 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,9 +16,6 @@ package org.springframework.kafka.streams; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; - import java.time.Duration; import java.util.HashMap; import java.util.Map; @@ -28,6 +25,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import kafka.server.BrokerServer; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.streams.KafkaStreams; @@ -75,7 +73,8 @@ import org.springframework.test.context.TestPropertySource; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; -import kafka.server.BrokerServer; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; /** * @author Artem Bilan @@ -83,6 +82,7 @@ * @author Gary Russell * @author Elliot Metsger * @author Zach Olauson + * @author Sanghyeok An * * @since 1.1.4 */ @@ -97,7 +97,7 @@ brokerProperties = { "auto.create.topics.enable=${topics.autoCreate:false}", "delete.topic.enable=${topic.delete:true}" }, - brokerPropertiesLocation = "classpath:/${broker.filename:broker}.properties") + brokerPropertiesLocation = "classpath:/${broker.filename:broker}.properties", kraft = true) public class KafkaStreamsTests { static final String STREAMING_TOPIC1 = "streamingTopic1"; @@ -195,9 +195,7 @@ public KafkaTemplate template() { @Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME) public KafkaStreamsConfiguration kStreamsConfigs() { - Map props = new HashMap<>(); - props.put(StreamsConfig.APPLICATION_ID_CONFIG, "testStreams"); - props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, this.brokerAddresses); + Map props = KafkaTestUtils.streamsProps("testStreams", this.brokerAddresses); props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Integer().getClass().getName()); props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, diff --git a/spring-kafka/src/test/java/org/springframework/kafka/streams/RecoveringDeserializationExceptionHandlerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/streams/RecoveringDeserializationExceptionHandlerTests.java index bb4098c95f..0a6b1ded99 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/streams/RecoveringDeserializationExceptionHandlerTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/streams/RecoveringDeserializationExceptionHandlerTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2019-2022 the original author or authors. + * Copyright 2019-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
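[Editor's note] Note also that KafkaStreamsTests above now starts its embedded broker in KRaft mode by adding kraft = true to @EmbeddedKafka while keeping the property-driven broker configuration. Reduced to its essentials (class name is illustrative):

// Sketch: an embedded KRaft broker for a test class.
@EmbeddedKafka(partitions = 1, topics = { "streamingTopic1" }, kraft = true)
@SpringJUnitConfig
class SomeKraftBrokerTest {
    // the EmbeddedKafkaBroker bean is autowired exactly as in ZooKeeper mode
}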
@@ -16,8 +16,6 @@ package org.springframework.kafka.streams; -import static org.assertj.core.api.Assertions.assertThat; - import java.util.HashMap; import java.util.Map; import java.util.concurrent.CompletableFuture; @@ -67,8 +65,12 @@ import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit.jupiter.SpringJUnitConfig; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Gary Russell + * @author Soby Chacko + * @author Sanghyeok An * @since 2.3 * */ @@ -78,7 +80,7 @@ public class RecoveringDeserializationExceptionHandlerTests { @Autowired - private KafkaTemplate kafkaTemplate; + private KafkaOperations kafkaTemplate; @Autowired private CompletableFuture> resultFuture; @@ -91,9 +93,9 @@ void viaStringProperty() { Recoverer.class.getName()); handler.configure(configs); assertThat(KafkaTestUtils.getPropertyValue(handler, "recoverer")).isInstanceOf(Recoverer.class); - assertThat(handler.handle(null, new ConsumerRecord("foo", 0, 0, null, null), + assertThat(handler.handle(null, new ConsumerRecord<>("foo", 0, 0, null, null), new IllegalArgumentException())).isEqualTo(DeserializationHandlerResponse.CONTINUE); - assertThat(handler.handle(null, new ConsumerRecord("foo", 0, 0, null, null), + assertThat(handler.handle(null, new ConsumerRecord<>("foo", 0, 0, null, null), new IllegalStateException())).isEqualTo(DeserializationHandlerResponse.FAIL); } @@ -104,30 +106,30 @@ void viaClassProperty() { configs.put(RecoveringDeserializationExceptionHandler.KSTREAM_DESERIALIZATION_RECOVERER, Recoverer.class); handler.configure(configs); assertThat(KafkaTestUtils.getPropertyValue(handler, "recoverer")).isInstanceOf(Recoverer.class); - assertThat(handler.handle(null, new ConsumerRecord("foo", 0, 0, null, null), + assertThat(handler.handle(null, new ConsumerRecord<>("foo", 0, 0, null, null), new IllegalArgumentException())).isEqualTo(DeserializationHandlerResponse.CONTINUE); - assertThat(handler.handle(null, new ConsumerRecord("foo", 0, 0, null, null), + assertThat(handler.handle(null, new ConsumerRecord<>("foo", 0, 0, null, null), new IllegalStateException())).isEqualTo(DeserializationHandlerResponse.FAIL); } @Test void viaObjectProperty() { RecoveringDeserializationExceptionHandler handler = new RecoveringDeserializationExceptionHandler(); - Map configs = new HashMap(); + Map configs = new HashMap<>(); Recoverer rec = new Recoverer(); configs.put(RecoveringDeserializationExceptionHandler.KSTREAM_DESERIALIZATION_RECOVERER, rec); handler.configure(configs); assertThat(KafkaTestUtils.getPropertyValue(handler, "recoverer")).isSameAs(rec); - assertThat(handler.handle(null, new ConsumerRecord("foo", 0, 0, null, null), + assertThat(handler.handle(null, new ConsumerRecord<>("foo", 0, 0, null, null), new IllegalArgumentException())).isEqualTo(DeserializationHandlerResponse.CONTINUE); - assertThat(handler.handle(null, new ConsumerRecord("foo", 0, 0, null, null), + assertThat(handler.handle(null, new ConsumerRecord<>("foo", 0, 0, null, null), new IllegalStateException())).isEqualTo(DeserializationHandlerResponse.FAIL); } @Test void withNoRecoverer() { RecoveringDeserializationExceptionHandler handler = new RecoveringDeserializationExceptionHandler(); - assertThat(handler.handle(null, new ConsumerRecord("foo", 0, 0, null, null), + assertThat(handler.handle(null, new ConsumerRecord<>("foo", 0, 0, null, null), new IllegalArgumentException())).isEqualTo(DeserializationHandlerResponse.FAIL); } @@ -177,7 +179,7 @@ public Map producerConfigs() { } @Bean - 
public KafkaOperations template() { + public KafkaTemplate template() { KafkaTemplate kafkaTemplate = new KafkaTemplate<>(producerFactory(), true); kafkaTemplate.setDefaultTopic("recoverer1"); return kafkaTemplate; @@ -185,9 +187,7 @@ public KafkaOperations template() { @Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME) public KafkaStreamsConfiguration kStreamsConfigs() { - Map props = new HashMap<>(); - props.put(StreamsConfig.APPLICATION_ID_CONFIG, "testStreams"); - props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, this.brokerAddresses); + Map props = KafkaTestUtils.streamsProps("testStreams", this.brokerAddresses); props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class); props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, FailSerde.class); props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, diff --git a/spring-kafka/src/test/java/org/springframework/kafka/streams/messaging/MessagingProcessorTests.java b/spring-kafka/src/test/java/org/springframework/kafka/streams/messaging/MessagingProcessorTests.java index 0ef2fffeb4..4f8aad95aa 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/streams/messaging/MessagingProcessorTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/streams/messaging/MessagingProcessorTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2019-2021 the original author or authors. + * Copyright 2019-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,8 +16,6 @@ package org.springframework.kafka.streams.messaging; -import static org.assertj.core.api.Assertions.assertThat; - import java.util.Collections; import java.util.Properties; @@ -41,6 +39,8 @@ import org.springframework.kafka.support.converter.MessagingMessageConverter; import org.springframework.messaging.support.MessageBuilder; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Gary Russell * @since 2.3 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/support/DefaultKafkaHeaderMapperTests.java b/spring-kafka/src/test/java/org/springframework/kafka/support/DefaultKafkaHeaderMapperTests.java index daf1aa6c29..1d799061cc 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/support/DefaultKafkaHeaderMapperTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/support/DefaultKafkaHeaderMapperTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2022 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
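[Editor's note] The viaStringProperty, viaClassProperty, and viaObjectProperty tests above confirm that the recoverer can be supplied as a fully-qualified class name, a Class, or an instance, all under the same KSTREAM_DESERIALIZATION_RECOVERER key. Wiring the handler into a Streams configuration then looks roughly like this (Recoverer stands in for any ConsumerRecordRecoverer implementation; the handler key itself is the standard Kafka Streams setting):

props.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
        RecoveringDeserializationExceptionHandler.class);
// a class name, a Class, or an instance all work here, as the tests show
props.put(RecoveringDeserializationExceptionHandler.KSTREAM_DESERIALIZATION_RECOVERER,
        Recoverer.class);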
@@ -16,9 +16,6 @@ package org.springframework.kafka.support; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.entry; - import java.net.URI; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; @@ -32,9 +29,14 @@ import org.apache.kafka.common.header.Headers; import org.apache.kafka.common.header.internals.RecordHeader; import org.apache.kafka.common.header.internals.RecordHeaders; +import org.assertj.core.api.InstanceOfAssertFactories; import org.junit.jupiter.api.Test; +import org.springframework.core.log.LogAccessor; import org.springframework.kafka.support.DefaultKafkaHeaderMapper.NonTrustedHeaderType; +import org.springframework.kafka.support.serializer.DeserializationException; +import org.springframework.kafka.support.serializer.SerializationTestUtils; +import org.springframework.kafka.support.serializer.SerializationUtils; import org.springframework.messaging.Message; import org.springframework.messaging.MessageHeaders; import org.springframework.messaging.support.ExecutorSubscribableChannel; @@ -43,9 +45,17 @@ import org.springframework.util.MimeType; import org.springframework.util.MimeTypeUtils; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.entry; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; + /** * @author Gary Russell * @author Artem Bilan + * @author Soby Chacko * * @since 1.3 * @@ -170,7 +180,8 @@ void testMimeTypeInHeaders() { mapper.toHeaders(recordHeaders, receivedHeaders); Object fooHeader = receivedHeaders.get("foo"); assertThat(fooHeader).isInstanceOf(List.class); - assertThat(fooHeader).asList().containsExactly("application/json", "text/plain"); + assertThat(fooHeader).asInstanceOf(InstanceOfAssertFactories.LIST) + .containsExactly("application/json", "text/plain"); } @Test @@ -321,6 +332,52 @@ void inboundJson() { .containsKey("baz"); } + @Test + void deserializationExceptionHeadersAreMappedAsNonByteArray() { + DefaultKafkaHeaderMapper mapper = new DefaultKafkaHeaderMapper(); + + byte[] keyDeserExceptionBytes = SerializationTestUtils.header(true); + Header keyHeader = SerializationTestUtils.deserializationHeader(SerializationUtils.KEY_DESERIALIZER_EXCEPTION_HEADER, + keyDeserExceptionBytes); + byte[] valueDeserExceptionBytes = SerializationTestUtils.header(false); + Header valueHeader = SerializationTestUtils.deserializationHeader(SerializationUtils.VALUE_DESERIALIZER_EXCEPTION_HEADER, + valueDeserExceptionBytes); + Headers headers = new RecordHeaders( + new Header[] { keyHeader, valueHeader }); + Map springHeaders = new HashMap<>(); + mapper.toHeaders(headers, springHeaders); + assertThat(springHeaders.get(SerializationUtils.KEY_DESERIALIZER_EXCEPTION_HEADER)).isEqualTo(keyHeader); + assertThat(springHeaders.get(SerializationUtils.VALUE_DESERIALIZER_EXCEPTION_HEADER)).isEqualTo(valueHeader); + + LogAccessor logger = new LogAccessor(this.getClass()); + + DeserializationException keyDeserializationException = SerializationUtils.byteArrayToDeserializationException(logger, keyHeader); + assertThat(keyDeserExceptionBytes).containsExactly(SerializationTestUtils.header(keyDeserializationException)); + + DeserializationException valueDeserializationException = + SerializationUtils.byteArrayToDeserializationException(logger, valueHeader); + 
assertThat(valueDeserExceptionBytes).containsExactly(SerializationTestUtils.header(valueDeserializationException)); + + headers = new RecordHeaders(); + mapper.fromHeaders(new MessageHeaders(springHeaders), headers); + assertThat(headers.lastHeader(SerializationUtils.KEY_DESERIALIZER_EXCEPTION_HEADER)).isNull(); + assertThat(headers.lastHeader(SerializationUtils.VALUE_DESERIALIZER_EXCEPTION_HEADER)).isNull(); + } + + @Test + void ensureNullHeaderValueHandledGraciously() { + DefaultKafkaHeaderMapper mapper = new DefaultKafkaHeaderMapper(); + + Header mockHeader = mock(Header.class); + given(mockHeader.value()).willReturn(null); + + Object result = mapper.headerValueToAddIn(mockHeader); + + assertThat(result).isNull(); + verify(mockHeader).value(); + verify(mockHeader, never()).key(); + } + public static final class Foo { private String bar = "bar"; diff --git a/spring-kafka/src/test/java/org/springframework/kafka/support/ExponentialBackOffWithMaxRetriesTests.java b/spring-kafka/src/test/java/org/springframework/kafka/support/ExponentialBackOffWithMaxRetriesTests.java index 8c6949a75b..6af6a203be 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/support/ExponentialBackOffWithMaxRetriesTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/support/ExponentialBackOffWithMaxRetriesTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2021 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,9 +16,6 @@ package org.springframework.kafka.support; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatIllegalStateException; - import java.util.ArrayList; import java.util.List; import java.util.stream.IntStream; @@ -27,6 +24,9 @@ import org.springframework.util.backoff.BackOffExecution; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatIllegalStateException; + /** * @author Gary Russell * @since 2.7.3 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/support/KafkaStreamBrancherTests.java b/spring-kafka/src/test/java/org/springframework/kafka/support/KafkaStreamBrancherTests.java index 1717d273ce..215eb85764 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/support/KafkaStreamBrancherTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/support/KafkaStreamBrancherTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2019-2021 the original author or authors. + * Copyright 2019-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
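[Editor's note] Two mapper behaviors are pinned down by the DefaultKafkaHeaderMapper tests above: the internal key/value DeserializationException headers survive inbound mapping as Header objects rather than raw byte arrays (and are deliberately dropped again on the outbound pass), and a Kafka header whose value() is null now maps to a null value instead of failing. On the consuming side, such a preserved header can be turned back into the original exception with the utility the test itself uses (the listener class name is illustrative):

// Sketch: recover the DeserializationException carried in a record header.
LogAccessor logger = new LogAccessor(MyListener.class);
Header header = record.headers().lastHeader(SerializationUtils.VALUE_DESERIALIZER_EXCEPTION_HEADER);
if (header != null) {
    DeserializationException ex = SerializationUtils.byteArrayToDeserializationException(logger, header);
    // ex.getData() holds the raw payload that failed to deserialize
}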
@@ -16,18 +16,18 @@ package org.springframework.kafka.support; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.BDDMockito.given; -import static org.mockito.Mockito.mock; - import java.util.concurrent.atomic.AtomicInteger; import org.apache.kafka.streams.kstream.KStream; import org.apache.kafka.streams.kstream.Predicate; import org.junit.jupiter.api.Test; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.mock; + /** * @author Ivan Ponomarev * @author Artem Bilan diff --git a/spring-kafka/src/test/java/org/springframework/kafka/support/LogIfLevelEnabledTests.java b/spring-kafka/src/test/java/org/springframework/kafka/support/LogIfLevelEnabledTests.java index 597f199678..9da1a72a07 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/support/LogIfLevelEnabledTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/support/LogIfLevelEnabledTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2019 the original author or authors. + * Copyright 2018-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,18 +16,18 @@ package org.springframework.kafka.support; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.withSettings; - import java.util.function.Supplier; import org.junit.jupiter.api.Test; import org.springframework.core.log.LogAccessor; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.withSettings; + /** * @author Gary Russell * @since 2.2.1 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/support/LoggingProducerListenerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/support/LoggingProducerListenerTests.java index 83220be1d2..20cc10fb9e 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/support/LoggingProducerListenerTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/support/LoggingProducerListenerTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2021 the original author or authors. + * Copyright 2021-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,11 +16,6 @@ package org.springframework.kafka.support; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.spy; - import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; @@ -31,6 +26,11 @@ import org.springframework.core.log.LogAccessor; import org.springframework.kafka.test.utils.KafkaTestUtils; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.BDDMockito.willAnswer; +import static org.mockito.Mockito.spy; + /** * @author Gary Russell * @since 2.5.16 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/support/SimpleKafkaHeaderMapperTests.java b/spring-kafka/src/test/java/org/springframework/kafka/support/SimpleKafkaHeaderMapperTests.java index 77caa3d81a..9b19bfda6b 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/support/SimpleKafkaHeaderMapperTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/support/SimpleKafkaHeaderMapperTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2019-2022 the original author or authors. + * Copyright 2019-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,10 +16,6 @@ package org.springframework.kafka.support; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatIllegalStateException; -import static org.assertj.core.api.Assertions.entry; - import java.nio.ByteBuffer; import java.util.HashMap; import java.util.Map; @@ -32,6 +28,10 @@ import org.springframework.messaging.MessageHeaders; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatIllegalStateException; +import static org.assertj.core.api.Assertions.entry; + /** * @author Gary Russell * @since 2.2.5 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/support/TopicPartitionOffsetTests.java b/spring-kafka/src/test/java/org/springframework/kafka/support/TopicPartitionOffsetTests.java index 1ad0d983b5..b8aab7914a 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/support/TopicPartitionOffsetTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/support/TopicPartitionOffsetTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2020-2022 the original author or authors. + * Copyright 2020-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,8 +16,6 @@ package org.springframework.kafka.support; -import static org.assertj.core.api.Assertions.assertThat; - import java.util.Objects; import org.apache.kafka.common.TopicPartition; @@ -25,6 +23,8 @@ import org.springframework.kafka.support.TopicPartitionOffset.SeekPosition; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Gary Russell * @since 2.3.13 diff --git a/spring-kafka/src/test/java/org/springframework/kafka/support/converter/BatchMessageConverterTests.java b/spring-kafka/src/test/java/org/springframework/kafka/support/converter/BatchMessageConverterTests.java index 1e7921194a..0d3ee939ee 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/support/converter/BatchMessageConverterTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/support/converter/BatchMessageConverterTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2017-2022 the original author or authors. + * Copyright 2017-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,9 +16,6 @@ package org.springframework.kafka.support.converter; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; - import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -42,6 +39,9 @@ import org.springframework.messaging.Message; import org.springframework.messaging.MessageHeaders; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; + /** * @author Biju Kunjummen * @author Artem Bilan diff --git a/spring-kafka/src/test/java/org/springframework/kafka/support/converter/MessagingMessageConverterTests.java b/spring-kafka/src/test/java/org/springframework/kafka/support/converter/MessagingMessageConverterTests.java index 2386881e18..64abf4f1b8 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/support/converter/MessagingMessageConverterTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/support/converter/MessagingMessageConverterTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2019-2023 the original author or authors. + * Copyright 2019-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,8 +16,6 @@ package org.springframework.kafka.support.converter; -import static org.assertj.core.api.Assertions.assertThat; - import java.util.Arrays; import java.util.Collection; import java.util.Map; @@ -40,6 +38,8 @@ import org.springframework.messaging.support.GenericMessage; import org.springframework.util.MimeType; +import static org.assertj.core.api.Assertions.assertThat; + /** * @author Gary Russell * @author Artem Bilan diff --git a/spring-kafka/src/test/java/org/springframework/kafka/support/converter/ProjectingMessageConverterTests.java b/spring-kafka/src/test/java/org/springframework/kafka/support/converter/ProjectingMessageConverterTests.java index dd17646d99..fc994c6c75 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/support/converter/ProjectingMessageConverterTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/support/converter/ProjectingMessageConverterTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2018-2019 the original author or authors. + * Copyright 2018-2024 the original author or authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,15 +16,11 @@ package org.springframework.kafka.support.converter; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatExceptionOfType; -import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; -import static org.mockito.Mockito.doReturn; - import java.nio.charset.StandardCharsets; import java.util.HashMap; import java.util.Map; +import com.jayway.jsonpath.DocumentContext; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.common.utils.Bytes; import org.junit.jupiter.api.Test; @@ -37,7 +33,10 @@ import org.springframework.kafka.support.KafkaNull; import org.springframework.messaging.support.MessageBuilder; -import com.jayway.jsonpath.DocumentContext; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; +import static org.mockito.Mockito.doReturn; /** * @author Oliver Gierke diff --git a/spring-kafka/src/test/java/org/springframework/kafka/support/micrometer/MicrometerHolderTests.java b/spring-kafka/src/test/java/org/springframework/kafka/support/micrometer/MicrometerHolderTests.java index 4ee64bcf00..73efecdd1e 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/support/micrometer/MicrometerHolderTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/support/micrometer/MicrometerHolderTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2020-2023 the original author or authors. + * Copyright 2020-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,18 +16,12 @@ package org.springframework.kafka.support.micrometer; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatIllegalStateException; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.BDDMockito.given; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; - import java.util.Collections; import java.util.Map; +import io.micrometer.core.instrument.MeterRegistry; +import io.micrometer.core.instrument.Timer; +import io.micrometer.core.instrument.simple.SimpleMeterRegistry; import org.junit.jupiter.api.Test; import org.springframework.beans.factory.ObjectProvider; @@ -37,9 +31,14 @@ import org.springframework.context.annotation.Primary; import org.springframework.test.util.ReflectionTestUtils; -import io.micrometer.core.instrument.MeterRegistry; -import io.micrometer.core.instrument.Timer; -import io.micrometer.core.instrument.simple.SimpleMeterRegistry; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatIllegalStateException; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; /** * @author Vasyl Sarzhynskyi diff --git a/spring-kafka/src/test/java/org/springframework/kafka/support/micrometer/ObservationIntegrationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/support/micrometer/ObservationIntegrationTests.java index ce3935b38a..4718c75b5f 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/support/micrometer/ObservationIntegrationTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/support/micrometer/ObservationIntegrationTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022-2023 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,14 +16,20 @@ package org.springframework.kafka.support.micrometer; -import static org.assertj.core.api.Assertions.assertThat; - import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import io.micrometer.common.KeyValues; +import io.micrometer.core.tck.MeterRegistryAssert; +import io.micrometer.observation.ObservationRegistry; +import io.micrometer.tracing.Span.Kind; +import io.micrometer.tracing.exporter.FinishedSpan; +import io.micrometer.tracing.test.SampleTestRunner; +import io.micrometer.tracing.test.simple.SpanAssert; +import io.micrometer.tracing.test.simple.SpansAssert; import org.apache.kafka.clients.admin.AdminClientConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; @@ -43,14 +49,7 @@ import org.springframework.kafka.test.EmbeddedKafkaZKBroker; import org.springframework.kafka.test.utils.KafkaTestUtils; -import io.micrometer.common.KeyValues; -import io.micrometer.core.tck.MeterRegistryAssert; -import io.micrometer.observation.ObservationRegistry; -import io.micrometer.tracing.Span.Kind; -import io.micrometer.tracing.exporter.FinishedSpan; -import io.micrometer.tracing.test.SampleTestRunner; -import io.micrometer.tracing.test.simple.SpanAssert; -import io.micrometer.tracing.test.simple.SpansAssert; +import static org.assertj.core.api.Assertions.assertThat; /** * @author Artem Bilan @@ -115,7 +114,6 @@ public SampleTestRunnerConsumer yourCode() { }; } - @Configuration @EnableKafka public static class Config { diff --git a/spring-kafka/src/test/java/org/springframework/kafka/support/micrometer/ObservationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/support/micrometer/ObservationTests.java index d67396aa8b..a020bf0363 100644 --- a/spring-kafka/src/test/java/org/springframework/kafka/support/micrometer/ObservationTests.java +++ b/spring-kafka/src/test/java/org/springframework/kafka/support/micrometer/ObservationTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2022-2023 the original author or authors. + * Copyright 2022-2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,27 +16,48 @@
 package org.springframework.kafka.support.micrometer;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
-import static org.awaitility.Awaitility.await;
-import static org.mockito.Mockito.mock;
-
 import java.util.Arrays;
 import java.util.Deque;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicReference;
 
+import io.micrometer.common.KeyValues;
+import io.micrometer.core.instrument.MeterRegistry;
+import io.micrometer.core.instrument.observation.DefaultMeterObservationHandler;
+import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
+import io.micrometer.core.tck.MeterRegistryAssert;
+import io.micrometer.observation.Observation;
+import io.micrometer.observation.ObservationHandler;
+import io.micrometer.observation.ObservationRegistry;
+import io.micrometer.observation.tck.TestObservationRegistry;
+import io.micrometer.tracing.Span;
+import io.micrometer.tracing.TraceContext;
+import io.micrometer.tracing.Tracer;
+import io.micrometer.tracing.handler.DefaultTracingObservationHandler;
+import io.micrometer.tracing.handler.PropagatingReceiverTracingObservationHandler;
+import io.micrometer.tracing.handler.PropagatingSenderTracingObservationHandler;
+import io.micrometer.tracing.propagation.Propagator;
+import io.micrometer.tracing.test.simple.SimpleSpan;
+import io.micrometer.tracing.test.simple.SimpleTracer;
 import org.apache.kafka.clients.admin.AdminClientConfig;
+import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerConfig;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.ProducerInterceptor;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.clients.producer.RecordMetadata;
 import org.apache.kafka.common.errors.InvalidTopicException;
+import org.apache.kafka.common.header.Header;
 import org.apache.kafka.common.header.Headers;
 import org.junit.jupiter.api.Test;
+import reactor.core.publisher.Mono;
 
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.beans.factory.annotation.Qualifier;
@@ -54,7 +75,8 @@ import org.springframework.kafka.core.KafkaAdmin;
 import org.springframework.kafka.core.KafkaTemplate;
 import org.springframework.kafka.core.ProducerFactory;
-import org.springframework.kafka.listener.AbstractMessageListenerContainer;
+import org.springframework.kafka.listener.MessageListenerContainer;
+import org.springframework.kafka.listener.RecordInterceptor;
 import org.springframework.kafka.support.micrometer.KafkaListenerObservation.DefaultKafkaListenerObservationConvention;
 import org.springframework.kafka.support.micrometer.KafkaTemplateObservation.DefaultKafkaTemplateObservationConvention;
 import org.springframework.kafka.test.EmbeddedKafkaBroker;
@@ -63,36 +85,43 @@ import org.springframework.lang.Nullable;
 import org.springframework.test.annotation.DirtiesContext;
 import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
+import org.springframework.util.StringUtils;
 
-import io.micrometer.common.KeyValues;
-import io.micrometer.core.instrument.MeterRegistry;
-import io.micrometer.core.instrument.observation.DefaultMeterObservationHandler;
-import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
-import io.micrometer.core.tck.MeterRegistryAssert;
-import io.micrometer.observation.ObservationHandler;
-import io.micrometer.observation.ObservationRegistry;
-import io.micrometer.observation.tck.TestObservationRegistry;
-import io.micrometer.tracing.Span;
-import io.micrometer.tracing.TraceContext;
-import io.micrometer.tracing.Tracer;
-import io.micrometer.tracing.handler.DefaultTracingObservationHandler;
-import io.micrometer.tracing.handler.PropagatingReceiverTracingObservationHandler;
-import io.micrometer.tracing.handler.PropagatingSenderTracingObservationHandler;
-import io.micrometer.tracing.propagation.Propagator;
-import io.micrometer.tracing.test.simple.SimpleSpan;
-import io.micrometer.tracing.test.simple.SimpleTracer;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
+import static org.awaitility.Awaitility.await;
+import static org.mockito.Mockito.mock;
 
 /**
  * @author Gary Russell
  * @author Artem Bilan
+ * @author Wang Zhiyang
+ * @author Christian Mergenthaler
+ * @author Soby Chacko
  *
  * @since 3.0
  */
 @SpringJUnitConfig
-@EmbeddedKafka(topics = { "observation.testT1", "observation.testT2", "ObservationTests.testT3" })
+@EmbeddedKafka(topics = { ObservationTests.OBSERVATION_TEST_1, ObservationTests.OBSERVATION_TEST_2,
+		ObservationTests.OBSERVATION_TEST_3, ObservationTests.OBSERVATION_RUNTIME_EXCEPTION,
+		ObservationTests.OBSERVATION_ERROR }, partitions = 1)
 @DirtiesContext
 public class ObservationTests {
 
+	public final static String OBSERVATION_TEST_1 = "observation.testT1";
+
+	public final static String OBSERVATION_TEST_2 = "observation.testT2";
+
+	public final static String OBSERVATION_TEST_3 = "observation.testT3";
+
+	public final static String OBSERVATION_RUNTIME_EXCEPTION = "observation.runtime-exception";
+
+	public final static String OBSERVATION_ERROR = "observation.error.sync";
+
+	public final static String OBSERVATION_ERROR_COMPLETABLE_FUTURE = "observation.error.completableFuture";
+
+	public final static String OBSERVATION_ERROR_MONO = "observation.error.mono";
+
 	@Test
 	void endToEnd(@Autowired Listener listener, @Autowired KafkaTemplate<Integer, String> template,
 			@Autowired SimpleTracer tracer, @Autowired KafkaListenerEndpointRegistry rler,
@@ -102,35 +131,72 @@ void endToEnd(@Autowired Listener listener, @Autowired KafkaTemplate<Integer, S
+		AtomicReference<Span> spanFromCallback = new AtomicReference<>();
+
+		template.setProducerInterceptor(new ProducerInterceptor<>() {
+
+			@Override
+			public ProducerRecord<Integer, String> onSend(ProducerRecord<Integer, String> record) {
+				tracer.currentSpanCustomizer().tag("key", "value");
+				return record;
+			}
+
+			@Override
+			public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
+
+			}
+
+			@Override
+			public void close() {
+
+			}
+
+			@Override
+			public void configure(Map<String, ?> configs) {
+
+			}
+
+		});
+
+		MessageListenerContainer listenerContainer1 = rler.getListenerContainer("obs1");
+		listenerContainer1.stop();
+
+		template.send(OBSERVATION_TEST_1, "test")
+				.thenAccept((sendResult) -> spanFromCallback.set(tracer.currentSpan()))
+				.get(10, TimeUnit.SECONDS);
+
+		Deque<SimpleSpan> spans = tracer.getSpans();
+		assertThat(spans).hasSize(1);
+
+		SimpleSpan templateSpan = spans.peek();
+		assertThat(templateSpan).isNotNull();
+		assertThat(templateSpan.getTags()).containsAllEntriesOf(Map.of(
+				"key", "value"));
+
+		assertThat(spanFromCallback.get()).isNotNull();
+
+		listenerContainer1.start();
+		MessageListenerContainer listenerContainer2 = rler.getListenerContainer("obs2");
+		assertThat(listenerContainer1).isNotNull();
+		assertThat(listenerContainer2).isNotNull();
+		// consumer factory broker different to admin
+		assertThatContainerAdmin(listenerContainer1, admin,
+				broker.getBrokersAsString() + "," + broker.getBrokersAsString() + ","
+						+ broker.getBrokersAsString());
+		// broker override in annotation
+		assertThatContainerAdmin(listenerContainer2, admin, broker.getBrokersAsString());
+		assertThat(listener.latch1.await(10, TimeUnit.SECONDS)).isTrue();
+		listenerContainer1.stop();
+		listenerContainer2.stop();
+
 		assertThat(listener.record).isNotNull();
 		Headers headers = listener.record.headers();
-		assertThat(headers.lastHeader("foo")).extracting(hdr -> hdr.value()).isEqualTo("some foo value".getBytes());
-		assertThat(headers.lastHeader("bar")).extracting(hdr -> hdr.value()).isEqualTo("some bar value".getBytes());
-		Deque<SimpleSpan> spans = tracer.getSpans();
+		assertThat(headers.lastHeader("foo")).extracting(Header::value).isEqualTo("some foo value".getBytes());
+		assertThat(headers.lastHeader("bar")).extracting(Header::value).isEqualTo("some bar value".getBytes());
+		spans = tracer.getSpans();
 		assertThat(spans).hasSize(4);
-		SimpleSpan span = spans.poll();
-		assertThat(span.getTags()).containsEntry("spring.kafka.template.name", "template");
-		assertThat(span.getName()).isEqualTo("observation.testT1 send");
-		assertThat(span.getRemoteServiceName()).startsWith("Apache Kafka: ");
-		await().until(() -> spans.peekFirst().getTags().size() == 3);
-		span = spans.poll();
-		assertThat(span.getTags())
-				.containsAllEntriesOf(
-						Map.of("spring.kafka.listener.id", "obs1-0", "foo", "some foo value", "bar", "some bar value"));
-		assertThat(span.getName()).isEqualTo("observation.testT1 receive");
-		assertThat(span.getRemoteServiceName()).startsWith("Apache Kafka: ");
-		await().until(() -> spans.peekFirst().getTags().size() == 1);
-		span = spans.poll();
-		assertThat(span.getTags()).containsEntry("spring.kafka.template.name", "template");
-		assertThat(span.getName()).isEqualTo("observation.testT2 send");
-		await().until(() -> spans.peekFirst().getTags().size() == 3);
-		span = spans.poll();
-		assertThat(span.getTags())
-				.containsAllEntriesOf(
-						Map.of("spring.kafka.listener.id", "obs2-0", "foo", "some foo value", "bar", "some bar value"));
-		assertThat(span.getName()).isEqualTo("observation.testT2 receive");
+		assertThatTemplateSpanTags(spans, 6, OBSERVATION_TEST_1);
+		assertThatListenerSpanTags(spans, 12, OBSERVATION_TEST_1, "obs1-0", "obs1", "0", "0");
+		assertThatTemplateSpanTags(spans, 6, OBSERVATION_TEST_2);
+		assertThatListenerSpanTags(spans, 12, OBSERVATION_TEST_2, "obs2-0", "obs2", "0", "0");
 		template.setObservationConvention(new DefaultKafkaTemplateObservationConvention() {
 
 			@Override
@@ -139,7 +205,9 @@ public KeyValues getLowCardinalityKeyValues(KafkaRecordSenderContext context) {
 			}
 
 		});
-		rler.getListenerContainer("obs1").getContainerProperties().setObservationConvention(
+		template.send(OBSERVATION_TEST_1, "test").get(10, TimeUnit.SECONDS);
+
+		listenerContainer1.getContainerProperties().setObservationConvention(
 				new DefaultKafkaListenerObservationConvention() {
 
 					@Override
@@ -148,77 +216,44 @@ public KeyValues getLowCardinalityKeyValues(KafkaRecordReceiverContext context)
 					}
 
 				});
-		rler.getListenerContainer("obs1").stop();
-		rler.getListenerContainer("obs1").start();
-		template.send("observation.testT1", "test").get(10, TimeUnit.SECONDS);
+
+		listenerContainer2.start();
+		listenerContainer1.start();
 		assertThat(listener.latch2.await(10, TimeUnit.SECONDS)).isTrue();
+		listenerContainer1.stop();
+		listenerContainer2.stop();
+
 		assertThat(listener.record).isNotNull();
 		headers = listener.record.headers();
-		assertThat(headers.lastHeader("foo")).extracting(hdr -> hdr.value()).isEqualTo("some foo value".getBytes());
-		assertThat(headers.lastHeader("bar")).extracting(hdr -> hdr.value()).isEqualTo("some bar value".getBytes());
+		assertThat(headers.lastHeader("foo")).extracting(Header::value).isEqualTo("some foo value".getBytes());
+		assertThat(headers.lastHeader("bar")).extracting(Header::value).isEqualTo("some bar value".getBytes());
 		assertThat(spans).hasSize(4);
-		span = spans.poll();
-		assertThat(span.getTags()).containsEntry("spring.kafka.template.name", "template");
-		assertThat(span.getTags()).containsEntry("foo", "bar");
-		assertThat(span.getName()).isEqualTo("observation.testT1 send");
-		await().until(() -> spans.peekFirst().getTags().size() == 4);
-		span = spans.poll();
-		assertThat(span.getTags())
-				.containsAllEntriesOf(Map.of("spring.kafka.listener.id", "obs1-0", "foo", "some foo value", "bar",
-						"some bar value", "baz", "qux"));
-		assertThat(span.getName()).isEqualTo("observation.testT1 receive");
-		await().until(() -> spans.peekFirst().getTags().size() == 2);
-		span = spans.poll();
-		assertThat(span.getTags()).containsEntry("spring.kafka.template.name", "template");
-		assertThat(span.getTags()).containsEntry("foo", "bar");
-		assertThat(span.getName()).isEqualTo("observation.testT2 send");
-		await().until(() -> spans.peekFirst().getTags().size() == 3);
-		span = spans.poll();
-		assertThat(span.getTags())
-				.containsAllEntriesOf(
-						Map.of("spring.kafka.listener.id", "obs2-0", "foo", "some foo value", "bar", "some bar value"));
+		assertThatTemplateSpanTags(spans, 7, OBSERVATION_TEST_1, Map.entry("foo", "bar"));
+		assertThatListenerSpanTags(spans, 13, OBSERVATION_TEST_1, "obs1-0", "obs1", "1", "0", Map.entry("baz", "qux"));
+		assertThatTemplateSpanTags(spans, 7, OBSERVATION_TEST_2, Map.entry("foo", "bar"));
+		SimpleSpan span = assertThatListenerSpanTags(spans, 12, OBSERVATION_TEST_2, "obs2-0", "obs2", "1", "0");
 		assertThat(span.getTags()).doesNotContainEntry("baz", "qux");
-		assertThat(span.getName()).isEqualTo("observation.testT2 receive");
-		MeterRegistryAssert.assertThat(meterRegistry)
-				.hasTimerWithNameAndTags("spring.kafka.template",
-						KeyValues.of("spring.kafka.template.name", "template"))
-				.hasTimerWithNameAndTags("spring.kafka.template",
-						KeyValues.of("spring.kafka.template.name", "template", "foo", "bar"))
-				.hasTimerWithNameAndTags("spring.kafka.listener", KeyValues.of("spring.kafka.listener.id", "obs1-0"))
-				.hasTimerWithNameAndTags("spring.kafka.listener",
-						KeyValues.of("spring.kafka.listener.id", "obs1-0", "baz", "qux"))
-				.hasTimerWithNameAndTags("spring.kafka.listener", KeyValues.of("spring.kafka.listener.id", "obs2-0"));
+		MeterRegistryAssert meterRegistryAssert = MeterRegistryAssert.assertThat(meterRegistry);
+		assertThatTemplateHasTimerWithNameAndTags(meterRegistryAssert, OBSERVATION_TEST_1);
+		assertThatListenerHasTimerWithNameAndTags(meterRegistryAssert, OBSERVATION_TEST_1, "obs1", "obs1-0");
+		assertThatTemplateHasTimerWithNameAndTags(meterRegistryAssert, OBSERVATION_TEST_2, "foo", "bar");
+		assertThatListenerHasTimerWithNameAndTags(meterRegistryAssert, OBSERVATION_TEST_1, "obs1", "obs1-0",
+				"baz", "qux");
+		assertThatListenerHasTimerWithNameAndTags(meterRegistryAssert, OBSERVATION_TEST_2, "obs2", "obs2-0");
+
 		assertThat(admin.getConfigurationProperties())
-				.containsEntry(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, broker.getBrokersAsString());
+				.containsEntry(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, List.of(broker.getBrokersAsString()));
 		// producer factory broker different to admin
-		KafkaAdmin pAdmin = KafkaTestUtils.getPropertyValue(template, "kafkaAdmin", KafkaAdmin.class);
-		assertThat(pAdmin.getOperationTimeout()).isEqualTo(admin.getOperationTimeout());
-		assertThat(pAdmin.getConfigurationProperties())
-				.containsEntry(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
-						broker.getBrokersAsString() + "," + broker.getBrokersAsString());
+		assertThatAdmin(template, admin, broker.getBrokersAsString() + "," + broker.getBrokersAsString(),
+				"kafkaAdmin");
 		// custom admin
 		assertThat(customTemplate.getKafkaAdmin()).isSameAs(config.mockAdmin);
-		// consumer factory broker different to admin
-		Object container = KafkaTestUtils
-				.getPropertyValue(endpointRegistry.getListenerContainer("obs1"), "containers", List.class).get(0);
-		KafkaAdmin cAdmin = KafkaTestUtils.getPropertyValue(container, "listenerConsumer.kafkaAdmin", KafkaAdmin.class);
-		assertThat(cAdmin.getOperationTimeout()).isEqualTo(admin.getOperationTimeout());
-		assertThat(cAdmin.getConfigurationProperties())
-				.containsEntry(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
-						broker.getBrokersAsString() + "," + broker.getBrokersAsString() + ","
-								+ broker.getBrokersAsString());
-		// broker override in annotation
-		container = KafkaTestUtils
-				.getPropertyValue(endpointRegistry.getListenerContainer("obs2"), "containers", List.class).get(0);
-		cAdmin = KafkaTestUtils.getPropertyValue(container, "listenerConsumer.kafkaAdmin", KafkaAdmin.class);
-		assertThat(cAdmin.getOperationTimeout()).isEqualTo(admin.getOperationTimeout());
-		assertThat(cAdmin.getConfigurationProperties())
-				.containsEntry(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, broker.getBrokersAsString());
 		// custom admin
-		container = KafkaTestUtils
-				.getPropertyValue(endpointRegistry.getListenerContainer("obs3"), "containers", List.class).get(0);
-		cAdmin = KafkaTestUtils.getPropertyValue(container, "listenerConsumer.kafkaAdmin", KafkaAdmin.class);
+		Object container = KafkaTestUtils.getPropertyValue(
+				endpointRegistry.getListenerContainer("obs3"), "containers", List.class).get(0);
+		KafkaAdmin cAdmin = KafkaTestUtils.getPropertyValue(
+				container, "listenerConsumer.kafkaAdmin", KafkaAdmin.class);
 		assertThat(cAdmin).isSameAs(config.mockAdmin);
 
 		assertThatExceptionOfType(KafkaException.class)
@@ -230,21 +265,205 @@ public KeyValues getLowCardinalityKeyValues(KafkaRecordReceiverContext context)
 				.doesNotHaveMeterWithNameAndTags("spring.kafka.template", KeyValues.of("error", "KafkaException"));
 	}
 
+	@SafeVarargs
+	@SuppressWarnings("varargs")
+	private void assertThatTemplateSpanTags(Deque<SimpleSpan> spans, int tagSize, String destName,
+			Map.Entry<String, String>... keyValues) {
+
+		SimpleSpan span = spans.poll();
+		assertThat(span).isNotNull();
+		await().until(() -> span.getTags().size() == tagSize);
+		assertThat(span.getTags()).containsAllEntriesOf(Map.of(
+				"spring.kafka.template.name", "template",
+				"messaging.operation", "publish",
+				"messaging.system", "kafka",
+				"messaging.destination.kind", "topic",
+				"messaging.destination.name", destName));
+		if (keyValues != null && keyValues.length > 0) {
+			Arrays.stream(keyValues).forEach(entry -> assertThat(span.getTags()).contains(entry));
+		}
+		assertThat(span.getName()).isEqualTo(destName + " send");
+		assertThat(span.getRemoteServiceName()).startsWith("Apache Kafka: ");
+	}
+
+	@SafeVarargs
+	@SuppressWarnings("varargs")
+	private SimpleSpan assertThatListenerSpanTags(Deque<SimpleSpan> spans, int tagSize, String sourceName,
+			String listenerId, String consumerGroup, String offset, String partition,
+			Map.Entry<String, String>... keyValues) {
+
+		SimpleSpan span = spans.poll();
+		assertThat(span).isNotNull();
+		await().until(() -> span.getTags().size() == tagSize);
+		String clientId = span.getTags().get("messaging.kafka.client_id");
+		assertThat(span.getTags())
+				.containsAllEntriesOf(
+						Map.ofEntries(Map.entry("spring.kafka.listener.id", listenerId),
+								Map.entry("foo", "some foo value"),
+								Map.entry("bar", "some bar value"),
+								Map.entry("messaging.consumer.id", consumerGroup + " - " + clientId),
+								Map.entry("messaging.kafka.consumer.group", consumerGroup),
+								Map.entry("messaging.kafka.message.offset", offset),
+								Map.entry("messaging.kafka.source.partition", partition),
+								Map.entry("messaging.operation", "receive"),
+								Map.entry("messaging.source.kind", "topic"),
+								Map.entry("messaging.source.name", sourceName),
+								Map.entry("messaging.system", "kafka")));
+		if (keyValues != null && keyValues.length > 0) {
+			Arrays.stream(keyValues).forEach(entry -> assertThat(span.getTags()).contains(entry));
+		}
+		assertThat(span.getName()).isEqualTo(sourceName + " receive");
+		return span;
+	}
+
+	private void assertThatTemplateHasTimerWithNameAndTags(MeterRegistryAssert meterRegistryAssert, String destName,
+			String... keyValues) {
+
+		meterRegistryAssert.hasTimerWithNameAndTags("spring.kafka.template",
+				KeyValues.of("spring.kafka.template.name", "template",
+								"messaging.operation", "publish",
+								"messaging.system", "kafka",
+								"messaging.destination.kind", "topic",
+								"messaging.destination.name", destName)
+						.and(keyValues));
+	}
+
+	private void assertThatListenerHasTimerWithNameAndTags(MeterRegistryAssert meterRegistryAssert, String destName,
+			String consumerGroup, String listenerId, String... keyValues) {
+
+		meterRegistryAssert.hasTimerWithNameAndTags("spring.kafka.listener",
+				KeyValues.of(
+								"messaging.kafka.consumer.group", consumerGroup,
+								"messaging.operation", "receive",
+								"messaging.source.kind", "topic",
+								"messaging.source.name", destName,
+								"messaging.system", "kafka",
+								"spring.kafka.listener.id", listenerId)
+						.and(keyValues));
+	}
+
+	private void assertThatContainerAdmin(MessageListenerContainer listenerContainer, KafkaAdmin admin,
+			String brokersString) {
+
+		Object container = KafkaTestUtils.getPropertyValue(listenerContainer, "containers", List.class).get(0);
+		assertThatAdmin(container, admin, brokersString, "listenerConsumer.kafkaAdmin");
+	}
+
+	private void assertThatAdmin(Object object, KafkaAdmin admin, String brokersString, String key) {
+		KafkaAdmin cAdmin = KafkaTestUtils.getPropertyValue(object, key, KafkaAdmin.class);
+		assertThat(cAdmin.getOperationTimeout()).isEqualTo(admin.getOperationTimeout());
+		assertThat(cAdmin.getConfigurationProperties())
+				.containsEntry(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, brokersString);
+	}
+
+	@Test
+	void observationRuntimeException(@Autowired ExceptionListener listener, @Autowired SimpleTracer tracer,
+			@Autowired @Qualifier("throwableTemplate") KafkaTemplate<Integer, String> runtimeExceptionTemplate,
+			@Autowired KafkaListenerEndpointRegistry endpointRegistry, @Autowired Config config)
+			throws ExecutionException, InterruptedException, TimeoutException {
+
+		runtimeExceptionTemplate.send(OBSERVATION_RUNTIME_EXCEPTION, "testRuntimeException").get(10, TimeUnit.SECONDS);
+		assertThat(listener.latch4.await(10, TimeUnit.SECONDS)).isTrue();
+		endpointRegistry.getListenerContainer("obs4").stop();
+
+		Deque<SimpleSpan> spans = tracer.getSpans();
+		assertThat(spans).hasSize(2);
+		SimpleSpan span = spans.poll();
+		assertThat(span.getTags().get("spring.kafka.template.name")).isEqualTo("throwableTemplate");
+		span = spans.poll();
+		assertThat(span.getTags().get("spring.kafka.listener.id")).isEqualTo("obs4-0");
+		assertThat(span.getError().getCause())
+				.isInstanceOf(IllegalStateException.class)
+				.hasMessage("obs4 run time exception");
+
+		assertThat(config.scopeInFailureReference.get()).isNotNull();
+	}
+
+	@Test
+	void observationErrorException(@Autowired ExceptionListener listener, @Autowired SimpleTracer tracer,
+			@Autowired @Qualifier("throwableTemplate") KafkaTemplate<Integer, String> errorTemplate,
+			@Autowired KafkaListenerEndpointRegistry endpointRegistry)
+			throws ExecutionException, InterruptedException, TimeoutException {
+
+		errorTemplate.send(OBSERVATION_ERROR, "testError").get(10, TimeUnit.SECONDS);
+		assertThat(listener.latch5.await(10, TimeUnit.SECONDS)).isTrue();
+		endpointRegistry.getListenerContainer("obs5").stop();
+
+		Deque<SimpleSpan> spans = tracer.getSpans();
+		assertThat(spans).hasSize(2);
+		SimpleSpan span = spans.poll();
+		assertThat(span.getTags().get("spring.kafka.template.name")).isEqualTo("throwableTemplate");
+		span = spans.poll();
+		assertThat(span.getTags().get("spring.kafka.listener.id")).isEqualTo("obs5-0");
+		assertThat(span.getError())
+				.isInstanceOf(Error.class)
+				.hasMessage("obs5 error");
+	}
+
+	@Test
+	void observationErrorExceptionWhenCompletableFutureReturned(@Autowired ExceptionListener listener, @Autowired SimpleTracer tracer,
+			@Autowired @Qualifier("throwableTemplate") KafkaTemplate<Integer, String> errorTemplate,
+			@Autowired KafkaListenerEndpointRegistry endpointRegistry)
+			throws ExecutionException, InterruptedException, TimeoutException {
+
+		errorTemplate.send(OBSERVATION_ERROR_COMPLETABLE_FUTURE, "testError").get(10, TimeUnit.SECONDS);
+
+		Deque<SimpleSpan> spans = tracer.getSpans();
+		await().untilAsserted(() -> assertThat(spans).hasSize(2));
+		SimpleSpan span = spans.poll();
+		assertThat(span.getTags().get("spring.kafka.template.name")).isEqualTo("throwableTemplate");
+		span = spans.poll();
+		assertThat(span.getTags().get("spring.kafka.listener.id")).isEqualTo("obs6-0");
+		assertThat(span.getError())
+				.isInstanceOf(Error.class)
+				.hasMessage("Should report metric.");
+	}
+
+	@Test
+	void observationErrorExceptionWhenMonoReturned(@Autowired ExceptionListener listener, @Autowired SimpleTracer tracer,
+			@Autowired @Qualifier("throwableTemplate") KafkaTemplate<Integer, String> errorTemplate,
+			@Autowired KafkaListenerEndpointRegistry endpointRegistry)
+			throws ExecutionException, InterruptedException, TimeoutException {
+
+		errorTemplate.send(OBSERVATION_ERROR_MONO, "testError").get(10, TimeUnit.SECONDS);
+		Deque<SimpleSpan> spans = tracer.getSpans();
+		await().untilAsserted(() -> assertThat(spans).hasSize(2));
+		SimpleSpan span = spans.poll();
+		assertThat(span.getTags().get("spring.kafka.template.name")).isEqualTo("throwableTemplate");
+		span = spans.poll();
+		assertThat(span.getTags().get("spring.kafka.listener.id")).isEqualTo("obs7-0");
+		assertThat(span.getError())
+				.isInstanceOf(Error.class)
+				.hasMessage("Should report metric.");
+	}
+
+	@Test
+	void kafkaAdminNotRecreatedIfBootstrapServersSameInProducerAndAdminConfig(
+			@Autowired @Qualifier("reuseAdminBeanKafkaTemplate") KafkaTemplate<Integer, String> template,
+			@Autowired KafkaAdmin kafkaAdmin) {
+		// See this issue for more details: https://github.com/spring-projects/spring-kafka/issues/3466
+		assertThat(template.getKafkaAdmin()).isSameAs(kafkaAdmin);
+	}
+
 	@Configuration
 	@EnableKafka
 	public static class Config {
 
 		KafkaAdmin mockAdmin = mock(KafkaAdmin.class);
 
+		AtomicReference<Observation.Scope> scopeInFailureReference = new AtomicReference<>();
+
 		@Bean
 		KafkaAdmin admin(EmbeddedKafkaBroker broker) {
+			String[] brokers = StringUtils.commaDelimitedListToStringArray(broker.getBrokersAsString());
+			List<String> brokersAsList = Arrays.asList(brokers);
 			KafkaAdmin admin = new KafkaAdmin(
-					Map.of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, broker.getBrokersAsString()));
+					Map.of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, brokersAsList));
 			admin.setOperationTimeout(42);
 			return admin;
 		}
 
 		@Bean
+		@Primary
 		ProducerFactory<Integer, String> producerFactory(EmbeddedKafkaBroker broker) {
 			Map<String, Object> producerProps = KafkaTestUtils.producerProps(broker);
 			producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, broker.getBrokersAsString() + ","
@@ -252,6 +471,13 @@ ProducerFactory<Integer, String> producerFactory(EmbeddedKafkaBroker broker) {
 			return new DefaultKafkaProducerFactory<>(producerProps);
 		}
 
+		@Bean
+		ProducerFactory<Integer, String> customProducerFactory(EmbeddedKafkaBroker broker) {
+			Map<String, Object> producerProps = KafkaTestUtils.producerProps(broker);
+			producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, broker.getBrokersAsString());
+			return new DefaultKafkaProducerFactory<>(producerProps);
+		}
+
 		@Bean
 		ConsumerFactory<Integer, String> consumerFactory(EmbeddedKafkaBroker broker) {
 			Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("obs", "false", broker);
@@ -276,9 +502,24 @@ KafkaTemplate<Integer, String> customTemplate(ProducerFactory<Integer, String> p
 			return template;
 		}
 
+		@Bean
+		KafkaTemplate<Integer, String> throwableTemplate(ProducerFactory<Integer, String> pf) {
+			KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
+			template.setObservationEnabled(true);
+			return template;
+		}
+
+		@Bean
+		KafkaTemplate<Integer, String> reuseAdminBeanKafkaTemplate(
+				@Qualifier("customProducerFactory") ProducerFactory<Integer, String> pf) {
+			KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
+			template.setObservationEnabled(true);
+			return template;
+		}
+
 		@Bean
 		ConcurrentKafkaListenerContainerFactory<Integer, String> kafkaListenerContainerFactory(
-				ConsumerFactory<Integer, String> cf) {
+				ConsumerFactory<Integer, String> cf, ObservationRegistry observationRegistry) {
 
 			ConcurrentKafkaListenerContainerFactory<Integer, String> factory =
 					new ConcurrentKafkaListenerContainerFactory<>();
@@ -286,7 +527,25 @@ ConcurrentKafkaListenerContainerFactory<Integer, String> kafkaListenerContainerF
 			factory.getContainerProperties().setObservationEnabled(true);
 			factory.setContainerCustomizer(container -> {
 				if (container.getListenerId().equals("obs3")) {
-					((AbstractMessageListenerContainer<Integer, String>) container).setKafkaAdmin(this.mockAdmin);
+					container.setKafkaAdmin(this.mockAdmin);
+				}
+				if (container.getListenerId().equals("obs4")) {
+					container.setRecordInterceptor(new RecordInterceptor<>() {
+
+						@Override
+						public ConsumerRecord<Integer, String> intercept(ConsumerRecord<Integer, String> record,
+								Consumer<Integer, String> consumer) {
+
+							return record;
+						}
+
+						@Override
+						public void failure(ConsumerRecord<Integer, String> record, Exception exception,
+								Consumer<Integer, String> consumer) {
+
+							Config.this.scopeInFailureReference.set(observationRegistry.getCurrentObservationScope());
+						}
+					});
 				}
 			});
 			return factory;
@@ -306,14 +565,14 @@ MeterRegistry meterRegistry() {
 		ObservationRegistry observationRegistry(Tracer tracer, Propagator propagator, MeterRegistry meterRegistry) {
 			TestObservationRegistry observationRegistry = TestObservationRegistry.create();
 			observationRegistry.observationConfig().observationHandler(
-					// Composite will pick the first matching handler
-					new ObservationHandler.FirstMatchingCompositeObservationHandler(
-							// This is responsible for creating a child span on the sender side
-							new PropagatingSenderTracingObservationHandler<>(tracer, propagator),
-							// This is responsible for creating a span on the receiver side
-							new PropagatingReceiverTracingObservationHandler<>(tracer, propagator),
-							// This is responsible for creating a default span
-							new DefaultTracingObservationHandler(tracer)))
+							// Composite will pick the first matching handler
+							new ObservationHandler.FirstMatchingCompositeObservationHandler(
+									// This is responsible for creating a child span on the sender side
+									new PropagatingSenderTracingObservationHandler<>(tracer, propagator),
+									// This is responsible for creating a span on the receiver side
+									new PropagatingReceiverTracingObservationHandler<>(tracer, propagator),
+									// This is responsible for creating a default span
+									new DefaultTracingObservationHandler(tracer)))
 					.observationHandler(new DefaultMeterObservationHandler(meterRegistry));
 			return observationRegistry;
 		}
@@ -352,6 +611,11 @@ Listener listener(KafkaTemplate<Integer, String> template) {
 			return new Listener(template);
 		}
 
+		@Bean
+		ExceptionListener exceptionListener() {
+			return new ExceptionListener();
+		}
+
 	}
 
 	public static class Listener {
@@ -368,12 +632,12 @@ public Listener(KafkaTemplate<Integer, String> template) {
 			this.template = template;
 		}
 
-		@KafkaListener(id = "obs1", topics = "observation.testT1")
+		@KafkaListener(id = "obs1", topics = OBSERVATION_TEST_1)
 		void listen1(ConsumerRecord<Integer, String> in) {
-			this.template.send("observation.testT2", in.value());
+			this.template.send(OBSERVATION_TEST_2, in.value());
 		}
 
-		@KafkaListener(id = "obs2", topics = "observation.testT2",
+		@KafkaListener(id = "obs2", topics = OBSERVATION_TEST_2,
 				properties = ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG + ":" + "#{@embeddedKafka.brokersAsString}")
 		void listen2(ConsumerRecord<Integer, String> in) {
 			this.record = in;
@@ -381,10 +645,50 @@ void listen2(ConsumerRecord<Integer, String> in) {
 			this.latch2.countDown();
 		}
 
-		@KafkaListener(id = "obs3", topics = "observation.testT3")
+		@KafkaListener(id = "obs3", topics = OBSERVATION_TEST_3)
 		void listen3(ConsumerRecord<Integer, String> in) {
 		}
 
 	}
 
+	public static class ExceptionListener {
+
+		final CountDownLatch latch4 = new CountDownLatch(1);
+
+		final CountDownLatch latch5 = new CountDownLatch(1);
+
+		@KafkaListener(id = "obs4", topics = OBSERVATION_RUNTIME_EXCEPTION)
+		void listenRuntimeException(ConsumerRecord<Integer, String> in) {
+			try {
+				throw new IllegalStateException("obs4 run time exception");
+			}
+			finally {
+				this.latch4.countDown();
+			}
+		}
+
+		@KafkaListener(id = "obs5", topics = OBSERVATION_ERROR)
+		void listenError(ConsumerRecord<Integer, String> in) {
+			try {
+				throw new Error("obs5 error");
+			}
+			finally {
+				this.latch5.countDown();
+			}
+		}
+
+		@KafkaListener(id = "obs6", topics = OBSERVATION_ERROR_COMPLETABLE_FUTURE)
+		CompletableFuture<Void> receive(ConsumerRecord<Integer, String> record) {
+			return CompletableFuture.supplyAsync(() -> {
+				throw new Error("Should report metric.");
+			});
+		}
+
+		@KafkaListener(id = "obs7", topics = OBSERVATION_ERROR_MONO)
+		Mono<Void> receive1(ConsumerRecord<Integer, String> record) {
+			return Mono.error(new Error("Should report metric."));
+		}
+
+	}
+
 }
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/DelegatingByTopicSerializationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/DelegatingByTopicSerializationTests.java
index f280d8e1ab..052bb63d1f 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/DelegatingByTopicSerializationTests.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/DelegatingByTopicSerializationTests.java
@@ -16,8 +16,6 @@
 package org.springframework.kafka.support.serializer;
 
-import static org.assertj.core.api.Assertions.assertThat;
-
 import java.nio.ByteBuffer;
 import java.util.HashMap;
 import java.util.Map;
@@ -32,6 +30,8 @@
 import org.apache.kafka.common.serialization.StringSerializer;
 import org.junit.jupiter.api.Test;
 
+import static org.assertj.core.api.Assertions.assertThat;
+
 /**
  * @author Gary Russell
  * @author Wang Zhiyang
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/DelegatingSerializationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/DelegatingSerializationTests.java
index 9cd6a60ea2..ee257fffb4 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/DelegatingSerializationTests.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/DelegatingSerializationTests.java
@@ -16,12 +16,6 @@
 package org.springframework.kafka.support.serializer;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
 import java.nio.ByteBuffer;
 import java.util.Collections;
 import java.util.HashMap;
@@ -45,6 +39,12 @@
 import org.springframework.kafka.support.DefaultKafkaHeaderMapper;
 import org.springframework.messaging.MessageHeaders;
 
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
 /**
  * @author Gary Russell
  * @author Artem Bilan
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/JsonSerdeTests.java b/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/JsonSerdeTests.java
index de6012babb..41806c4fbb 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/JsonSerdeTests.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/JsonSerdeTests.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2019-2021 the original author or authors.
+ * Copyright 2019-2024 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,13 +16,13 @@
 package org.springframework.kafka.support.serializer;
 
-import static org.assertj.core.api.Assertions.assertThat;
-
 import org.junit.jupiter.api.Test;
 
 import org.springframework.kafka.support.mapping.AbstractJavaTypeMapper;
 import org.springframework.kafka.test.utils.KafkaTestUtils;
 
+import static org.assertj.core.api.Assertions.assertThat;
+
 /**
  * @author Gary Russell
  * @since 2.3
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/JsonSerializationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/JsonSerializationTests.java
index ff04ce17af..8510437e27 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/JsonSerializationTests.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/JsonSerializationTests.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2016-2021 the original author or authors.
+ * Copyright 2016-2024 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,11 +16,6 @@
 package org.springframework.kafka.support.serializer;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
-import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException;
-import static org.assertj.core.api.Assertions.assertThatIllegalStateException;
-
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collections;
@@ -30,6 +25,15 @@
 import java.util.Objects;
 import java.util.Set;
 
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonSubTypes;
+import com.fasterxml.jackson.annotation.JsonTypeInfo;
+import com.fasterxml.jackson.core.JsonParseException;
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.JavaType;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.type.TypeFactory;
 import org.apache.kafka.common.errors.SerializationException;
 import org.apache.kafka.common.header.Headers;
 import org.apache.kafka.common.header.internals.RecordHeaders;
@@ -45,15 +49,10 @@
 import org.springframework.kafka.support.serializer.testentities.DummyEntity;
 import org.springframework.kafka.test.utils.KafkaTestUtils;
 
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.fasterxml.jackson.annotation.JsonSubTypes;
-import com.fasterxml.jackson.annotation.JsonTypeInfo;
-import com.fasterxml.jackson.core.JsonParseException;
-import com.fasterxml.jackson.core.type.TypeReference;
-import com.fasterxml.jackson.databind.JavaType;
-import com.fasterxml.jackson.databind.JsonNode;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.type.TypeFactory;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
+import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException;
+import static org.assertj.core.api.Assertions.assertThatIllegalStateException;
 
 /**
  * @author Igor Stepanov
@@ -516,7 +515,7 @@ public static class Child extends Parent {
 		Child(int number) {
 			super(number);
 		}
-  }
+	}
 
 }
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/RetryingDeserializerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/RetryingDeserializerTests.java
index fb1d3846d1..09e5a92c1a 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/RetryingDeserializerTests.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/RetryingDeserializerTests.java
@@ -16,11 +16,6 @@
 package org.springframework.kafka.support.serializer;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.Mockito.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
 import java.util.Map;
@@ -35,6 +30,11 @@
 import org.springframework.retry.RetryContext;
 import org.springframework.retry.support.RetryTemplate;
 
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+
 /**
  * @author Gary Russell
  * @author Wang Zhiyang
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/SerializationIntegrationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/SerializationIntegrationTests.java
index 5b71aed27c..835bfa38f6 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/SerializationIntegrationTests.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/SerializationIntegrationTests.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2021 the original author or authors.
+ * Copyright 2021-2024 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,9 +16,6 @@
 package org.springframework.kafka.support.serializer;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.Mockito.mock;
-
 import java.util.Map;
 import java.util.regex.Pattern;
 
@@ -35,6 +32,9 @@
 import org.springframework.kafka.test.context.EmbeddedKafka;
 import org.springframework.kafka.test.utils.KafkaTestUtils;
 
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+
 /**
  * @author Gary Russell
  * @since 2.8.1
@@ -63,7 +63,8 @@ void configurePreLoadedDelegates() {
 		props.setMessageListener(mock(MessageListener.class));
 		KafkaMessageListenerContainer container = new KafkaMessageListenerContainer<>(cFact, props);
 		container.start();
-		assertThat(KafkaTestUtils.getPropertyValue(container, "listenerConsumer.consumer.valueDeserializer"))
+		assertThat(KafkaTestUtils.getPropertyValue(container,
+				"listenerConsumer.consumer.delegate.deserializers.valueDeserializer"))
 				.isSameAs(delegating);
 		Map delegates = KafkaTestUtils.getPropertyValue(delegating, "delegates", Map.class);
 		assertThat(delegates).hasSize(1);
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/SerializationTestUtils.java b/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/SerializationTestUtils.java
index 4837c6575f..2f7194aa51 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/SerializationTestUtils.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/SerializationTestUtils.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2023 the original author or authors.
+ * Copyright 2023-2024 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,10 +16,16 @@
 package org.springframework.kafka.support.serializer;
 
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectOutputStream;
+import java.io.UncheckedIOException;
+
 import org.apache.kafka.common.header.Header;
 
 /**
  * @author Gary Russell
+ * @author Soby Chacko
  * @since 2.9.11
  *
  */
@@ -32,4 +38,25 @@ public static Header deserializationHeader(String key, byte[] value) {
 		return new DeserializationExceptionHeader(key, value);
 	}
 
+	public static byte[] header(boolean isKey) {
+		return header(createDeserEx(isKey));
+	}
+
+	public static DeserializationException createDeserEx(boolean isKey) {
+		return new DeserializationException(
+				isKey ? "testK" : "testV",
+				isKey ? "key".getBytes() : "value".getBytes(), isKey, null);
+	}
+
+	public static byte[] header(DeserializationException deserEx) {
+		ByteArrayOutputStream baos = new ByteArrayOutputStream();
+		try {
+			new ObjectOutputStream(baos).writeObject(deserEx);
+		}
+		catch (IOException e) {
+			throw new UncheckedIOException(e);
+		}
+		return baos.toByteArray();
+	}
+
 }
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/SerializationUtilsTests.java b/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/SerializationUtilsTests.java
index ead2dbc401..92654833f5 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/SerializationUtilsTests.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/SerializationUtilsTests.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2023 the original author or authors.
+ * Copyright 2023-2024 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,13 +16,6 @@
 package org.springframework.kafka.support.serializer;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.BDDMockito.given;
-import static org.mockito.BDDMockito.willAnswer;
-import static org.mockito.BDDMockito.willReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.spy;
-
 import java.util.List;
 import java.util.function.Supplier;
 
@@ -35,6 +28,13 @@
 import org.springframework.core.log.LogAccessor;
 
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.BDDMockito.given;
+import static org.mockito.BDDMockito.willAnswer;
+import static org.mockito.BDDMockito.willReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+
 /**
  * @author Gary Russell
  * @since 2.9.11
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/StringOrBytesSerializerTests.java b/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/StringOrBytesSerializerTests.java
index 4ba81b738d..2a8a5b28ef 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/StringOrBytesSerializerTests.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/StringOrBytesSerializerTests.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2019-2021 the original author or authors.
+ * Copyright 2019-2024 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,8 +16,7 @@
 package org.springframework.kafka.support.serializer;
 
-import static org.assertj.core.api.Assertions.assertThat;
-
+import java.nio.charset.StandardCharsets;
 import java.util.Collections;
 import java.util.Map;
 
@@ -26,8 +25,11 @@
 import org.springframework.kafka.test.utils.KafkaTestUtils;
 
+import static org.assertj.core.api.Assertions.assertThat;
+
 /**
  * @author Gary Russell
+ * @author Soby Chacko
  * @since 2.3
  *
  */
@@ -45,10 +47,10 @@ void test() {
 		Bytes bytes = Bytes.wrap("baz".getBytes());
 		out = serializer.serialize("x", bytes);
 		assertThat(out).isEqualTo("baz".getBytes());
-		assertThat(KafkaTestUtils.getPropertyValue(serializer, "stringSerializer.encoding")).isEqualTo("UTF-8");
+		assertThat(KafkaTestUtils.getPropertyValue(serializer, "stringSerializer.encoding")).isEqualTo(StandardCharsets.UTF_8);
 		Map configs = Collections.singletonMap("serializer.encoding", "UTF-16");
 		serializer.configure(configs, false);
-		assertThat(KafkaTestUtils.getPropertyValue(serializer, "stringSerializer.encoding")).isEqualTo("UTF-16");
+		assertThat(KafkaTestUtils.getPropertyValue(serializer, "stringSerializer.encoding")).isEqualTo(StandardCharsets.UTF_16);
 	}
 
 }
diff --git a/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/ToStringSerializationTests.java b/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/ToStringSerializationTests.java
index f19a9150fb..3e86d310e3 100644
--- a/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/ToStringSerializationTests.java
+++ b/spring-kafka/src/test/java/org/springframework/kafka/support/serializer/ToStringSerializationTests.java
@@ -16,8 +16,6 @@
 package org.springframework.kafka.support.serializer;
 
-import static org.assertj.core.api.Assertions.assertThat;
-
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
 import java.util.Collections;
@@ -32,6 +30,8 @@
 import org.springframework.kafka.support.serializer.testentities.DummyEntity;
 import org.springframework.lang.Nullable;
 
+import static org.assertj.core.api.Assertions.assertThat;
+
 /**
  *
  * @author Alexei Klenin
diff --git a/spring-kafka/src/test/kotlin/org/springframework/kafka/listener/EnableKafkaKotlinCoroutinesTests.kt b/spring-kafka/src/test/kotlin/org/springframework/kafka/listener/EnableKafkaKotlinCoroutinesTests.kt
index aaf1f079d3..ec60d2bd02 100644
--- a/spring-kafka/src/test/kotlin/org/springframework/kafka/listener/EnableKafkaKotlinCoroutinesTests.kt
+++ b/spring-kafka/src/test/kotlin/org/springframework/kafka/listener/EnableKafkaKotlinCoroutinesTests.kt
@@ -29,18 +29,13 @@
 import org.springframework.kafka.annotation.EnableKafka
 import org.springframework.kafka.annotation.KafkaHandler
 import org.springframework.kafka.annotation.KafkaListener
 import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory
-import org.springframework.kafka.core.ConsumerFactory
-import org.springframework.kafka.core.DefaultKafkaConsumerFactory
-import org.springframework.kafka.core.DefaultKafkaProducerFactory
-import org.springframework.kafka.core.KafkaTemplate
-import org.springframework.kafka.core.ProducerFactory
+import org.springframework.kafka.core.*
 import org.springframework.kafka.listener.KafkaListenerErrorHandler
 import org.springframework.kafka.test.EmbeddedKafkaBroker
 import org.springframework.kafka.test.context.EmbeddedKafka
 import org.springframework.messaging.handler.annotation.SendTo
 import org.springframework.test.annotation.DirtiesContext
 import org.springframework.test.context.junit.jupiter.SpringJUnitConfig
-import java.lang.Exception
 import java.time.Duration
 import java.util.concurrent.CountDownLatch
 import java.util.concurrent.TimeUnit
diff --git a/spring-kafka/src/test/kotlin/org/springframework/kafka/listener/EnableKafkaKotlinTests.kt b/spring-kafka/src/test/kotlin/org/springframework/kafka/listener/EnableKafkaKotlinTests.kt
index 9a112cb801..a5e8d0fbab 100644
--- a/spring-kafka/src/test/kotlin/org/springframework/kafka/listener/EnableKafkaKotlinTests.kt
+++ b/spring-kafka/src/test/kotlin/org/springframework/kafka/listener/EnableKafkaKotlinTests.kt
@@ -1,5 +1,5 @@
 /*
- * Copyright 2016-2023 the original author or authors.
+ * Copyright 2016-2024 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -30,23 +30,20 @@
 import org.springframework.context.annotation.Configuration
 import org.springframework.kafka.annotation.EnableKafka
 import org.springframework.kafka.annotation.KafkaListener
 import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory
-import org.springframework.kafka.core.ConsumerFactory
-import org.springframework.kafka.core.DefaultKafkaConsumerFactory
-import org.springframework.kafka.core.DefaultKafkaProducerFactory
-import org.springframework.kafka.core.KafkaTemplate
-import org.springframework.kafka.core.ProducerFactory
+import org.springframework.kafka.core.*
 import org.springframework.kafka.listener.*
+import org.springframework.kafka.support.converter.JsonMessageConverter
 import org.springframework.kafka.test.EmbeddedKafkaBroker
 import org.springframework.kafka.test.context.EmbeddedKafka
 import org.springframework.test.annotation.DirtiesContext
 import org.springframework.test.context.junit.jupiter.SpringJUnitConfig
-import java.lang.Exception
 import java.util.concurrent.CountDownLatch
 import java.util.concurrent.TimeUnit
 
 /**
  * @author Gary Russell
+ * @author Huijin Hong
  * @since 2.2
  */
@@ -63,7 +60,7 @@ class EnableKafkaKotlinTests {
 
 	@Test
 	fun `test listener`() {
-		this.template.send("kotlinTestTopic1", "foo")
+		this.template.send("kotlinTestTopic1", "{\"data\":\"foo\"}")
 		assertThat(this.config.latch1.await(10, TimeUnit.SECONDS)).isTrue()
 		assertThat(this.config.received).isEqualTo("foo")
 	}
@@ -173,6 +170,7 @@
 				= ConcurrentKafkaListenerContainerFactory<String, String>()
 		factory.consumerFactory = kcf()
 		factory.setCommonErrorHandler(eh)
+		factory.setRecordMessageConverter(JsonMessageConverter())
 		return factory
 	}
@@ -186,9 +184,11 @@
 		return factory
 	}
 
+	data class TestKafkaMessage(val data: String)
+
 	@KafkaListener(id = "kotlin", topics = ["kotlinTestTopic1"],
 			containerFactory = "kafkaListenerContainerFactory")
-	fun listen(value: String) {
-		this.received = value
+	fun listen(value: TestKafkaMessage) {
+		this.received = value.data
 		this.latch1.countDown()
 	}
diff --git a/src/checkstyle/checkstyle.xml b/src/checkstyle/checkstyle.xml
index 313da7c27c..59b4ecaf28 100644
--- a/src/checkstyle/checkstyle.xml
+++ b/src/checkstyle/checkstyle.xml
@@ -97,13 +97,12 @@
-
+
-
+
-
@@ -191,6 +190,12 @@
+
+
+
+
+
+
diff --git a/src/idea/spring-framework.xml b/src/idea/spring-framework.xml
new file mode 100644
index 0000000000..a66a6d14c3
--- /dev/null
+++ b/src/idea/spring-framework.xml
@@ -0,0 +1,269 @@
+
+
+
+
\ No newline at end of file