diff --git a/.github/scripts/check-google-java-format.sh b/.github/scripts/check-google-java-format.sh
new file mode 100755
index 0000000000..8e12296a74
--- /dev/null
+++ b/.github/scripts/check-google-java-format.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+set -u
+
+script_name=""
+
+case "$(uname -sr)" in
+
+ Darwin*)
+ script_name="google-java-format_darwin-arm64"
+ ;;
+
+ Linux*)
+ script_name="google-java-format_linux-x86-64"
+ ;;
+ *)
+ echo 'Unsupported OS'
+ exit 1
+ ;;
+esac
+
+JAVA_FILES=$(find . -name "*.java" -type f)
+
+invalid_files=0
+
+echo "Incorrectly formatted files (if any) are listed below:"
+# TODO: remove '--skip-reflowing-long-strings' once https://github.com/google/google-java-format/issues/566 is fixed
+for FILE in $JAVA_FILES; do
+ if [[ "$*" == *--fix* ]]; then
+    "./$script_name" --skip-reflowing-long-strings --replace "$FILE" > /dev/null
+ else
+    "./$script_name" --set-exit-if-changed --skip-reflowing-long-strings "$FILE" > /dev/null
+ fi
+ if [ $? -ne 0 ]; then
+ echo "$FILE"
+    invalid_files=$((invalid_files + 1))
+ fi
+done
+
+if [ "$invalid_files" -ne 0 ]; then
+  echo "Found $invalid_files incorrectly formatted files (listed above), run google-java-format to fix them."
+ exit 1
+else
+ echo "All files are formatted correctly."
+fi
+
diff --git a/.github/scripts/download_reports.sh b/.github/scripts/download_reports.sh
deleted file mode 100644
index 79e93f818c..0000000000
--- a/.github/scripts/download_reports.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-TMPDIR="test"
-mkdir $TMPDIR
-
-REPO="allegro/hermes"
-BUILDS_FILE="builds.json"
-PAST_BUILDS="$TMPDIR/$BUILDS_FILE"
-
-UNIT_TEST_ARTIFACT="check-test-report"
-E2E_REPORT_ARTIFACT="integrationTest-test-report"
-
-gh run list --repo $REPO --branch master --workflow ci --json "status,databaseId" --limit 20 >> $PAST_BUILDS
-
-cd $TMPDIR
-
-jq -c '.[]' "$BUILDS_FILE" | while read i; do
- STATUS=$(echo $i | jq '.status')
- if [[ "$STATUS" == 'completed' ]]; then
- continue
- fi
- RUN_ID=$(echo $i | jq '.databaseId')
-
- echo "downloading results for run: $RUN_ID"
- RUN_DIR=$RUN_ID
-
- mkdir $RUN_DIR
- echo "creating dir $RUN_DIR"
- cd $RUN_DIR
-
- mkdir $UNIT_TEST_ARTIFACT
- cd $UNIT_TEST_ARTIFACT
- gh run download --repo $REPO -n $UNIT_TEST_ARTIFACT $RUN_ID
- echo "Downloaded unit test report"
- cd ..
-
- mkdir $E2E_REPORT_ARTIFACT
- cd $E2E_REPORT_ARTIFACT
- gh run download --repo $REPO -n $E2E_REPORT_ARTIFACT $RUN_ID
- echo "Downloaded integrationTest report"
-
- cd ../..
-done
diff --git a/.github/scripts/reporter.py b/.github/scripts/reporter.py
deleted file mode 100755
index 5e124b1471..0000000000
--- a/.github/scripts/reporter.py
+++ /dev/null
@@ -1,118 +0,0 @@
-import dataclasses
-import os
-import sys
-from typing import List, Set, Dict
-import xml.etree.ElementTree as ET
-
-
-@dataclasses.dataclass
-class TestResults:
- failed: Set[str]
- passed: Set[str]
- run_id: str
- test_cnt: int = 0
- skipped_cnt: int = 0
- failed_cnt: int = 0
- error_cnt: int = 0
-
-
-def get_test_files(dir: str) -> List[str]:
- files = [
- os.path.join(dp, f)
- for dp, dn, filenames in os.walk(dir)
- for f in filenames
- if os.path.splitext(f)[1] == '.xml'
- and os.path.splitext(f)[0].startswith("TEST-")
- ]
- return files
-
-
-def parse_test_file(file: str, run_id: str) -> TestResults:
- root = ET.parse(file).getroot()
- result = TestResults(
- skipped_cnt=int(root.get("skipped")),
- test_cnt=int(root.get("tests")),
- failed_cnt=int(root.get("failures")),
- error_cnt=int(root.get("errors")),
- failed=set(),
- passed=set(),
- run_id=run_id
- )
- name = root.get("name")
- name = '.'.join(name.split('.')[4:]) # remove common prefix
-
- for testcase in root.findall("testcase"):
- testname = testcase.get("name")
- test_failed = testcase.findall("failure")
- if test_failed:
- result.failed.add(f"{name}#{testname}")
- else:
- result.passed.add(f"{name}#{testname}")
- return result
-
-
-def aggregate_results(test_dir: str, run_id: str) -> TestResults:
- test_files = get_test_files(test_dir)
- results = []
- for test_file in test_files:
- result = parse_test_file(test_file, run_id)
- results.append(result)
-
- agg = TestResults(set(), set(), run_id)
-
- for result in results:
- agg.test_cnt += result.test_cnt
- agg.skipped_cnt += result.skipped_cnt
- agg.error_cnt += result.error_cnt
- agg.failed_cnt += result.failed_cnt
- for fail in result.failed:
- agg.failed.add(fail)
- for pas in result.passed:
- agg.passed.add(pas)
- return agg
-
-
-def report(type: str, runs: List[TestResults]) -> str:
- failed = {}
- markdown = ""
- for run in runs:
- for fail in run.failed:
- if fail in failed:
- failed[fail]["count"] += 1
- failed[fail]["runs"].append(run.run_id)
- else:
- failed[fail] = {"count": 1, "runs": [run.run_id]}
- markdown += f"## {type} \n"
- markdown += "| Test name | Fail count | Failed in runs |\n"
- markdown += "|--|--|--|\n"
- for k, v in sorted(failed.items(), key=lambda item: -item[1]["count"]):
- markdown += f"| {k} | {v['count']} | {v['runs']} |\n"
- markdown += "\n"
- return markdown
-
-
-if __name__ == '__main__':
- root = sys.argv[1]
- print(f"Parsing tests results from directory: {root}")
- run_dirs = [f.path for f in os.scandir(root) if f.is_dir()]
- print(f"Found {len(run_dirs)} run dirs")
- unittest_runs = []
- e2e_runs = []
-
- for run in run_dirs:
- run_id = os.path.basename(os.path.normpath(run))
- unit_test_dir = os.path.join(run, "check-test-report")
- unit_test_results = aggregate_results(unit_test_dir, run_id)
- unittest_runs.append(unit_test_results)
-
- e2e_test_dir = os.path.join(run, "integrationTest-test-report")
- e2e_test_results = aggregate_results(e2e_test_dir, run_id)
- e2e_runs.append(e2e_test_results)
-
- step_summary = "# Failing tests report\n"
- step_summary += report("Unit tests", unittest_runs)
- step_summary += report("Integration tests", e2e_runs)
-
- result_file = os.environ["GITHUB_STEP_SUMMARY"]
- with open(result_file, 'w') as f:
- f.write(step_summary)
diff --git a/.github/workflows/checkstyle.yml b/.github/workflows/checkstyle.yml
deleted file mode 100644
index 8d99e40474..0000000000
--- a/.github/workflows/checkstyle.yml
+++ /dev/null
@@ -1,35 +0,0 @@
-name: Checkstyle
-
-on:
- push:
- branches: [ master ]
- pull_request:
- branches: [ master ]
-
-jobs:
- checkstyle:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v3
- with:
- fetch-depth: 0
- - uses: reviewdog/action-setup@v1
- with:
- reviewdog_version: latest
- - name: Set up JDK 17
- uses: actions/setup-java@v3
- with:
- java-version: 17
- distribution: 'temurin'
- - name: Run check style
- # ignore lengthy console setup tasks
- run: ./gradlew --continue clean checkstyleMain checkstyleTest checkstyleIntegration checkstyleJmh -PmaxCheckstyleWarnings=0 -x attachHermesConsole -x prepareIndexTemplate
- - name: Run reviewdog
- if: ${{ success() || failure() }}
- env:
- REVIEWDOG_GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- for f in $(find . -regex '.*/build/reports/checkstyle/.*\.xml'); do
- module_name=$(echo "$f" | cut -d "/" -f2)
- reviewdog -f=checkstyle -level=warning -filter-mode=nofilter -reporter=github-check -name="checkstyle-$module_name" < $f
- done
diff --git a/.github/workflows/ci-console.yml b/.github/workflows/ci-console.yml
deleted file mode 100644
index 628fa9a7ee..0000000000
--- a/.github/workflows/ci-console.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-name: Console CI
-
-# Configured for all pushes and PRs for the sake of new console development on dev branches.
-# Later it could be changed to pushes and PRs only to master branch to match main CI config.
-on: [ push, pull_request ]
-
-jobs:
- build:
- runs-on: ubuntu-latest
- defaults:
- run:
- working-directory: ./hermes-console-vue
- steps:
- - name: Checkout
- uses: actions/checkout@v3
- - name: Setup node
- uses: actions/setup-node@v3
- with:
- node-version: 18
- - name: Run linter
- run: yarn && yarn lint
- - name: Run frontend tests
- run: yarn test:unit
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 691ef82521..03e0af5fb5 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -2,16 +2,40 @@ name: CI
on:
push:
- branches: [ master, integration_tests_framework ]
+ branches: [ master ]
pull_request:
- branches: [ master, integration_tests_framework ]
+ branches: [ master ]
jobs:
+ console:
+ name: ci-console
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ working-directory: ./hermes-console
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ - name: Setup node
+ uses: actions/setup-node@v4
+ with:
+ node-version: 20
+ - name: Run linter
+ run: yarn && yarn lint
+ - name: Run frontend tests
+ run: yarn test:unit
+ - name: Upload artifact
+ if: always() && github.ref == 'refs/heads/master'
+ uses: actions/upload-artifact@v4
+ with:
+ name: ci-console
+ path: ./hermes-console/allure-results
build:
runs-on: ubuntu-latest
strategy:
matrix:
tasks: [
+ # Add/remove task in Allure Report job also
{alias: "unitTests", name: "check"},
{alias: "integrationTests", name: "integrationTest"},
{alias: "slowIntegrationTests", name: "slowIntegrationTest"},
@@ -20,22 +44,16 @@ jobs:
fail-fast: false
name: ${{ matrix.tasks.alias }}
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
fetch-depth: 0
- - uses: actions/cache@v3
- with:
- path: |
- ~/.gradle/caches
- ~/.gradle/wrapper
- key: ${{ runner.os }}-gradle-${{ hashFiles('**/*.gradle*', '**/gradle-wrapper.properties') }}
- restore-keys: |
- ${{ runner.os }}-gradle-
- name: Set up JDK 17
- uses: actions/setup-java@v3
+ uses: actions/setup-java@v4
with:
java-version: 17
distribution: 'temurin'
+ - name: Setup Gradle
+ uses: gradle/gradle-build-action@v2
- name: Build with Gradle
run: ./gradlew assemble
- name: Run task with Gradle
@@ -53,3 +71,61 @@ jobs:
with:
paths: '**/build/test-results/**/TEST-*.xml'
show: fail, skip
+ - name: Upload artifact
+ if: always() && github.ref == 'refs/heads/master' && matrix.tasks.alias != 'benchmark'
+ uses: actions/upload-artifact@v4
+ with:
+ name: ${{ matrix.tasks.alias }}
+ path: build/allure-results
+
+ allureReport:
+ if: always() && github.ref == 'refs/heads/master'
+ name: Generate Allure Report
+ needs: [ build, console ]
+ runs-on: ubuntu-latest
+ steps:
+ - name: Download artifact unitTests
+ uses: actions/download-artifact@v4
+ if: always()
+ with:
+ name: unitTests
+ path: allure-results
+ - name: Download artifact integrationTests
+ uses: actions/download-artifact@v4
+ if: always()
+ with:
+ name: integrationTests
+ path: allure-results
+ - name: Download artifact slowIntegrationTests
+ uses: actions/download-artifact@v4
+ if: always()
+ with:
+ name: slowIntegrationTests
+ path: allure-results
+ - name: Download artifact ci-console
+ uses: actions/download-artifact@v4
+ if: always()
+ with:
+ name: ci-console
+ path: allure-results
+ - name: Load test report history
+ uses: actions/checkout@v4
+ if: always()
+ continue-on-error: true
+ with:
+ ref: gh-pages
+ path: gh-pages
+ - name: Build test report
+ uses: simple-elf/allure-report-action@v1.9
+ if: always()
+ with:
+ gh_pages: gh-pages
+ allure_history: allure-history
+ allure_results: allure-results
+ - name: Publish test report
+ uses: peaceiris/actions-gh-pages@v4
+ if: always()
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ publish_branch: gh-pages
+ publish_dir: allure-history
\ No newline at end of file
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 56c5ff5084..c43d90c2db 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -35,17 +35,17 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Set up JDK 17
- uses: actions/setup-java@v3
+ uses: actions/setup-java@v4
with:
java-version: 17
distribution: 'temurin'
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
- uses: github/codeql-action/init@v1
+ uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
@@ -56,7 +56,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
- uses: github/codeql-action/autobuild@v1
+ uses: github/codeql-action/autobuild@v2
# âšī¸ Command-line programs to run using the OS shell.
# đ https://git.io/JvXDl
@@ -70,4 +70,4 @@ jobs:
# make release
- name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@v1
+ uses: github/codeql-action/analyze@v2
diff --git a/.github/workflows/google-java-format.yml b/.github/workflows/google-java-format.yml
new file mode 100644
index 0000000000..2ec1c5bf8b
--- /dev/null
+++ b/.github/workflows/google-java-format.yml
@@ -0,0 +1,26 @@
+name: Google java format
+
+on:
+ workflow_dispatch:
+ pull_request:
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+ ref: ${{ github.head_ref }}
+
+ - name: Download and run google java format
+ run: |
+ ls -la
+ curl -sSLO "https://github.com/google/google-java-format/releases/download/v$VERSION/google-java-format_linux-x86-64"
+ chmod a+x google-java-format_linux-x86-64
+ ./.github/scripts/check-google-java-format.sh
+ shell: bash
+ env:
+ VERSION: 1.23.0
+
diff --git a/.github/workflows/gradle-wrapper-validation.yml b/.github/workflows/gradle-wrapper-validation.yml
index d3c1263959..eda69f9d27 100644
--- a/.github/workflows/gradle-wrapper-validation.yml
+++ b/.github/workflows/gradle-wrapper-validation.yml
@@ -12,5 +12,5 @@ jobs:
name: "Validation"
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- uses: gradle/wrapper-validation-action@v1
diff --git a/.github/workflows/markdown-links-check.yml b/.github/workflows/markdown-links-check.yml
index 6920411bd5..4d186efa1e 100644
--- a/.github/workflows/markdown-links-check.yml
+++ b/.github/workflows/markdown-links-check.yml
@@ -12,7 +12,7 @@ jobs:
check-links:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- uses: gaurav-nelson/github-action-markdown-link-check@v1
with:
use-quiet-mode: 'yes'
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index c6485311cc..9d0bec195f 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -16,15 +16,16 @@ jobs:
environment: ci
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
fetch-depth: 0
- - uses: gradle/wrapper-validation-action@v1
- name: Set up JDK
- uses: actions/setup-java@v3
+ uses: actions/setup-java@v4
with:
java-version: '17'
distribution: 'temurin'
+ - name: Setup Gradle
+ uses: gradle/gradle-build-action@v2
- name: Release
if: github.ref == 'refs/heads/master'
run: ./gradlew release -Prelease.customPassword=${GITHUB_TOKEN} -Prelease.customUsername=${GITHUB_ACTOR} -Prelease.forceVersion=${FORCE_VERSION}
diff --git a/.github/workflows/test_report.yml b/.github/workflows/test_report.yml
deleted file mode 100644
index 2b52cbedb8..0000000000
--- a/.github/workflows/test_report.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-name: "Test report"
-on:
- push:
- branches: [ master ]
-
-jobs:
- validation:
- name: "Test report"
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v3
- - name: Grant execute permission for report downloader
- run: chmod +x ./.github/scripts/download_reports.sh
- - name: Download past reports
- run: ./.github/scripts/download_reports.sh
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- - uses: actions/setup-python@v4
- with:
- python-version: '3.10'
- - name: Aggregate reports
- run: python ./.github/scripts/reporter.py "test"
diff --git a/.gitignore b/.gitignore
index d085ce8969..475005bac9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,6 @@
build
.gradle
classes
-.idea
*.iml
*.ipr
*.iws
@@ -39,3 +38,38 @@ scripts/lib/
scripts/pip-selfcheck.json
.DS_Store
+
+# User-specific stuff
+.idea/**/workspace.xml
+.idea/**/tasks.xml
+.idea/**/usage.statistics.xml
+.idea/**/dictionaries
+.idea/**/shelf
+
+# Generated files
+.idea/**/contentModel.xml
+
+# Sensitive or high-churn files
+.idea/**/dataSources/
+.idea/**/dataSources.ids
+.idea/**/dataSources.local.xml
+.idea/**/sqlDataSources.xml
+.idea/**/dynamic.xml
+.idea/**/uiDesigner.xml
+.idea/**/dbnavigator.xml
+
+
+# Gradle and Maven with auto-import
+# When using Gradle or Maven with auto-import, you should exclude module files,
+# since they will be recreated, and may cause churn. Uncomment if using
+# auto-import.
+.idea/artifacts
+.idea/compiler.xml
+.idea/jarRepositories.xml
+.idea/modules.xml
+.idea/*.iml
+.idea/modules
+
+# Gradle
+.idea/**/gradle.xml
+.idea/**/libraries
diff --git a/.idea/externalDependencies.xml b/.idea/externalDependencies.xml
new file mode 100644
index 0000000000..679f74ca17
--- /dev/null
+++ b/.idea/externalDependencies.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/google-java-format.xml b/.idea/google-java-format.xml
new file mode 100644
index 0000000000..8b57f4527a
--- /dev/null
+++ b/.idea/google-java-format.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
new file mode 100644
index 0000000000..4336a05b15
--- /dev/null
+++ b/.idea/misc.xml
@@ -0,0 +1,58 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000000..35eb1ddfbb
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a5377d8bfe..262c86384e 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -27,5 +27,5 @@ You can also use other *magic words* from [GitHub handbook](https://help.github.
* use `spock` when writing new unit tests in all modules
* when changing old tests use your best judgement as to when rewrite them to `spock`
-* use `TestNG` with defined environment in `integration` module
+* use `JUnit5` with defined environment in `integration-tests` module
* prepend configuration options with module name, i.e. `frontend.` or `consumer.` when it applies to single module
diff --git a/README.md b/README.md
index 9b0545b9c9..eadb83bfd6 100644
--- a/README.md
+++ b/README.md
@@ -20,3 +20,40 @@ If you have any question or idea regarding the project, please feel free to reac
## License
**hermes** is published under [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0).
+
+## Development
+
+### Code formatting
+For code formatting we use [google-java-format](https://github.com/google/google-java-format/tree/master).
+The following steps are required for an optimal dev experience in IJ:
+
+1. Download [google-java-format plugin](https://plugins.jetbrains.com/plugin/8527-google-java-format)
+2. [Set custom VM options required for IJ plugin](https://github.com/google/google-java-format/tree/master?tab=readme-ov-file#intellij-jre-config)
+3. Go to `Settings > google-java-format` and click `Enable google java-format` (should be checked by default)
+4. Go to `Settings > Tools > Actions on Save` and enable `Reformat code` and `Optimize imports` for Java files
+
+Each save should automatically trigger reformat.
+
+If you want to debug the CLI check on macOS:
+
+```shell
+wget https://github.com/google/google-java-format/releases/download/v1.23.0/google-java-format_darwin-arm64
+chmod a+x google-java-format_darwin-arm64
+chmod a+x .github/scripts/check-google-java-format.sh
+./.github/scripts/check-google-java-format.sh
+```
+
+or if you are on Linux:
+
+```shell
+wget https://github.com/google/google-java-format/releases/download/v1.23.0/google-java-format_linux-x86-64
+chmod a+x google-java-format_linux-x86-64
+chmod a+x .github/scripts/check-google-java-format.sh
+./.github/scripts/check-google-java-format.sh
+```
+
+You can also run the following command to fix formatting for the whole project:
+
+```shell
+./.github/scripts/check-google-java-format.sh --fix
+```
diff --git a/build.gradle b/build.gradle
index b89f19fe60..02f86f4e6d 100644
--- a/build.gradle
+++ b/build.gradle
@@ -39,7 +39,6 @@ nexusPublishing {
allprojects {
apply plugin: 'java'
apply plugin: 'groovy'
- apply plugin: 'checkstyle'
group = 'pl.allegro.tech.hermes'
version = scmVersion.version
@@ -49,26 +48,28 @@ allprojects {
project.ext.versions = [
kafka : '2.8.2',
- guava : '23.0',
- jackson : '2.15.2',
- jersey : '3.1.2',
- jetty : '11.0.15',
+ guava : '33.1.0-jre',
+ jackson : '2.17.0',
+ jersey : '3.1.6',
+ jetty : '12.0.8',
curator : '5.4.0',
- dropwizard_metrics: '4.1.0',
- micrometer_metrics: '1.11.1',
- wiremock : '3.0.1',
- spock : '2.4-M1-groovy-4.0',
- groovy : '4.0.12',
- avro : '1.9.1',
+ dropwizard_metrics: '4.2.25',
+ micrometer_metrics: '1.12.5',
+ wiremock : '3.9.0',
+ spock : '2.4-M4-groovy-4.0',
+ groovy : '4.0.21',
+ avro : '1.11.3',
json2avro : '0.2.14',
+ // TODO: newest version requires subject alternative name in a certificate during host verification, current test cert does not have a one
okhttp : '3.9.1',
- undertow : '2.0.29.Final',
- spring_web : '6.0.8',
- failsafe : '2.3.1',
- junit_jupiter : '5.9.1',
- testcontainers : '1.18.1',
- spring : '3.0.6',
- assertj : '3.24.2'
+ undertow : '2.3.12.Final',
+ spring_web : '6.1.6',
+ failsafe : '2.4.4',
+ junit_jupiter : '5.10.2',
+ testcontainers : '1.19.8',
+ spring : '3.2.4',
+ assertj : '3.25.3',
+ allure : '2.24.0'
]
repositories {
@@ -77,15 +78,23 @@ allprojects {
dependencies {
implementation group: 'org.slf4j', name: 'slf4j-api', version: '2.0.4'
- implementation group: 'org.apache.commons', name: 'commons-lang3', version: '3.9'
+ implementation group: 'org.apache.commons', name: 'commons-lang3', version: '3.14.0'
+
+ // Allure Spock adapter
+ testImplementation(platform("io.qameta.allure:allure-bom:${versions.allure}"))
+ testImplementation("io.qameta.allure:allure-spock2")
+ testImplementation("io.qameta.allure:allure-junit-platform")
+
+ // Spock framework
+ testImplementation(platform("org.spockframework:spock-bom:${versions.spock}"))
+ testImplementation("org.spockframework:spock-core")
testImplementation group: 'junit', name: 'junit', version: '4.11'
testImplementation group: 'com.tngtech.java', name: 'junit-dataprovider', version: '1.10.0'
testImplementation group: 'pl.pragmatists', name: 'JUnitParams', version: '1.0.2'
- testImplementation group: 'org.mockito', name: 'mockito-all', version: '1.9.5'
+ testImplementation group: 'org.mockito', name: 'mockito-core', version: '5.11.0'
testImplementation group: 'org.assertj', name: 'assertj-core', version: versions.assertj
- testImplementation group: 'com.jayway.awaitility', name: 'awaitility', version: '1.6.1'
- testImplementation group: 'com.googlecode.catch-exception', name: 'catch-exception', version: '1.2.0'
+ testImplementation group: 'org.awaitility', name: 'awaitility', version: '4.2.1'
annotationProcessor group: 'org.springframework.boot', name: 'spring-boot-configuration-processor', version: versions.spring
}
@@ -113,7 +122,7 @@ allprojects {
}
-configure(subprojects - project(':integration')) {
+configure(subprojects - project(':integration-tests')) {
apply plugin: 'jacoco'
apply plugin: 'maven-publish'
apply plugin: 'signing'
@@ -123,6 +132,8 @@ configure(subprojects - project(':integration')) {
withSourcesJar()
}
+ javadoc.options.addStringOption('Xdoclint:none', '-quiet')
+
publishing {
publications {
mavenJava(MavenPublication) {
@@ -194,18 +205,6 @@ subprojects {
events 'passed', 'skipped', 'failed'
}
}
-
- tasks.withType(Checkstyle) {
- reports {
- xml.required = true
- html.required = false
- }
- }
-
- checkstyle {
- toolVersion '10.3.4'
- maxWarnings getIntProperty('maxCheckstyleWarnings', Integer.MAX_VALUE)
- }
}
def getIntProperty(String name, int defaultValue) {
diff --git a/config/checkstyle/checkstyle.xml b/config/checkstyle/checkstyle.xml
deleted file mode 100644
index 68ce4fd71d..0000000000
--- a/config/checkstyle/checkstyle.xml
+++ /dev/null
@@ -1,439 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/config/checkstyle/suppressions.xml b/config/checkstyle/suppressions.xml
deleted file mode 100644
index 91465ba4ee..0000000000
--- a/config/checkstyle/suppressions.xml
+++ /dev/null
@@ -1,47 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index dc2d859510..9f07d83fc8 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -30,19 +30,6 @@ services:
- kafka_data:/var/lib/kafka/data
- kafka_secrets:/etc/kafka/secrets
- graphite:
- image: graphiteapp/graphite-statsd:1.1.3
- ports:
- - '2003-2004:2003-2004'
- - '2023-2024:2023-2024'
- - '8125:8125/udp'
- - '8126:8126'
- - '8082:80'
- volumes:
- - graphite_conf:/opt/graphite/conf
- - graphite_data:/opt/graphite/storage
- - statsd_data:/opt/statsd
-
frontend:
build:
context: ../
@@ -52,7 +39,6 @@ services:
depends_on:
- zk
- kafka
- - graphite
consumers:
build:
@@ -61,7 +47,6 @@ services:
depends_on:
- zk
- kafka
- - graphite
management:
build:
@@ -72,7 +57,6 @@ services:
depends_on:
- zk
- kafka
- - graphite
schema-registry:
image: "confluentinc/cp-schema-registry:${CONFLUENT_IMAGES_TAG}"
@@ -87,9 +71,6 @@ services:
- "8081:8081"
volumes:
- graphite_conf:
- graphite_data:
- statsd_data:
zk_secrets:
zk_data:
zk_log:
diff --git a/docker/latest/consumers/consumers.yaml b/docker/latest/consumers/consumers.yaml
index 76af83d9b5..f64008630f 100644
--- a/docker/latest/consumers/consumers.yaml
+++ b/docker/latest/consumers/consumers.yaml
@@ -8,11 +8,6 @@ consumer:
clusters:
- datacenter: "dc"
brokerList: "kafka:29092"
- graphite:
- host: "graphite"
- metrics:
- metric-registry:
- graphiteReporterEnabled: true
workload:
consumerPerSubscription: 1
schema:
diff --git a/docker/latest/frontend/frontend.yaml b/docker/latest/frontend/frontend.yaml
index 2ccd0f3432..0ba33ab769 100644
--- a/docker/latest/frontend/frontend.yaml
+++ b/docker/latest/frontend/frontend.yaml
@@ -8,11 +8,6 @@ frontend:
clusters:
- datacenter: "dc"
brokerList: "kafka:29092"
- graphite:
- host: "graphite"
- metrics:
- metric-registry:
- graphiteReporterEnabled: true
schema:
cache:
refreshAfterWrite: 1m
diff --git a/docker/latest/management/management.yaml b/docker/latest/management/management.yaml
index 0953e3660d..8aaec73c04 100644
--- a/docker/latest/management/management.yaml
+++ b/docker/latest/management/management.yaml
@@ -17,11 +17,6 @@ kafka:
connectionTimeout: 3000
bootstrapKafkaServer: kafka:29092
-graphite:
- client:
- enabled: true
- externalMonitoringUrl: graphite:8082
-
server:
port: 8090
@@ -59,6 +54,10 @@ schema.repository:
serverUrl: http://schema-registry:8081
validationEnabled: true
+prometheus:
+ client:
+ enabled: true
+
console:
configurationLocation: console/config-local.json
configurationType: classpath_resource
diff --git a/docs/docs/configuration/buffer-persistence.md b/docs/docs/configuration/buffer-persistence.md
index a4e3b9ca66..875776f508 100644
--- a/docs/docs/configuration/buffer-persistence.md
+++ b/docs/docs/configuration/buffer-persistence.md
@@ -1,4 +1,4 @@
-# Publishing buffer persistence
+# Publishing buffer persistence [deprecated]
Hermes Frontend API has option to register callbacks triggered during different phases of message lifetime:
@@ -15,7 +15,7 @@ to disk. Map structure is continuously persisted to disk, as it is stored in off
When Hermes Frontend starts up it scans filesystem in search of existing persisted map. If found, it is read and any
persisted events are sent to Message Store. This way recovering after crash is fully automatic. If Hermes process or
-server crashes, nothing is lost.
+server crashes, events that were flushed to disk are recovered.
There is additional protection against flooding subscribers with outdated events. When reading events from persisted
storage, Hermes filters out messages older than N hours, where N is a system parameter and is set to 3 days by default.
diff --git a/docs/docs/configuration/console.md b/docs/docs/configuration/console.md
index f327d0dd63..a67d509c6d 100644
--- a/docs/docs/configuration/console.md
+++ b/docs/docs/configuration/console.md
@@ -15,14 +15,12 @@ dashboard.docs | link to documentation, available on Console home page
## Metric Store integration
-Hermes Console can be integrated with Metric Store. This means, that metrics shown in Console can link to actual graphs
-plotted by Metric Store. At the moment only Graphite is supported.
-
-Option | Description
------------------------ | ---------------------------------------------------------------------------
-metrics.type | type of metrics storage to link to (currently only `graphite` is supported)
-metrics.graphite.url | URL to graphite
-metrics.graphite.prefix | prefix to graphite metrics
+Hermes console could have a button on the topics and subscriptions view that takes you to a dashboard with metrics.
+In order to make it work you have to provide an implementation of `pl.allegro.tech.hermes.management.domain.MetricsDashboardUrlService`.
+
+ Option | Description
+-------------------------------------|--------------------------------------------------------------------------------------
+ metrics.fetchingDashboardUrlEnabled | enable fetching dashboard url from hermes-management and show the referring UI button
## Authorization
diff --git a/docs/docs/configuration/consumers-tuning.md b/docs/docs/configuration/consumers-tuning.md
index f4433b819f..904df5eede 100644
--- a/docs/docs/configuration/consumers-tuning.md
+++ b/docs/docs/configuration/consumers-tuning.md
@@ -2,18 +2,18 @@
## HTTP Sender
-Option | Description | Default value
----------------------------------------------------- | ----------------------------------------------------------- | -------------
-consumer.http-client.serial.http1.threadPoolSize | size of thread pool for sender threads (global) | 30
-consumer.http-client.serial.http1.maxConnectionsPerDestination | max connections per remote host | 100
+| Option | Description | Default value |
+|----------------------------------------------------------------|-------------------------------------------------|---------------|
+| consumer.http-client.serial.http1.threadPoolSize | size of thread pool for sender threads (global) | 30 |
+| consumer.http-client.serial.http1.maxConnectionsPerDestination | max connections per remote host | 100 |
## Consumers core
-Option | Description | Default value
------------------------------ | ------------------------------------------------------------------------ | -------------
-consumer.commit.offset.period | interval between committing offsets to Kafka | 60s
-consumer.threadPoolSize | thread pool for threads involved in consuming, 1 thread per subscription | 500
-consumer.serialConsumer.inflightSize | how many messages can be kept in send queue, per subscription | 100
+| Option | Description | Default value |
+|--------------------------------------|--------------------------------------------------------------------------|---------------|
+| consumer.commit.offset.period | interval between committing offsets to Kafka | 60s |
+| consumer.threadPoolSize | thread pool for threads involved in consuming, 1 thread per subscription | 500 |
+| consumer.serialConsumer.inflightSize | how many messages can be kept in send queue, per subscription | 100 |
## Workload constraints management
@@ -26,10 +26,10 @@ subscriptions assigned to itself.
These numbers can be configured:
-Option | Description | Default value
---------------------------------------------------- | ----------------------------------------- | ---------------------
-consumer.workload.consumersPerSubscription | Number of consumers to which the subscription will be assigned. If this value is greater than the number of available consumers, Hermes will assign the subscription to all available consumers. | 2
-consumer.workload.maxSubscriptionsPerConsumer | The maximum number of subscriptions assigned to a single consumer. If all consumers have the maximum number of subscriptions assigned, a new subscription will not be activated until a new consumer is added or another subscription is unassigned. | 200
+| Option | Description | Default value |
+|-----------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|
+| consumer.workload.consumersPerSubscription | Number of consumers to which the subscription will be assigned. If this value is greater than the number of available consumers, Hermes will assign the subscription to all available consumers. | 2 |
+| consumer.workload.maxSubscriptionsPerConsumer | The maximum number of subscriptions assigned to a single consumer. If all consumers have the maximum number of subscriptions assigned, a new subscription will not be activated until a new consumer is added or another subscription is unassigned. | 200 |
Additionally, Hermes allows to configure the property `consumer.workload.consumersPerSubscription` for specific
topics or subscriptions in the runtime via REST API.
diff --git a/docs/docs/configuration/metrics.md b/docs/docs/configuration/metrics.md
index 86f78fe134..5e172d4ebf 100644
--- a/docs/docs/configuration/metrics.md
+++ b/docs/docs/configuration/metrics.md
@@ -12,9 +12,9 @@ Option | Description
{modulePrefix}.metrics.prometheus.step | The step size to use in computing windowed statistics | 60s
{modulePrefix}.metrics.prometheus.descriptions | If meter descriptions should be sent to Prometheus | true
-In order to be able to access basic metrics via Management API, it needs to be configured to reach VictoriaMetrics API:
+In order to be able to access basic metrics via Management API, it needs to be configured to reach Prometheus API:
Option | Description | Default value
------------------------------------------|-----------------------------------------------| -------------
prometheus.client.enabled | Should fetch external metrics from Prometheus | true
-prometheus.client.externalMonitoringUrl | URI to VictoriaMetrics HTTP API | http://localhost:18090
+prometheus.client.externalMonitoringUrl | URI to Prometheus HTTP API | http://localhost:18090
diff --git a/docs/docs/overview/architecture.md b/docs/docs/overview/architecture.md
index 7726745068..6d647e64d2 100644
--- a/docs/docs/overview/architecture.md
+++ b/docs/docs/overview/architecture.md
@@ -16,7 +16,7 @@ Hermes integrates with multiple systems, each having different role.
* **Message Store** - stores and routes messages, current implementation: Kafka
* **Metadata Store** - shared metadata storage for all Hermes modules, current implementation: Zookeeper
-* **Metrics Store** *[optional]* - stores metrics gathered by Hermes, current implementation: Graphite
+* **Metrics Store** *[optional]* - stores metrics gathered by Hermes, currently Hermes exposes metrics in Prometheus format
* **Tracking Store** *[optional]* - stores tracking (message trace) information, current implementation: ElasticSearch
## Message flow
diff --git a/docs/docs/quickstart.md b/docs/docs/quickstart.md
index c31db5a01a..3ad00ea47b 100644
--- a/docs/docs/quickstart.md
+++ b/docs/docs/quickstart.md
@@ -56,7 +56,7 @@ image: allegro/hermes-management:hermes-[specific version tag]
## Development
The default `docker-compose` setup will start all hermes modules (consumers, frontend, management), together
-with its dependencies (Kafka, ZooKeeper, Graphite, Schema Registry). To run a specific module with gradle/IntelliJ,
+with its dependencies (Kafka, ZooKeeper, Schema Registry). To run a specific module with gradle/IntelliJ,
just comment out the module in `services` section of the `docker-compose.yml` file, and start the java process locally:
`./gradlew -p hermes-frontend run`
@@ -175,7 +175,6 @@ management:
depends_on:
- zk
- kafka
- - graphite
[...]
```
diff --git a/docs/docs/user/java-client.md b/docs/docs/user/java-client.md
index 57e78b766b..942c9675a7 100644
--- a/docs/docs/user/java-client.md
+++ b/docs/docs/user/java-client.md
@@ -19,7 +19,7 @@ At the moment there are four implementations of `HermesSender`:
for asynchronous transmission
* **WebClientHermesSender** - for services using [Spring WebFlux](https://docs.spring.io/spring-framework/docs/current/reference/html/web-reactive.html);
uses [WebClient](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/reactive/function/client/WebClient.html)
-* **JerseyHermesSender** - recommended for services using [Jersey]()
+* **JerseyHermesSender** - recommended for services using [Jersey]()
* **OkHttpHermesSender** - supports both HTTP/1.1 and HTTP/2 protocols, uses [OkHttp3 client](http://square.github.io/okhttp/)
diff --git a/docs/docs/user/publishing.md b/docs/docs/user/publishing.md
index 16ba94f0ff..044535aa61 100644
--- a/docs/docs/user/publishing.md
+++ b/docs/docs/user/publishing.md
@@ -134,22 +134,21 @@ Failure statuses:
Each topic can define level of acknowledgement (ACK):
* leader ACK - only one Kafka node (leader) needs to acknowledge reception of message
-* all ACK - all nodes that hold copy of message need to acknowledge reception of message
+* all ACK - at least [min.insync.replicas](https://kafka.apache.org/documentation/#brokerconfigs_min.insync.replicas) nodes must acknowledge reception of message
-For most of the topic leader ACK is enough. This guarantees roughly 99.999..% reception rate. Only in rare cases, during
-Kafka cluster rebalancing or nodes outage Kafka might confirm that message was received, while it was not saved and it
-will be lost.
+ACK configuration has the following consequences:
-What does it mean in practice? Numbers differ per case and they are affected by multiple factors like frequency of
-rebalancing taking place on Kafka clusters, Kafka version etc. In our production environment using ACK leader means we falsely
-believe message was received by Kafka once per 20 million events. This is a very rough estimate that should show you
-the scale, if you need numbers to base your decision on - please conduct own measurements.
+- with `ACK leader` message writes are replicated asynchronously, thus the acknowledgment latency will be low. However, message write may be lost
+when there is a topic leadership change - e.g. due to rebalance or broker restart.
+- with `ACK all` message writes are synchronously replicated to replicas. Write acknowledgement latency will be much higher than with leader ACK,
+it will also have higher variance due to tail latency. However, messages will be persisted as long as the whole replica set does not go down simultaneously.
-If you need 100% guarantee that message was saved, force all replicas to send ACK. The downside of this is much longer
-response times, they tend to vary a lot as well. Thanks to Hermes buffering (described in paragraphs below), we are able
-to guarantee some sane response times to our clients even in *ACK all* mode.
+Publishers are advised to select topic ACK level based on their latency and durability requirements.
-## Buffering
+Hermes also provides a feature called Buffering (described in paragraphs below) which provides consistent write latency
+despite long Kafka response times. Note, however, that this mode may decrease message durability for the `ACK all` setting.
+
+## Buffering [deprecated]
Hermes administrator can set maximum time, for which Hermes will wait for Kafka acknowledgment. By default, it is set to
65ms. After that time, **202** response is sent to client. Event is kept in Kafka producer buffer and it's delivery will
@@ -161,10 +160,54 @@ Kafka is back online.
### Buffer persistence
-By default events are buffered in memory only. This raises the question about what happens in case of Hermes node failure
+By default, events are buffered in memory only. This raises the question about what happens in case of Hermes node failure
(or force kill of process). Hermes Frontend API exposes callbacks that can be used to implement persistence model of
buffered events.
Default implementation uses [OpenHFT ChronicleMap](https://github.com/OpenHFT/Chronicle-Map) to persist unsent messages
to disk. Map structure is continuously persisted to disk, as it is stored in offheap memory as
[memory mapped file](https://en.wikipedia.org/wiki/Memory-mapped_file).
+
+Using buffering with ACK all setting means that durability of events may be lowered when **202** status code is received. If Hermes instance
+is killed before message is spilled to disk or the data on disk becomes corrupted, the message is gone. Thus `ACK all` with **202** status code
+is similar to `ACK leader` because a single node failure could cause the message to be lost.
+
+### Deprecation notice
+The buffering mechanism in Hermes is considered deprecated and is set to be removed in the future.
+
+## Remote DC fallback
+
+Hermes supports a remote datacenter fallback mechanism for [multi datacenter deployments](https://hermes-pubsub.readthedocs.io/en/latest/configuration/kafka-and-zookeeper/#multiple-kafka-and-zookeeper-clusters).
+
+Fallback is configured on per topic basis, using a `fallbackToRemoteDatacenterEnabled` property:
+
+```http
+PUT /topics/my.group.my-topic
+
+{
+ "fallbackToRemoteDatacenterEnabled": true
+}
+```
+
+Using this setting automatically disables buffering mechanism for a topic.
+
+When using this setting for a topic, Hermes will try to send a message to a local datacenter Kafka first and will fall back to remote datacenter Kafka
+if the local send fails.
+
+Hermes also provides a speculative fallback mechanism which will send messages to remote Kafka if the local Kafka is not responding in a timely manner.
+Speculative send is performed after `frontend.kafka.fail-fast-producer.speculativeSendDelay` elapses.
+
+When using remote DC fallback, Hermes attempts to send a message to Kafka for the duration of `frontend.handlers.maxPublishRequestDuration` property. If after
+`maxPublishRequestDuration` Hermes has not received an acknowledgment from Kafka, it will respond with **500** status code to the client.
+
+Table below summarizes remote fallback configuration options:
+
+| Option | Scope | Default value |
+|--------------------------------------------------------|--------|---------------|
+| fallbackToRemoteDatacenterEnabled | topic | false |
+| frontend.kafka.fail-fast-producer.speculativeSendDelay | global | 250ms |
+| frontend.handlers.maxPublishRequestDuration | global | 500ms |
+
+## Partition assignment
+`Partition-Key` header can be used by publishers to specify Kafka `key` which will be used for partition assignment for a message. This will ensure
+that all messages with given `Partition-Key` will be sent to the same Kafka partition.
diff --git a/docs/docs/user/subscribing.md b/docs/docs/user/subscribing.md
index df0321155e..8112b6a809 100644
--- a/docs/docs/user/subscribing.md
+++ b/docs/docs/user/subscribing.md
@@ -39,22 +39,17 @@ Minimal request:
All options:
-Option | Description | Default value
------------------------------------- | ----------------------------------------------------| -------------
-trackingMode | track outgoing messages | trackingOff
-subscriptionPolicy.rate | maximum sending speed in rps (per DC) | 400
-subscriptionPolicy.messageTtl | inflight Time To Live in seconds | 3600
-subscriptionPolicy.messageBackoff | backoff time between retry attempts in millis | 1000
-subscriptionPolicy.retryClientErrors | retry on receiving 4xx status | false
-subscriptionPolicy.requestTimeout | request timeout in millis | 1000
-subscriptionPolicy.socketTimeout | maximum time of inactivity between two data packets | infinity
-subscriptionPolicy.inflightSize | max number of pending requests | 100
-subscriptionPolicy.backoffMultiplier | backoff multiplier for calculating message backoff | 1
-subscriptionPolicy.backoffMaxIntervalInSec | maximal retry backoff in seconds | 600
-headers | additional HTTP request headers | [] (array of headers)
-filters | used for skipping unwanted messages | [] (array of filters)
-endpointAddressResolverMetadata | additional address resolver metadata | {} (map)
-subscriptionIdentityHeadersEnabled | attach HTTP headers with subscription identity | false
+| Option | Description | Default value |
+|------------------------------------|---------------------------------------------------------------------------------------|------------------------------------------------------|
+| trackingMode | track outgoing messages | trackingOff |
+| contentType | delivered message format (JSON or BATCH) | JSON |
+| deliveryType | delivery type (SERIAL or BATCH) | SERIAL |
+| subscriptionPolicy | see [delivery types](#delivery-type) | [serial](#serial-delivery), [batch](#batch-delivery) |
+| mode | whether to send message to single (ANYCAST) or all (BROADCAST) subscription endpoints | ANYCAST |
+| headers | additional HTTP request headers | [] (array of headers) |
+| filters | used for skipping unwanted messages | [] (array of filters) |
+| endpointAddressResolverMetadata | additional address resolver metadata | {} (map) |
+| subscriptionIdentityHeadersEnabled | attach HTTP headers with subscription identity | false |
Possible values for **trackingMode** are:
@@ -76,6 +71,7 @@ Request that specifies all available options:
"id": "My Team"
},
"contact": "my-team@my-company.com",
+ "deliveryType": "SERIAL",
"subscriptionPolicy": {
"rate": 100,
"messageTtl": 3600,
@@ -87,6 +83,7 @@ Request that specifies all available options:
"backoffMultiplier": 1.0,
"backoffMaxIntervalInSec": 600
},
+ "mode": "ANYCAST",
"headers": [
{"name": "SOME_HEADER", "value": "ABC"},
{"name": "OTHER_HEADER", "value": "123"}
@@ -141,6 +138,66 @@ no matter how many times Hermes would try to deliver it. This behavior can be ch
flag on subscription. When this flag is set to true, message with response code **4xx** will be retried,
also causing slowing down overall sending speed. See [back pressure section](#back-pressure) for more details.
+## Content type
+
+Hermes can deliver messages to subscribers in two formats:
+
+- JSON - if topic content type was of type JSON or AVRO
+- AVRO - if topic content type was of type AVRO
+
+## Delivery type
+
+Hermes supports two delivery types: SERIAL and BATCH.
+
+### Serial delivery
+With serial delivery, each hermes consumer will process at most `inflightSize` messages concurrently. Messages are sent individually
+(number of published messages = number of messages sent to subscriber).
+
+
+Options for `subscriptionPolicy`:
+
+| Option | Description | Default value |
+|-------------------------|-----------------------------------------------------|---------------|
+| rate | maximum sending speed in rps (per DC) | 400 |
+| messageTtl | inflight Time To Live in seconds | 3600 |
+| messageBackoff | backoff time between retry attempts in millis | 1000 |
+| retryClientErrors | retry on receiving 4xx status | false |
+| requestTimeout | request timeout in millis | 1000 |
+| socketTimeout | maximum time of inactivity between two data packets | infinity |
+| inflightSize | max number of pending requests | 100 |
+| backoffMultiplier | backoff multiplier for calculating message backoff | 1 |
+| backoffMaxIntervalInSec | maximal retry backoff in seconds | 600 |
+
+
+### Batch delivery
+With batch delivery hermes consumer aggregates messages in a batch before sending the batch to the subscriber. There are 3
+configurable thresholds that determine when the batch will be ready to be sent - number of messages in the batch, duration of the batch
+and size of the batch in bytes. Batch is considered ready whenever one of these thresholds is surpassed.
+
+Messages will be sent as an array of JSON messages, e.g.
+
+```json
+[{"foo": "bar1"}, {"foo": "bar2"}, ...]
+```
+
+Options for `subscriptionPolicy`:
+
+| Option | Description | Default value |
+|-------------------------|-----------------------------------------------------------------|---------------|
+| messageTtl | inflight Time To Live in seconds | 3600 |
+| messageBackoff | backoff time between retry attempts in millis | 500 |
+| retryClientErrors | retry on receiving 4xx status | false |
+| requestTimeout | request timeout in millis | 30000 |
+| batchSize | maximum number of messages in a batch | 100 |
+| batchTime | maximum duration in millis for which messages can be aggregated | 30000 |
+| batchVolume | maximum batch size in bytes | 64000 |
+
+#### Limitations
+Following subscription options are not available with batch delivery:
+
+- AVRO subscription contentType
+- BROADCAST mode
+
## Retries
Hermes Consumers have been optimized towards maximizing chances of successful message delivery. Retry policy is
@@ -178,12 +235,11 @@ current_backoff = previous_backoff * backoff_multiplier
```
This has the following consequences:
-Backoff multiplier | Retry policy type
----------------------------------|--------------------------------
-1 | Constant retry backoff
- above 1 | Exponential retry backoff
-
-
+| Backoff multiplier | Retry policy type |
+|--------------------|---------------------------|
+| 1 | Constant retry backoff |
+| above 1 | Exponential retry backoff |
+
The hard limit to current backoff is defined by maximum backoff parameter and by default is equal to 600 s.
It is worth mentioning that the calculation of current backoff is ignored when the **Retry-After** header is used.
@@ -261,6 +317,17 @@ It's ignored by the default implementation.
See [console integration](../configuration/console.md#subscription-configuration) for more information.
+## Mode
+
+Hermes supports two delivery modes:
+
+- ANYCAST - messages will be sent to endpoint returned by `EndpointAddressResolver#resolve`
+- BROADCAST - messages will be sent to endpoint returned by `EndpointAddressResolver#resolveAll`
+
+Example usage of this feature would be to provide `EndpointAddressResolver` implementation which returns any subscriber address (e.g. single
+service instance) for `resolve` and all subscriber addresses for `resolveAll` (e.g. all instances of a service). ANYCAST subscription messages
+would then be delivered to any subscribing service instance and BROADCAST subscription messages would then be delivered to all subscribing service instances.
+
## Message filtering
Each subscription can define set of filters that are going to be applied after receiving message from kafka in order
@@ -271,10 +338,10 @@ of their declaration.
This mainly concerns message content type. Filtering is done *before* any conversion takes place so all messages have
the same content type as topic on which they were published.
-Topic content-type | Filter type
---------------------- | -----------
-avro | avropath
-json | jsonpath
+| Topic content-type | Filter type |
+|--------------------|-------------|
+| avro | avropath |
+| json | jsonpath |
### Matching strategy
@@ -304,12 +371,12 @@ In case when `matchingStrategy` would be set to `any` then all messages with *GB
JsonPath filter is based on popular [library](https://github.com/jayway/JsonPath) of the same name that can query
json documents. In this case it is used as a selector to retrieve value that is later matched by regexp.
-Option | Description
---------------------- | ---------------------------------------------------
-type | type of filter
-path | JsonPath expression to query json document
-matcher | regexp expression to match value from json document
-matchingStrategy | type of matching strategy. Default is `all`
+| Option | Description |
+|------------------|-----------------------------------------------------|
+| type | type of filter |
+| path | JsonPath expression to query json document |
+| matcher | regexp expression to match value from json document |
+| matchingStrategy | type of matching strategy. Default is `all` |
Example:
```
@@ -323,12 +390,12 @@ avro so we decided to introduce very simple dotted path format without any advan
understand if you're familiar with JsonPath. Right now array and basic selectors that point to specific fields are
supported.
-Option | Description
---------------------- | ---------------------------------------------------
-type | type of filter
-path | dotted expression to query avro document. When array selector is used then wildcard sign `*` can be used as index
-matcher | regexp expression to match value from avro document
-matchingStrategy | type of matching strategy. Default is `all`
+| Option | Description |
+|------------------|-------------------------------------------------------------------------------------------------------------------|
+| type | type of filter |
+| path | dotted expression to query avro document. When array selector is used then wildcard sign `*` can be used as index |
+| matcher | regexp expression to match value from avro document |
+| matchingStrategy | type of matching strategy. Default is `all` |
Example:
```
@@ -664,3 +731,32 @@ It returns array of message tracking information in following format:
Sending delay can be defined for each serial subscription. Consumers will wait for a given time before trying to deliver a message.
This might be useful in situations when there are multiple topics that sends events in the same time, but you want to increase
chance that events from one topic will be delivered later than events from another topic.
+
+## Ordering guarantees
+For subscriptions with `SERIAL` deliveryType Hermes will deliver `inflightSize` messages concurrently.
+Because of that messages may be delivered out of partition order (unless `inflightSize=1` but this can have poor performance).
+
+With `BATCH` deliveryType messages are guaranteed to be delivered in partition order (batches are sent sequentially).
+
+Note that by default Hermes does not give any guarantees about assigning messages to partitions. To do that, publishers must specify [partition key explicitly](publishing.md#partition-assignment).
+
+When messages are published with `partition-key` and consumed with `BATCH` mode (or `SERIAL` with `inflightSize=1`) they will be ordered as long as they were published to one DC.
+Publishing messages with the same `partition-key` to multiple DCs does not guarantee ordering because messages are stored in separate Kafka clusters.
+
+## Message duplication
+
+Hermes messages can be duplicated on different levels. Publishers are advised to include an idempotency key in message schema to allow
+consumers to process messages idempotently, relying on Hermes `messageId` is not sufficient.
+
+Scenarios in which subscribers may receive logically duplicated messages:
+
+- Publisher sends a message to Hermes, the request is timed out and retried. In this case both the first and the second message may end up on Kafka and
+they will have different Hermes `messageId`. Both messages are later sent to subscriber.
+- Once message is received by Hermes it is relayed to Kafka. Message may then be duplicated because of Kafka producer level retries. Duplicated messages will
+have the same Hermes `messageId`.
+- When using [remote DC fallback](https://hermes-pubsub.readthedocs.io/en/latest/user/publishing/#remote-dc-fallback), slow messages are speculatively
+sent to remote DC. This may result in a message being duplicated across DCs if both the local and remote message are saved, both copies will have the
+same `messageId`.
+- Even if there is one instance of a given message on a given topic, the message may still be delivered to subscriber multiple times.
+When a message is acknowledged by a subscriber its offset is not committed to Kafka synchronously. If a Hermes instance is restarted
+after receiving an ack from the subscriber but before committing the message offset to Kafka, the message will be redelivered.
diff --git a/hermes-api/build.gradle b/hermes-api/build.gradle
index c6554f612b..9a3542da0a 100644
--- a/hermes-api/build.gradle
+++ b/hermes-api/build.gradle
@@ -4,20 +4,19 @@ plugins {
}
dependencies {
- api group: 'org.hibernate.validator', name: 'hibernate-validator', version: '8.0.0.Final'
+ api group: 'org.hibernate.validator', name: 'hibernate-validator', version: '8.0.1.Final'
api group: 'jakarta.ws.rs', name: 'jakarta.ws.rs-api', version: '3.1.0'
implementation group: 'com.fasterxml.jackson.core', name: 'jackson-annotations', version: versions.jackson
api group: 'com.fasterxml.jackson.jakarta.rs', name: 'jackson-jakarta-rs-json-provider', version: versions.jackson
api group: 'com.fasterxml.jackson.datatype', name: 'jackson-datatype-jsr310', version: versions.jackson
implementation group: 'com.google.guava', name: 'guava', version: versions.guava
- api group: 'com.damnhandy', name: 'handy-uri-templates', version: '2.0.2'
+ api group: 'com.damnhandy', name: 'handy-uri-templates', version: '2.1.8'
api group: 'jakarta.xml.bind', name: 'jakarta.xml.bind-api', version: '4.0.0'
- implementation group: 'com.sun.xml.bind', name: 'jaxb-core', version: '4.0.3'
- implementation group: 'com.sun.xml.bind', name: 'jaxb-impl', version: '4.0.3'
- implementation group: 'jakarta.annotation', name: 'jakarta.annotation-api', version: '2.1.1'
-
+ implementation group: 'com.sun.xml.bind', name: 'jaxb-core', version: '4.0.5'
+ implementation group: 'com.sun.xml.bind', name: 'jaxb-impl', version: '4.0.5'
+ implementation group: 'jakarta.annotation', name: 'jakarta.annotation-api', version: '3.0.0'
testImplementation group: 'org.spockframework', name: 'spock-core', version: versions.spock
testImplementation group: 'org.spockframework', name: 'spock-junit4', version: versions.spock
diff --git a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/Anonymizable.java b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/Anonymizable.java
index ca360e371d..272b8ccccb 100644
--- a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/Anonymizable.java
+++ b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/Anonymizable.java
@@ -1,5 +1,5 @@
package pl.allegro.tech.hermes.api;
public interface Anonymizable {
- Anonymizable anonymize();
+ Anonymizable anonymize();
}
diff --git a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/AvroMediaType.java b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/AvroMediaType.java
index 165befe5a7..6d1faa8844 100644
--- a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/AvroMediaType.java
+++ b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/AvroMediaType.java
@@ -2,7 +2,7 @@
public class AvroMediaType {
- public static final String AVRO_BINARY = "avro/binary";
+ public static final String AVRO_BINARY = "avro/binary";
- public static final String AVRO_JSON = "avro/json";
+ public static final String AVRO_JSON = "avro/json";
}
diff --git a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/BatchSubscriptionPolicy.java b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/BatchSubscriptionPolicy.java
index 1c06f0ee03..79b26f3bb5 100644
--- a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/BatchSubscriptionPolicy.java
+++ b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/BatchSubscriptionPolicy.java
@@ -3,202 +3,208 @@
import com.fasterxml.jackson.annotation.JsonCreator;
import com.google.common.base.MoreObjects;
import jakarta.validation.constraints.Min;
-import pl.allegro.tech.hermes.api.helpers.Patch;
-
import java.util.Map;
import java.util.Objects;
+import pl.allegro.tech.hermes.api.helpers.Patch;
public class BatchSubscriptionPolicy {
- private static final int DEFAULT_MESSAGE_TTL = 60;
- private static final int DEFAULT_MESSAGE_BACKOFF = 500;
- private static final int DEFAULT_REQUEST_TIMEOUT = 30 * 1000;
- private static final int DEFAULT_BATCH_SIZE = 100;
- private static final int DEFAULT_BATCH_TIME = 30 * 1000;
- private static final int DEFAULT_BATCH_VOLUME = 64 * 1000;
-
- @Min(0)
- private int messageTtl;
-
- private boolean retryClientErrors;
-
- @Min(0)
- private int messageBackoff;
-
- @Min(1)
- private int requestTimeout;
-
- @Min(1)
- private int batchSize;
-
- @Min(1)
- private int batchTime;
-
- @Min(1)
- private int batchVolume;
-
- private BatchSubscriptionPolicy() {}
-
- public BatchSubscriptionPolicy(int messageTtl,
- boolean retryClientErrors,
- int messageBackoff,
- int requestTimeout,
- int batchSize,
- int batchTime,
- int batchVolume) {
- this.messageTtl = messageTtl;
- this.retryClientErrors = retryClientErrors;
- this.messageBackoff = messageBackoff;
- this.requestTimeout = requestTimeout;
- this.batchSize = batchSize;
- this.batchTime = batchTime;
- this.batchVolume = batchVolume;
+ private static final int DEFAULT_MESSAGE_TTL = 60;
+ private static final int DEFAULT_MESSAGE_BACKOFF = 500;
+ private static final int DEFAULT_REQUEST_TIMEOUT = 30 * 1000;
+ private static final int DEFAULT_BATCH_SIZE = 100;
+ private static final int DEFAULT_BATCH_TIME = 30 * 1000;
+ private static final int DEFAULT_BATCH_VOLUME = 64 * 1000;
+
+ @Min(0)
+ private int messageTtl;
+
+ private boolean retryClientErrors;
+
+ @Min(0)
+ private int messageBackoff;
+
+ @Min(1)
+ private int requestTimeout;
+
+ @Min(1)
+ private int batchSize;
+
+ @Min(1)
+ private int batchTime;
+
+ @Min(1)
+ private int batchVolume;
+
+ private BatchSubscriptionPolicy() {}
+
+ public BatchSubscriptionPolicy(
+ int messageTtl,
+ boolean retryClientErrors,
+ int messageBackoff,
+ int requestTimeout,
+ int batchSize,
+ int batchTime,
+ int batchVolume) {
+ this.messageTtl = messageTtl;
+ this.retryClientErrors = retryClientErrors;
+ this.messageBackoff = messageBackoff;
+ this.requestTimeout = requestTimeout;
+ this.batchSize = batchSize;
+ this.batchTime = batchTime;
+ this.batchVolume = batchVolume;
+ }
+
+ @JsonCreator
+ public static BatchSubscriptionPolicy create(Map properties) {
+ return new BatchSubscriptionPolicy(
+ (Integer) properties.getOrDefault("messageTtl", DEFAULT_MESSAGE_TTL),
+ (Boolean) properties.getOrDefault("retryClientErrors", false),
+ (Integer) properties.getOrDefault("messageBackoff", DEFAULT_MESSAGE_BACKOFF),
+ (Integer) properties.getOrDefault("requestTimeout", DEFAULT_REQUEST_TIMEOUT),
+ (Integer) properties.getOrDefault("batchSize", DEFAULT_BATCH_SIZE),
+ (Integer) properties.getOrDefault("batchTime", DEFAULT_BATCH_TIME),
+ (Integer) properties.getOrDefault("batchVolume", DEFAULT_BATCH_VOLUME));
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(
+ messageTtl,
+ retryClientErrors,
+ messageBackoff,
+ requestTimeout,
+ batchSize,
+ batchTime,
+ batchVolume);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
}
-
- @JsonCreator
- public static BatchSubscriptionPolicy create(Map properties) {
- return new BatchSubscriptionPolicy(
- (Integer) properties.getOrDefault("messageTtl", DEFAULT_MESSAGE_TTL),
- (Boolean) properties.getOrDefault("retryClientErrors", false),
- (Integer) properties.getOrDefault("messageBackoff", DEFAULT_MESSAGE_BACKOFF),
- (Integer) properties.getOrDefault("requestTimeout", DEFAULT_REQUEST_TIMEOUT),
- (Integer) properties.getOrDefault("batchSize", DEFAULT_BATCH_SIZE),
- (Integer) properties.getOrDefault("batchTime", DEFAULT_BATCH_TIME),
- (Integer) properties.getOrDefault("batchVolume", DEFAULT_BATCH_VOLUME)
- );
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ final BatchSubscriptionPolicy other = (BatchSubscriptionPolicy) obj;
+ return Objects.equals(this.messageTtl, other.messageTtl)
+ && Objects.equals(this.retryClientErrors, other.retryClientErrors)
+ && Objects.equals(this.messageBackoff, other.messageBackoff)
+ && Objects.equals(this.requestTimeout, other.requestTimeout)
+ && Objects.equals(this.batchSize, other.batchSize)
+ && Objects.equals(this.batchTime, other.batchTime)
+ && Objects.equals(this.batchVolume, other.batchVolume);
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this)
+ .add("messageTtl", messageTtl)
+ .add("messageBackoff", messageBackoff)
+ .add("retryClientErrors", retryClientErrors)
+ .add("batchSize", batchSize)
+ .add("batchTime", batchTime)
+ .add("batchVolume", batchVolume)
+ .add("requestTimeout", requestTimeout)
+ .toString();
+ }
+
+ public Integer getMessageTtl() {
+ return messageTtl;
+ }
+
+ public Integer getMessageBackoff() {
+ return messageBackoff;
+ }
+
+ public Boolean isRetryClientErrors() {
+ return retryClientErrors;
+ }
+
+ public Integer getBatchSize() {
+ return batchSize;
+ }
+
+ public Integer getBatchTime() {
+ return batchTime;
+ }
+
+ public Integer getBatchVolume() {
+ return batchVolume;
+ }
+
+ public Integer getRequestTimeout() {
+ return requestTimeout;
+ }
+
+ public static class Builder {
+
+ private BatchSubscriptionPolicy subscriptionPolicy;
+
+ public static Builder batchSubscriptionPolicy() {
+ return new Builder();
}
- @Override
- public int hashCode() {
- return Objects.hash(messageTtl, retryClientErrors, messageBackoff, requestTimeout, batchSize, batchTime, batchVolume);
+ public Builder() {
+ subscriptionPolicy = new BatchSubscriptionPolicy();
}
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null || getClass() != obj.getClass()) {
- return false;
- }
- final BatchSubscriptionPolicy other = (BatchSubscriptionPolicy) obj;
- return Objects.equals(this.messageTtl, other.messageTtl)
- && Objects.equals(this.retryClientErrors, other.retryClientErrors)
- && Objects.equals(this.messageBackoff, other.messageBackoff)
- && Objects.equals(this.requestTimeout, other.requestTimeout)
- && Objects.equals(this.batchSize, other.batchSize)
- && Objects.equals(this.batchTime, other.batchTime)
- && Objects.equals(this.batchVolume, other.batchVolume);
+ public Builder withMessageTtl(int messageTtl) {
+ subscriptionPolicy.messageTtl = messageTtl;
+ return this;
}
- @Override
- public String toString() {
- return MoreObjects.toStringHelper(this)
- .add("messageTtl", messageTtl)
- .add("messageBackoff", messageBackoff)
- .add("retryClientErrors", retryClientErrors)
- .add("batchSize", batchSize)
- .add("batchTime", batchTime)
- .add("batchVolume", batchVolume)
- .add("requestTimeout", requestTimeout)
- .toString();
+ public Builder withMessageBackoff(int messageBackoff) {
+ subscriptionPolicy.messageBackoff = messageBackoff;
+ return this;
}
- public Integer getMessageTtl() {
- return messageTtl;
+ public Builder withClientErrorRetry(boolean retryClientErrors) {
+ subscriptionPolicy.retryClientErrors = retryClientErrors;
+ return this;
}
- public Integer getMessageBackoff() {
- return messageBackoff;
+ public Builder withBatchSize(int batchSize) {
+ subscriptionPolicy.batchSize = batchSize;
+ return this;
}
- public Boolean isRetryClientErrors() {
- return retryClientErrors;
+ public Builder withBatchTime(int batchTime) {
+ subscriptionPolicy.batchTime = batchTime;
+ return this;
}
- public Integer getBatchSize() {
- return batchSize;
+ public Builder withBatchVolume(int batchVolume) {
+ subscriptionPolicy.batchVolume = batchVolume;
+ return this;
}
- public Integer getBatchTime() {
- return batchTime;
+ public Builder withRequestTimeout(int requestTimeout) {
+ subscriptionPolicy.requestTimeout = requestTimeout;
+ return this;
}
- public Integer getBatchVolume() {
- return batchVolume;
+ public BatchSubscriptionPolicy build() {
+ return new BatchSubscriptionPolicy(
+ subscriptionPolicy.messageTtl,
+ subscriptionPolicy.retryClientErrors,
+ subscriptionPolicy.messageBackoff,
+ subscriptionPolicy.requestTimeout,
+ subscriptionPolicy.batchSize,
+ subscriptionPolicy.batchTime,
+ subscriptionPolicy.batchVolume);
}
- public Integer getRequestTimeout() {
- return requestTimeout;
+ public Builder applyDefaults() {
+ return this;
}
- public static class Builder {
-
- private BatchSubscriptionPolicy subscriptionPolicy;
-
- public static Builder batchSubscriptionPolicy() {
- return new Builder();
- }
-
- public Builder() {
- subscriptionPolicy = new BatchSubscriptionPolicy();
- }
-
- public Builder withMessageTtl(int messageTtl) {
- subscriptionPolicy.messageTtl = messageTtl;
- return this;
- }
-
- public Builder withMessageBackoff(int messageBackoff) {
- subscriptionPolicy.messageBackoff = messageBackoff;
- return this;
- }
-
- public Builder withClientErrorRetry(boolean retryClientErrors) {
- subscriptionPolicy.retryClientErrors = retryClientErrors;
- return this;
- }
-
- public Builder withBatchSize(int batchSize) {
- subscriptionPolicy.batchSize = batchSize;
- return this;
- }
-
- public Builder withBatchTime(int batchTime) {
- subscriptionPolicy.batchTime = batchTime;
- return this;
- }
-
- public Builder withBatchVolume(int batchVolume) {
- subscriptionPolicy.batchVolume = batchVolume;
- return this;
- }
-
- public Builder withRequestTimeout(int requestTimeout) {
- subscriptionPolicy.requestTimeout = requestTimeout;
- return this;
- }
-
- public BatchSubscriptionPolicy build() {
- return new BatchSubscriptionPolicy(
- subscriptionPolicy.messageTtl,
- subscriptionPolicy.retryClientErrors,
- subscriptionPolicy.messageBackoff,
- subscriptionPolicy.requestTimeout,
- subscriptionPolicy.batchSize,
- subscriptionPolicy.batchTime,
- subscriptionPolicy.batchVolume);
- }
-
- public Builder applyDefaults() {
- return this;
- }
-
- public Builder applyPatch(PatchData patch) {
- if (patch != null) {
- subscriptionPolicy = Patch.apply(subscriptionPolicy, patch);
- }
- return this;
- }
+ public Builder applyPatch(PatchData patch) {
+ if (patch != null) {
+ subscriptionPolicy = Patch.apply(subscriptionPolicy, patch);
+ }
+ return this;
}
+ }
}
diff --git a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/BlacklistStatus.java b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/BlacklistStatus.java
index 8be8d8d53b..250b9a54f5 100644
--- a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/BlacklistStatus.java
+++ b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/BlacklistStatus.java
@@ -2,39 +2,38 @@
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
-
import java.util.Objects;
public final class BlacklistStatus {
- public static final BlacklistStatus BLACKLISTED = new BlacklistStatus(true);
- public static final BlacklistStatus NOT_BLACKLISTED = new BlacklistStatus(false);
+ public static final BlacklistStatus BLACKLISTED = new BlacklistStatus(true);
+ public static final BlacklistStatus NOT_BLACKLISTED = new BlacklistStatus(false);
- private final boolean blacklisted;
+ private final boolean blacklisted;
- @JsonCreator
- private BlacklistStatus(@JsonProperty("blacklisted") boolean blacklisted) {
- this.blacklisted = blacklisted;
- }
+ @JsonCreator
+ private BlacklistStatus(@JsonProperty("blacklisted") boolean blacklisted) {
+ this.blacklisted = blacklisted;
+ }
- public boolean isBlacklisted() {
- return blacklisted;
- }
+ public boolean isBlacklisted() {
+ return blacklisted;
+ }
- @Override
- public boolean equals(Object o) {
- if (this == o) {
- return true;
- }
- if (o == null || getClass() != o.getClass()) {
- return false;
- }
- BlacklistStatus that = (BlacklistStatus) o;
- return blacklisted == that.blacklisted;
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
}
-
- @Override
- public int hashCode() {
- return Objects.hash(blacklisted);
+ if (o == null || getClass() != o.getClass()) {
+ return false;
}
+ BlacklistStatus that = (BlacklistStatus) o;
+ return blacklisted == that.blacklisted;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(blacklisted);
+ }
}
diff --git a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/Constraints.java b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/Constraints.java
index 60d71c6e40..fcf82f100a 100644
--- a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/Constraints.java
+++ b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/Constraints.java
@@ -3,37 +3,36 @@
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import jakarta.validation.constraints.Min;
-
import java.util.Objects;
public class Constraints {
- @Min(1)
- private final int consumersNumber;
+ @Min(1)
+ private final int consumersNumber;
- @JsonCreator
- public Constraints(@JsonProperty("consumersNumber") int consumersNumber) {
- this.consumersNumber = consumersNumber;
- }
+ @JsonCreator
+ public Constraints(@JsonProperty("consumersNumber") int consumersNumber) {
+ this.consumersNumber = consumersNumber;
+ }
- public int getConsumersNumber() {
- return consumersNumber;
- }
+ public int getConsumersNumber() {
+ return consumersNumber;
+ }
- @Override
- public boolean equals(Object o) {
- if (this == o) {
- return true;
- }
- if (o == null || getClass() != o.getClass()) {
- return false;
- }
- Constraints that = (Constraints) o;
- return consumersNumber == that.consumersNumber;
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
}
-
- @Override
- public int hashCode() {
- return Objects.hash(consumersNumber);
+ if (o == null || getClass() != o.getClass()) {
+ return false;
}
+ Constraints that = (Constraints) o;
+ return consumersNumber == that.consumersNumber;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(consumersNumber);
+ }
}
diff --git a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/ConsumerGroup.java b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/ConsumerGroup.java
index d7acd8e37e..61528237df 100644
--- a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/ConsumerGroup.java
+++ b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/ConsumerGroup.java
@@ -2,62 +2,62 @@
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
-
import java.util.Objects;
import java.util.Set;
public class ConsumerGroup {
- private final String clusterName;
- private final String groupId;
- private final String state;
-
- private final Set members;
-
- @JsonCreator
- public ConsumerGroup(@JsonProperty("clusterName") String clusterName,
- @JsonProperty("groupId") String groupId,
- @JsonProperty("state") String state,
- @JsonProperty("members") Set members) {
- this.clusterName = clusterName;
- this.groupId = groupId;
- this.state = state;
- this.members = members;
- }
-
- public String getClusterName() {
- return clusterName;
- }
-
- public String getGroupId() {
- return groupId;
- }
-
- public String getState() {
- return state;
- }
-
- public Set getMembers() {
- return members;
+ private final String clusterName;
+ private final String groupId;
+ private final String state;
+
+ private final Set members;
+
+ @JsonCreator
+ public ConsumerGroup(
+ @JsonProperty("clusterName") String clusterName,
+ @JsonProperty("groupId") String groupId,
+ @JsonProperty("state") String state,
+ @JsonProperty("members") Set members) {
+ this.clusterName = clusterName;
+ this.groupId = groupId;
+ this.state = state;
+ this.members = members;
+ }
+
+ public String getClusterName() {
+ return clusterName;
+ }
+
+ public String getGroupId() {
+ return groupId;
+ }
+
+ public String getState() {
+ return state;
+ }
+
+ public Set getMembers() {
+ return members;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
}
-
- @Override
- public boolean equals(Object o) {
- if (this == o) {
- return true;
- }
- if (o == null || getClass() != o.getClass()) {
- return false;
- }
- ConsumerGroup that = (ConsumerGroup) o;
- return Objects.equals(clusterName, that.clusterName)
- && Objects.equals(groupId, that.groupId)
- && Objects.equals(state, that.state)
- && Objects.equals(members, that.members);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(clusterName, groupId, state, members);
+ if (o == null || getClass() != o.getClass()) {
+ return false;
}
+ ConsumerGroup that = (ConsumerGroup) o;
+ return Objects.equals(clusterName, that.clusterName)
+ && Objects.equals(groupId, that.groupId)
+ && Objects.equals(state, that.state)
+ && Objects.equals(members, that.members);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(clusterName, groupId, state, members);
+ }
}
diff --git a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/ConsumerGroupMember.java b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/ConsumerGroupMember.java
index 62348cb9b6..94d0491b9d 100644
--- a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/ConsumerGroupMember.java
+++ b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/ConsumerGroupMember.java
@@ -2,61 +2,61 @@
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
-
import java.util.Objects;
import java.util.Set;
public class ConsumerGroupMember {
- private final String consumerId;
- private final String clientId;
- private final String host;
-
- private final Set partitions;
-
- @JsonCreator
- public ConsumerGroupMember(@JsonProperty("consumerId") String consumerId,
- @JsonProperty("clientId") String clientId,
- @JsonProperty("host")String host,
- @JsonProperty("partitions") Set partitions) {
- this.consumerId = consumerId;
- this.clientId = clientId;
- this.host = host;
- this.partitions = partitions;
- }
-
- public String getConsumerId() {
- return consumerId;
- }
-
- public String getClientId() {
- return clientId;
- }
-
- public String getHost() {
- return host;
- }
-
- public Set getPartitions() {
- return partitions;
+ private final String consumerId;
+ private final String clientId;
+ private final String host;
+
+ private final Set partitions;
+
+ @JsonCreator
+ public ConsumerGroupMember(
+ @JsonProperty("consumerId") String consumerId,
+ @JsonProperty("clientId") String clientId,
+ @JsonProperty("host") String host,
+ @JsonProperty("partitions") Set partitions) {
+ this.consumerId = consumerId;
+ this.clientId = clientId;
+ this.host = host;
+ this.partitions = partitions;
+ }
+
+ public String getConsumerId() {
+ return consumerId;
+ }
+
+ public String getClientId() {
+ return clientId;
+ }
+
+ public String getHost() {
+ return host;
+ }
+
+ public Set getPartitions() {
+ return partitions;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
}
-
- @Override
- public boolean equals(Object o) {
- if (this == o) {
- return true;
- }
- if (o == null || getClass() != o.getClass()) {
- return false;
- }
- ConsumerGroupMember that = (ConsumerGroupMember) o;
- return Objects.equals(consumerId, that.consumerId)
- && Objects.equals(clientId, that.clientId)
- && Objects.equals(host, that.host)
- && Objects.equals(partitions, that.partitions);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(consumerId, clientId, host, partitions);
+ if (o == null || getClass() != o.getClass()) {
+ return false;
}
+ ConsumerGroupMember that = (ConsumerGroupMember) o;
+ return Objects.equals(consumerId, that.consumerId)
+ && Objects.equals(clientId, that.clientId)
+ && Objects.equals(host, that.host)
+ && Objects.equals(partitions, that.partitions);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(consumerId, clientId, host, partitions);
+ }
}
diff --git a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/ContentType.java b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/ContentType.java
index 383f1cdf47..1980a07a1e 100644
--- a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/ContentType.java
+++ b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/ContentType.java
@@ -1,5 +1,6 @@
package pl.allegro.tech.hermes.api;
public enum ContentType {
- JSON, AVRO
+ JSON,
+ AVRO
}
diff --git a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/CrowdGroupDescription.java b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/CrowdGroupDescription.java
deleted file mode 100644
index 8e0a65db96..0000000000
--- a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/CrowdGroupDescription.java
+++ /dev/null
@@ -1,18 +0,0 @@
-package pl.allegro.tech.hermes.api;
-
-import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-@JsonIgnoreProperties(ignoreUnknown = true)
-public class CrowdGroupDescription {
-
- private final String name;
-
- public CrowdGroupDescription(@JsonProperty("name") String name) {
- this.name = name;
- }
-
- public String getName() {
- return name;
- }
-}
diff --git a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/CrowdGroups.java b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/CrowdGroups.java
deleted file mode 100644
index 16bc708a10..0000000000
--- a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/CrowdGroups.java
+++ /dev/null
@@ -1,20 +0,0 @@
-package pl.allegro.tech.hermes.api;
-
-import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-import java.util.List;
-
-@JsonIgnoreProperties(ignoreUnknown = true)
-public class CrowdGroups {
-
- private final List crowdGroupDescriptions;
-
- public CrowdGroups(@JsonProperty("groups") List crowdGroupDescriptions) {
- this.crowdGroupDescriptions = crowdGroupDescriptions;
- }
-
- public List getCrowdGroupDescriptions() {
- return crowdGroupDescriptions;
- }
-}
diff --git a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/DatacenterReadiness.java b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/DatacenterReadiness.java
index 024c9d32b1..279bffc33e 100644
--- a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/DatacenterReadiness.java
+++ b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/DatacenterReadiness.java
@@ -2,57 +2,52 @@
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
-
import java.util.Objects;
public class DatacenterReadiness {
- private final String datacenter;
- private final ReadinessStatus status;
-
- @JsonCreator
- public DatacenterReadiness(@JsonProperty("datacenter") String datacenter,
- @JsonProperty("status") ReadinessStatus status) {
- this.datacenter = datacenter;
- this.status = status;
- }
-
- public String getDatacenter() {
- return datacenter;
- }
-
- public ReadinessStatus getStatus() {
- return status;
- }
-
- @Override
- public String toString() {
- return "DatacenterReadiness{"
- + "datacenter='" + datacenter + '\''
- + ", status=" + status
- + '}';
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) {
- return true;
- }
- if (!(o instanceof DatacenterReadiness)) {
- return false;
- }
- DatacenterReadiness that = (DatacenterReadiness) o;
- return status == that.status
- && Objects.equals(datacenter, that.datacenter);
+ private final String datacenter;
+ private final ReadinessStatus status;
+
+ @JsonCreator
+ public DatacenterReadiness(
+ @JsonProperty("datacenter") String datacenter,
+ @JsonProperty("status") ReadinessStatus status) {
+ this.datacenter = datacenter;
+ this.status = status;
+ }
+
+ public String getDatacenter() {
+ return datacenter;
+ }
+
+ public ReadinessStatus getStatus() {
+ return status;
+ }
+
+ @Override
+ public String toString() {
+ return "DatacenterReadiness{" + "datacenter='" + datacenter + '\'' + ", status=" + status + '}';
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
}
-
- @Override
- public int hashCode() {
- return Objects.hash(datacenter, status);
- }
-
- public enum ReadinessStatus {
- READY,
- NOT_READY,
- UNDEFINED
+ if (!(o instanceof DatacenterReadiness)) {
+ return false;
}
+ DatacenterReadiness that = (DatacenterReadiness) o;
+ return status == that.status && Objects.equals(datacenter, that.datacenter);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(datacenter, status);
+ }
+
+ public enum ReadinessStatus {
+ READY,
+ NOT_READY
+ }
}
diff --git a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/DeliveryType.java b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/DeliveryType.java
index 9edad2adb3..ade2c08947 100644
--- a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/DeliveryType.java
+++ b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/DeliveryType.java
@@ -1,5 +1,6 @@
package pl.allegro.tech.hermes.api;
public enum DeliveryType {
- SERIAL, BATCH
+ SERIAL,
+ BATCH
}
diff --git a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/EndpointAddress.java b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/EndpointAddress.java
index 140a932de1..0dfdeacff9 100644
--- a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/EndpointAddress.java
+++ b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/EndpointAddress.java
@@ -5,145 +5,145 @@
import com.google.common.base.MoreObjects;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
-import pl.allegro.tech.hermes.api.jackson.EndpointAddressDeserializer;
-import pl.allegro.tech.hermes.api.jackson.EndpointAddressSerializer;
-
import java.net.URI;
import java.util.Objects;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import pl.allegro.tech.hermes.api.jackson.EndpointAddressDeserializer;
+import pl.allegro.tech.hermes.api.jackson.EndpointAddressSerializer;
@JsonDeserialize(using = EndpointAddressDeserializer.class)
@JsonSerialize(using = EndpointAddressSerializer.class)
public class EndpointAddress implements Anonymizable {
- private static final String ANONYMIZED_PASSWORD = "*****";
+ private static final String ANONYMIZED_PASSWORD = "*****";
- private static final Pattern URL_PATTERN = Pattern.compile("([a-zA-Z0-9]*)://(([a-zA-Z0-9\\.\\~\\-\\_]*):(.*)@)?(.*)");
+ private static final Pattern URL_PATTERN =
+ Pattern.compile("([a-zA-Z0-9]*)://(([a-zA-Z0-9\\.\\~\\-\\_]*):(.*)@)?(.*)");
- private static final int PROTOCOL_GROUP = 1;
+ private static final int PROTOCOL_GROUP = 1;
- private static final int ADDRESS_GROUP = 5;
+ private static final int ADDRESS_GROUP = 5;
- private static final int USER_INFO_GROUP = 2;
+ private static final int USER_INFO_GROUP = 2;
- private static final int USERNAME_GROUP = 3;
+ private static final int USERNAME_GROUP = 3;
- private static final int PASSWORD_GROUP = 4;
+ private static final int PASSWORD_GROUP = 4;
- private final boolean containsCredentials;
+ private final boolean containsCredentials;
- private final String protocol;
+ private final String protocol;
- private final String username;
+ private final String username;
- private final String password;
+ private final String password;
- private final String endpoint;
+ private final String endpoint;
- private final String rawEndpoint;
+ private final String rawEndpoint;
- public EndpointAddress(String endpoint) {
- this.rawEndpoint = endpoint;
+ public EndpointAddress(String endpoint) {
+ this.rawEndpoint = endpoint;
- Matcher matcher = URL_PATTERN.matcher(endpoint);
- if (matcher.matches()) {
- this.protocol = matcher.group(PROTOCOL_GROUP);
- this.containsCredentials = !Strings.isNullOrEmpty(matcher.group(USER_INFO_GROUP));
+ Matcher matcher = URL_PATTERN.matcher(endpoint);
+ if (matcher.matches()) {
+ this.protocol = matcher.group(PROTOCOL_GROUP);
+ this.containsCredentials = !Strings.isNullOrEmpty(matcher.group(USER_INFO_GROUP));
- this.username = containsCredentials ? matcher.group(USERNAME_GROUP) : null;
- this.password = containsCredentials ? matcher.group(PASSWORD_GROUP) : null;
+ this.username = containsCredentials ? matcher.group(USERNAME_GROUP) : null;
+ this.password = containsCredentials ? matcher.group(PASSWORD_GROUP) : null;
- this.endpoint = containsCredentials ? protocol + "://" + matcher.group(ADDRESS_GROUP) : endpoint;
- } else {
- this.protocol = null;
- this.containsCredentials = false;
- this.username = null;
- this.password = null;
- this.endpoint = endpoint;
- }
+ this.endpoint =
+ containsCredentials ? protocol + "://" + matcher.group(ADDRESS_GROUP) : endpoint;
+ } else {
+ this.protocol = null;
+ this.containsCredentials = false;
+ this.username = null;
+ this.password = null;
+ this.endpoint = endpoint;
}
+ }
- private EndpointAddress(String protocol, String endpoint, String username) {
- this.protocol = protocol;
- this.endpoint = endpoint;
- this.containsCredentials = true;
- this.username = username;
- this.password = ANONYMIZED_PASSWORD;
+ private EndpointAddress(String protocol, String endpoint, String username) {
+ this.protocol = protocol;
+ this.endpoint = endpoint;
+ this.containsCredentials = true;
+ this.username = username;
+ this.password = ANONYMIZED_PASSWORD;
- this.rawEndpoint = protocol + "://" + username + ":" + password + "@" + endpoint.replace(protocol + "://", "");
- }
+ this.rawEndpoint =
+ protocol + "://" + username + ":" + password + "@" + endpoint.replace(protocol + "://", "");
+ }
- public static EndpointAddress of(String endpoint) {
- return new EndpointAddress(endpoint);
- }
+ public static EndpointAddress of(String endpoint) {
+ return new EndpointAddress(endpoint);
+ }
- public static EndpointAddress of(URI endpoint) {
- return new EndpointAddress(endpoint.toString());
- }
+ public static EndpointAddress of(URI endpoint) {
+ return new EndpointAddress(endpoint.toString());
+ }
- public static String extractProtocolFromAddress(String endpoint) {
- Preconditions.checkArgument(endpoint.indexOf(':') != -1);
+ public static String extractProtocolFromAddress(String endpoint) {
+ Preconditions.checkArgument(endpoint.indexOf(':') != -1);
- return endpoint.substring(0, endpoint.indexOf(':'));
- }
+ return endpoint.substring(0, endpoint.indexOf(':'));
+ }
- public String getEndpoint() {
- return endpoint;
- }
+ public String getEndpoint() {
+ return endpoint;
+ }
- public String getRawEndpoint() {
- return rawEndpoint;
- }
+ public String getRawEndpoint() {
+ return rawEndpoint;
+ }
- public URI getUri() {
- return URI.create(endpoint);
- }
+ public URI getUri() {
+ return URI.create(endpoint);
+ }
- public String getProtocol() {
- return protocol;
- }
+ public String getProtocol() {
+ return protocol;
+ }
- @Override
- public int hashCode() {
- return Objects.hash(rawEndpoint);
- }
+ @Override
+ public int hashCode() {
+ return Objects.hash(rawEndpoint);
+ }
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null || getClass() != obj.getClass()) {
- return false;
- }
- final EndpointAddress other = (EndpointAddress) obj;
- return Objects.equals(this.rawEndpoint, other.rawEndpoint);
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
}
-
- @Override
- public String toString() {
- return MoreObjects.toStringHelper(this)
- .add("endpoint", endpoint)
- .toString();
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
}
+ final EndpointAddress other = (EndpointAddress) obj;
+ return Objects.equals(this.rawEndpoint, other.rawEndpoint);
+ }
- public boolean containsCredentials() {
- return containsCredentials;
- }
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this).add("endpoint", endpoint).toString();
+ }
- public String getPassword() {
- return password;
- }
+ public boolean containsCredentials() {
+ return containsCredentials;
+ }
- public String getUsername() {
- return username;
- }
+ public String getPassword() {
+ return password;
+ }
- public EndpointAddress anonymize() {
- if (containsCredentials) {
- return new EndpointAddress(protocol, endpoint, username);
- }
- return this;
- }
+ public String getUsername() {
+ return username;
+ }
+
+ public EndpointAddress anonymize() {
+ if (containsCredentials) {
+ return new EndpointAddress(protocol, endpoint, username);
+ }
+ return this;
+ }
}
diff --git a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/EndpointAddressResolverMetadata.java b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/EndpointAddressResolverMetadata.java
index 8cd3f20bd8..cf863ee8d9 100644
--- a/hermes-api/src/main/java/pl/allegro/tech/hermes/api/EndpointAddressResolverMetadata.java
+++ b/hermes-api/src/main/java/pl/allegro/tech/hermes/api/EndpointAddressResolverMetadata.java
@@ -7,7 +7,6 @@
import com.fasterxml.jackson.databind.ser.std.StdSerializer;
import com.google.common.collect.ImmutableMap;
import jakarta.validation.constraints.NotNull;
-
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
@@ -15,81 +14,84 @@
import java.util.Objects;
import java.util.Optional;
-@JsonSerialize(using = EndpointAddressResolverMetadata.EndpointAddressResolverMetadataSerializer.class)
+@JsonSerialize(
+ using = EndpointAddressResolverMetadata.EndpointAddressResolverMetadataSerializer.class)
public class EndpointAddressResolverMetadata {
- private static final EndpointAddressResolverMetadata EMPTY_INSTANCE = new EndpointAddressResolverMetadata(Collections.emptyMap());
+ private static final EndpointAddressResolverMetadata EMPTY_INSTANCE =
+ new EndpointAddressResolverMetadata(Collections.emptyMap());
- @NotNull
- private Map entries;
+ @NotNull private Map entries;
- @JsonCreator
- public EndpointAddressResolverMetadata(Map entries) {
- this.entries = ImmutableMap.copyOf(entries);
- }
+ @JsonCreator
+ public EndpointAddressResolverMetadata(Map entries) {
+ this.entries = ImmutableMap.copyOf(entries);
+ }
- public static EndpointAddressResolverMetadata empty() {
- return EMPTY_INSTANCE;
- }
+ public static EndpointAddressResolverMetadata empty() {
+ return EMPTY_INSTANCE;
+ }
- public static Builder endpointAddressResolverMetadata() {
- return new Builder();
- }
+ public static Builder endpointAddressResolverMetadata() {
+ return new Builder();
+ }
- public Optional