Merge pull request #11910 from id/1109-sync-master-to-r54

sync master to r54
This commit is contained in:
Ivan Dyachkov 2023-11-09 15:14:29 +01:00 committed by GitHub
commit 6fc2b7799f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
243 changed files with 21410 additions and 5418 deletions

View File

@ -18,7 +18,7 @@ services:
- /tmp/emqx-ci/emqx-shared-secret:/var/lib/secret
kdc:
hostname: kdc.emqx.net
image: ghcr.io/emqx/emqx-builder/5.1-4:1.14.5-25.3.2-2-ubuntu20.04
image: ghcr.io/emqx/emqx-builder/5.2-3:1.14.5-25.3.2-2-ubuntu22.04
container_name: kdc.emqx.net
expose:
- 88 # kdc

View File

@ -3,17 +3,17 @@ version: '3.9'
services:
erlang:
container_name: erlang
image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.1-4:1.14.5-25.3.2-2-ubuntu20.04}
image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.2-3:1.14.5-25.3.2-2-ubuntu22.04}
env_file:
- conf.env
environment:
GITHUB_ACTIONS: ${GITHUB_ACTIONS}
GITHUB_TOKEN: ${GITHUB_TOKEN}
GITHUB_RUN_ID: ${GITHUB_RUN_ID}
GITHUB_SHA: ${GITHUB_SHA}
GITHUB_RUN_NUMBER: ${GITHUB_RUN_NUMBER}
GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}
GITHUB_REF: ${GITHUB_REF}
GITHUB_ACTIONS: ${GITHUB_ACTIONS:-}
GITHUB_TOKEN: ${GITHUB_TOKEN:-}
GITHUB_RUN_ID: ${GITHUB_RUN_ID:-}
GITHUB_SHA: ${GITHUB_SHA:-}
GITHUB_RUN_NUMBER: ${GITHUB_RUN_NUMBER:-}
GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME:-}
GITHUB_REF: ${GITHUB_REF:-}
networks:
- emqx_bridge
ports:

View File

@ -11,7 +11,7 @@ Please convert it to a draft if any of the following conditions are not met. Rev
- [ ] Added tests for the changes
- [ ] Added property-based tests for code which performs user input validation
- [ ] Changed lines covered in coverage report
- [ ] Change log has been added to `changes/(ce|ee)/(feat|perf|fix)-<PR-id>.en.md` files
- [ ] Change log has been added to `changes/(ce|ee)/(feat|perf|fix|breaking)-<PR-id>.en.md` files
- [ ] For internal contributor: there is a jira ticket to track this change
- [ ] Created PR to [emqx-docs](https://github.com/emqx/emqx-docs) if documentation update is required, or link to a follow-up jira ticket
- [ ] Schema changes are backward compatible

View File

@ -16,17 +16,16 @@ env:
jobs:
sanity-checks:
runs-on: ${{ fromJSON(github.repository_owner == 'emqx' && '["self-hosted","ephemeral","linux","x64"]' || '["ubuntu-22.04"]') }}
container: "ghcr.io/emqx/emqx-builder/5.1-4:1.14.5-25.3.2-2-ubuntu22.04"
runs-on: ubuntu-22.04
container: "ghcr.io/emqx/emqx-builder/5.2-3:1.14.5-25.3.2-2-ubuntu22.04"
outputs:
ct-matrix: ${{ steps.matrix.outputs.ct-matrix }}
ct-host: ${{ steps.matrix.outputs.ct-host }}
ct-docker: ${{ steps.matrix.outputs.ct-docker }}
version-emqx: ${{ steps.matrix.outputs.version-emqx }}
version-emqx-enterprise: ${{ steps.matrix.outputs.version-emqx-enterprise }}
runner_labels: ${{ github.repository_owner == 'emqx' && '["self-hosted","ephemeral","linux","x64"]' || '["ubuntu-22.04"]' }}
builder: "ghcr.io/emqx/emqx-builder/5.1-4:1.14.5-25.3.2-2-ubuntu22.04"
builder_vsn: "5.1-4"
builder: "ghcr.io/emqx/emqx-builder/5.2-3:1.14.5-25.3.2-2-ubuntu22.04"
builder_vsn: "5.2-3"
otp_vsn: "25.3.2-2"
elixir_vsn: "1.14.5"
@ -93,12 +92,12 @@ jobs:
MATRIX="$(echo "${APPS}" | jq -c '
[
(.[] | select(.profile == "emqx") | . + {
builder: "5.1-4",
builder: "5.2-3",
otp: "25.3.2-2",
elixir: "1.14.5"
}),
(.[] | select(.profile == "emqx-enterprise") | . + {
builder: "5.1-4",
builder: "5.2-3",
otp: ["25.3.2-2"][],
elixir: "1.14.5"
})
@ -115,7 +114,7 @@ jobs:
echo "version-emqx-enterprise=$(./pkg-vsn.sh emqx-enterprise)" | tee -a $GITHUB_OUTPUT
compile:
runs-on: ${{ fromJSON(needs.sanity-checks.outputs.runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral-xl","linux","x64"]') }}
container: ${{ needs.sanity-checks.outputs.builder }}
needs:
- sanity-checks
@ -154,7 +153,6 @@ jobs:
- compile
uses: ./.github/workflows/run_emqx_app_tests.yaml
with:
runner_labels: ${{ needs.sanity-checks.outputs.runner_labels }}
builder: ${{ needs.sanity-checks.outputs.builder }}
before_ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.base.sha || github.event.before }}
after_ref: ${{ github.sha }}
@ -165,7 +163,6 @@ jobs:
- compile
uses: ./.github/workflows/run_test_cases.yaml
with:
runner_labels: ${{ needs.sanity-checks.outputs.runner_labels }}
builder: ${{ needs.sanity-checks.outputs.builder }}
ct-matrix: ${{ needs.sanity-checks.outputs.ct-matrix }}
ct-host: ${{ needs.sanity-checks.outputs.ct-host }}
@ -177,7 +174,6 @@ jobs:
- compile
uses: ./.github/workflows/static_checks.yaml
with:
runner_labels: ${{ needs.sanity-checks.outputs.runner_labels }}
builder: ${{ needs.sanity-checks.outputs.builder }}
ct-matrix: ${{ needs.sanity-checks.outputs.ct-matrix }}
@ -186,7 +182,6 @@ jobs:
- sanity-checks
uses: ./.github/workflows/build_slim_packages.yaml
with:
runner_labels: ${{ needs.sanity-checks.outputs.runner_labels }}
builder: ${{ needs.sanity-checks.outputs.builder }}
builder_vsn: ${{ needs.sanity-checks.outputs.builder_vsn }}
otp_vsn: ${{ needs.sanity-checks.outputs.otp_vsn }}
@ -197,7 +192,6 @@ jobs:
- sanity-checks
uses: ./.github/workflows/build_docker_for_test.yaml
with:
runner_labels: ${{ needs.sanity-checks.outputs.runner_labels }}
otp_vsn: ${{ needs.sanity-checks.outputs.otp_vsn }}
elixir_vsn: ${{ needs.sanity-checks.outputs.elixir_vsn }}
version-emqx: ${{ needs.sanity-checks.outputs.version-emqx }}
@ -208,8 +202,6 @@ jobs:
- sanity-checks
- build_slim_packages
uses: ./.github/workflows/spellcheck.yaml
with:
runner_labels: ${{ needs.sanity-checks.outputs.runner_labels }}
run_conf_tests:
needs:
@ -217,7 +209,6 @@ jobs:
- compile
uses: ./.github/workflows/run_conf_tests.yaml
with:
runner_labels: ${{ needs.sanity-checks.outputs.runner_labels }}
builder: ${{ needs.sanity-checks.outputs.builder }}
check_deps_integrity:
@ -225,7 +216,6 @@ jobs:
- sanity-checks
uses: ./.github/workflows/check_deps_integrity.yaml
with:
runner_labels: ${{ needs.sanity-checks.outputs.runner_labels }}
builder: ${{ needs.sanity-checks.outputs.builder }}
run_jmeter_tests:
@ -234,7 +224,6 @@ jobs:
- build_docker_for_test
uses: ./.github/workflows/run_jmeter_tests.yaml
with:
runner_labels: ${{ needs.sanity-checks.outputs.runner_labels }}
version-emqx: ${{ needs.sanity-checks.outputs.version-emqx }}
run_docker_tests:
@ -243,7 +232,6 @@ jobs:
- build_docker_for_test
uses: ./.github/workflows/run_docker_tests.yaml
with:
runner_labels: ${{ needs.sanity-checks.outputs.runner_labels }}
version-emqx: ${{ needs.sanity-checks.outputs.version-emqx }}
version-emqx-enterprise: ${{ needs.sanity-checks.outputs.version-emqx-enterprise }}
@ -253,6 +241,5 @@ jobs:
- build_docker_for_test
uses: ./.github/workflows/run_helm_tests.yaml
with:
runner_labels: ${{ needs.sanity-checks.outputs.runner_labels }}
version-emqx: ${{ needs.sanity-checks.outputs.version-emqx }}
version-emqx-enterprise: ${{ needs.sanity-checks.outputs.version-emqx-enterprise }}

View File

@ -19,8 +19,8 @@ env:
jobs:
prepare:
runs-on: ${{ fromJSON(github.repository_owner == 'emqx' && '["self-hosted","ephemeral","linux","x64"]' || '["ubuntu-22.04"]') }}
container: 'ghcr.io/emqx/emqx-builder/5.1-4:1.14.5-25.3.2-2-ubuntu22.04'
runs-on: ubuntu-22.04
container: 'ghcr.io/emqx/emqx-builder/5.2-3:1.14.5-25.3.2-2-ubuntu22.04'
outputs:
profile: ${{ steps.parse-git-ref.outputs.profile }}
release: ${{ steps.parse-git-ref.outputs.release }}
@ -29,9 +29,8 @@ jobs:
ct-matrix: ${{ steps.matrix.outputs.ct-matrix }}
ct-host: ${{ steps.matrix.outputs.ct-host }}
ct-docker: ${{ steps.matrix.outputs.ct-docker }}
runner_labels: ${{ github.repository_owner == 'emqx' && '["self-hosted","ephemeral","linux","x64"]' || '["ubuntu-22.04"]' }}
builder: 'ghcr.io/emqx/emqx-builder/5.1-4:1.14.5-25.3.2-2-ubuntu22.04'
builder_vsn: '5.1-4'
builder: 'ghcr.io/emqx/emqx-builder/5.2-3:1.14.5-25.3.2-2-ubuntu22.04'
builder_vsn: '5.2-3'
otp_vsn: '25.3.2-2'
elixir_vsn: '1.14.5'
@ -63,12 +62,12 @@ jobs:
MATRIX="$(echo "${APPS}" | jq -c '
[
(.[] | select(.profile == "emqx") | . + {
builder: "5.1-4",
builder: "5.2-3",
otp: "25.3.2-2",
elixir: "1.14.5"
}),
(.[] | select(.profile == "emqx-enterprise") | . + {
builder: "5.1-4",
builder: "5.2-3",
otp: ["25.3.2-2"][],
elixir: "1.14.5"
})
@ -108,7 +107,6 @@ jobs:
otp_vsn: ${{ needs.prepare.outputs.otp_vsn }}
elixir_vsn: ${{ needs.prepare.outputs.elixir_vsn }}
builder_vsn: ${{ needs.prepare.outputs.builder_vsn }}
runner_labels: ${{ needs.prepare.outputs.runner_labels }}
secrets: inherit
build_slim_packages:
@ -117,7 +115,6 @@ jobs:
- prepare
uses: ./.github/workflows/build_slim_packages.yaml
with:
runner_labels: ${{ needs.prepare.outputs.runner_labels }}
builder: ${{ needs.prepare.outputs.builder }}
builder_vsn: ${{ needs.prepare.outputs.builder_vsn }}
otp_vsn: ${{ needs.prepare.outputs.otp_vsn }}
@ -125,7 +122,7 @@ jobs:
compile:
if: needs.prepare.outputs.release != 'true'
runs-on: ${{ fromJSON(needs.prepare.outputs.runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
container: ${{ needs.prepare.outputs.builder }}
needs:
- prepare
@ -164,7 +161,6 @@ jobs:
- compile
uses: ./.github/workflows/run_emqx_app_tests.yaml
with:
runner_labels: ${{ needs.prepare.outputs.runner_labels }}
builder: ${{ needs.prepare.outputs.builder }}
before_ref: ${{ github.event.before }}
after_ref: ${{ github.sha }}
@ -176,7 +172,6 @@ jobs:
- compile
uses: ./.github/workflows/run_test_cases.yaml
with:
runner_labels: ${{ needs.prepare.outputs.runner_labels }}
builder: ${{ needs.prepare.outputs.builder }}
ct-matrix: ${{ needs.prepare.outputs.ct-matrix }}
ct-host: ${{ needs.prepare.outputs.ct-host }}
@ -189,7 +184,6 @@ jobs:
- compile
uses: ./.github/workflows/run_conf_tests.yaml
with:
runner_labels: ${{ needs.prepare.outputs.runner_labels }}
builder: ${{ needs.prepare.outputs.builder }}
static_checks:
@ -199,6 +193,5 @@ jobs:
- compile
uses: ./.github/workflows/static_checks.yaml
with:
runner_labels: ${{ needs.prepare.outputs.runner_labels }}
builder: ${{ needs.prepare.outputs.builder }}
ct-matrix: ${{ needs.prepare.outputs.ct-matrix }}

View File

@ -28,9 +28,6 @@ on:
builder_vsn:
required: true
type: string
runner_labels:
required: true
type: string
secrets:
DOCKER_HUB_USER:
required: true
@ -69,18 +66,14 @@ on:
builder_vsn:
required: false
type: string
default: '5.1-4'
runner_labels:
required: false
type: string
default: '["self-hosted","ephemeral","linux","x64"]'
default: '5.2-3'
permissions:
contents: read
jobs:
docker:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
strategy:
fail-fast: false

View File

@ -7,9 +7,6 @@ concurrency:
on:
workflow_call:
inputs:
runner_labels:
required: true
type: string
otp_vsn:
required: true
type: string
@ -28,7 +25,7 @@ permissions:
jobs:
docker:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
env:
EMQX_NAME: ${{ matrix.profile }}
PKG_VSN: ${{ startsWith(matrix.profile, 'emqx-enterprise') && inputs.version-emqx-enterprise || inputs.version-emqx }}

View File

@ -62,7 +62,7 @@ on:
builder_vsn:
required: false
type: string
default: '5.1-4'
default: '5.2-3'
jobs:
windows:
@ -115,6 +115,7 @@ jobs:
with:
name: ${{ matrix.profile }}
path: _packages/${{ matrix.profile }}/
retention-days: 7
mac:
strategy:
@ -149,9 +150,10 @@ jobs:
with:
name: ${{ matrix.profile }}
path: _packages/${{ matrix.profile }}/
retention-days: 7
linux:
runs-on: ['self-hosted', 'ephemeral', 'linux', "${{ matrix.arch }}"]
runs-on: [self-hosted, ephemeral, linux, "${{ matrix.arch }}"]
# always run in builder container because the host might have the wrong OTP version etc.
# otherwise buildx.sh does not run docker if arch and os matches the target arch and os.
container:
@ -199,8 +201,6 @@ jobs:
shell: bash
steps:
- uses: AutoModality/action-clean@v1
- uses: actions/checkout@v3
with:
ref: ${{ github.event.inputs.ref }}
@ -246,6 +246,7 @@ jobs:
with:
name: ${{ matrix.profile }}
path: _packages/${{ matrix.profile }}/
retention-days: 7
publish_artifacts:
runs-on: ubuntu-latest

View File

@ -12,7 +12,7 @@ on:
jobs:
linux:
if: github.repository_owner == 'emqx'
runs-on: ['self-hosted', 'ephemeral', 'linux', "${{ matrix.arch }}"]
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
container:
image: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}"
@ -21,7 +21,6 @@ jobs:
matrix:
profile:
- ['emqx', 'master']
- ['emqx-enterprise', 'release-52']
- ['emqx-enterprise', 'release-53']
otp:
- 25.3.2-2
@ -32,7 +31,7 @@ jobs:
- ubuntu22.04
- amzn2023
builder:
- 5.1-4
- 5.2-3
elixir:
- 1.14.5
@ -77,6 +76,7 @@ jobs:
with:
name: ${{ matrix.profile[0] }}
path: _packages/${{ matrix.profile[0] }}/
retention-days: 7
- name: Send notification to Slack
uses: slackapi/slack-github-action@v1.23.0
if: failure()
@ -100,7 +100,6 @@ jobs:
otp:
- 25.3.2-2
os:
- macos-13
- macos-12-arm64
steps:

View File

@ -7,9 +7,6 @@ concurrency:
on:
workflow_call:
inputs:
runner_labels:
required: true
type: string
builder:
required: true
type: string
@ -27,18 +24,14 @@ on:
inputs:
ref:
required: false
runner_labels:
required: false
type: string
default: '["self-hosted","ephemeral", "linux", "x64"]'
builder:
required: false
type: string
default: 'ghcr.io/emqx/emqx-builder/5.1-4:1.14.5-25.3.2-2-ubuntu22.04'
default: 'ghcr.io/emqx/emqx-builder/5.2-3:1.14.5-25.3.2-2-ubuntu22.04'
builder_vsn:
required: false
type: string
default: '5.1-4'
default: '5.2-3'
otp_vsn:
required: false
type: string
@ -50,7 +43,7 @@ on:
jobs:
linux:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
env:
EMQX_NAME: ${{ matrix.profile[0] }}
@ -113,7 +106,6 @@ jobs:
otp:
- ${{ inputs.otp_vsn }}
os:
- macos-11
- macos-12-arm64
runs-on: ${{ matrix.os }}

View File

@ -3,9 +3,6 @@ name: Check integrity of rebar and mix dependencies
on:
workflow_call:
inputs:
runner_labels:
required: true
type: string
builder:
required: true
type: string
@ -15,7 +12,7 @@ permissions:
jobs:
check_deps_integrity:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
container: ${{ inputs.builder }}
steps:
- uses: actions/checkout@v3

View File

@ -14,13 +14,13 @@ permissions:
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
timeout-minutes: 360
permissions:
actions: read
security-events: write
container:
image: ghcr.io/emqx/emqx-builder/5.1-4:1.14.5-25.3.2-2-ubuntu22.04
image: ghcr.io/emqx/emqx-builder/5.2-3:1.14.5-25.3.2-2-ubuntu22.04
strategy:
fail-fast: false

View File

@ -17,7 +17,7 @@ permissions:
jobs:
rerun-failed-jobs:
if: github.repository_owner == 'emqx'
runs-on: ['self-hosted', 'linux', 'x64', 'ephemeral']
runs-on: ubuntu-22.04
permissions:
checks: read
actions: write

View File

@ -26,7 +26,7 @@ jobs:
prepare:
runs-on: ubuntu-latest
if: github.repository_owner == 'emqx'
container: ghcr.io/emqx/emqx-builder/5.1-4:1.14.5-25.3.2-2-ubuntu20.04
container: ghcr.io/emqx/emqx-builder/5.2-3:1.14.5-25.3.2-2-ubuntu20.04
outputs:
BENCH_ID: ${{ steps.prepare.outputs.BENCH_ID }}
PACKAGE_FILE: ${{ steps.package_file.outputs.PACKAGE_FILE }}

View File

@ -7,9 +7,6 @@ concurrency:
on:
workflow_call:
inputs:
runner_labels:
required: true
type: string
builder:
required: true
type: string
@ -19,7 +16,7 @@ permissions:
jobs:
run_conf_tests:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
container: ${{ inputs.builder }}
strategy:
fail-fast: false
@ -48,4 +45,4 @@ jobs:
with:
name: logs-${{ matrix.profile }}
path: _build/${{ matrix.profile }}/rel/emqx/logs
retention-days: 7

View File

@ -7,9 +7,6 @@ concurrency:
on:
workflow_call:
inputs:
runner_labels:
required: true
type: string
version-emqx:
required: true
type: string
@ -22,7 +19,7 @@ permissions:
jobs:
basic-tests:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
defaults:
run:
shell: bash
@ -66,7 +63,7 @@ jobs:
docker compose rm -fs
paho-mqtt-testing:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON('["self-hosted","ephemeral","linux","x64"]') || 'ubuntu-22.04' }}
defaults:
run:
shell: bash

View File

@ -10,9 +10,6 @@ concurrency:
on:
workflow_call:
inputs:
runner_labels:
required: true
type: string
builder:
required: true
type: string
@ -31,7 +28,7 @@ permissions:
jobs:
run_emqx_app_tests:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
container: ${{ inputs.builder }}
defaults:
@ -66,3 +63,4 @@ jobs:
with:
name: logs-emqx-app-tests
path: apps/emqx/_build/test/logs
retention-days: 7

View File

@ -7,9 +7,6 @@ concurrency:
on:
workflow_call:
inputs:
runner_labels:
required: true
type: string
version-emqx:
required: true
type: string
@ -22,7 +19,7 @@ permissions:
jobs:
helm_test:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON('["self-hosted","ephemeral","linux","x64"]') || 'ubuntu-22.04' }}
defaults:
run:
shell: bash
@ -40,7 +37,10 @@ jobs:
profile:
- emqx
- emqx-enterprise
rpc:
- tcp
- ssl1.3
- ssl1.2
steps:
- uses: actions/checkout@v3
with:
@ -56,6 +56,40 @@ jobs:
echo "${stderr}";
exit 1;
fi
- name: Prepare emqxConfig.EMQX_RPC using TCP
working-directory: source
if: matrix.rpc == 'tcp'
run: |
cat > rpc-overrides.yaml <<EOL
emqxConfig:
EMQX_RPC__PROTOCOL: tcp
EOL
- name: Prepare emqxConfig.EMQX_RPC using ssl1.3
working-directory: source
if: matrix.rpc == 'ssl1.3'
run: |
cat > rpc-overrides.yaml <<EOL
emqxConfig:
EMQX_RPC__PROTOCOL: ssl
EMQX_RPC__CERTFILE: /opt/emqx/etc/certs/cert.pem
EMQX_RPC__KEYFILE: /opt/emqx/etc/certs/key.pem
EMQX_RPC__CACERTFILE: /opt/emqx/etc/certs/cacert.pem
EMQX_RPC__CIPHERS: TLS_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256
EMQX_RPC__TLS_VERSIONS: "[tlsv1.3]"
EOL
- name: Prepare emqxConfig.EMQX_RPC using ssl1.2
working-directory: source
if: matrix.rpc == 'ssl1.2'
run: |
cat > rpc-overrides.yaml <<EOL
emqxConfig:
EMQX_RPC__PROTOCOL: ssl
EMQX_RPC__CERTFILE: /opt/emqx/etc/certs/cert.pem
EMQX_RPC__KEYFILE: /opt/emqx/etc/certs/key.pem
EMQX_RPC__CACERTFILE: /opt/emqx/etc/certs/cacert.pem
EMQX_RPC__CIPHERS: TLS_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256
EMQX_RPC__TLS_VERSIONS: "[tlsv1.2]"
EOL
- name: run emqx on chart (k8s)
if: matrix.discovery == 'k8s'
working-directory: source
@ -72,7 +106,9 @@ jobs:
--set emqxConfig.EMQX_MQTT__RETRY_INTERVAL=2s \
--set emqxConfig.EMQX_MQTT__MAX_TOPIC_ALIAS=10 \
--set emqxConfig.EMQX_AUTHORIZATION__SOURCES=[] \
--set emqxConfig.EMQX_LOG__CONSOLE__LEVEL=debug \
--set emqxConfig.EMQX_AUTHORIZATION__NO_MATCH=allow \
--values rpc-overrides.yaml \
deploy/charts/${EMQX_NAME} \
--debug
- name: run emqx on chart (dns)
@ -90,8 +126,11 @@ jobs:
--set emqxConfig.EMQX_MQTT__RETRY_INTERVAL=2s \
--set emqxConfig.EMQX_MQTT__MAX_TOPIC_ALIAS=10 \
--set emqxConfig.EMQX_AUTHORIZATION__SOURCES=[] \
--set emqxConfig.EMQX_LOG__CONSOLE__LEVEL=debug \
--set emqxConfig.EMQX_AUTHORIZATION__NO_MATCH=allow \
--values rpc-overrides.yaml \
deploy/charts/${EMQX_NAME} \
--wait \
--debug
- name: waiting emqx started
timeout-minutes: 5
@ -104,12 +143,13 @@ jobs:
echo "waiting emqx started";
sleep 10;
done
- name: Get Token
- name: Setup 18083 port forwarding
run: |
nohup kubectl port-forward service/${EMQX_NAME} 18083:18083 > /dev/null &
- name: Get auth token
run: |
kubectl port-forward service/${EMQX_NAME} 18083:18083 > /dev/null &
curl --head -X GET --retry 10 --retry-connrefused --retry-delay 6 http://localhost:18083/status
echo "TOKEN=$(curl --silent -X 'POST' 'http://127.0.0.1:18083/api/v5/login' -H 'accept: application/json' -H 'Content-Type: application/json' -d '{"username": "admin","password": "public"}' | jq -r ".token")" >> $GITHUB_ENV
- name: Check cluster
timeout-minutes: 1
run: |
@ -117,8 +157,13 @@ jobs:
nodes_length="$(curl --silent -H "Authorization: Bearer $TOKEN" -X GET http://127.0.0.1:18083/api/v5/cluster| jq '.nodes|length')"
[ $nodes_length != "3" ]
do
echo "waiting ${EMQX_NAME} cluster scale. Current live nodes: $nodes_length."
sleep 1
if [ $nodes_length -eq 0 ]; then
echo "node len must >= 1, refresh Token... "
TOKEN=$(curl --silent -X 'POST' 'http://127.0.0.1:18083/api/v5/login' -H 'accept: application/json' -H 'Content-Type: application/json' -d '{"username": "admin","password": "public"}' | jq -r ".token")
else
echo "waiting ${EMQX_NAME} cluster scale. Current live nodes: $nodes_length."
fi
sleep 1;
done
- uses: actions/checkout@v3
with:

View File

@ -3,16 +3,13 @@ name: JMeter integration tests
on:
workflow_call:
inputs:
runner_labels:
required: true
type: string
version-emqx:
required: true
type: string
jobs:
jmeter_artifact:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
steps:
- name: Cache Jmeter
id: cache-jmeter
@ -39,9 +36,10 @@ jobs:
with:
name: apache-jmeter.tgz
path: /tmp/apache-jmeter.tgz
retention-days: 3
advanced_feat:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON('["self-hosted","ephemeral","linux","x64"]') || 'ubuntu-22.04' }}
strategy:
fail-fast: false
@ -90,9 +88,10 @@ jobs:
with:
name: jmeter_logs
path: ./jmeter_logs
retention-days: 3
pgsql_authn_authz:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON('["self-hosted","ephemeral","linux","x64"]') || 'ubuntu-22.04' }}
strategy:
fail-fast: false
@ -156,9 +155,10 @@ jobs:
with:
name: jmeter_logs
path: ./jmeter_logs
retention-days: 3
mysql_authn_authz:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON('["self-hosted","ephemeral","linux","x64"]') || 'ubuntu-22.04' }}
strategy:
fail-fast: false
@ -215,9 +215,10 @@ jobs:
with:
name: jmeter_logs
path: ./jmeter_logs
retention-days: 3
JWT_authn:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON('["self-hosted","ephemeral","linux","x64"]') || 'ubuntu-22.04' }}
strategy:
fail-fast: false
@ -266,9 +267,10 @@ jobs:
with:
name: jmeter_logs
path: ./jmeter_logs
retention-days: 3
built_in_database_authn_authz:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON('["self-hosted","ephemeral","linux","x64"]') || 'ubuntu-22.04' }}
strategy:
fail-fast: false
@ -309,3 +311,4 @@ jobs:
with:
name: jmeter_logs
path: ./jmeter_logs
retention-days: 3

View File

@ -7,9 +7,6 @@ concurrency:
on:
workflow_call:
inputs:
runner:
required: true
type: string
builder:
required: true
type: string
@ -19,7 +16,7 @@ permissions:
jobs:
relup_test_plan:
runs-on: ["${{ inputs.runner }}", 'linux', 'x64', 'ephemeral']
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
container: ${{ inputs.builder }}
outputs:
CUR_EE_VSN: ${{ steps.find-versions.outputs.CUR_EE_VSN }}
@ -57,12 +54,13 @@ jobs:
_packages
scripts
.ci
retention-days: 7
relup_test_run:
needs:
- relup_test_plan
if: needs.relup_test_plan.outputs.OLD_VERSIONS != '[]'
runs-on: ["${{ inputs.runner }}", 'linux', 'x64', 'ephemeral']
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
strategy:
fail-fast: false
matrix:
@ -120,3 +118,4 @@ jobs:
name: debug_data
path: |
lux_logs
retention-days: 3

View File

@ -7,9 +7,6 @@ concurrency:
on:
workflow_call:
inputs:
runner_labels:
required: true
type: string
builder:
required: true
type: string
@ -28,7 +25,7 @@ env:
jobs:
eunit_and_proper:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON('["self-hosted","ephemeral","linux","x64"]') || 'ubuntu-22.04' }}
name: "eunit_and_proper (${{ matrix.profile }})"
strategy:
fail-fast: false
@ -52,6 +49,7 @@ jobs:
- name: eunit
env:
PROFILE: ${{ matrix.profile }}
ENABLE_COVER_COMPILE: 1
CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}
run: make eunit
@ -59,6 +57,7 @@ jobs:
- name: proper
env:
PROFILE: ${{ matrix.profile }}
ENABLE_COVER_COMPILE: 1
CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}
run: make proper
@ -66,10 +65,11 @@ jobs:
with:
name: coverdata
path: _build/test/cover
retention-days: 7
ct_docker:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
name: "ct_docker (${{ matrix.app }}-${{ matrix.suitegroup }})"
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON('["self-hosted","ephemeral","linux","x64"]') || 'ubuntu-22.04' }}
name: "${{ matrix.app }}-${{ matrix.suitegroup }} (${{ matrix.profile }})"
strategy:
fail-fast: false
matrix:
@ -102,12 +102,14 @@ jobs:
MINIO_TAG: "RELEASE.2023-03-20T20-16-18Z"
PROFILE: ${{ matrix.profile }}
SUITEGROUP: ${{ matrix.suitegroup }}
ENABLE_COVER_COMPILE: 1
CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}-sg${{ matrix.suitegroup }}
run: ./scripts/ct/run.sh --ci --app ${{ matrix.app }}
- uses: actions/upload-artifact@v3
with:
name: coverdata
path: _build/test/cover
retention-days: 7
- name: compress logs
if: failure()
run: tar -czf logs.tar.gz _build/test/logs
@ -116,10 +118,11 @@ jobs:
with:
name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-${{ matrix.otp }}-sg${{ matrix.suitegroup }}
path: logs.tar.gz
retention-days: 7
ct:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
name: "ct (${{ matrix.app }}-${{ matrix.suitegroup }})"
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON('["self-hosted","ephemeral","linux","x64"]') || 'ubuntu-22.04' }}
name: "${{ matrix.app }}-${{ matrix.suitegroup }} (${{ matrix.profile }})"
strategy:
fail-fast: false
matrix:
@ -144,6 +147,7 @@ jobs:
env:
PROFILE: ${{ matrix.profile }}
SUITEGROUP: ${{ matrix.suitegroup }}
ENABLE_COVER_COMPILE: 1
CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}-sg${{ matrix.suitegroup }}
run: |
make "${{ matrix.app }}-ct"
@ -152,6 +156,7 @@ jobs:
name: coverdata
path: _build/test/cover
if-no-files-found: warn # do not fail if no coverdata found
retention-days: 7
- name: compress logs
if: failure()
run: tar -czf logs.tar.gz _build/test/logs
@ -160,13 +165,14 @@ jobs:
with:
name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-${{ matrix.otp }}-sg${{ matrix.suitegroup }}
path: logs.tar.gz
retention-days: 7
tests_passed:
needs:
- eunit_and_proper
- ct
- ct_docker
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ubuntu-22.04
strategy:
fail-fast: false
steps:
@ -177,7 +183,7 @@ jobs:
- eunit_and_proper
- ct
- ct_docker
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
container: ${{ inputs.builder }}
strategy:
fail-fast: false
@ -217,7 +223,7 @@ jobs:
# do this in a separate job
upload_coverdata:
needs: make_cover
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ubuntu-22.04
steps:
- name: Coveralls Finished
env:

View File

@ -6,10 +6,6 @@ concurrency:
on:
workflow_call:
inputs:
runner_labels:
required: true
type: string
permissions:
contents: read
@ -21,7 +17,7 @@ jobs:
profile:
- emqx
- emqx-enterprise
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
steps:
- uses: actions/download-artifact@v3
with:

View File

@ -14,7 +14,7 @@ permissions:
jobs:
stale:
if: github.repository_owner == 'emqx'
runs-on: ['self-hosted', 'linux', 'x64', 'ephemeral']
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
permissions:
issues: write
pull-requests: none

View File

@ -7,9 +7,6 @@ concurrency:
on:
workflow_call:
inputs:
runner_labels:
required: true
type: string
builder:
required: true
type: string
@ -25,7 +22,7 @@ permissions:
jobs:
static_checks:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON('["self-hosted","ephemeral","linux","x64"]') || 'ubuntu-22.04' }}
name: "static_checks (${{ matrix.profile }})"
strategy:
fail-fast: false

View File

@ -1,3 +1,8 @@
ifeq ($(DEBUG),1)
DEBUG_INFO = $(info $1)
else
DEBUG_INFO = @:
endif
REBAR = $(CURDIR)/rebar3
BUILD = $(CURDIR)/build
SCRIPTS = $(CURDIR)/scripts
@ -18,17 +23,6 @@ endif
export EMQX_DASHBOARD_VERSION ?= v1.5.0
export EMQX_EE_DASHBOARD_VERSION ?= e1.3.0
# `:=` should be used here, otherwise the `$(shell ...)` will be executed every time when the variable is used
# In make 4.4+, for backward-compatibility the value from the original environment is used.
# so the shell script will be executed tons of times.
# https://github.com/emqx/emqx/pull/10627
ifeq ($(strip $(OTP_VSN)),)
export OTP_VSN := $(shell $(SCRIPTS)/get-otp-vsn.sh)
endif
ifeq ($(strip $(ELIXIR_VSN)),)
export ELIXIR_VSN := $(shell $(SCRIPTS)/get-elixir-vsn.sh)
endif
PROFILE ?= emqx
REL_PROFILES := emqx emqx-enterprise
PKG_PROFILES := emqx-pkg emqx-enterprise-pkg
@ -75,11 +69,11 @@ mix-deps-get: $(ELIXIR_COMMON_DEPS)
.PHONY: eunit
eunit: $(REBAR) merge-config
@ENABLE_COVER_COMPILE=1 $(REBAR) eunit --name eunit@127.0.0.1 -v -c --cover_export_name $(CT_COVER_EXPORT_PREFIX)-eunit
@$(REBAR) eunit --name eunit@127.0.0.1 -c -v --cover_export_name $(CT_COVER_EXPORT_PREFIX)-eunit
.PHONY: proper
proper: $(REBAR)
@ENABLE_COVER_COMPILE=1 $(REBAR) proper -d test/props -c
@$(REBAR) proper -d test/props -c
.PHONY: test-compile
test-compile: $(REBAR) merge-config
@ -91,7 +85,7 @@ $(REL_PROFILES:%=%-compile): $(REBAR) merge-config
.PHONY: ct
ct: $(REBAR) merge-config
@ENABLE_COVER_COMPILE=1 $(REBAR) ct --name $(CT_NODE_NAME) -c -v --cover_export_name $(CT_COVER_EXPORT_PREFIX)-ct
@$(REBAR) ct --name $(CT_NODE_NAME) -c -v --cover_export_name $(CT_COVER_EXPORT_PREFIX)-ct
## only check bpapi for enterprise profile because it's a super-set.
.PHONY: static_checks
@ -101,31 +95,56 @@ static_checks:
./scripts/check-i18n-style.sh
./scripts/check_missing_reboot_apps.exs
APPS=$(shell $(SCRIPTS)/find-apps.sh)
# Allow user-set CASES environment variable
ifneq ($(CASES),)
CASES_ARG := --case $(CASES)
endif
.PHONY: $(APPS:%=%-ct)
# Allow user-set GROUPS environment variable
ifneq ($(GROUPS),)
GROUPS_ARG := --groups $(GROUPS)
endif
ifeq ($(ENABLE_COVER_COMPILE),1)
cover_args = --cover --cover_export_name $(CT_COVER_EXPORT_PREFIX)-$(subst /,-,$1)
else
cover_args =
endif
## example:
## env SUITES=apps/appname/test/test_SUITE.erl CASES=t_foo make apps/appname-ct
define gen-app-ct-target
$1-ct: $(REBAR) merge-config clean-test-cluster-config
$(eval SUITES := $(shell $(SCRIPTS)/find-suites.sh $1))
ifneq ($(SUITES),)
ENABLE_COVER_COMPILE=1 $(REBAR) ct -c -v \
--readable=$(CT_READABLE) \
--name $(CT_NODE_NAME) \
--cover_export_name $(CT_COVER_EXPORT_PREFIX)-$(subst /,-,$1) \
--suite $(SUITES)
$(REBAR) ct -v \
--readable=$(CT_READABLE) \
--name $(CT_NODE_NAME) \
$(call cover_args,$1) \
--suite $(SUITES) \
$(GROUPS_ARG) \
$(CASES_ARG)
else
@echo 'No suites found for $1'
@echo 'No suites found for $1'
endif
endef
$(foreach app,$(APPS),$(eval $(call gen-app-ct-target,$(app))))
ifneq ($(filter %-ct,$(MAKECMDGOALS)),)
app_to_test := $(patsubst %-ct,%,$(filter %-ct,$(MAKECMDGOALS)))
$(call DEBUG_INFO,app_to_test $(app_to_test))
$(eval $(call gen-app-ct-target,$(app_to_test)))
endif
## apps/name-prop targets
.PHONY: $(APPS:%=%-prop)
define gen-app-prop-target
$1-prop:
$(REBAR) proper -d test/props -v -m $(shell $(SCRIPTS)/find-props.sh $1)
endef
$(foreach app,$(APPS),$(eval $(call gen-app-prop-target,$(app))))
ifneq ($(filter %-prop,$(MAKECMDGOALS)),)
app_to_test := $(patsubst %-prop,%,$(filter %-prop,$(MAKECMDGOALS)))
$(call DEBUG_INFO,app_to_test $(app_to_test))
$(eval $(call gen-app-prop-target,$(app_to_test)))
endif
.PHONY: ct-suite
ct-suite: $(REBAR) merge-config clean-test-cluster-config
@ -298,8 +317,18 @@ $(foreach tt,$(ALL_ELIXIR_TGZS),$(eval $(call gen-elixir-tgz-target,$(tt))))
fmt: $(REBAR)
@$(SCRIPTS)/erlfmt -w 'apps/*/{src,include,priv,test,integration_test}/**/*.{erl,hrl,app.src,eterm}'
@$(SCRIPTS)/erlfmt -w 'rebar.config.erl'
@$(SCRIPTS)/erlfmt -w '$(SCRIPTS)/**/*.escript'
@$(SCRIPTS)/erlfmt -w 'bin/**/*.escript'
@mix format
.PHONY: clean-test-cluster-config
clean-test-cluster-config:
@rm -f apps/emqx_conf/data/configs/cluster.hocon || true
.PHONY: spellcheck
spellcheck:
./scripts/spellcheck/spellcheck.sh _build/docgen/$(PROFILE)/schema-en.json
.PHONY: nothing
nothing:
@:

View File

@ -14,9 +14,4 @@
%% limitations under the License.
%%--------------------------------------------------------------------
-ifndef(EMQX_BPAPI_HRL).
-define(EMQX_BPAPI_HRL, true).
-compile({parse_transform, emqx_bpapi_trans}).
-endif.
-include_lib("emqx_utils/include/bpapi.hrl").

View File

@ -52,29 +52,7 @@
-record(subscription, {topic, subid, subopts}).
%% See 'Application Message' in MQTT Version 5.0
-record(message, {
%% Global unique message ID
id :: binary(),
%% Message QoS
qos = 0,
%% Message from
from :: atom() | binary(),
%% Message flags
flags = #{} :: emqx_types:flags(),
%% Message headers. May contain any metadata. e.g. the
%% protocol version number, username, peerhost or
%% the PUBLISH properties (MQTT 5.0).
headers = #{} :: emqx_types:headers(),
%% Topic that the message is published to
topic :: emqx_types:topic(),
%% Message Payload
payload :: emqx_types:payload(),
%% Timestamp (Unit: millisecond)
timestamp :: integer(),
%% not used so far, for future extension
extra = [] :: term()
}).
-include_lib("emqx_utils/include/emqx_message.hrl").
-record(delivery, {
%% Sender of the delivery

View File

@ -19,67 +19,79 @@
-define(PH_VAR_THIS, <<"$_THIS_">>).
-define(PH(Type), <<"${", Type/binary, "}">>).
-define(PH(Var), <<"${" Var "}">>).
%% action: publish/subscribe
-define(PH_ACTION, <<"${action}">>).
-define(VAR_ACTION, "action").
-define(PH_ACTION, ?PH(?VAR_ACTION)).
%% cert
-define(PH_CERT_SUBJECT, <<"${cert_subject}">>).
-define(PH_CERT_CN_NAME, <<"${cert_common_name}">>).
-define(VAR_CERT_SUBJECT, "cert_subject").
-define(VAR_CERT_CN_NAME, "cert_common_name").
-define(PH_CERT_SUBJECT, ?PH(?VAR_CERT_SUBJECT)).
-define(PH_CERT_CN_NAME, ?PH(?VAR_CERT_CN_NAME)).
%% MQTT
-define(PH_PASSWORD, <<"${password}">>).
-define(PH_CLIENTID, <<"${clientid}">>).
-define(PH_FROM_CLIENTID, <<"${from_clientid}">>).
-define(PH_USERNAME, <<"${username}">>).
-define(PH_FROM_USERNAME, <<"${from_username}">>).
-define(PH_TOPIC, <<"${topic}">>).
-define(VAR_PASSWORD, "password").
-define(VAR_CLIENTID, "clientid").
-define(VAR_USERNAME, "username").
-define(VAR_TOPIC, "topic").
-define(PH_PASSWORD, ?PH(?VAR_PASSWORD)).
-define(PH_CLIENTID, ?PH(?VAR_CLIENTID)).
-define(PH_FROM_CLIENTID, ?PH("from_clientid")).
-define(PH_USERNAME, ?PH(?VAR_USERNAME)).
-define(PH_FROM_USERNAME, ?PH("from_username")).
-define(PH_TOPIC, ?PH(?VAR_TOPIC)).
%% MQTT payload
-define(PH_PAYLOAD, <<"${payload}">>).
-define(PH_PAYLOAD, ?PH("payload")).
%% client IPAddress
-define(PH_PEERHOST, <<"${peerhost}">>).
-define(VAR_PEERHOST, "peerhost").
-define(PH_PEERHOST, ?PH(?VAR_PEERHOST)).
%% ip & port
-define(PH_HOST, <<"${host}">>).
-define(PH_PORT, <<"${port}">>).
-define(PH_HOST, ?PH("host")).
-define(PH_PORT, ?PH("port")).
%% Enumeration of message QoS 0,1,2
-define(PH_QOS, <<"${qos}">>).
-define(PH_FLAGS, <<"${flags}">>).
-define(VAR_QOS, "qos").
-define(PH_QOS, ?PH(?VAR_QOS)).
-define(PH_FLAGS, ?PH("flags")).
%% Additional data related to process within the MQTT message
-define(PH_HEADERS, <<"${headers}">>).
-define(PH_HEADERS, ?PH("headers")).
%% protocol name
-define(PH_PROTONAME, <<"${proto_name}">>).
-define(VAR_PROTONAME, "proto_name").
-define(PH_PROTONAME, ?PH(?VAR_PROTONAME)).
%% protocol version
-define(PH_PROTOVER, <<"${proto_ver}">>).
-define(PH_PROTOVER, ?PH("proto_ver")).
%% MQTT keepalive interval
-define(PH_KEEPALIVE, <<"${keepalive}">>).
-define(PH_KEEPALIVE, ?PH("keepalive")).
%% MQTT clean_start
-define(PH_CLEAR_START, <<"${clean_start}">>).
-define(PH_CLEAR_START, ?PH("clean_start")).
%% MQTT Session Expiration time
-define(PH_EXPIRY_INTERVAL, <<"${expiry_interval}">>).
-define(PH_EXPIRY_INTERVAL, ?PH("expiry_interval")).
%% Time when PUBLISH message reaches Broker (ms)
-define(PH_PUBLISH_RECEIVED_AT, <<"${publish_received_at}">>).
-define(PH_PUBLISH_RECEIVED_AT, ?PH("publish_received_at")).
%% Mountpoint for bridging messages
-define(PH_MOUNTPOINT, <<"${mountpoint}">>).
-define(VAR_MOUNTPOINT, "mountpoint").
-define(PH_MOUNTPOINT, ?PH(?VAR_MOUNTPOINT)).
%% IPAddress and Port of terminal
-define(PH_PEERNAME, <<"${peername}">>).
-define(PH_PEERNAME, ?PH("peername")).
%% IPAddress and Port listened by emqx
-define(PH_SOCKNAME, <<"${sockname}">>).
-define(PH_SOCKNAME, ?PH("sockname")).
%% whether it is MQTT bridge connection
-define(PH_IS_BRIDGE, <<"${is_bridge}">>).
-define(PH_IS_BRIDGE, ?PH("is_bridge")).
%% Terminal connection completion time (s)
-define(PH_CONNECTED_AT, <<"${connected_at}">>).
-define(PH_CONNECTED_AT, ?PH("connected_at")).
%% Event trigger time(millisecond)
-define(PH_TIMESTAMP, <<"${timestamp}">>).
-define(PH_TIMESTAMP, ?PH("timestamp")).
%% Terminal disconnection completion time (s)
-define(PH_DISCONNECTED_AT, <<"${disconnected_at}">>).
-define(PH_DISCONNECTED_AT, ?PH("disconnected_at")).
-define(PH_NODE, <<"${node}">>).
-define(PH_REASON, <<"${reason}">>).
-define(PH_NODE, ?PH("node")).
-define(PH_REASON, ?PH("reason")).
-define(PH_ENDPOINT_NAME, <<"${endpoint_name}">>).
-define(PH_RETAIN, <<"${retain}">>).
-define(PH_ENDPOINT_NAME, ?PH("endpoint_name")).
-define(VAR_RETAIN, "retain").
-define(PH_RETAIN, ?PH(?VAR_RETAIN)).
%% Keep the following string placeholders in sync with the binary definitions above.
-define(PH_S_ACTION, "${action}").

View File

@ -35,7 +35,7 @@
-define(EMQX_RELEASE_CE, "5.3.1-alpha.1").
%% Enterprise edition
-define(EMQX_RELEASE_EE, "5.3.1-alpha.1").
-define(EMQX_RELEASE_EE, "5.3.1-alpha.4").
%% The HTTP API version
-define(EMQX_API_VERSION, "5.0").

View File

@ -1,7 +1,7 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_ds_SUITE).
-module(emqx_persistent_session_ds_SUITE).
-compile(export_all).
-compile(nowarn_export_all).
@ -14,7 +14,6 @@
-define(DEFAULT_KEYSPACE, default).
-define(DS_SHARD_ID, <<"local">>).
-define(DS_SHARD, {?DEFAULT_KEYSPACE, ?DS_SHARD_ID}).
-define(ITERATOR_REF_TAB, emqx_ds_iterator_ref).
-import(emqx_common_test_helpers, [on_exit/1]).
@ -91,9 +90,6 @@ get_mqtt_port(Node, Type) ->
{_IP, Port} = erpc:call(Node, emqx_config, get, [[listeners, Type, default, bind]]),
Port.
get_all_iterator_refs(Node) ->
erpc:call(Node, mnesia, dirty_all_keys, [?ITERATOR_REF_TAB]).
get_all_iterator_ids(Node) ->
Fn = fun(K, _V, Acc) -> [K | Acc] end,
erpc:call(Node, fun() ->
@ -126,6 +122,32 @@ start_client(Opts0 = #{}) ->
on_exit(fun() -> catch emqtt:stop(Client) end),
Client.
%% Restart `Node' in place and bring its applications back up, emitting
%% trace points (`will_restart_node' / `restarted_node') that testcases
%% use with `?force_ordering' to interleave the restart with other events.
restart_node(Node, NodeSpec) ->
    ?tp(will_restart_node, #{}),
    ?tp(notice, "restarting node", #{node => Node}),
    %% Monitor so we can observe the node actually going down below.
    true = monitor_node(Node, true),
    ok = erpc:call(Node, init, restart, []),
    receive
        {nodedown, Node} ->
            ok
    after 10_000 ->
        ct:fail("node ~p didn't stop", [Node])
    end,
    ?tp(notice, "waiting for nodeup", #{}),
    wait_nodeup(Node),
    wait_gen_rpc_down(NodeSpec),
    ?tp(notice, "restarting apps", #{node => Node}),
    Apps = maps:get(apps, NodeSpec),
    ok = erpc:call(Node, emqx_cth_suite, load_apps, [Apps]),
    _ = erpc:call(Node, emqx_cth_suite, start_apps, [Apps, NodeSpec]),
    %% have to re-inject this so that we may stop the node successfully at the
    %% end....
    ok = emqx_cth_cluster:set_node_opts(Node, NodeSpec),
    ok = snabbkaffe:forward_trace(Node),
    ?tp(notice, "node restarted", #{node => Node}),
    ?tp(restarted_node, #{}),
    ok.
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
@ -143,24 +165,14 @@ t_non_persistent_session_subscription(_Config) ->
{ok, _} = emqtt:connect(Client),
?tp(notice, "subscribing", #{}),
{ok, _, [?RC_GRANTED_QOS_2]} = emqtt:subscribe(Client, SubTopicFilter, qos2),
IteratorRefs = get_all_iterator_refs(node()),
IteratorIds = get_all_iterator_ids(node()),
ok = emqtt:stop(Client),
#{
iterator_refs => IteratorRefs,
iterator_ids => IteratorIds
}
ok
end,
fun(Res, Trace) ->
fun(Trace) ->
ct:pal("trace:\n ~p", [Trace]),
#{
iterator_refs := IteratorRefs,
iterator_ids := IteratorIds
} = Res,
?assertEqual([], IteratorRefs),
?assertEqual({ok, []}, IteratorIds),
?assertEqual([], ?of_kind(ds_session_subscription_added, Trace)),
ok
end
),
@ -175,7 +187,7 @@ t_session_subscription_idempotency(Config) ->
?check_trace(
begin
?force_ordering(
#{?snk_kind := persistent_session_ds_iterator_added},
#{?snk_kind := persistent_session_ds_subscription_added},
_NEvents0 = 1,
#{?snk_kind := will_restart_node},
_Guard0 = true
@ -187,32 +199,7 @@ t_session_subscription_idempotency(Config) ->
_Guard1 = true
),
spawn_link(fun() ->
?tp(will_restart_node, #{}),
?tp(notice, "restarting node", #{node => Node1}),
true = monitor_node(Node1, true),
ok = erpc:call(Node1, init, restart, []),
receive
{nodedown, Node1} ->
ok
after 10_000 ->
ct:fail("node ~p didn't stop", [Node1])
end,
?tp(notice, "waiting for nodeup", #{node => Node1}),
wait_nodeup(Node1),
wait_gen_rpc_down(Node1Spec),
?tp(notice, "restarting apps", #{node => Node1}),
Apps = maps:get(apps, Node1Spec),
ok = erpc:call(Node1, emqx_cth_suite, load_apps, [Apps]),
_ = erpc:call(Node1, emqx_cth_suite, start_apps, [Apps, Node1Spec]),
%% have to re-inject this so that we may stop the node succesfully at the
%% end....
ok = emqx_cth_cluster:set_node_opts(Node1, Node1Spec),
ok = snabbkaffe:forward_trace(Node1),
?tp(notice, "node restarted", #{node => Node1}),
?tp(restarted_node, #{}),
ok
end),
spawn_link(fun() -> restart_node(Node1, Node1Spec) end),
?tp(notice, "starting 1", #{}),
Client0 = start_client(#{port => Port, clientid => ClientId}),
@ -223,7 +210,7 @@ t_session_subscription_idempotency(Config) ->
receive
{'EXIT', {shutdown, _}} ->
ok
after 0 -> ok
after 100 -> ok
end,
process_flag(trap_exit, false),
@ -240,10 +227,7 @@ t_session_subscription_idempotency(Config) ->
end,
fun(Trace) ->
ct:pal("trace:\n ~p", [Trace]),
%% Exactly one iterator should have been opened.
SubTopicFilterWords = emqx_topic:words(SubTopicFilter),
?assertEqual([{ClientId, SubTopicFilterWords}], get_all_iterator_refs(Node1)),
?assertMatch({ok, [_]}, get_all_iterator_ids(Node1)),
?assertMatch(
{ok, #{}, #{SubTopicFilterWords := #{}}},
erpc:call(Node1, emqx_persistent_session_ds, session_open, [ClientId])
@ -262,7 +246,10 @@ t_session_unsubscription_idempotency(Config) ->
?check_trace(
begin
?force_ordering(
#{?snk_kind := persistent_session_ds_close_iterators, ?snk_span := {complete, _}},
#{
?snk_kind := persistent_session_ds_subscription_delete,
?snk_span := {complete, _}
},
_NEvents0 = 1,
#{?snk_kind := will_restart_node},
_Guard0 = true
@ -270,36 +257,11 @@ t_session_unsubscription_idempotency(Config) ->
?force_ordering(
#{?snk_kind := restarted_node},
_NEvents1 = 1,
#{?snk_kind := persistent_session_ds_iterator_delete, ?snk_span := start},
#{?snk_kind := persistent_session_ds_subscription_route_delete, ?snk_span := start},
_Guard1 = true
),
spawn_link(fun() ->
?tp(will_restart_node, #{}),
?tp(notice, "restarting node", #{node => Node1}),
true = monitor_node(Node1, true),
ok = erpc:call(Node1, init, restart, []),
receive
{nodedown, Node1} ->
ok
after 10_000 ->
ct:fail("node ~p didn't stop", [Node1])
end,
?tp(notice, "waiting for nodeup", #{node => Node1}),
wait_nodeup(Node1),
wait_gen_rpc_down(Node1Spec),
?tp(notice, "restarting apps", #{node => Node1}),
Apps = maps:get(apps, Node1Spec),
ok = erpc:call(Node1, emqx_cth_suite, load_apps, [Apps]),
_ = erpc:call(Node1, emqx_cth_suite, start_apps, [Apps, Node1Spec]),
%% have to re-inject this so that we may stop the node succesfully at the
%% end....
ok = emqx_cth_cluster:set_node_opts(Node1, Node1Spec),
ok = snabbkaffe:forward_trace(Node1),
?tp(notice, "node restarted", #{node => Node1}),
?tp(restarted_node, #{}),
ok
end),
spawn_link(fun() -> restart_node(Node1, Node1Spec) end),
?tp(notice, "starting 1", #{}),
Client0 = start_client(#{port => Port, clientid => ClientId}),
@ -312,7 +274,7 @@ t_session_unsubscription_idempotency(Config) ->
receive
{'EXIT', {shutdown, _}} ->
ok
after 0 -> ok
after 100 -> ok
end,
process_flag(trap_exit, false),
@ -327,7 +289,7 @@ t_session_unsubscription_idempotency(Config) ->
?wait_async_action(
emqtt:unsubscribe(Client1, SubTopicFilter),
#{
?snk_kind := persistent_session_ds_iterator_delete,
?snk_kind := persistent_session_ds_subscription_route_delete,
?snk_span := {complete, _}
},
15_000
@ -339,9 +301,10 @@ t_session_unsubscription_idempotency(Config) ->
end,
fun(Trace) ->
ct:pal("trace:\n ~p", [Trace]),
%% No iterators remaining
?assertEqual([], get_all_iterator_refs(Node1)),
?assertEqual({ok, []}, get_all_iterator_ids(Node1)),
?assertMatch(
{ok, #{}, Subs = #{}} when map_size(Subs) =:= 0,
erpc:call(Node1, emqx_persistent_session_ds, session_open, [ClientId])
),
ok
end
),

View File

@ -7,15 +7,18 @@
{emqx_bridge,2}.
{emqx_bridge,3}.
{emqx_bridge,4}.
{emqx_bridge,5}.
{emqx_broker,1}.
{emqx_cm,1}.
{emqx_cm,2}.
{emqx_conf,1}.
{emqx_conf,2}.
{emqx_conf,3}.
{emqx_connector, 1}.
{emqx_dashboard,1}.
{emqx_delayed,1}.
{emqx_delayed,2}.
{emqx_ds,1}.
{emqx_eviction_agent,1}.
{emqx_eviction_agent,2}.
{emqx_exhook,1}.

View File

@ -29,8 +29,8 @@
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}},
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.7"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.16"}}},
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.2.0"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.16"}}},
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.2.1"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.19"}}},
{emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.3"}}},
{pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
{recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},
@ -45,7 +45,7 @@
{meck, "0.9.2"},
{proper, "1.4.0"},
{bbmustache, "1.10.0"},
{emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.9.0"}}}
{emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.9.1"}}}
]},
{extra_src_dirs, [{"test", [recursive]},
{"integration_test", [recursive]}]}
@ -55,7 +55,7 @@
{meck, "0.9.2"},
{proper, "1.4.0"},
{bbmustache, "1.10.0"},
{emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.9.0"}}}
{emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.9.1"}}}
]},
{extra_src_dirs, [{"test", [recursive]}]}
]}

View File

@ -325,22 +325,32 @@ init_load(SchemaMod, Conf) when is_list(Conf) orelse is_binary(Conf) ->
ok = save_schema_mod_and_names(SchemaMod),
HasDeprecatedFile = has_deprecated_file(),
RawConf0 = load_config_files(HasDeprecatedFile, Conf),
warning_deprecated_root_key(RawConf0),
RawConf1 =
RawConf1 = upgrade_raw_conf(SchemaMod, RawConf0),
warning_deprecated_root_key(RawConf1),
RawConf2 =
case HasDeprecatedFile of
true ->
overlay_v0(SchemaMod, RawConf0);
overlay_v0(SchemaMod, RawConf1);
false ->
overlay_v1(SchemaMod, RawConf0)
overlay_v1(SchemaMod, RawConf1)
end,
RawConf = fill_defaults_for_all_roots(SchemaMod, RawConf1),
RawConf3 = fill_defaults_for_all_roots(SchemaMod, RawConf2),
%% check configs against the schema
{AppEnvs, CheckedConf} = check_config(SchemaMod, RawConf, #{}),
{AppEnvs, CheckedConf} = check_config(SchemaMod, RawConf3, #{}),
save_to_app_env(AppEnvs),
ok = save_to_config_map(CheckedConf, RawConf),
ok = save_to_config_map(CheckedConf, RawConf3),
maybe_init_default_zone(),
ok.
%% Give the schema module a chance to migrate raw (unchecked) config from
%% older layouts before any overlay/defaults/check steps run.  The hook is
%% optional: if `SchemaMod' does not export `upgrade_raw_conf/1', the raw
%% config is returned unchanged.
upgrade_raw_conf(SchemaMod, RawConf) ->
    case erlang:function_exported(SchemaMod, upgrade_raw_conf, 1) of
        true ->
            %% TODO make it a schema module behaviour in hocon_schema
            apply(SchemaMod, upgrade_raw_conf, [RawConf]);
        false ->
            RawConf
    end.
%% Merge environment variable overrides on top, then merge with overrides.
overlay_v0(SchemaMod, RawConf) when is_map(RawConf) ->
RawConfWithEnvs = merge_envs(SchemaMod, RawConf),

View File

@ -19,7 +19,7 @@
-include("logger.hrl").
-include("emqx_schema.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-include_lib("hocon/include/hocon_types.hrl").
-behaviour(gen_server).
@ -703,7 +703,7 @@ atom(Bin) when is_binary(Bin), size(Bin) > 255 ->
erlang:throw(
iolist_to_binary(
io_lib:format(
"Name is is too long."
"Name is too long."
" Please provide a shorter name (<= 255 bytes)."
" The name that is too long: \"~s\"",
[Bin]
@ -736,7 +736,7 @@ remove_empty_leaf(KeyPath, Handlers) ->
end.
assert_callback_function(Mod) ->
_ = Mod:module_info(),
_ = apply(Mod, module_info, []),
case
erlang:function_exported(Mod, pre_config_update, 3) orelse
erlang:function_exported(Mod, post_config_update, 5)

View File

@ -16,6 +16,8 @@
-module(emqx_hookpoints).
-include("logger.hrl").
-type callback_result() :: stop | any().
-type fold_callback_result(Acc) :: {stop, Acc} | {ok, Acc} | stop | any().
@ -62,12 +64,16 @@
'delivery.dropped',
'delivery.completed',
'cm.channel.unregistered',
'tls_handshake.psk_lookup',
'tls_handshake.psk_lookup'
]).
%% Our template plugin used this hookpoints before its 5.1.0 version,
%% so we keep them here
-define(DEPRECATED_HOOKPOINTS, [
%% This is a deprecated hookpoint renamed to 'client.authorize'
%% However, our template plugin used this hookpoint before its 5.1.0 version,
%% so we keep it here
'client.check_acl'
'client.check_acl',
%% Misspelled hookpoint
'session.takeovered'
]).
%%-----------------------------------------------------------------------------
@ -206,27 +212,42 @@ when
%% API
%%-----------------------------------------------------------------------------
default_hookpoints() ->
?HOOKPOINTS.
%% Binary hookpoint names are dynamic and used for bridges
-type registered_hookpoint() :: atom().
-type registered_hookpoint_status() :: valid | deprecated.
-spec default_hookpoints() -> #{registered_hookpoint() => registered_hookpoint_status()}.
default_hookpoints() ->
maps:merge(
maps:from_keys(?HOOKPOINTS, valid),
maps:from_keys(?DEPRECATED_HOOKPOINTS, deprecated)
).
-spec register_hookpoints() -> ok.
register_hookpoints() ->
register_hookpoints(default_hookpoints()).
register_hookpoints(HookPoints) ->
persistent_term:put(?MODULE, maps:from_keys(HookPoints, true)).
-spec register_hookpoints(
[registered_hookpoint()] | #{registered_hookpoint() => registered_hookpoint_status()}
) -> ok.
register_hookpoints(HookPoints) when is_list(HookPoints) ->
register_hookpoints(maps:from_keys(HookPoints, valid));
register_hookpoints(HookPoints) when is_map(HookPoints) ->
persistent_term:put(?MODULE, HookPoints).
-spec verify_hookpoint(registered_hookpoint() | binary()) -> ok | no_return().
verify_hookpoint(HookPoint) when is_binary(HookPoint) -> ok;
verify_hookpoint(HookPoint) ->
case maps:is_key(HookPoint, registered_hookpoints()) of
true ->
ok;
false ->
error({invalid_hookpoint, HookPoint})
case maps:find(HookPoint, registered_hookpoints()) of
{ok, valid} -> ok;
{ok, deprecated} -> ?SLOG(warning, #{msg => deprecated_hookpoint, hookpoint => HookPoint});
error -> error({invalid_hookpoint, HookPoint})
end.
%%-----------------------------------------------------------------------------
%% Internal API
%%-----------------------------------------------------------------------------
-spec registered_hookpoints() -> #{registered_hookpoint() => registered_hookpoint_status()}.
registered_hookpoints() ->
persistent_term:get(?MODULE, #{}).

View File

@ -66,7 +66,8 @@
-export([
is_expired/1,
update_expiry/1
update_expiry/1,
timestamp_now/0
]).
-export([
@ -113,14 +114,13 @@ make(From, Topic, Payload) ->
emqx_types:payload()
) -> emqx_types:message().
make(From, QoS, Topic, Payload) when ?QOS_0 =< QoS, QoS =< ?QOS_2 ->
Now = erlang:system_time(millisecond),
#message{
id = emqx_guid:gen(),
qos = QoS,
from = From,
topic = Topic,
payload = Payload,
timestamp = Now
timestamp = timestamp_now()
}.
-spec make(
@ -137,7 +137,6 @@ make(From, QoS, Topic, Payload, Flags, Headers) when
is_map(Flags),
is_map(Headers)
->
Now = erlang:system_time(millisecond),
#message{
id = emqx_guid:gen(),
qos = QoS,
@ -146,7 +145,7 @@ make(From, QoS, Topic, Payload, Flags, Headers) when
headers = Headers,
topic = Topic,
payload = Payload,
timestamp = Now
timestamp = timestamp_now()
}.
-spec make(
@ -164,7 +163,6 @@ make(MsgId, From, QoS, Topic, Payload, Flags, Headers) when
is_map(Flags),
is_map(Headers)
->
Now = erlang:system_time(millisecond),
#message{
id = MsgId,
qos = QoS,
@ -173,7 +171,7 @@ make(MsgId, From, QoS, Topic, Payload, Flags, Headers) when
headers = Headers,
topic = Topic,
payload = Payload,
timestamp = Now
timestamp = timestamp_now()
}.
%% optimistic estimation of a message size after serialization
@ -403,6 +401,11 @@ from_map(#{
extra = Extra
}.
%% @doc Get current timestamp in milliseconds.
-spec timestamp_now() -> integer().
timestamp_now() ->
erlang:system_time(millisecond).
%% MilliSeconds
elapsed(Since) ->
max(0, erlang:system_time(millisecond) - Since).
max(0, timestamp_now() - Since).

View File

@ -83,7 +83,7 @@ do_check_pass({_SimpleHash, _Salt, _SaltPosition} = HashParams, PasswordHash, Pa
compare_secure(Hash, PasswordHash).
-spec hash(hash_params(), password()) -> password_hash().
hash({pbkdf2, MacFun, Salt, Iterations, DKLength}, Password) ->
hash({pbkdf2, MacFun, Salt, Iterations, DKLength}, Password) when Iterations > 0 ->
case pbkdf2(MacFun, Password, Salt, Iterations, DKLength) of
{ok, HashPasswd} ->
hex(HashPasswd);

View File

@ -23,16 +23,12 @@
%% Message persistence
-export([
persist/1,
serialize/1,
deserialize/1
persist/1
]).
%% FIXME
-define(DS_SHARD_ID, <<"local">>).
-define(DEFAULT_KEYSPACE, default).
-define(DS_SHARD, {?DEFAULT_KEYSPACE, ?DS_SHARD_ID}).
-define(PERSISTENT_MESSAGE_DB, emqx_persistent_message).
%% FIXME
-define(WHEN_ENABLED(DO),
case is_store_enabled() of
true -> DO;
@ -44,18 +40,10 @@
init() ->
?WHEN_ENABLED(begin
ok = emqx_ds:ensure_shard(
?DS_SHARD,
#{
dir => filename:join([
emqx:data_dir(),
ds,
messages,
?DEFAULT_KEYSPACE,
?DS_SHARD_ID
])
}
),
ok = emqx_ds:open_db(?PERSISTENT_MESSAGE_DB, #{
backend => builtin,
storage => {emqx_ds_storage_bitfield_lts, #{}}
}),
ok = emqx_persistent_session_ds_router:init_tables(),
ok = emqx_persistent_session_ds:create_tables(),
ok
@ -82,19 +70,11 @@ persist(Msg) ->
needs_persistence(Msg) ->
not (emqx_message:get_flag(dup, Msg) orelse emqx_message:is_sys(Msg)).
-spec store_message(emqx_types:message()) -> emqx_ds:store_batch_result().
store_message(Msg) ->
ID = emqx_message:id(Msg),
Timestamp = emqx_guid:timestamp(ID),
Topic = emqx_topic:words(emqx_message:topic(Msg)),
emqx_ds_storage_layer:store(?DS_SHARD, ID, Timestamp, Topic, serialize(Msg)).
emqx_ds:store_batch(?PERSISTENT_MESSAGE_DB, [Msg]).
has_subscribers(#message{topic = Topic}) ->
emqx_persistent_session_ds_router:has_any_route(Topic).
%%
serialize(Msg) ->
term_to_binary(emqx_message:to_map(Msg)).
deserialize(Bin) ->
emqx_message:from_map(binary_to_term(Bin)).

View File

@ -0,0 +1,213 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
%% @doc This module implements the routines for replaying streams of
%% messages.
-module(emqx_persistent_message_ds_replayer).
%% API:
-export([new/0, next_packet_id/1, replay/2, commit_offset/3, poll/3]).
%% internal exports:
-export([]).
-export_type([inflight/0]).
-include("emqx_persistent_session_ds.hrl").
%%================================================================================
%% Type declarations
%%================================================================================
%% Note: sequence numbers are monotonic; they don't wrap around:
-type seqno() :: non_neg_integer().

%% A contiguous run of in-flight messages fetched from a single DS stream.
-record(range, {
    %% Stream the messages in this range were fetched from.
    stream :: emqx_ds:stream(),
    %% Sequence number of the first message in the range (inclusive).
    first :: seqno(),
    %% Sequence number of the last message in the range (inclusive);
    %% set to LastSeqNo - 1 by fetch/5.
    last :: seqno(),
    %% Iterator pointing just past this range.  When the whole range has
    %% been acked, commit_offset/3 persists it as the stream's saved
    %% iterator and drops the range.
    iterator_next :: emqx_ds:iterator() | undefined
}).

-type range() :: #range{}.

%% Per-session replay/in-flight window state.
-record(inflight, {
    %% Next sequence number to hand out (see next_packet_id/1).
    next_seqno = 0 :: seqno(),
    %% Highest acknowledged sequence number (see commit_offset/3).
    acked_seqno = 0 :: seqno(),
    %% Outstanding (not fully acked) ranges, oldest first —
    %% fetch/5 appends new ranges at the tail.
    offset_ranges = [] :: [range()]
}).

-opaque inflight() :: #inflight{}.
%%================================================================================
%% API functions
%%================================================================================
%% @doc Create an empty in-flight window: both sequence counters at 0,
%% no outstanding ranges.
-spec new() -> inflight().
new() ->
    #inflight{}.
%% @doc Reserve the current sequence number, advance the counter, and
%% return the MQTT packet id that corresponds to the reserved seqno.
-spec next_packet_id(inflight()) -> {emqx_types:packet_id(), inflight()}.
next_packet_id(#inflight{next_seqno = SeqNo} = Inflight) ->
    {seqno_to_packet_id(SeqNo), Inflight#inflight{next_seqno = SeqNo + 1}}.
%% @doc Replay of unacked messages is not implemented yet; currently
%% produces no replies for any session/inflight state.
-spec replay(emqx_persistent_session_ds:id(), inflight()) ->
    emqx_session:replies().
replay(_SessionId, #inflight{}) ->
    [].
%% @doc Process an acknowledgement for `PacketId': translate it back to a
%% session sequence number, drop every range that is now fully acked
%% (persisting its trailing iterator), and record the new ack watermark.
%% Always returns `{true, Inflight}' at present.
-spec commit_offset(emqx_persistent_session_ds:id(), emqx_types:packet_id(), inflight()) ->
    {_IsValidOffset :: boolean(), inflight()}.
commit_offset(
    SessionId,
    PacketId,
    Inflight0 = #inflight{
        acked_seqno = AckedSeqno0, next_seqno = NextSeqNo, offset_ranges = Ranges0
    }
) ->
    AckedSeqno = packet_id_to_seqno(NextSeqNo, PacketId),
    %% NOTE(review): a duplicate or out-of-order ack makes this match fail
    %% and crashes the caller (badmatch) — presumably intended as a
    %% protocol-violation assertion; confirm callers expect that.
    true = AckedSeqno0 < AckedSeqno,
    Ranges = lists:filter(
        fun(#range{stream = Stream, last = LastSeqno, iterator_next = ItNext}) ->
            case LastSeqno =< AckedSeqno of
                true ->
                    %% This range has been fully
                    %% acked. Remove it and replace saved
                    %% iterator with the trailing iterator.
                    update_iterator(SessionId, Stream, ItNext),
                    false;
                false ->
                    %% This range still has unacked
                    %% messages:
                    true
            end
        end,
        Ranges0
    ),
    Inflight = Inflight0#inflight{acked_seqno = AckedSeqno, offset_ranges = Ranges},
    {true, Inflight}.
%% @doc Try to fill the free part of the in-flight window with new
%% messages.  To amortize DB access, nothing is fetched unless at least
%% half the window (and at least one slot) is free.  Streams are polled
%% in random order so no single stream can starve the others.
-spec poll(emqx_persistent_session_ds:id(), inflight(), pos_integer()) ->
    {emqx_session:replies(), inflight()}.
poll(SessionId, Inflight, WindowSize) when WindowSize > 0, WindowSize < 16#7fff ->
    #inflight{next_seqno = NextSeqNo, acked_seqno = AckedSeqno} = Inflight,
    FetchThreshold = max(1, WindowSize div 2),
    FreeSpace = AckedSeqno + WindowSize - NextSeqNo,
    if
        FreeSpace < FetchThreshold ->
            %% TODO: this branch is meant to avoid fetching data from
            %% the DB in chunks that are too small. However, this
            %% logic is not exactly good for the latency. Can the
            %% client get stuck even?
            {[], Inflight};
        true ->
            RandomizedStreams = shuffle(get_streams(SessionId)),
            fetch(SessionId, Inflight, RandomizedStreams, FreeSpace, [])
    end.
%%================================================================================
%% Internal exports
%%================================================================================
%%================================================================================
%% Internal functions
%%================================================================================
%% Fetch up to `N' messages across `Streams', assigning each a fresh
%% packet id via next_packet_id/1 and recording one #range{} per stream
%% that yielded messages.  `Acc'/`Publishes0' is built in reverse and
%% flipped once on return.  Stops when streams or budget run out.
fetch(_SessionId, Inflight, _Streams = [], _N, Acc) ->
    {lists:reverse(Acc), Inflight};
fetch(_SessionId, Inflight, _Streams, 0, Acc) ->
    {lists:reverse(Acc), Inflight};
fetch(SessionId, Inflight0, [#ds_stream{stream = Stream} | Streams], N, Publishes0) ->
    %% Resume from the newest saved iterator for this stream (either the
    %% tail of its last unacked range, or the persisted one).
    #inflight{next_seqno = FirstSeqNo, offset_ranges = Ranges0} = Inflight0,
    ItBegin = get_last_iterator(SessionId, Stream, Ranges0),
    {ok, ItEnd, Messages} = emqx_ds:next(ItBegin, N),
    %% Assign a packet id (and thereby a seqno) to each fetched message.
    {Publishes, Inflight1} =
        lists:foldl(
            fun(Msg, {PubAcc0, InflightAcc0}) ->
                {PacketId, InflightAcc} = next_packet_id(InflightAcc0),
                PubAcc = [{PacketId, Msg} | PubAcc0],
                {PubAcc, InflightAcc}
            end,
            {Publishes0, Inflight0},
            Messages
        ),
    #inflight{next_seqno = LastSeqNo} = Inflight1,
    %% Number of messages actually consumed from this stream.
    NMessages = LastSeqNo - FirstSeqNo,
    case NMessages > 0 of
        true ->
            %% Record the seqno span [FirstSeqNo, LastSeqNo - 1] so that
            %% commit_offset/3 can later persist ItEnd once it is acked.
            Range = #range{
                first = FirstSeqNo,
                last = LastSeqNo - 1,
                stream = Stream,
                iterator_next = ItEnd
            },
            Inflight = Inflight1#inflight{offset_ranges = Ranges0 ++ [Range]},
            fetch(SessionId, Inflight, Streams, N - NMessages, Publishes);
        false ->
            fetch(SessionId, Inflight1, Streams, N, Publishes)
    end.
%% Persist `Iterator' as the saved position for `{SessionId, Stream}' in
%% the session iterator table (dirty write; no transaction).
update_iterator(SessionId, Stream, Iterator) ->
    mria:dirty_write(?SESSION_ITER_TAB, #ds_iter{id = {SessionId, Stream}, iter = Iterator}).
%% Resolve where the next fetch for `Stream' should begin: the trailing
%% iterator of the stream's newest in-flight range if one exists,
%% otherwise the iterator persisted for the session.
get_last_iterator(SessionId, Stream, Ranges) ->
    %% Ranges are kept oldest-first; reversing puts the newest match first.
    Newest = lists:keyfind(Stream, #range.stream, lists:reverse(Ranges)),
    case Newest of
        #range{iterator_next = ItNext} ->
            ItNext;
        false ->
            get_iterator(SessionId, Stream)
    end.
%% Read the persisted iterator for `{SessionId, Stream}'.  Crashes
%% (badmatch) if no record exists — callers rely on it being present.
get_iterator(SessionId, Stream) ->
    Id = {SessionId, Stream},
    [#ds_iter{iter = It}] = mnesia:dirty_read(?SESSION_ITER_TAB, Id),
    It.
%% List all #ds_stream{} records registered for the session (dirty read).
get_streams(SessionId) ->
    mnesia:dirty_read(?SESSION_STREAM_TAB, SessionId).
%% Packet ID as defined by MQTT protocol is a 16-bit integer in range
%% 1..FFFF. This function translates internal session sequence number
%% to MQTT packet ID by chopping off most significant bits and adding
%% 1. This assumes that there's never more FFFF in-flight packets at
%% any time:
%%
%% NOTE(review): this maps seqno S to `S rem 16#ffff + 1' (modulus
%% 65535), while packet_id_to_seqno/2 reconstructs using 16-bit shifts
%% (modulus 65536).  The two agree only before the first wrap-around —
%% confirm whether sessions can outlive 16#ffff packets.
-spec seqno_to_packet_id(non_neg_integer()) -> emqx_types:packet_id().
seqno_to_packet_id(Counter) ->
    Counter rem 16#ffff + 1.
%% Reconstruct session counter by adding most significant bits from
%% the current counter to the packet id.
%%
%% NOTE(review): the result equals the original seqno + 1 for small
%% values (seqno 0 yields packet id 1 which maps back to 1).  That
%% off-by-one is compensated by the `<' comparison and `=<' range check
%% in commit_offset/3, so do not "fix" this in isolation.  See also the
%% modulus mismatch noted at seqno_to_packet_id/1.
-spec packet_id_to_seqno(non_neg_integer(), emqx_types:packet_id()) -> non_neg_integer().
packet_id_to_seqno(NextSeqNo, PacketId) ->
    %% Graft the packet id onto the high bits of the current counter,
    %% stepping back one 16-bit epoch if that lands in the future.
    N = ((NextSeqNo bsr 16) bsl 16) + PacketId,
    case N > NextSeqNo of
        true -> N - 16#10000;
        false -> N
    end.
%% @doc Return the elements of `Xs' in uniformly random order
%% (decorate with random keys, sort, undecorate).
-spec shuffle([A]) -> [A].
shuffle(Xs) ->
    Tagged = [{rand:uniform(), X} || X <- Xs],
    [X || {_Key, X} <- lists:sort(Tagged)].

View File

@ -18,9 +18,12 @@
-include("emqx.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-include_lib("stdlib/include/ms_transform.hrl").
-include("emqx_mqtt.hrl").
-include("emqx_persistent_session_ds.hrl").
%% Session API
-export([
create/3,
@ -50,7 +53,7 @@
-export([
deliver/3,
replay/3,
% handle_timeout/3,
handle_timeout/3,
disconnect/1,
terminate/2
]).
@ -58,33 +61,27 @@
%% session table operations
-export([create_tables/0]).
-ifdef(TEST).
-export([session_open/1]).
-endif.
%% RPC
-export([
ensure_iterator_closed_on_all_shards/1,
ensure_all_iterators_closed/1
]).
%% Remove me later (satisfy checks for an unused BPAPI)
-export([
do_open_iterator/3,
do_ensure_iterator_closed/1,
do_ensure_all_iterators_closed/1
]).
%% FIXME
-define(DS_SHARD_ID, <<"local">>).
-define(DEFAULT_KEYSPACE, default).
-define(DS_SHARD, {?DEFAULT_KEYSPACE, ?DS_SHARD_ID}).
-ifdef(TEST).
-export([session_open/1]).
-endif.
%% Currently, this is the clientid. We avoid `emqx_types:clientid()' because that can be
%% an atom, in theory (?).
-type id() :: binary().
-type iterator() :: emqx_ds:iterator().
-type iterator_id() :: emqx_ds:iterator_id().
-type topic_filter() :: emqx_ds:topic_filter().
-type iterators() :: #{topic_filter() => iterator()}.
-type subscription_id() :: {id(), topic_filter()}.
-type subscription() :: #{
start_time := emqx_ds:time(),
propts := map(),
extra := map()
}.
-type session() :: #{
%% Client ID
id := id(),
@ -93,11 +90,15 @@
%% When the session should expire
expires_at := timestamp() | never,
%% Clients Subscriptions.
iterators := #{topic() => iterator()},
iterators := #{topic() => subscription()},
%% Inflight messages
inflight := emqx_persistent_message_ds_replayer:inflight(),
%%
props := map()
}.
%% -type session() :: #session{}.
-type timestamp() :: emqx_utils_calendar:epoch_millisecond().
-type topic() :: emqx_types:topic().
-type clientinfo() :: emqx_types:clientinfo().
@ -106,12 +107,15 @@
-export_type([id/0]).
-define(PERSISTENT_MESSAGE_DB, emqx_persistent_message).
%%
-spec create(clientinfo(), conninfo(), emqx_session:conf()) ->
session().
create(#{clientid := ClientID}, _ConnInfo, Conf) ->
% TODO: expiration
ensure_timers(),
ensure_session(ClientID, Conf).
-spec open(clientinfo(), conninfo()) ->
@ -126,6 +130,7 @@ open(#{clientid := ClientID}, _ConnInfo) ->
ok = emqx_cm:discard_session(ClientID),
case open_session(ClientID) of
Session = #{} ->
ensure_timers(),
{true, Session, []};
false ->
false
@ -137,17 +142,17 @@ ensure_session(ClientID, Conf) ->
open_session(ClientID) ->
case session_open(ClientID) of
{ok, Session, Iterators} ->
Session#{iterators => prep_iterators(Iterators)};
{ok, Session, Subscriptions} ->
Session#{iterators => prep_subscriptions(Subscriptions)};
false ->
false
end.
prep_iterators(Iterators) ->
prep_subscriptions(Subscriptions) ->
maps:fold(
fun(Topic, Iterator, Acc) -> Acc#{emqx_topic:join(Topic) => Iterator} end,
fun(Topic, Subscription, Acc) -> Acc#{emqx_topic:join(Topic) => Subscription} end,
#{},
Iterators
Subscriptions
).
-spec destroy(session() | clientinfo()) -> ok.
@ -157,7 +162,6 @@ destroy(#{clientid := ClientID}) ->
destroy_session(ClientID).
destroy_session(ClientID) ->
_ = ensure_all_iterators_closed(ClientID),
session_drop(ClientID).
%%--------------------------------------------------------------------
@ -245,7 +249,7 @@ unsubscribe(
) when is_map_key(TopicFilter, Iters) ->
Iterator = maps:get(TopicFilter, Iters),
SubOpts = maps:get(props, Iterator),
ok = del_subscription(TopicFilter, Iterator, ID),
ok = del_subscription(TopicFilter, ID),
{ok, Session#{iterators := maps:remove(TopicFilter, Iters)}, SubOpts};
unsubscribe(
_TopicFilter,
@ -271,19 +275,29 @@ get_subscription(TopicFilter, #{iterators := Iters}) ->
{ok, emqx_types:publish_result(), replies(), session()}
| {error, emqx_types:reason_code()}.
publish(_PacketId, Msg, Session) ->
% TODO: stub
{ok, emqx_broker:publish(Msg), [], Session}.
%% TODO:
Result = emqx_broker:publish(Msg),
{ok, Result, [], Session}.
%%--------------------------------------------------------------------
%% Client -> Broker: PUBACK
%%--------------------------------------------------------------------
%% FIXME: parts of the commit offset function are mocked
-dialyzer({nowarn_function, puback/3}).
-spec puback(clientinfo(), emqx_types:packet_id(), session()) ->
{ok, emqx_types:message(), replies(), session()}
| {error, emqx_types:reason_code()}.
puback(_ClientInfo, _PacketId, _Session = #{}) ->
% TODO: stub
{error, ?RC_PACKET_IDENTIFIER_NOT_FOUND}.
puback(_ClientInfo, PacketId, Session = #{id := Id, inflight := Inflight0}) ->
case emqx_persistent_message_ds_replayer:commit_offset(Id, PacketId, Inflight0) of
{true, Inflight} ->
%% TODO
Msg = #message{},
{ok, Msg, [], Session#{inflight => Inflight}};
{false, _} ->
{error, ?RC_PACKET_IDENTIFIER_NOT_FOUND}
end.
%%--------------------------------------------------------------------
%% Client -> Broker: PUBREC
@ -320,10 +334,22 @@ pubcomp(_ClientInfo, _PacketId, _Session = #{}) ->
%%--------------------------------------------------------------------
-spec deliver(clientinfo(), [emqx_types:deliver()], session()) ->
no_return().
deliver(_ClientInfo, _Delivers, _Session = #{}) ->
% TODO: ensure it's unreachable somehow
error(unexpected).
{ok, replies(), session()}.
deliver(_ClientInfo, _Delivers, Session) ->
%% TODO: QoS0 and system messages end up here.
{ok, [], Session}.
%% Handle session maintenance timers:
%%   `pull'        — poll the replayer for up to `WindowSize' publishes
%%                   and re-arm the pull timer;
%%   `get_streams' — refresh the DS streams for this session's
%%                   subscriptions and re-arm the discovery timer.
-spec handle_timeout(clientinfo(), _Timeout, session()) ->
    {ok, replies(), session()} | {ok, replies(), timeout(), session()}.
handle_timeout(_ClientInfo, pull, Session0 = #{id := SessionId, inflight := Inflight0}) ->
    %% Fixed poll window; presumably a placeholder until it is made
    %% configurable — TODO confirm.
    WindowSize = 100,
    {Publishes, Inflight1} =
        emqx_persistent_message_ds_replayer:poll(SessionId, Inflight0, WindowSize),
    ensure_timer(pull),
    {ok, Publishes, Session0#{inflight => Inflight1}};
handle_timeout(_ClientInfo, get_streams, Session = #{id := SessionId}) ->
    renew_streams(SessionId),
    ensure_timer(get_streams),
    {ok, [], Session}.
-spec replay(clientinfo(), [], session()) ->
{ok, replies(), session()}.
@ -344,151 +370,69 @@ terminate(_Reason, _Session = #{}) ->
%%--------------------------------------------------------------------
-spec add_subscription(topic(), emqx_types:subopts(), id()) ->
emqx_ds:iterator().
subscription().
add_subscription(TopicFilterBin, SubOpts, DSSessionID) ->
% N.B.: we chose to update the router before adding the subscription to the
% session/iterator table. The reasoning for this is as follows:
%
% Messages matching this topic filter should start to be persisted as soon as
% possible to avoid missing messages. If this is the first such persistent
% session subscription, it's important to do so early on.
%
% This could, in turn, lead to some inconsistency: if such a route gets
% created but the session/iterator data fails to be updated accordingly, we
% have a dangling route. To remove such dangling routes, we may have a
% periodic GC process that removes routes that do not have a matching
% persistent subscription. Also, route operations use dirty mnesia
% operations, which inherently have room for inconsistencies.
%
% In practice, we use the iterator reference table as a source of truth,
% since it is guarded by a transaction context: we consider a subscription
% operation to be successful if it ended up changing this table. Both router
% and iterator information can be reconstructed from this table, if needed.
%% N.B.: we chose to update the router before adding the subscription to the
%% session/iterator table. The reasoning for this is as follows:
%%
%% Messages matching this topic filter should start to be persisted as soon as
%% possible to avoid missing messages. If this is the first such persistent
%% session subscription, it's important to do so early on.
%%
%% This could, in turn, lead to some inconsistency: if such a route gets
%% created but the session/iterator data fails to be updated accordingly, we
%% have a dangling route. To remove such dangling routes, we may have a
%% periodic GC process that removes routes that do not have a matching
%% persistent subscription. Also, route operations use dirty mnesia
%% operations, which inherently have room for inconsistencies.
%%
%% In practice, we use the iterator reference table as a source of truth,
%% since it is guarded by a transaction context: we consider a subscription
%% operation to be successful if it ended up changing this table. Both router
%% and iterator information can be reconstructed from this table, if needed.
ok = emqx_persistent_session_ds_router:do_add_route(TopicFilterBin, DSSessionID),
TopicFilter = emqx_topic:words(TopicFilterBin),
{ok, Iterator, IsNew} = session_add_iterator(
{ok, DSSubExt, IsNew} = session_add_subscription(
DSSessionID, TopicFilter, SubOpts
),
Ctx = #{iterator => Iterator, is_new => IsNew},
?tp(persistent_session_ds_iterator_added, Ctx),
?tp_span(
persistent_session_ds_open_iterators,
Ctx,
ok = open_iterator_on_all_shards(TopicFilter, Iterator)
),
Iterator.
?tp(persistent_session_ds_subscription_added, #{sub => DSSubExt, is_new => IsNew}),
%% we'll list streams and open iterators when implementing message replay.
DSSubExt.
-spec update_subscription(topic(), iterator(), emqx_types:subopts(), id()) ->
iterator().
update_subscription(TopicFilterBin, Iterator, SubOpts, DSSessionID) ->
-spec update_subscription(topic(), subscription(), emqx_types:subopts(), id()) ->
subscription().
update_subscription(TopicFilterBin, DSSubExt, SubOpts, DSSessionID) ->
TopicFilter = emqx_topic:words(TopicFilterBin),
{ok, NIterator, false} = session_add_iterator(
{ok, NDSSubExt, false} = session_add_subscription(
DSSessionID, TopicFilter, SubOpts
),
ok = ?tp(persistent_session_ds_iterator_updated, #{iterator => Iterator}),
NIterator.
ok = ?tp(persistent_session_ds_iterator_updated, #{sub => DSSubExt}),
NDSSubExt.
-spec open_iterator_on_all_shards(emqx_types:words(), emqx_ds:iterator()) -> ok.
open_iterator_on_all_shards(TopicFilter, Iterator) ->
?tp(persistent_session_ds_will_open_iterators, #{iterator => Iterator}),
%% Note: currently, shards map 1:1 to nodes, but this will change in the future.
Nodes = emqx:running_nodes(),
Results = emqx_persistent_session_ds_proto_v1:open_iterator(
Nodes,
TopicFilter,
maps:get(start_time, Iterator),
maps:get(id, Iterator)
),
%% TODO
%% 1. Handle errors.
%% 2. Iterator handles are rocksdb resources, it's doubtful they survive RPC.
%% Even if they do, we throw them away here anyway. All in all, we probably should
%% hold each of them in a process on the respective node.
true = lists:all(fun(Res) -> element(1, Res) =:= ok end, Results),
-spec del_subscription(topic(), id()) ->
ok.
%% RPC target.
-spec do_open_iterator(emqx_types:words(), emqx_ds:time(), emqx_ds:iterator_id()) ->
{ok, emqx_ds_storage_layer:iterator()} | {error, _Reason}.
do_open_iterator(TopicFilter, StartMS, IteratorID) ->
Replay = {TopicFilter, StartMS},
emqx_ds_storage_layer:ensure_iterator(?DS_SHARD, IteratorID, Replay).
-spec del_subscription(topic(), iterator(), id()) ->
ok.
del_subscription(TopicFilterBin, #{id := IteratorID}, DSSessionID) ->
% N.B.: see comments in `?MODULE:add_subscription' for a discussion about the
% order of operations here.
del_subscription(TopicFilterBin, DSSessionId) ->
TopicFilter = emqx_topic:words(TopicFilterBin),
Ctx = #{iterator_id => IteratorID},
?tp_span(
persistent_session_ds_close_iterators,
Ctx,
ok = ensure_iterator_closed_on_all_shards(IteratorID)
persistent_session_ds_subscription_delete,
#{session_id => DSSessionId},
ok = session_del_subscription(DSSessionId, TopicFilter)
),
?tp_span(
persistent_session_ds_iterator_delete,
Ctx,
session_del_iterator(DSSessionID, TopicFilter)
),
ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilterBin, DSSessionID).
-spec ensure_iterator_closed_on_all_shards(emqx_ds:iterator_id()) -> ok.
ensure_iterator_closed_on_all_shards(IteratorID) ->
%% Note: currently, shards map 1:1 to nodes, but this will change in the future.
Nodes = emqx:running_nodes(),
Results = emqx_persistent_session_ds_proto_v1:close_iterator(Nodes, IteratorID),
%% TODO: handle errors
true = lists:all(fun(Res) -> Res =:= {ok, ok} end, Results),
ok.
%% RPC target.
-spec do_ensure_iterator_closed(emqx_ds:iterator_id()) -> ok.
do_ensure_iterator_closed(IteratorID) ->
ok = emqx_ds_storage_layer:discard_iterator(?DS_SHARD, IteratorID),
ok.
-spec ensure_all_iterators_closed(id()) -> ok.
ensure_all_iterators_closed(DSSessionID) ->
%% Note: currently, shards map 1:1 to nodes, but this will change in the future.
Nodes = emqx:running_nodes(),
Results = emqx_persistent_session_ds_proto_v1:close_all_iterators(Nodes, DSSessionID),
%% TODO: handle errors
true = lists:all(fun(Res) -> Res =:= {ok, ok} end, Results),
ok.
%% RPC target.
-spec do_ensure_all_iterators_closed(id()) -> ok.
do_ensure_all_iterators_closed(DSSessionID) ->
ok = emqx_ds_storage_layer:discard_iterator_prefix(?DS_SHARD, DSSessionID),
ok.
persistent_session_ds_subscription_route_delete,
#{session_id => DSSessionId},
ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilterBin, DSSessionId)
).
%%--------------------------------------------------------------------
%% Session tables operations
%%--------------------------------------------------------------------
-define(SESSION_TAB, emqx_ds_session).
-define(ITERATOR_REF_TAB, emqx_ds_iterator_ref).
-define(DS_MRIA_SHARD, emqx_ds_shard).
-record(session, {
%% same as clientid
id :: id(),
%% creation time
created_at :: _Millisecond :: non_neg_integer(),
expires_at = never :: _Millisecond :: non_neg_integer() | never,
%% for future usage
props = #{} :: map()
}).
-record(iterator_ref, {
ref_id :: {id(), emqx_ds:topic_filter()},
it_id :: emqx_ds:iterator_id(),
start_time :: emqx_ds:time(),
props = #{} :: map()
}).
create_tables() ->
ok = emqx_ds:open_db(?PERSISTENT_MESSAGE_DB, #{
backend => builtin,
storage => {emqx_ds_storage_bitfield_lts, #{}}
}),
ok = mria:create_table(
?SESSION_TAB,
[
@ -500,15 +444,38 @@ create_tables() ->
]
),
ok = mria:create_table(
?ITERATOR_REF_TAB,
?SESSION_SUBSCRIPTIONS_TAB,
[
{rlog_shard, ?DS_MRIA_SHARD},
{type, ordered_set},
{storage, storage()},
{record_name, iterator_ref},
{attributes, record_info(fields, iterator_ref)}
{record_name, ds_sub},
{attributes, record_info(fields, ds_sub)}
]
),
ok = mria:create_table(
?SESSION_STREAM_TAB,
[
{rlog_shard, ?DS_MRIA_SHARD},
{type, bag},
{storage, storage()},
{record_name, ds_stream},
{attributes, record_info(fields, ds_stream)}
]
),
ok = mria:create_table(
?SESSION_ITER_TAB,
[
{rlog_shard, ?DS_MRIA_SHARD},
{type, set},
{storage, storage()},
{record_name, ds_iter},
{attributes, record_info(fields, ds_iter)}
]
),
ok = mria:wait_for_tables([
?SESSION_TAB, ?SESSION_SUBSCRIPTIONS_TAB, ?SESSION_STREAM_TAB, ?SESSION_ITER_TAB
]),
ok.
-dialyzer({nowarn_function, storage/0}).
@ -529,26 +496,26 @@ storage() ->
%% Note: session API doesn't handle session takeovers, it's the job of
%% the broker.
-spec session_open(id()) ->
{ok, session(), iterators()} | false.
{ok, session(), #{topic() => subscription()}} | false.
session_open(SessionId) ->
transaction(fun() ->
case mnesia:read(?SESSION_TAB, SessionId, write) of
[Record = #session{}] ->
Session = export_record(Record),
IteratorRefs = session_read_iterators(SessionId),
Iterators = export_iterators(IteratorRefs),
{ok, Session, Iterators};
Session = export_session(Record),
DSSubs = session_read_subscriptions(SessionId),
Subscriptions = export_subscriptions(DSSubs),
{ok, Session, Subscriptions};
[] ->
false
end
end).
-spec session_ensure_new(id(), _Props :: map()) ->
{ok, session(), iterators()}.
{ok, session(), #{topic() => subscription()}}.
session_ensure_new(SessionId, Props) ->
transaction(fun() ->
ok = session_drop_iterators(SessionId),
Session = export_record(session_create(SessionId, Props)),
ok = session_drop_subscriptions(SessionId),
Session = export_session(session_create(SessionId, Props)),
{ok, Session, #{}}
end).
@ -557,7 +524,8 @@ session_create(SessionId, Props) ->
id = SessionId,
created_at = erlang:system_time(millisecond),
expires_at = never,
props = Props
props = Props,
inflight = emqx_persistent_message_ds_replayer:new()
},
ok = mnesia:write(?SESSION_TAB, Session, write),
Session.
@ -568,80 +536,143 @@ session_create(SessionId, Props) ->
session_drop(DSSessionId) ->
transaction(fun() ->
%% TODO: ensure all iterators from this clientid are closed?
ok = session_drop_iterators(DSSessionId),
ok = session_drop_subscriptions(DSSessionId),
ok = mnesia:delete(?SESSION_TAB, DSSessionId, write)
end).
session_drop_iterators(DSSessionId) ->
IteratorRefs = session_read_iterators(DSSessionId),
ok = lists:foreach(fun session_del_iterator/1, IteratorRefs).
session_drop_subscriptions(DSSessionId) ->
IteratorRefs = session_read_subscriptions(DSSessionId),
ok = lists:foreach(fun session_del_subscription/1, IteratorRefs).
%% @doc Called when a client subscribes to a topic. Idempotent.
-spec session_add_iterator(id(), topic_filter(), _Props :: map()) ->
{ok, iterator(), _IsNew :: boolean()}.
session_add_iterator(DSSessionId, TopicFilter, Props) ->
IteratorRefId = {DSSessionId, TopicFilter},
-spec session_add_subscription(id(), topic_filter(), _Props :: map()) ->
{ok, subscription(), _IsNew :: boolean()}.
session_add_subscription(DSSessionId, TopicFilter, Props) ->
DSSubId = {DSSessionId, TopicFilter},
transaction(fun() ->
case mnesia:read(?ITERATOR_REF_TAB, IteratorRefId, write) of
case mnesia:read(?SESSION_SUBSCRIPTIONS_TAB, DSSubId, write) of
[] ->
IteratorRef = session_insert_iterator(DSSessionId, TopicFilter, Props),
Iterator = export_record(IteratorRef),
DSSub = session_insert_subscription(DSSessionId, TopicFilter, Props),
DSSubExt = export_subscription(DSSub),
?tp(
ds_session_subscription_added,
#{iterator => Iterator, session_id => DSSessionId}
#{sub => DSSubExt, session_id => DSSessionId}
),
{ok, Iterator, _IsNew = true};
[#iterator_ref{} = IteratorRef] ->
NIteratorRef = session_update_iterator(IteratorRef, Props),
NIterator = export_record(NIteratorRef),
{ok, DSSubExt, _IsNew = true};
[#ds_sub{} = DSSub] ->
NDSSub = session_update_subscription(DSSub, Props),
NDSSubExt = export_subscription(NDSSub),
?tp(
ds_session_subscription_present,
#{iterator => NIterator, session_id => DSSessionId}
#{sub => NDSSubExt, session_id => DSSessionId}
),
{ok, NIterator, _IsNew = false}
{ok, NDSSubExt, _IsNew = false}
end
end).
session_insert_iterator(DSSessionId, TopicFilter, Props) ->
{IteratorId, StartMS} = new_iterator_id(DSSessionId),
IteratorRef = #iterator_ref{
ref_id = {DSSessionId, TopicFilter},
it_id = IteratorId,
-spec session_insert_subscription(id(), topic_filter(), map()) -> ds_sub().
session_insert_subscription(DSSessionId, TopicFilter, Props) ->
{DSSubId, StartMS} = new_subscription_id(DSSessionId, TopicFilter),
DSSub = #ds_sub{
id = DSSubId,
start_time = StartMS,
props = Props
props = Props,
extra = #{}
},
ok = mnesia:write(?ITERATOR_REF_TAB, IteratorRef, write),
IteratorRef.
ok = mnesia:write(?SESSION_SUBSCRIPTIONS_TAB, DSSub, write),
DSSub.
session_update_iterator(IteratorRef, Props) ->
NIteratorRef = IteratorRef#iterator_ref{props = Props},
ok = mnesia:write(?ITERATOR_REF_TAB, NIteratorRef, write),
NIteratorRef.
-spec session_update_subscription(ds_sub(), map()) -> ds_sub().
session_update_subscription(DSSub, Props) ->
NDSSub = DSSub#ds_sub{props = Props},
ok = mnesia:write(?SESSION_SUBSCRIPTIONS_TAB, NDSSub, write),
NDSSub.
%% @doc Called when a client unsubscribes from a topic.
-spec session_del_iterator(id(), topic_filter()) -> ok.
session_del_iterator(DSSessionId, TopicFilter) ->
IteratorRefId = {DSSessionId, TopicFilter},
session_del_subscription(DSSessionId, TopicFilter) ->
DSSubId = {DSSessionId, TopicFilter},
transaction(fun() ->
mnesia:delete(?ITERATOR_REF_TAB, IteratorRefId, write)
mnesia:delete(?SESSION_SUBSCRIPTIONS_TAB, DSSubId, write)
end).
session_del_iterator(#iterator_ref{ref_id = IteratorRefId}) ->
mnesia:delete(?ITERATOR_REF_TAB, IteratorRefId, write).
session_del_subscription(#ds_sub{id = DSSubId}) ->
mnesia:delete(?SESSION_SUBSCRIPTIONS_TAB, DSSubId, write).
session_read_iterators(DSSessionId) ->
% NOTE: somewhat convoluted way to trick dialyzer
Pat = erlang:make_tuple(record_info(size, iterator_ref), '_', [
{1, iterator_ref},
{#iterator_ref.ref_id, {DSSessionId, '_'}}
]),
mnesia:match_object(?ITERATOR_REF_TAB, Pat, read).
%% Select every #ds_sub{} record belonging to `DSSessionId'; subscription
%% keys are {SessionId, TopicFilter} pairs, so we match on the first
%% element. Must run inside an mnesia transaction context.
session_read_subscriptions(DSSessionId) ->
    MS = ets:fun2ms(
        fun(Sub = #ds_sub{id = {Sess, _}}) when Sess =:= DSSessionId ->
            Sub
        end
    ),
    mnesia:select(?SESSION_SUBSCRIPTIONS_TAB, MS, read).
-spec new_iterator_id(id()) -> {iterator_id(), emqx_ds:time()}.
new_iterator_id(DSSessionId) ->
NowMS = erlang:system_time(microsecond),
IteratorId = <<DSSessionId/binary, (emqx_guid:gen())/binary>>,
{IteratorId, NowMS}.
%% Build the primary key for a new subscription — the pair
%% {SessionId, TopicFilter} — together with its start time.
-spec new_subscription_id(id(), topic_filter()) -> {subscription_id(), integer()}.
new_subscription_id(DSSessionId, TopicFilter) ->
    %% Note: milliseconds, to match the `timestamp' field of the
    %% `#message' record.
    StartTime = erlang:system_time(millisecond),
    {{DSSessionId, TopicFilter}, StartTime}.
%%--------------------------------------------------------------------
%% RPC targets (v1)
%%--------------------------------------------------------------------
%% RPC target.
-spec do_open_iterator(emqx_types:words(), emqx_ds:time(), emqx_ds:iterator_id()) ->
{ok, emqx_ds_storage_layer:iterator()} | {error, _Reason}.
do_open_iterator(_TopicFilter, _StartMS, _IteratorID) ->
{error, not_implemented}.
%% RPC target.
-spec do_ensure_iterator_closed(emqx_ds:iterator_id()) -> ok.
do_ensure_iterator_closed(_IteratorID) ->
ok.
%% RPC target.
-spec do_ensure_all_iterators_closed(id()) -> ok.
do_ensure_all_iterators_closed(_DSSessionID) ->
ok.
%%--------------------------------------------------------------------
%% Reading batches
%%--------------------------------------------------------------------
%% Refresh the set of replay streams for session `Id': list the session's
%% subscriptions and already-known streams, then discover and register any
%% new DS streams per subscription (see renew_streams/4).
renew_streams(Id) ->
    %% Read both tables in ONE read-only transaction so the subscription
    %% list and the stream list come from a consistent snapshot; two
    %% separate transactions could observe interleaved updates.
    {Subscriptions, ExistingStreams} = ro_transaction(
        fun() ->
            {session_read_subscriptions(Id), mnesia:read(?SESSION_STREAM_TAB, Id)}
        end
    ),
    lists:foreach(
        fun(#ds_sub{id = {_, TopicFilter}, start_time = StartTime}) ->
            renew_streams(Id, ExistingStreams, TopicFilter, StartTime)
        end,
        Subscriptions
    ).
%% Fetch all DS streams matching `TopicFilter' from `StartTime' and, in a
%% single transaction, register every stream not already known for this
%% session: persist a #ds_stream{} record plus a freshly made iterator
%% (#ds_iter{}, keyed by {SessionId, Stream}).
renew_streams(Id, ExistingStreams, TopicFilter, StartTime) ->
    AllStreams = emqx_ds:get_streams(?PERSISTENT_MESSAGE_DB, TopicFilter, StartTime),
    transaction(
        fun() ->
            lists:foreach(
                fun({Rank, Stream}) ->
                    Rec = #ds_stream{
                        session = Id,
                        topic_filter = TopicFilter,
                        stream = Stream,
                        rank = Rank
                    },
                    %% Only first-seen streams get a new iterator; known
                    %% streams keep their persisted replay position.
                    case lists:member(Rec, ExistingStreams) of
                        true ->
                            ok;
                        false ->
                            mnesia:write(?SESSION_STREAM_TAB, Rec, write),
                            {ok, Iterator} = emqx_ds:make_iterator(Stream, TopicFilter, StartTime),
                            IterRec = #ds_iter{id = {Id, Stream}, iter = Iterator},
                            mnesia:write(?SESSION_ITER_TAB, IterRec, write)
                    end
                end,
                AllStreams
            )
        end
    ).
%%--------------------------------------------------------------------------------
@ -649,23 +680,39 @@ transaction(Fun) ->
{atomic, Res} = mria:transaction(?DS_MRIA_SHARD, Fun),
Res.
%% Run `Fun' in a read-only mria transaction on the DS shard and return
%% its result; crashes (badmatch) if the transaction does not commit.
ro_transaction(Fun) ->
    {atomic, Res} = mria:ro_transaction(?DS_MRIA_SHARD, Fun),
    Res.
%%--------------------------------------------------------------------------------
export_iterators(IteratorRefs) ->
export_subscriptions(DSSubs) ->
lists:foldl(
fun(IteratorRef = #iterator_ref{ref_id = {_DSSessionId, TopicFilter}}, Acc) ->
Acc#{TopicFilter => export_record(IteratorRef)}
fun(DSSub = #ds_sub{id = {_DSSessionId, TopicFilter}}, Acc) ->
Acc#{TopicFilter => export_subscription(DSSub)}
end,
#{},
IteratorRefs
DSSubs
).
export_record(#session{} = Record) ->
export_record(Record, #session.id, [id, created_at, expires_at, props], #{});
export_record(#iterator_ref{} = Record) ->
export_record(Record, #iterator_ref.it_id, [id, start_time, props], #{}).
export_session(#session{} = Record) ->
export_record(Record, #session.id, [id, created_at, expires_at, inflight, props], #{}).
export_subscription(#ds_sub{} = Record) ->
export_record(Record, #ds_sub.start_time, [start_time, props, extra], #{}).
%% Export consecutive record elements, starting at element index `I', into
%% a map under the given field names, accumulating on top of `Acc0'.
export_record(Record, I, Fields, Acc0) ->
    {_, Acc} = lists:foldl(
        fun(Field, {Idx, M}) ->
            {Idx + 1, M#{Field => element(Idx, Record)}}
        end,
        {I, Acc0},
        Fields
    ),
    Acc.
%% TODO: find a more reliable way to perform actions that have side
%% effects. Add `CBM:init' callback to the session behavior?
%% Arm both session maintenance timers (message pull + stream discovery).
ensure_timers() ->
    ensure_timer(pull),
    ensure_timer(get_streams).

%% Start a one-shot 100 ms timer tagged `{emqx_session, Type}'; the
%% resulting timeout is presumably routed to handle_timeout/3 with
%% `Type' — confirm against the session dispatch code.
-spec ensure_timer(pull | get_streams) -> ok.
ensure_timer(Type) ->
    _ = emqx_utils:start_timer(100, {emqx_session, Type}),
    ok.

View File

@ -0,0 +1,56 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-ifndef(EMQX_PERSISTENT_SESSION_DS_HRL_HRL).
-define(EMQX_PERSISTENT_SESSION_DS_HRL_HRL, true).

%% Mria/mnesia tables backing persistent (durable-storage) sessions.
-define(SESSION_TAB, emqx_ds_session).
-define(SESSION_SUBSCRIPTIONS_TAB, emqx_ds_session_subscriptions).
-define(SESSION_STREAM_TAB, emqx_ds_stream_tab).
-define(SESSION_ITER_TAB, emqx_ds_iter_tab).
-define(DS_MRIA_SHARD, emqx_ds_session_shard).

%% One row per session subscription, keyed by {SessionId, TopicFilter}.
-record(ds_sub, {
    id :: emqx_persistent_session_ds:subscription_id(),
    %% Timestamp from which messages are replayed for this subscription
    start_time :: emqx_ds:time(),
    %% Subscription properties
    props = #{} :: map(),
    %% Room for future extensions
    extra = #{} :: map()
}).
-type ds_sub() :: #ds_sub{}.

%% A DS stream known to a session; many rows per session are possible.
-record(ds_stream, {
    session :: emqx_persistent_session_ds:id(),
    topic_filter :: emqx_ds:topic_filter(),
    stream :: emqx_ds:stream(),
    rank :: emqx_ds:stream_rank()
}).

%% Persisted replay position within a stream, keyed by {SessionId, Stream}.
-record(ds_iter, {
    id :: {emqx_persistent_session_ds:id(), emqx_ds:stream()},
    iter :: emqx_ds:iterator()
}).

%% Persistent session metadata.
-record(session, {
    %% same as clientid
    id :: emqx_persistent_session_ds:id(),
    %% creation time
    created_at :: _Millisecond :: non_neg_integer(),
    expires_at = never :: _Millisecond :: non_neg_integer() | never,
    %% In-flight (unacknowledged) delivery state
    inflight :: emqx_persistent_message_ds_replayer:inflight(),
    %% for future usage
    props = #{} :: map()
}).

-endif.

View File

@ -169,7 +169,11 @@
-export([namespace/0, roots/0, roots/1, fields/1, desc/1, tags/0]).
-export([conf_get/2, conf_get/3, keys/2, filter/1]).
-export([
server_ssl_opts_schema/2, client_ssl_opts_schema/1, ciphers_schema/1, tls_versions_schema/1
server_ssl_opts_schema/2,
client_ssl_opts_schema/1,
ciphers_schema/1,
tls_versions_schema/1,
description_schema/0
]).
-export([password_converter/2, bin_str_converter/2]).
-export([authz_fields/0]).
@ -3649,3 +3653,14 @@ default_mem_check_interval() ->
true -> <<"60s">>;
false -> disabled
end.
%% Reusable schema for free-form `description' config fields: an optional,
%% low-importance string defaulting to the empty binary.
description_schema() ->
    sc(
        string(),
        #{
            default => <<"">>,
            desc => ?DESC(description),
            required => false,
            importance => ?IMPORTANCE_LOW
        }
    ).

View File

@ -20,6 +20,7 @@
-export([
introduced_in/0,
deprecated_since/0,
open_iterator/4,
close_iterator/2,
@ -31,9 +32,11 @@
-define(TIMEOUT, 30_000).
introduced_in() ->
%% FIXME
"5.3.0".
deprecated_since() ->
"5.4.0".
-spec open_iterator(
[node()],
emqx_types:words(),

View File

@ -22,6 +22,8 @@
-export([
all/1,
matrix_to_groups/2,
group_path/1,
init_per_testcase/3,
end_per_testcase/3,
boot_modules/1,
@ -1375,3 +1377,83 @@ select_free_port(GenModule, Fun) when
end,
ct:pal("Select free OS port: ~p", [Port]),
Port.
%% Generate ct sub-groups from test-case's 'matrix' clause
%% NOTE: the test cases must have a root group name which
%% is unknown to this API.
%%
%% e.g.
%% all() -> [{group, g1}].
%%
%% groups() ->
%% emqx_common_test_helpers:groups(?MODULE, [case1, case2]).
%%
%% case1(matrix) ->
%% {g1, [[tcp, no_auth],
%% [ssl, no_auth],
%% [ssl, basic_auth]
%% ]};
%%
%% case2(matrix) ->
%% {g1, ...}
%% ...
%%
%% Return:
%%
%% [{g1, [],
%% [ {tcp, [], [{no_auth, [], [case1, case2]}
%% ]},
%% {ssl, [], [{no_auth, [], [case1, case2]},
%% {basic_auth, [], [case1, case2]}
%% ]}
%% ]
%% }
%% ]
%% Expand each test case's `matrix' clause into nested CT group
%% definitions; cases are merged right-to-left so list order is preserved.
matrix_to_groups(_Module, []) ->
    [];
matrix_to_groups(Module, [Case | Rest]) ->
    add_case_matrix(Module, Case, matrix_to_groups(Module, Rest)).
%% Merge one test case into the group tree: evaluate `Module:Case(matrix)'
%% and insert the case under the path `[RootGroup | Row]' for every row of
%% the matrix.
add_case_matrix(Module, Case, Acc0) ->
    {RootGroup, Matrix} = Module:Case(matrix),
    Paths = [[RootGroup | Row] || Row <- Matrix],
    lists:foldr(
        fun(Path, Acc) -> add_group(Path, Acc, Case) end,
        Acc0,
        Paths
    ).
%% Insert test case `Case' into the nested group structure along the path
%% of group names; at the end of the path, add the case once (idempotent).
add_group([], Cases, Case) ->
    case lists:member(Case, Cases) of
        true -> Cases;
        false -> [Case | Cases]
    end;
add_group([Name | Path], Groups, Case) ->
    case lists:keyfind(Name, 1, Groups) of
        false ->
            %% First time we see this group name: build the subtree fresh.
            [{Name, [], add_group(Path, [], Case)} | Groups];
        {Name, [], SubGroups} ->
            Updated = {Name, [], add_group(Path, SubGroups, Case)},
            lists:keystore(Name, 1, Groups, Updated)
    end.
%% Extract the CT group path (outermost group first) from a test case's
%% Config; returns [] on any malformed or missing group metadata
%% (deliberate best-effort).
group_path(Config) ->
    try
        Props = proplists:get_value(tc_group_properties, Config),
        Stack = proplists:get_value(tc_group_path, Config),
        Names = [
            begin
                {name, Name} = lists:keyfind(name, 1, PropList),
                Name
            end
         || PropList <- [Props | Stack]
        ],
        lists:reverse(Names)
    catch
        _:_ ->
            []
    end.

View File

@ -31,6 +31,7 @@
]).
-define(DEFAULT_APP_ID, <<"default_appid">>).
-define(DEFAULT_APP_KEY, <<"default_app_key">>).
-define(DEFAULT_APP_SECRET, <<"default_app_secret">>).
%% from emqx_dashboard/include/emqx_dashboard_rbac.hrl
@ -63,7 +64,7 @@ request_api(Method, Url, QueryParams, Auth, Body, HttpOpts) ->
do_request_api(Method, Request, HttpOpts).
do_request_api(Method, Request, HttpOpts) ->
ct:pal("Method: ~p, Request: ~p", [Method, Request]),
% ct:pal("Method: ~p, Request: ~p", [Method, Request]),
case httpc:request(Method, Request, HttpOpts, [{body_format, binary}]) of
{error, socket_closed_remotely} ->
{error, socket_closed_remotely};
@ -94,6 +95,7 @@ create_default_app() ->
ExpiredAt = Now + timer:minutes(10),
emqx_mgmt_auth:create(
?DEFAULT_APP_ID,
?DEFAULT_APP_KEY,
?DEFAULT_APP_SECRET,
true,
ExpiredAt,

View File

@ -941,10 +941,13 @@ t_revoked(Config) ->
{port, 8883}
]),
unlink(C),
?assertMatch(
{error, {ssl_error, _Sock, {tls_alert, {certificate_revoked, _}}}}, emqtt:connect(C)
),
ok.
case emqtt:connect(C) of
{error, {ssl_error, _Sock, {tls_alert, {certificate_revoked, _}}}} ->
ok;
{error, closed} ->
%% this happens due to an unidentified race-condition
ok
end.
t_revoke_then_refresh(Config) ->
DataDir = ?config(data_dir, Config),

View File

@ -26,9 +26,7 @@
-import(emqx_common_test_helpers, [on_exit/1]).
-define(DEFAULT_KEYSPACE, default).
-define(DS_SHARD_ID, <<"local">>).
-define(DS_SHARD, {?DEFAULT_KEYSPACE, ?DS_SHARD_ID}).
-define(PERSISTENT_MESSAGE_DB, emqx_persistent_message).
all() ->
emqx_common_test_helpers:all(?MODULE).
@ -48,6 +46,7 @@ init_per_testcase(t_session_subscription_iterators = TestCase, Config) ->
Nodes = emqx_cth_cluster:start(Cluster, #{work_dir => emqx_cth_suite:work_dir(TestCase, Config)}),
[{nodes, Nodes} | Config];
init_per_testcase(TestCase, Config) ->
ok = emqx_ds:drop_db(?PERSISTENT_MESSAGE_DB),
Apps = emqx_cth_suite:start(
app_specs(),
#{work_dir => emqx_cth_suite:work_dir(TestCase, Config)}
@ -58,10 +57,11 @@ end_per_testcase(t_session_subscription_iterators, Config) ->
Nodes = ?config(nodes, Config),
emqx_common_test_helpers:call_janitor(60_000),
ok = emqx_cth_cluster:stop(Nodes),
ok;
end_per_testcase(common, Config);
end_per_testcase(_TestCase, Config) ->
Apps = ?config(apps, Config),
Apps = proplists:get_value(apps, Config, []),
emqx_common_test_helpers:call_janitor(60_000),
clear_db(),
emqx_cth_suite:stop(Apps),
ok.
@ -95,14 +95,15 @@ t_messages_persisted(_Config) ->
Results = [emqtt:publish(CP, Topic, Payload, 1) || {Topic, Payload} <- Messages],
ct:pal("Results = ~p", [Results]),
timer:sleep(2000),
Persisted = consume(?DS_SHARD, {['#'], 0}),
Persisted = consume(['#'], 0),
ct:pal("Persisted = ~p", [Persisted]),
?assertEqual(
[M1, M2, M5, M7, M9, M10],
[{emqx_message:topic(M), emqx_message:payload(M)} || M <- Persisted]
lists:sort([M1, M2, M5, M7, M9, M10]),
lists:sort([{emqx_message:topic(M), emqx_message:payload(M)} || M <- Persisted])
),
ok.
@ -139,23 +140,25 @@ t_messages_persisted_2(_Config) ->
{ok, #{reason_code := ?RC_NO_MATCHING_SUBSCRIBERS}} =
emqtt:publish(CP, T(<<"client/2/topic">>), <<"8">>, 1),
Persisted = consume(?DS_SHARD, {['#'], 0}),
timer:sleep(2000),
Persisted = consume(['#'], 0),
ct:pal("Persisted = ~p", [Persisted]),
?assertEqual(
[
lists:sort([
{T(<<"client/1/topic">>), <<"4">>},
{T(<<"client/2/topic">>), <<"5">>}
],
[{emqx_message:topic(M), emqx_message:payload(M)} || M <- Persisted]
]),
lists:sort([{emqx_message:topic(M), emqx_message:payload(M)} || M <- Persisted])
),
ok.
%% TODO: test quic and ws too
t_session_subscription_iterators(Config) ->
[Node1, Node2] = ?config(nodes, Config),
[Node1, _Node2] = ?config(nodes, Config),
Port = get_mqtt_port(Node1, tcp),
Topic = <<"t/topic">>,
SubTopicFilter = <<"t/+">>,
@ -202,11 +205,8 @@ t_session_subscription_iterators(Config) ->
messages => [Message1, Message2, Message3, Message4]
}
end,
fun(Results, Trace) ->
fun(Trace) ->
ct:pal("trace:\n ~p", [Trace]),
#{
messages := [_Message1, Message2, Message3 | _]
} = Results,
case ?of_kind(ds_session_subscription_added, Trace) of
[] ->
%% Since `emqx_durable_storage' is a dependency of `emqx', it gets
@ -228,17 +228,6 @@ t_session_subscription_iterators(Config) ->
),
ok
end,
?assertMatch({ok, [_]}, get_all_iterator_ids(Node1)),
{ok, [IteratorId]} = get_all_iterator_ids(Node1),
?assertMatch({ok, [IteratorId]}, get_all_iterator_ids(Node2)),
ReplayMessages1 = erpc:call(Node1, fun() -> consume(?DS_SHARD, IteratorId) end),
ExpectedMessages = [Message2, Message3],
%% Note: it is expected that this will break after replayers are in place.
%% They might have consumed all the messages by this time.
?assertEqual(ExpectedMessages, ReplayMessages1),
%% Different DS shard
ReplayMessages2 = erpc:call(Node2, fun() -> consume(?DS_SHARD, IteratorId) end),
?assertEqual([], ReplayMessages2),
ok
end
),
@ -263,33 +252,26 @@ connect(Opts0 = #{}) ->
{ok, _} = emqtt:connect(Client),
Client.
consume(Shard, Replay = {_TopicFiler, _StartMS}) ->
{ok, It} = emqx_ds_storage_layer:make_iterator(Shard, Replay),
consume(It);
consume(Shard, IteratorId) when is_binary(IteratorId) ->
{ok, It} = emqx_ds_storage_layer:restore_iterator(Shard, IteratorId),
consume(It).
consume(TopicFilter, StartMS) ->
Streams = emqx_ds:get_streams(?PERSISTENT_MESSAGE_DB, TopicFilter, StartMS),
lists:flatmap(
fun({_Rank, Stream}) ->
{ok, It} = emqx_ds:make_iterator(Stream, TopicFilter, StartMS),
consume(It)
end,
Streams
).
consume(It) ->
case emqx_ds_storage_layer:next(It) of
{value, Msg, NIt} ->
[emqx_persistent_message:deserialize(Msg) | consume(NIt)];
none ->
case emqx_ds:next(It, 100) of
{ok, _NIt, _Msgs = []} ->
[];
{ok, NIt, Msgs} ->
Msgs ++ consume(NIt);
{ok, end_of_stream} ->
[]
end.
delete_all_messages() ->
Persisted = consume(?DS_SHARD, {['#'], 0}),
lists:foreach(
fun(Msg) ->
GUID = emqx_message:id(Msg),
Topic = emqx_topic:words(emqx_message:topic(Msg)),
Timestamp = emqx_guid:timestamp(GUID),
ok = emqx_ds_storage_layer:delete(?DS_SHARD, GUID, Timestamp, Topic)
end,
Persisted
).
receive_messages(Count) ->
receive_messages(Count, []).
@ -306,13 +288,6 @@ receive_messages(Count, Msgs) ->
publish(Node, Message) ->
erpc:call(Node, emqx, publish, [Message]).
get_iterator_ids(Node, ClientId) ->
Channel = erpc:call(Node, fun() ->
[ConnPid] = emqx_cm:lookup_channels(ClientId),
sys:get_state(ConnPid)
end),
emqx_connection:info({channel, {session, iterators}}, Channel).
app_specs() ->
[
emqx_durable_storage,
@ -330,5 +305,6 @@ get_mqtt_port(Node, Type) ->
{_IP, Port} = erpc:call(Node, emqx_config, get, [[listeners, Type, default, bind]]),
Port.
get_all_iterator_ids(Node) ->
erpc:call(Node, emqx_ds_storage_layer, list_iterator_prefix, [?DS_SHARD, <<>>]).
clear_db() ->
ok = emqx_ds:drop_db(?PERSISTENT_MESSAGE_DB),
ok.

View File

@ -24,6 +24,8 @@
-compile(export_all).
-compile(nowarn_export_all).
-define(PERSISTENT_MESSAGE_DB, emqx_persistent_message).
%%--------------------------------------------------------------------
%% SUITE boilerplate
%%--------------------------------------------------------------------
@ -131,6 +133,7 @@ get_listener_port(Type, Name) ->
end_per_group(Group, Config) when Group == tcp; Group == ws; Group == quic ->
ok = emqx_cth_suite:stop(?config(group_apps, Config));
end_per_group(_, _Config) ->
ok = emqx_ds:drop_db(?PERSISTENT_MESSAGE_DB),
ok.
init_per_testcase(TestCase, Config) ->
@ -188,7 +191,7 @@ receive_messages(Count, Msgs) ->
receive_messages(Count - 1, [Msg | Msgs]);
_Other ->
receive_messages(Count, Msgs)
after 5000 ->
after 15000 ->
Msgs
end.
@ -227,11 +230,11 @@ wait_for_cm_unregister(ClientId, N) ->
end.
publish(Topic, Payloads) ->
publish(Topic, Payloads, false).
publish(Topic, Payloads, false, 2).
publish(Topic, Payloads, WaitForUnregister) ->
publish(Topic, Payloads, WaitForUnregister, QoS) ->
Fun = fun(Client, Payload) ->
{ok, _} = emqtt:publish(Client, Topic, Payload, 2)
{ok, _} = emqtt:publish(Client, Topic, Payload, QoS)
end,
do_publish(Payloads, Fun, WaitForUnregister).
@ -510,6 +513,48 @@ t_process_dies_session_expires(Config) ->
emqtt:disconnect(Client2).
t_publish_while_client_is_gone_qos1(Config) ->
%% A persistent session should receive messages in its
%% subscription even if the process owning the session dies.
ConnFun = ?config(conn_fun, Config),
Topic = ?config(topic, Config),
STopic = ?config(stopic, Config),
Payload1 = <<"hello1">>,
Payload2 = <<"hello2">>,
ClientId = ?config(client_id, Config),
{ok, Client1} = emqtt:start_link([
{proto_ver, v5},
{clientid, ClientId},
{properties, #{'Session-Expiry-Interval' => 30}},
{clean_start, true}
| Config
]),
{ok, _} = emqtt:ConnFun(Client1),
{ok, _, [1]} = emqtt:subscribe(Client1, STopic, qos1),
ok = emqtt:disconnect(Client1),
maybe_kill_connection_process(ClientId, Config),
ok = publish(Topic, [Payload1, Payload2], false, 1),
{ok, Client2} = emqtt:start_link([
{proto_ver, v5},
{clientid, ClientId},
{properties, #{'Session-Expiry-Interval' => 30}},
{clean_start, false}
| Config
]),
{ok, _} = emqtt:ConnFun(Client2),
Msgs = receive_messages(2),
?assertMatch([_, _], Msgs),
[Msg2, Msg1] = Msgs,
?assertEqual({ok, iolist_to_binary(Payload1)}, maps:find(payload, Msg1)),
?assertEqual({ok, 1}, maps:find(qos, Msg1)),
?assertEqual({ok, iolist_to_binary(Payload2)}, maps:find(payload, Msg2)),
?assertEqual({ok, 1}, maps:find(qos, Msg2)),
ok = emqtt:disconnect(Client2).
t_publish_while_client_is_gone(init, Config) -> skip_ds_tc(Config);
t_publish_while_client_is_gone('end', _Config) -> ok.
t_publish_while_client_is_gone(Config) ->

View File

@ -92,7 +92,7 @@ fields(pbkdf2) ->
)},
{iterations,
sc(
integer(),
pos_integer(),
#{required => true, desc => "Iteration count for PBKDF2 hashing algorithm."}
)},
{dk_length, fun dk_length/1}

View File

@ -18,6 +18,7 @@
-include_lib("emqx/include/emqx_placeholder.hrl").
-include_lib("emqx_authn.hrl").
-include_lib("snabbkaffe/include/trace.hrl").
-export([
create_resource/3,
@ -44,13 +45,13 @@
default_headers_no_content_type/0
]).
-define(AUTHN_PLACEHOLDERS, [
?PH_USERNAME,
?PH_CLIENTID,
?PH_PASSWORD,
?PH_PEERHOST,
?PH_CERT_SUBJECT,
?PH_CERT_CN_NAME
-define(ALLOWED_VARS, [
?VAR_USERNAME,
?VAR_CLIENTID,
?VAR_PASSWORD,
?VAR_PEERHOST,
?VAR_CERT_SUBJECT,
?VAR_CERT_CN_NAME
]).
-define(DEFAULT_RESOURCE_OPTS, #{
@ -107,48 +108,96 @@ check_password_from_selected_map(Algorithm, Selected, Password) ->
end.
parse_deep(Template) ->
emqx_placeholder:preproc_tmpl_deep(Template, #{placeholders => ?AUTHN_PLACEHOLDERS}).
Result = emqx_template:parse_deep(Template),
handle_disallowed_placeholders(Result, {deep, Template}).
parse_str(Template) ->
emqx_placeholder:preproc_tmpl(Template, #{placeholders => ?AUTHN_PLACEHOLDERS}).
Result = emqx_template:parse(Template),
handle_disallowed_placeholders(Result, {string, Template}).
parse_sql(Template, ReplaceWith) ->
emqx_placeholder:preproc_sql(
{Statement, Result} = emqx_template_sql:parse_prepstmt(
Template,
#{
replace_with => ReplaceWith,
placeholders => ?AUTHN_PLACEHOLDERS,
strip_double_quote => true
}
).
#{parameters => ReplaceWith, strip_double_quote => true}
),
{Statement, handle_disallowed_placeholders(Result, {string, Template})}.
handle_disallowed_placeholders(Template, Source) ->
case emqx_template:validate(?ALLOWED_VARS, Template) of
ok ->
Template;
{error, Disallowed} ->
?tp(warning, "authn_template_invalid", #{
template => Source,
reason => Disallowed,
allowed => #{placeholders => ?ALLOWED_VARS},
notice =>
"Disallowed placeholders will be rendered as is."
" However, consider using `${$}` escaping for literal `$` where"
" needed to avoid unexpected results."
}),
Result = prerender_disallowed_placeholders(Template),
case Source of
{string, _} ->
emqx_template:parse(Result);
{deep, _} ->
emqx_template:parse_deep(Result)
end
end.
prerender_disallowed_placeholders(Template) ->
{Result, _} = emqx_template:render(Template, #{}, #{
var_trans => fun(Name, _) ->
% NOTE
% Rendering disallowed placeholders in escaped form, which will then
% parse as a literal string.
case lists:member(Name, ?ALLOWED_VARS) of
true -> "${" ++ Name ++ "}";
false -> "${$}{" ++ Name ++ "}"
end
end
}),
Result.
render_deep(Template, Credential) ->
emqx_placeholder:proc_tmpl_deep(
% NOTE
% Ignoring errors here, undefined bindings will be replaced with empty string.
{Term, _Errors} = emqx_template:render(
Template,
mapping_credential(Credential),
#{return => full_binary, var_trans => fun handle_var/2}
).
#{var_trans => fun to_string/2}
),
Term.
render_str(Template, Credential) ->
emqx_placeholder:proc_tmpl(
% NOTE
% Ignoring errors here, undefined bindings will be replaced with empty string.
{String, _Errors} = emqx_template:render(
Template,
mapping_credential(Credential),
#{return => full_binary, var_trans => fun handle_var/2}
).
#{var_trans => fun to_string/2}
),
unicode:characters_to_binary(String).
render_urlencoded_str(Template, Credential) ->
emqx_placeholder:proc_tmpl(
% NOTE
% Ignoring errors here, undefined bindings will be replaced with empty string.
{String, _Errors} = emqx_template:render(
Template,
mapping_credential(Credential),
#{return => full_binary, var_trans => fun urlencode_var/2}
).
#{var_trans => fun to_urlencoded_string/2}
),
unicode:characters_to_binary(String).
render_sql_params(ParamList, Credential) ->
emqx_placeholder:proc_tmpl(
% NOTE
% Ignoring errors here, undefined bindings will be replaced with empty string.
{Row, _Errors} = emqx_template:render(
ParamList,
mapping_credential(Credential),
#{return => rawlist, var_trans => fun handle_sql_var/2}
).
#{var_trans => fun to_sql_valaue/2}
),
Row.
is_superuser(#{<<"is_superuser">> := Value}) ->
#{is_superuser => to_bool(Value)};
@ -269,22 +318,24 @@ without_password(Credential, [Name | Rest]) ->
without_password(Credential, Rest)
end.
urlencode_var(Var, Value) ->
emqx_http_lib:uri_encode(handle_var(Var, Value)).
to_urlencoded_string(Name, Value) ->
emqx_http_lib:uri_encode(to_string(Name, Value)).
handle_var(_Name, undefined) ->
<<>>;
handle_var([<<"peerhost">>], PeerHost) ->
emqx_placeholder:bin(inet:ntoa(PeerHost));
handle_var(_, Value) ->
emqx_placeholder:bin(Value).
to_string(Name, Value) ->
emqx_template:to_string(render_var(Name, Value)).
handle_sql_var(_Name, undefined) ->
to_sql_valaue(Name, Value) ->
emqx_utils_sql:to_sql_value(render_var(Name, Value)).
render_var(_, undefined) ->
% NOTE
% Any allowed but undefined binding will be replaced with empty string, even when
% rendering SQL values.
<<>>;
handle_sql_var([<<"peerhost">>], PeerHost) ->
emqx_placeholder:bin(inet:ntoa(PeerHost));
handle_sql_var(_, Value) ->
emqx_placeholder:sql_data(Value).
render_var(?VAR_PEERHOST, Value) ->
inet:ntoa(Value);
render_var(_Name, Value) ->
Value.
mapping_credential(C = #{cn := CN, dn := DN}) ->
C#{cert_common_name => CN, cert_subject => DN};

View File

@ -49,6 +49,8 @@
aggregate_metrics/1
]).
-export([with_source/2]).
-define(TAGS, [<<"Authorization">>]).
api_spec() ->

View File

@ -183,19 +183,14 @@ compile_topic(<<"eq ", Topic/binary>>) ->
compile_topic({eq, Topic}) ->
{eq, emqx_topic:words(bin(Topic))};
compile_topic(Topic) ->
TopicBin = bin(Topic),
case
emqx_placeholder:preproc_tmpl(
TopicBin,
#{placeholders => [?PH_USERNAME, ?PH_CLIENTID]}
)
of
[{str, _}] -> emqx_topic:words(TopicBin);
Tokens -> {pattern, Tokens}
Template = emqx_authz_utils:parse_str(Topic, [?VAR_USERNAME, ?VAR_CLIENTID]),
case emqx_template:is_const(Template) of
true -> emqx_topic:words(bin(Topic));
false -> {pattern, Template}
end.
bin(L) when is_list(L) ->
list_to_binary(L);
unicode:characters_to_binary(L);
bin(B) when is_binary(B) ->
B.
@ -307,7 +302,7 @@ match_who(_, _) ->
match_topics(_ClientInfo, _Topic, []) ->
false;
match_topics(ClientInfo, Topic, [{pattern, PatternFilter} | Filters]) ->
TopicFilter = emqx_placeholder:proc_tmpl(PatternFilter, ClientInfo),
TopicFilter = bin(emqx_template:render_strict(PatternFilter, ClientInfo)),
match_topic(emqx_topic:words(Topic), emqx_topic:words(TopicFilter)) orelse
match_topics(ClientInfo, Topic, Filters);
match_topics(ClientInfo, Topic, [TopicFilter | Filters]) ->

View File

@ -16,7 +16,9 @@
-module(emqx_authz_utils).
-include_lib("emqx/include/emqx_placeholder.hrl").
-include_lib("emqx_authz.hrl").
-include_lib("snabbkaffe/include/trace.hrl").
-export([
cleanup_resources/0,
@ -108,48 +110,97 @@ update_config(Path, ConfigRequest) ->
}).
parse_deep(Template, PlaceHolders) ->
emqx_placeholder:preproc_tmpl_deep(Template, #{placeholders => PlaceHolders}).
Result = emqx_template:parse_deep(Template),
handle_disallowed_placeholders(Result, {deep, Template}, PlaceHolders).
parse_str(Template, PlaceHolders) ->
emqx_placeholder:preproc_tmpl(Template, #{placeholders => PlaceHolders}).
Result = emqx_template:parse(Template),
handle_disallowed_placeholders(Result, {string, Template}, PlaceHolders).
parse_sql(Template, ReplaceWith, PlaceHolders) ->
emqx_placeholder:preproc_sql(
{Statement, Result} = emqx_template_sql:parse_prepstmt(
Template,
#{
replace_with => ReplaceWith,
placeholders => PlaceHolders,
strip_double_quote => true
}
).
#{parameters => ReplaceWith, strip_double_quote => true}
),
FResult = handle_disallowed_placeholders(Result, {string, Template}, PlaceHolders),
{Statement, FResult}.
handle_disallowed_placeholders(Template, Source, Allowed) ->
case emqx_template:validate(Allowed, Template) of
ok ->
Template;
{error, Disallowed} ->
?tp(warning, "authz_template_invalid", #{
template => Source,
reason => Disallowed,
allowed => #{placeholders => Allowed},
notice =>
"Disallowed placeholders will be rendered as is."
" However, consider using `${$}` escaping for literal `$` where"
" needed to avoid unexpected results."
}),
Result = prerender_disallowed_placeholders(Template, Allowed),
case Source of
{string, _} ->
emqx_template:parse(Result);
{deep, _} ->
emqx_template:parse_deep(Result)
end
end.
prerender_disallowed_placeholders(Template, Allowed) ->
{Result, _} = emqx_template:render(Template, #{}, #{
var_trans => fun(Name, _) ->
% NOTE
% Rendering disallowed placeholders in escaped form, which will then
% parse as a literal string.
case lists:member(Name, Allowed) of
true -> "${" ++ Name ++ "}";
false -> "${$}{" ++ Name ++ "}"
end
end
}),
Result.
render_deep(Template, Values) ->
emqx_placeholder:proc_tmpl_deep(
% NOTE
% Ignoring errors here, undefined bindings will be replaced with empty string.
{Term, _Errors} = emqx_template:render(
Template,
client_vars(Values),
#{return => full_binary, var_trans => fun handle_var/2}
).
#{var_trans => fun to_string/2}
),
Term.
render_str(Template, Values) ->
emqx_placeholder:proc_tmpl(
% NOTE
% Ignoring errors here, undefined bindings will be replaced with empty string.
{String, _Errors} = emqx_template:render(
Template,
client_vars(Values),
#{return => full_binary, var_trans => fun handle_var/2}
).
#{var_trans => fun to_string/2}
),
unicode:characters_to_binary(String).
render_urlencoded_str(Template, Values) ->
emqx_placeholder:proc_tmpl(
% NOTE
% Ignoring errors here, undefined bindings will be replaced with empty string.
{String, _Errors} = emqx_template:render(
Template,
client_vars(Values),
#{return => full_binary, var_trans => fun urlencode_var/2}
).
#{var_trans => fun to_urlencoded_string/2}
),
unicode:characters_to_binary(String).
render_sql_params(ParamList, Values) ->
emqx_placeholder:proc_tmpl(
% NOTE
% Ignoring errors here, undefined bindings will be replaced with empty string.
{Row, _Errors} = emqx_template:render(
ParamList,
client_vars(Values),
#{return => rawlist, var_trans => fun handle_sql_var/2}
).
#{var_trans => fun to_sql_value/2}
),
Row.
-spec parse_http_resp_body(binary(), binary()) -> allow | deny | ignore | error.
parse_http_resp_body(<<"application/x-www-form-urlencoded", _/binary>>, Body) ->
@ -215,22 +266,24 @@ convert_client_var({dn, DN}) -> {cert_subject, DN};
convert_client_var({protocol, Proto}) -> {proto_name, Proto};
convert_client_var(Other) -> Other.
urlencode_var(Var, Value) ->
emqx_http_lib:uri_encode(handle_var(Var, Value)).
to_urlencoded_string(Name, Value) ->
emqx_http_lib:uri_encode(to_string(Name, Value)).
handle_var(_Name, undefined) ->
<<>>;
handle_var([<<"peerhost">>], IpAddr) ->
inet_parse:ntoa(IpAddr);
handle_var(_Name, Value) ->
emqx_placeholder:bin(Value).
to_string(Name, Value) ->
emqx_template:to_string(render_var(Name, Value)).
handle_sql_var(_Name, undefined) ->
to_sql_value(Name, Value) ->
emqx_utils_sql:to_sql_value(render_var(Name, Value)).
render_var(_, undefined) ->
% NOTE
% Any allowed but undefined binding will be replaced with empty string, even when
% rendering SQL values.
<<>>;
handle_sql_var([<<"peerhost">>], IpAddr) ->
inet_parse:ntoa(IpAddr);
handle_sql_var(_Name, Value) ->
emqx_placeholder:sql_data(Value).
render_var(?VAR_PEERHOST, Value) ->
inet:ntoa(Value);
render_var(_Name, Value) ->
Value.
bin(A) when is_atom(A) -> atom_to_binary(A, utf8);
bin(L) when is_list(L) -> list_to_binary(L);

View File

@ -185,3 +185,29 @@ hash_examples() ->
}
}
].
t_pbkdf2_schema(_Config) ->
Config = fun(Iterations) ->
#{
<<"pbkdf2">> => #{
<<"name">> => <<"pbkdf2">>,
<<"mac_fun">> => <<"sha">>,
<<"iterations">> => Iterations
}
}
end,
?assertException(
throw,
{emqx_authn_password_hashing, _},
hocon_tconf:check_plain(emqx_authn_password_hashing, Config(0), #{}, [pbkdf2])
),
?assertException(
throw,
{emqx_authn_password_hashing, _},
hocon_tconf:check_plain(emqx_authn_password_hashing, Config(-1), #{}, [pbkdf2])
),
?assertMatch(
#{<<"pbkdf2">> := _},
hocon_tconf:check_plain(emqx_authn_password_hashing, Config(1), #{}, [pbkdf2])
).

View File

@ -67,6 +67,10 @@ set_special_configs(_App) ->
ok.
t_compile(_) ->
% NOTE
% Some of the following testcase are relying on the internal representation of
% `emqx_template:t()`. If the internal representation is changed, these testcases
% may fail.
?assertEqual({deny, all, all, [['#']]}, emqx_authz_rule:compile({deny, all})),
?assertEqual(
@ -74,13 +78,13 @@ t_compile(_) ->
emqx_authz_rule:compile({allow, {ipaddr, "127.0.0.1"}, all, [{eq, "#"}, {eq, "+"}]})
),
?assertEqual(
?assertMatch(
{allow,
{ipaddrs, [
{{127, 0, 0, 1}, {127, 0, 0, 1}, 32},
{{192, 168, 1, 0}, {192, 168, 1, 255}, 24}
]},
subscribe, [{pattern, [{var, [<<"clientid">>]}]}]},
subscribe, [{pattern, [{var, "clientid", [_]}]}]},
emqx_authz_rule:compile(
{allow, {ipaddrs, ["127.0.0.1", "192.168.1.0/24"]}, subscribe, [?PH_S_CLIENTID]}
)
@ -102,7 +106,7 @@ t_compile(_) ->
{clientid, {re_pattern, _, _, _, _}}
]},
publish, [
{pattern, [{var, [<<"username">>]}]}, {pattern, [{var, [<<"clientid">>]}]}
{pattern, [{var, "username", [_]}]}, {pattern, [{var, "clientid", [_]}]}
]},
emqx_authz_rule:compile(
{allow,
@ -114,9 +118,9 @@ t_compile(_) ->
)
),
?assertEqual(
?assertMatch(
{allow, {username, {eq, <<"test">>}}, publish, [
{pattern, [{str, <<"t/foo">>}, {var, [<<"username">>]}, {str, <<"boo">>}]}
{pattern, [<<"t/foo">>, {var, "username", [_]}, <<"boo">>]}
]},
emqx_authz_rule:compile({allow, {username, "test"}, publish, ["t/foo${username}boo"]})
),

View File

@ -38,21 +38,21 @@
-compile(nowarn_export_all).
-endif.
-define(PLACEHOLDERS, [
?PH_USERNAME,
?PH_CLIENTID,
?PH_PEERHOST,
?PH_PROTONAME,
?PH_MOUNTPOINT,
?PH_TOPIC,
?PH_ACTION,
?PH_CERT_SUBJECT,
?PH_CERT_CN_NAME
-define(ALLOWED_VARS, [
?VAR_USERNAME,
?VAR_CLIENTID,
?VAR_PEERHOST,
?VAR_PROTONAME,
?VAR_MOUNTPOINT,
?VAR_TOPIC,
?VAR_ACTION,
?VAR_CERT_SUBJECT,
?VAR_CERT_CN_NAME
]).
-define(PLACEHOLDERS_FOR_RICH_ACTIONS, [
?PH_QOS,
?PH_RETAIN
-define(ALLOWED_VARS_RICH_ACTIONS, [
?VAR_QOS,
?VAR_RETAIN
]).
description() ->
@ -157,14 +157,14 @@ parse_config(
method => Method,
base_url => BaseUrl,
headers => Headers,
base_path_templete => emqx_authz_utils:parse_str(Path, placeholders()),
base_path_templete => emqx_authz_utils:parse_str(Path, allowed_vars()),
base_query_template => emqx_authz_utils:parse_deep(
cow_qs:parse_qs(to_bin(Query)),
placeholders()
allowed_vars()
),
body_template => emqx_authz_utils:parse_deep(
maps:to_list(maps:get(body, Conf, #{})),
placeholders()
allowed_vars()
),
request_timeout => ReqTimeout,
%% pool_type default value `random`
@ -260,10 +260,10 @@ to_bin(B) when is_binary(B) -> B;
to_bin(L) when is_list(L) -> list_to_binary(L);
to_bin(X) -> X.
placeholders() ->
placeholders(emqx_authz:feature_available(rich_actions)).
allowed_vars() ->
allowed_vars(emqx_authz:feature_available(rich_actions)).
placeholders(true) ->
?PLACEHOLDERS ++ ?PLACEHOLDERS_FOR_RICH_ACTIONS;
placeholders(false) ->
?PLACEHOLDERS.
allowed_vars(true) ->
?ALLOWED_VARS ++ ?ALLOWED_VARS_RICH_ACTIONS;
allowed_vars(false) ->
?ALLOWED_VARS.

View File

@ -27,7 +27,7 @@
-define(PATH, [?CONF_NS_ATOM]).
-define(HTTP_PORT, 32333).
-define(HTTP_PATH, "/auth").
-define(HTTP_PATH, "/auth/[...]").
-define(CREDENTIALS, #{
clientid => <<"clienta">>,
username => <<"plain">>,
@ -146,8 +146,12 @@ t_authenticate(_Config) ->
test_user_auth(#{
handler := Handler,
config_params := SpecificConfgParams,
result := Result
result := Expect
}) ->
Result = perform_user_auth(SpecificConfgParams, Handler, ?CREDENTIALS),
?assertEqual(Expect, Result).
perform_user_auth(SpecificConfgParams, Handler, Credentials) ->
AuthConfig = maps:merge(raw_http_auth_config(), SpecificConfgParams),
{ok, _} = emqx:update_config(
@ -157,21 +161,21 @@ test_user_auth(#{
ok = emqx_authn_http_test_server:set_handler(Handler),
?assertEqual(Result, emqx_access_control:authenticate(?CREDENTIALS)),
Result = emqx_access_control:authenticate(Credentials),
emqx_authn_test_lib:delete_authenticators(
[authentication],
?GLOBAL
).
),
Result.
t_authenticate_path_placeholders(_Config) ->
ok = emqx_authn_http_test_server:stop(),
{ok, _} = emqx_authn_http_test_server:start_link(?HTTP_PORT, <<"/[...]">>),
ok = emqx_authn_http_test_server:set_handler(
fun(Req0, State) ->
Req =
case cowboy_req:path(Req0) of
<<"/my/p%20ath//us%20er/auth//">> ->
<<"/auth/p%20ath//us%20er/auth//">> ->
cowboy_req:reply(
200,
#{<<"content-type">> => <<"application/json">>},
@ -193,7 +197,7 @@ t_authenticate_path_placeholders(_Config) ->
AuthConfig = maps:merge(
raw_http_auth_config(),
#{
<<"url">> => <<"http://127.0.0.1:32333/my/p%20ath//${username}/auth//">>,
<<"url">> => <<"http://127.0.0.1:32333/auth/p%20ath//${username}/auth//">>,
<<"body">> => #{}
}
),
@ -255,6 +259,39 @@ t_no_value_for_placeholder(_Config) ->
?GLOBAL
).
t_disallowed_placeholders_preserved(_Config) ->
Config = #{
<<"method">> => <<"post">>,
<<"headers">> => #{<<"content-type">> => <<"application/json">>},
<<"body">> => #{
<<"username">> => ?PH_USERNAME,
<<"password">> => ?PH_PASSWORD,
<<"this">> => <<"${whatisthis}">>
}
},
Handler = fun(Req0, State) ->
{ok, Body, Req1} = cowboy_req:read_body(Req0),
#{
<<"username">> := <<"plain">>,
<<"password">> := <<"plain">>,
<<"this">> := <<"${whatisthis}">>
} = emqx_utils_json:decode(Body),
Req = cowboy_req:reply(
200,
#{<<"content-type">> => <<"application/json">>},
emqx_utils_json:encode(#{result => allow, is_superuser => false}),
Req1
),
{ok, Req, State}
end,
?assertMatch({ok, _}, perform_user_auth(Config, Handler, ?CREDENTIALS)),
% NOTE: disallowed placeholder left intact, which makes the URL invalid
ConfigUrl = Config#{
<<"url">> => <<"http://127.0.0.1:32333/auth/${whatisthis}">>
},
?assertMatch({error, _}, perform_user_auth(ConfigUrl, Handler, ?CREDENTIALS)).
t_destroy(_Config) ->
AuthConfig = raw_http_auth_config(),

View File

@ -494,6 +494,67 @@ t_no_value_for_placeholder(_Config) ->
emqx_access_control:authorize(ClientInfo, ?AUTHZ_PUBLISH, <<"t">>)
).
t_disallowed_placeholders_preserved(_Config) ->
ok = setup_handler_and_config(
fun(Req0, State) ->
{ok, Body, Req1} = cowboy_req:read_body(Req0),
?assertMatch(
#{
<<"cname">> := <<>>,
<<"usertypo">> := <<"${usertypo}">>
},
emqx_utils_json:decode(Body)
),
{ok, ?AUTHZ_HTTP_RESP(allow, Req1), State}
end,
#{
<<"method">> => <<"post">>,
<<"body">> => #{
<<"cname">> => ?PH_CERT_CN_NAME,
<<"usertypo">> => <<"${usertypo}">>
}
}
),
ClientInfo = #{
clientid => <<"client id">>,
username => <<"user name">>,
peerhost => {127, 0, 0, 1},
protocol => <<"MQTT">>,
zone => default,
listener => {tcp, default}
},
?assertEqual(
allow,
emqx_access_control:authorize(ClientInfo, ?AUTHZ_PUBLISH, <<"t">>)
).
t_disallowed_placeholders_path(_Config) ->
ok = setup_handler_and_config(
fun(Req, State) ->
{ok, ?AUTHZ_HTTP_RESP(allow, Req), State}
end,
#{
<<"url">> => <<"http://127.0.0.1:33333/authz/use%20rs/${typo}">>
}
),
ClientInfo = #{
clientid => <<"client id">>,
username => <<"user name">>,
peerhost => {127, 0, 0, 1},
protocol => <<"MQTT">>,
zone => default,
listener => {tcp, default}
},
% % NOTE: disallowed placeholder left intact, which makes the URL invalid
?assertEqual(
deny,
emqx_access_control:authorize(ClientInfo, ?AUTHZ_PUBLISH, <<"t">>)
).
t_create_replace(_Config) ->
ClientInfo = #{
clientid => <<"clientid">>,

View File

@ -50,7 +50,7 @@
%% Internal exports (RPC)
-export([
do_destroy/1,
do_add_user/2,
do_add_user/1,
do_delete_user/2,
do_update_user/3,
import/2,
@ -187,24 +187,22 @@ import_users({Filename0, FileData}, State) ->
{error, {unsupported_file_format, Extension}}
end.
add_user(UserInfo, State) ->
trans(fun ?MODULE:do_add_user/2, [UserInfo, State]).
add_user(
UserInfo,
State
) ->
UserInfoRecord = user_info_record(UserInfo, State),
trans(fun ?MODULE:do_add_user/1, [UserInfoRecord]).
do_add_user(
#{
user_id := UserID,
password := Password
} = UserInfo,
#{
user_group := UserGroup,
password_hash_algorithm := Algorithm
}
#user_info{
user_id = {_UserGroup, UserID} = DBUserID,
is_superuser = IsSuperuser
} = UserInfoRecord
) ->
case mnesia:read(?TAB, {UserGroup, UserID}, write) of
case mnesia:read(?TAB, DBUserID, write) of
[] ->
{PasswordHash, Salt} = emqx_authn_password_hashing:hash(Algorithm, Password),
IsSuperuser = maps:get(is_superuser, UserInfo, false),
insert_user(UserGroup, UserID, PasswordHash, Salt, IsSuperuser),
insert_user(UserInfoRecord),
{ok, #{user_id => UserID, is_superuser => IsSuperuser}};
[_] ->
{error, already_exist}
@ -222,38 +220,30 @@ do_delete_user(UserID, #{user_group := UserGroup}) ->
end.
update_user(UserID, UserInfo, State) ->
trans(fun ?MODULE:do_update_user/3, [UserID, UserInfo, State]).
FieldsToUpdate = fields_to_update(
UserInfo,
[
hash_and_salt,
is_superuser
],
State
),
trans(fun ?MODULE:do_update_user/3, [UserID, FieldsToUpdate, State]).
do_update_user(
UserID,
UserInfo,
FieldsToUpdate,
#{
user_group := UserGroup,
password_hash_algorithm := Algorithm
user_group := UserGroup
}
) ->
case mnesia:read(?TAB, {UserGroup, UserID}, write) of
[] ->
{error, not_found};
[
#user_info{
password_hash = PasswordHash,
salt = Salt,
is_superuser = IsSuperuser
}
] ->
NSuperuser = maps:get(is_superuser, UserInfo, IsSuperuser),
{NPasswordHash, NSalt} =
case UserInfo of
#{password := Password} ->
emqx_authn_password_hashing:hash(
Algorithm, Password
);
#{} ->
{PasswordHash, Salt}
end,
insert_user(UserGroup, UserID, NPasswordHash, NSalt, NSuperuser),
{ok, #{user_id => UserID, is_superuser => NSuperuser}}
[#user_info{} = UserInfoRecord] ->
NUserInfoRecord = update_user_record(UserInfoRecord, FieldsToUpdate),
insert_user(NUserInfoRecord),
{ok, #{user_id => UserID, is_superuser => NUserInfoRecord#user_info.is_superuser}}
end.
lookup_user(UserID, #{user_group := UserGroup}) ->
@ -391,13 +381,59 @@ get_user_info_by_seq(_, _, _) ->
{error, bad_format}.
insert_user(UserGroup, UserID, PasswordHash, Salt, IsSuperuser) ->
UserInfo = #user_info{
UserInfoRecord = user_info_record(UserGroup, UserID, PasswordHash, Salt, IsSuperuser),
insert_user(UserInfoRecord).
insert_user(#user_info{} = UserInfoRecord) ->
mnesia:write(?TAB, UserInfoRecord, write).
user_info_record(UserGroup, UserID, PasswordHash, Salt, IsSuperuser) ->
#user_info{
user_id = {UserGroup, UserID},
password_hash = PasswordHash,
salt = Salt,
is_superuser = IsSuperuser
},
mnesia:write(?TAB, UserInfo, write).
}.
user_info_record(
#{
user_id := UserID,
password := Password
} = UserInfo,
#{
password_hash_algorithm := Algorithm,
user_group := UserGroup
} = _State
) ->
IsSuperuser = maps:get(is_superuser, UserInfo, false),
{PasswordHash, Salt} = emqx_authn_password_hashing:hash(Algorithm, Password),
user_info_record(UserGroup, UserID, PasswordHash, Salt, IsSuperuser).
fields_to_update(
#{password := Password} = UserInfo,
[hash_and_salt | Rest],
#{password_hash_algorithm := Algorithm} = State
) ->
[
{hash_and_salt,
emqx_authn_password_hashing:hash(
Algorithm, Password
)}
| fields_to_update(UserInfo, Rest, State)
];
fields_to_update(#{is_superuser := IsSuperuser} = UserInfo, [is_superuser | Rest], State) ->
[{is_superuser, IsSuperuser} | fields_to_update(UserInfo, Rest, State)];
fields_to_update(UserInfo, [_ | Rest], State) ->
fields_to_update(UserInfo, Rest, State);
fields_to_update(_UserInfo, [], _State) ->
[].
update_user_record(UserInfoRecord, []) ->
UserInfoRecord;
update_user_record(UserInfoRecord, [{hash_and_salt, {PasswordHash, Salt}} | Rest]) ->
update_user_record(UserInfoRecord#user_info{password_hash = PasswordHash, salt = Salt}, Rest);
update_user_record(UserInfoRecord, [{is_superuser, IsSuperuser} | Rest]) ->
update_user_record(UserInfoRecord#user_info{is_superuser = IsSuperuser}, Rest).
%% TODO: Support other type
get_user_identity(#{username := Username}, username) ->

View File

@ -51,7 +51,7 @@
%% Internal exports (RPC)
-export([
do_destroy/1,
do_add_user/2,
do_add_user/1,
do_delete_user/2,
do_update_user/3
]).
@ -157,19 +157,15 @@ do_destroy(UserGroup) ->
).
add_user(UserInfo, State) ->
trans(fun ?MODULE:do_add_user/2, [UserInfo, State]).
UserInfoRecord = user_info_record(UserInfo, State),
trans(fun ?MODULE:do_add_user/1, [UserInfoRecord]).
do_add_user(
#{
user_id := UserID,
password := Password
} = UserInfo,
#{user_group := UserGroup} = State
#user_info{user_id = {UserID, _} = DBUserID, is_superuser = IsSuperuser} = UserInfoRecord
) ->
case mnesia:read(?TAB, {UserGroup, UserID}, write) of
case mnesia:read(?TAB, DBUserID, write) of
[] ->
IsSuperuser = maps:get(is_superuser, UserInfo, false),
add_user(UserGroup, UserID, Password, IsSuperuser, State),
mnesia:write(?TAB, UserInfoRecord, write),
{ok, #{user_id => UserID, is_superuser => IsSuperuser}};
[_] ->
{error, already_exist}
@ -187,36 +183,28 @@ do_delete_user(UserID, #{user_group := UserGroup}) ->
end.
update_user(UserID, User, State) ->
trans(fun ?MODULE:do_update_user/3, [UserID, User, State]).
FieldsToUpdate = fields_to_update(
User,
[
keys_and_salt,
is_superuser
],
State
),
trans(fun ?MODULE:do_update_user/3, [UserID, FieldsToUpdate, State]).
do_update_user(
UserID,
User,
#{user_group := UserGroup} = State
FieldsToUpdate,
#{user_group := UserGroup} = _State
) ->
case mnesia:read(?TAB, {UserGroup, UserID}, write) of
[] ->
{error, not_found};
[#user_info{is_superuser = IsSuperuser} = UserInfo] ->
UserInfo1 = UserInfo#user_info{
is_superuser = maps:get(is_superuser, User, IsSuperuser)
},
UserInfo2 =
case maps:get(password, User, undefined) of
undefined ->
UserInfo1;
Password ->
{StoredKey, ServerKey, Salt} = esasl_scram:generate_authentication_info(
Password, State
),
UserInfo1#user_info{
stored_key = StoredKey,
server_key = ServerKey,
salt = Salt
}
end,
mnesia:write(?TAB, UserInfo2, write),
{ok, format_user_info(UserInfo2)}
[#user_info{} = UserInfo0] ->
UserInfo1 = update_user_record(UserInfo0, FieldsToUpdate),
mnesia:write(?TAB, UserInfo1, write),
{ok, format_user_info(UserInfo1)}
end.
lookup_user(UserID, #{user_group := UserGroup}) ->
@ -315,19 +303,56 @@ check_client_final_message(Bin, #{is_superuser := IsSuperuser} = Cache, #{algori
{error, not_authorized}
end.
add_user(UserGroup, UserID, Password, IsSuperuser, State) ->
{StoredKey, ServerKey, Salt} = esasl_scram:generate_authentication_info(Password, State),
write_user(UserGroup, UserID, StoredKey, ServerKey, Salt, IsSuperuser).
user_info_record(
#{
user_id := UserID,
password := Password
} = UserInfo,
#{user_group := UserGroup} = State
) ->
IsSuperuser = maps:get(is_superuser, UserInfo, false),
user_info_record(UserGroup, UserID, Password, IsSuperuser, State).
write_user(UserGroup, UserID, StoredKey, ServerKey, Salt, IsSuperuser) ->
UserInfo = #user_info{
user_info_record(UserGroup, UserID, Password, IsSuperuser, State) ->
{StoredKey, ServerKey, Salt} = esasl_scram:generate_authentication_info(Password, State),
#user_info{
user_id = {UserGroup, UserID},
stored_key = StoredKey,
server_key = ServerKey,
salt = Salt,
is_superuser = IsSuperuser
},
mnesia:write(?TAB, UserInfo, write).
}.
%% Translate the user-supplied update map into an ordered list of
%% {Field, Value} pairs, honoring the order of the requested Fields.
%% A field whose source key is absent from the update map is skipped.
fields_to_update(UserInfo, Fields, State) ->
    lists:filtermap(fun(Field) -> field_to_update(Field, UserInfo, State) end, Fields).

%% keys_and_salt is derived from a freshly supplied password (if any).
field_to_update(keys_and_salt, #{password := Password}, State) ->
    {StoredKey, ServerKey, Salt} = esasl_scram:generate_authentication_info(Password, State),
    {true, {keys_and_salt, {StoredKey, ServerKey, Salt}}};
field_to_update(is_superuser, #{is_superuser := IsSuperuser}, _State) ->
    {true, {is_superuser, IsSuperuser}};
field_to_update(_Field, _UserInfo, _State) ->
    false.
%% Apply a list of {Field, Value} updates (as produced by
%% fields_to_update/3) to a #user_info{} record, left to right.
update_user_record(UserInfo, Updates) ->
    lists:foldl(fun apply_user_update/2, UserInfo, Updates).

%% Each update overwrites the corresponding record field(s).
apply_user_update({keys_and_salt, {StoredKey, ServerKey, Salt}}, UserInfo) ->
    UserInfo#user_info{
        stored_key = StoredKey,
        server_key = ServerKey,
        salt = Salt
    };
apply_user_update({is_superuser, IsSuperuser}, UserInfo) ->
    UserInfo#user_info{is_superuser = IsSuperuser}.
retrieve(UserID, #{user_group := UserGroup}) ->
case mnesia:dirty_read(?TAB, {UserGroup, UserID}) of

View File

@ -18,6 +18,7 @@
-behaviour(minirest_api).
-include("emqx_auth_mnesia.hrl").
-include_lib("emqx_auth/include/emqx_authz.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("hocon/include/hoconsc.hrl").
@ -55,6 +56,9 @@
format_result/1
]).
%% minirest filter callback
-export([is_configured_authz_source/2]).
-define(BAD_REQUEST, 'BAD_REQUEST').
-define(NOT_FOUND, 'NOT_FOUND').
-define(ALREADY_EXISTS, 'ALREADY_EXISTS').
@ -85,6 +89,7 @@ paths() ->
schema("/authorization/sources/built_in_database/rules/users") ->
#{
'operationId' => users,
filter => fun ?MODULE:is_configured_authz_source/2,
get =>
#{
tags => [<<"authorization">>],
@ -131,6 +136,7 @@ schema("/authorization/sources/built_in_database/rules/users") ->
schema("/authorization/sources/built_in_database/rules/clients") ->
#{
'operationId' => clients,
filter => fun ?MODULE:is_configured_authz_source/2,
get =>
#{
tags => [<<"authorization">>],
@ -177,6 +183,7 @@ schema("/authorization/sources/built_in_database/rules/clients") ->
schema("/authorization/sources/built_in_database/rules/users/:username") ->
#{
'operationId' => user,
filter => fun ?MODULE:is_configured_authz_source/2,
get =>
#{
tags => [<<"authorization">>],
@ -230,6 +237,7 @@ schema("/authorization/sources/built_in_database/rules/users/:username") ->
schema("/authorization/sources/built_in_database/rules/clients/:clientid") ->
#{
'operationId' => client,
filter => fun ?MODULE:is_configured_authz_source/2,
get =>
#{
tags => [<<"authorization">>],
@ -283,6 +291,7 @@ schema("/authorization/sources/built_in_database/rules/clients/:clientid") ->
schema("/authorization/sources/built_in_database/rules/all") ->
#{
'operationId' => all,
filter => fun ?MODULE:is_configured_authz_source/2,
get =>
#{
tags => [<<"authorization">>],
@ -317,6 +326,7 @@ schema("/authorization/sources/built_in_database/rules/all") ->
schema("/authorization/sources/built_in_database/rules") ->
#{
'operationId' => rules,
filter => fun ?MODULE:is_configured_authz_source/2,
delete =>
#{
tags => [<<"authorization">>],
@ -426,6 +436,14 @@ fields(rules) ->
%% HTTP API
%%--------------------------------------------------------------------
%% Minirest filter callback: lets the request through only when the
%% built_in_database authz source is configured; otherwise
%% with_source/2 produces the error response.
is_configured_authz_source(Params, _Meta) ->
    Continue = fun(_Source) -> {ok, Params} end,
    emqx_authz_api_sources:with_source(?AUTHZ_TYPE_BIN, Continue).
users(get, #{query_string := QueryString}) ->
case
emqx_mgmt_api:node_query(
@ -440,7 +458,9 @@ users(get, #{query_string := QueryString}) ->
{error, page_limit_invalid} ->
{400, #{code => <<"INVALID_PARAMETER">>, message => <<"page_limit_invalid">>}};
{error, Node, Error} ->
Message = list_to_binary(io_lib:format("bad rpc call ~p, Reason ~p", [Node, Error])),
Message = list_to_binary(
io_lib:format("bad rpc call ~p, Reason ~p", [Node, Error])
),
{500, #{code => <<"NODE_DOWN">>, message => Message}};
Result ->
{200, Result}
@ -476,7 +496,9 @@ clients(get, #{query_string := QueryString}) ->
{error, page_limit_invalid} ->
{400, #{code => <<"INVALID_PARAMETER">>, message => <<"page_limit_invalid">>}};
{error, Node, Error} ->
Message = list_to_binary(io_lib:format("bad rpc call ~p, Reason ~p", [Node, Error])),
Message = list_to_binary(
io_lib:format("bad rpc call ~p, Reason ~p", [Node, Error])
),
{500, #{code => <<"NODE_DOWN">>, message => Message}};
Result ->
{200, Result}

View File

@ -314,6 +314,74 @@ t_update_user(_) ->
{ok, #{is_superuser := true}} = emqx_authn_scram_mnesia:lookup_user(<<"u">>, State).
%% Verifies that updating a user's password via update_user/3
%% re-derives the SCRAM keys/salt, so the client can afterwards
%% authenticate with the new password over MQTT v5 enhanced auth.
t_update_user_keys(_Config) ->
    Algorithm = sha512,
    Username = <<"u">>,
    Password = <<"p">>,
    %% Create the user with a different password first ...
    init_auth(Username, <<"badpass">>, Algorithm),
    {ok, [#{state := State}]} = emqx_authn_chains:list_authenticators(?GLOBAL),
    %% ... then update it; this must regenerate stored_key/server_key/salt.
    emqx_authn_scram_mnesia:update_user(
        Username,
        #{password => Password},
        State
    ),
    ok = emqx_config:put([mqtt, idle_timeout], 500),
    {ok, Pid} = emqx_authn_mqtt_test_client:start_link("127.0.0.1", 1883),
    %% SCRAM round 1: client-first message carried in CONNECT properties.
    ClientFirstMessage = esasl_scram:client_first_message(Username),
    ConnectPacket = ?CONNECT_PACKET(
        #mqtt_packet_connect{
            proto_ver = ?MQTT_PROTO_V5,
            properties = #{
                'Authentication-Method' => <<"SCRAM-SHA-512">>,
                'Authentication-Data' => ClientFirstMessage
            }
        }
    ),
    ok = emqx_authn_mqtt_test_client:send(Pid, ConnectPacket),
    %% Broker answers with the server-first message in an AUTH packet.
    ?AUTH_PACKET(
        ?RC_CONTINUE_AUTHENTICATION,
        #{'Authentication-Data' := ServerFirstMessage}
    ) = receive_packet(),
    {continue, ClientFinalMessage, ClientCache} =
        esasl_scram:check_server_first_message(
            ServerFirstMessage,
            #{
                client_first_message => ClientFirstMessage,
                password => Password,
                algorithm => Algorithm
            }
        ),
    %% SCRAM round 2: send client-final message; expect CONNACK success.
    AuthContinuePacket = ?AUTH_PACKET(
        ?RC_CONTINUE_AUTHENTICATION,
        #{
            'Authentication-Method' => <<"SCRAM-SHA-512">>,
            'Authentication-Data' => ClientFinalMessage
        }
    ),
    ok = emqx_authn_mqtt_test_client:send(Pid, AuthContinuePacket),
    ?CONNACK_PACKET(
        ?RC_SUCCESS,
        _,
        #{'Authentication-Data' := ServerFinalMessage}
    ) = receive_packet(),
    %% Finally verify the server signature against the client cache.
    ok = esasl_scram:check_server_final_message(
        ServerFinalMessage, ClientCache#{algorithm => Algorithm}
    ).
t_list_users(_) ->
Config = config(),
{ok, State} = emqx_authn_scram_mnesia:create(<<"id">>, Config),

View File

@ -331,4 +331,163 @@ t_api(_) ->
[]
),
?assertEqual(0, emqx_authz_mnesia:record_count()),
Examples = make_examples(emqx_authz_api_mnesia),
?assertEqual(
14,
length(Examples)
),
Fixtures1 = fun() ->
{ok, _, _} =
request(
delete,
uri(["authorization", "sources", "built_in_database", "rules", "all"]),
[]
),
{ok, _, _} =
request(
delete,
uri(["authorization", "sources", "built_in_database", "rules", "users"]),
[]
),
{ok, _, _} =
request(
delete,
uri(["authorization", "sources", "built_in_database", "rules", "clients"]),
[]
)
end,
run_examples(Examples, Fixtures1),
Fixtures2 = fun() ->
%% disable/remove built_in_database
{ok, 204, _} =
request(
delete,
uri(["authorization", "sources", "built_in_database"]),
[]
)
end,
run_examples(404, Examples, Fixtures2),
ok.
%% test helpers
-define(REPLACEMENTS, #{
":clientid" => <<"client1">>,
":username" => <<"user1">>
}).
%% Run all example requests expecting every response to be a 2xx.
run_examples(Examples) ->
    IsSuccess = fun
        ({ok, Code, _}) when Code >= 200, Code =< 299 ->
            true;
        (Res) ->
            ct:pal("check failed: ~p", [Res]),
            false
    end,
    run_examples(IsSuccess, Examples).
%% With a fixture fun as second argument: run the fixtures (side
%% effects only), then run the examples expecting 2xx responses.
run_examples(Examples, Fixtures) when is_function(Fixtures) ->
    _ = Fixtures(),
    run_examples(Examples);
%% With a check fun as first argument: issue each example request and
%% assert that the check fun accepts the response.
run_examples(Check, Examples) when is_function(Check) ->
    lists:foreach(
        fun({Path, Op, Body} = Req) ->
            ct:pal("req: ~p", [Req]),
            Res = request(Op, uri(Path), Body),
            ?assert(Check(Res))
        end,
        Examples
    );
%% With a numeric first argument: expect exactly that status code for
%% every example request.
run_examples(Code, Examples) when is_number(Code) ->
    ExpectStatus = fun
        ({ok, Status, _}) when Status =:= Code ->
            true;
        (Res) ->
            ct:pal("check failed: ~p", [Res]),
            false
    end,
    run_examples(ExpectStatus, Examples).
%% Run the fixtures (setup side effects) first, then the examples with
%% the given expected status code or check fun.
run_examples(CodeOrCheck, Examples, Fixtures) when is_function(Fixtures) ->
    Fixtures(),
    run_examples(CodeOrCheck, Examples).
%% @doc Build the list of example requests for all paths of ApiMod,
%% substituting path parameters with the default ?REPLACEMENTS.
%% Returns one {PathParts, Operation, RequestBody} triple per
%% operation, as produced by make_examples/2.
%% NOTE: the original -spec was placed after the function definition
%% and described a [{Path, [{Op, Body}]}] shape that make_examples/2
%% never produces; fixed to the actual flat triple list.
-spec make_examples(atom()) -> [{[string() | binary()], atom(), term()}].
make_examples(ApiMod) ->
    make_examples(ApiMod, ?REPLACEMENTS).
%% Expand every path of ApiMod's OpenAPI-style schema into concrete
%% example requests: for each operation (get/put/post/delete) take the
%% first request-body example from the schema (or [] when the
%% operation has no body) and substitute path parameters using
%% Replacements.
make_examples(ApiMod, Replacements) ->
    Paths = ApiMod:paths(),
    lists:flatten(
        lists:map(
            fun(Path) ->
                Schema = ApiMod:schema(Path),
                lists:map(
                    fun({Op, OpSchema}) ->
                        Body =
                            case maps:get('requestBody', OpSchema, undefined) of
                                undefined ->
                                    %% operation takes no request body
                                    [];
                                HoconWithExamples ->
                                    %% pick the value of the first
                                    %% example entry in the schema
                                    maps:get(
                                        value,
                                        hd(
                                            maps:values(
                                                maps:get(
                                                    <<"examples">>,
                                                    maps:get(examples, HoconWithExamples)
                                                )
                                            )
                                        )
                                    )
                            end,
                        {replace_parts(to_parts(Path), Replacements), Op, Body}
                    end,
                    %% order ops so that create/update run before read/delete
                    lists:sort(
                        fun op_sort/2, maps:to_list(maps:with([get, put, post, delete], Schema))
                    )
                )
            end,
            Paths
        )
    ).
%% Sort predicate ("less-or-equal") for HTTP operations so that
%% examples run in a sensible order: post/put first, then get, then
%% delete. post and put compare as equal.
op_sort({Left, _}, {Right, _}) ->
    Left =:= post orelse
        Left =:= put orelse
        (Left =:= get andalso Right =:= delete);
op_sort(_, _) ->
    false.
%% Split an HTTP path into its segments, dropping empty segments
%% (leading/trailing/duplicated slashes).
to_parts(Path) ->
    string:lexemes(Path, "/").
%% @doc Substitute path-parameter placeholders (e.g. ":clientid") in a
%% list of path segments with their concrete values from Replacements.
%% Segments with no replacement are kept as-is.
replace_parts(Parts, Replacements) ->
    %% maps:get/3 with the segment itself as the default replaces the
    %% original maps:is_key/2 + maps:get/2 double lookup.
    [maps:get(Part, Replacements, Part) || Part <- Parts].

View File

@ -35,12 +35,12 @@
-compile(nowarn_export_all).
-endif.
-define(PLACEHOLDERS, [
?PH_USERNAME,
?PH_CLIENTID,
?PH_PEERHOST,
?PH_CERT_CN_NAME,
?PH_CERT_SUBJECT
-define(ALLOWED_VARS, [
?VAR_USERNAME,
?VAR_CLIENTID,
?VAR_PEERHOST,
?VAR_CERT_CN_NAME,
?VAR_CERT_SUBJECT
]).
description() ->
@ -49,11 +49,11 @@ description() ->
create(#{filter := Filter} = Source) ->
ResourceId = emqx_authz_utils:make_resource_id(?MODULE),
{ok, _Data} = emqx_authz_utils:create_resource(ResourceId, emqx_mongodb, Source),
FilterTemp = emqx_authz_utils:parse_deep(Filter, ?PLACEHOLDERS),
FilterTemp = emqx_authz_utils:parse_deep(Filter, ?ALLOWED_VARS),
Source#{annotations => #{id => ResourceId}, filter_template => FilterTemp}.
update(#{filter := Filter} = Source) ->
FilterTemp = emqx_authz_utils:parse_deep(Filter, ?PLACEHOLDERS),
FilterTemp = emqx_authz_utils:parse_deep(Filter, ?ALLOWED_VARS),
case emqx_authz_utils:update_resource(emqx_mongodb, Source) of
{error, Reason} ->
error({load_config_error, Reason});

View File

@ -37,26 +37,26 @@
-compile(nowarn_export_all).
-endif.
-define(PLACEHOLDERS, [
?PH_USERNAME,
?PH_CLIENTID,
?PH_PEERHOST,
?PH_CERT_CN_NAME,
?PH_CERT_SUBJECT
-define(ALLOWED_VARS, [
?VAR_USERNAME,
?VAR_CLIENTID,
?VAR_PEERHOST,
?VAR_CERT_CN_NAME,
?VAR_CERT_SUBJECT
]).
description() ->
"AuthZ with Mysql".
create(#{query := SQL} = Source0) ->
{PrepareSQL, TmplToken} = emqx_authz_utils:parse_sql(SQL, '?', ?PLACEHOLDERS),
{PrepareSQL, TmplToken} = emqx_authz_utils:parse_sql(SQL, '?', ?ALLOWED_VARS),
ResourceId = emqx_authz_utils:make_resource_id(?MODULE),
Source = Source0#{prepare_statement => #{?PREPARE_KEY => PrepareSQL}},
{ok, _Data} = emqx_authz_utils:create_resource(ResourceId, emqx_mysql, Source),
Source#{annotations => #{id => ResourceId, tmpl_token => TmplToken}}.
update(#{query := SQL} = Source0) ->
{PrepareSQL, TmplToken} = emqx_authz_utils:parse_sql(SQL, '?', ?PLACEHOLDERS),
{PrepareSQL, TmplToken} = emqx_authz_utils:parse_sql(SQL, '?', ?ALLOWED_VARS),
Source = Source0#{prepare_statement => #{?PREPARE_KEY => PrepareSQL}},
case emqx_authz_utils:update_resource(emqx_mysql, Source) of
{error, Reason} ->

View File

@ -37,19 +37,19 @@
-compile(nowarn_export_all).
-endif.
-define(PLACEHOLDERS, [
?PH_USERNAME,
?PH_CLIENTID,
?PH_PEERHOST,
?PH_CERT_CN_NAME,
?PH_CERT_SUBJECT
-define(ALLOWED_VARS, [
?VAR_USERNAME,
?VAR_CLIENTID,
?VAR_PEERHOST,
?VAR_CERT_CN_NAME,
?VAR_CERT_SUBJECT
]).
description() ->
"AuthZ with PostgreSQL".
create(#{query := SQL0} = Source) ->
{SQL, PlaceHolders} = emqx_authz_utils:parse_sql(SQL0, '$n', ?PLACEHOLDERS),
{SQL, PlaceHolders} = emqx_authz_utils:parse_sql(SQL0, '$n', ?ALLOWED_VARS),
ResourceID = emqx_authz_utils:make_resource_id(emqx_postgresql),
{ok, _Data} = emqx_authz_utils:create_resource(
ResourceID,
@ -59,7 +59,7 @@ create(#{query := SQL0} = Source) ->
Source#{annotations => #{id => ResourceID, placeholders => PlaceHolders}}.
update(#{query := SQL0, annotations := #{id := ResourceID}} = Source) ->
{SQL, PlaceHolders} = emqx_authz_utils:parse_sql(SQL0, '$n', ?PLACEHOLDERS),
{SQL, PlaceHolders} = emqx_authz_utils:parse_sql(SQL0, '$n', ?ALLOWED_VARS),
case
emqx_authz_utils:update_resource(
emqx_postgresql,

View File

@ -0,0 +1,71 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_auth_redis_validations).
-export([
validate_command/2
]).
%% Run each validation in order against the Redis command (a list of
%% binaries, as produced by emqx_redis_command:split/1); stop at the
%% first failing check.
validate_command([], _Command) ->
    ok;
validate_command([Check | More], Command) ->
    case validate(Check, Command) of
        ok -> validate_command(More, Command);
        {error, _} = Error -> Error
    end.
%% A single validation step over Command = [Name, Key | Args].
%% Each clause returns ok or a machine-matchable {error, Reason}.
validate(not_empty, Command) ->
    case Command of
        [] -> {error, empty_command};
        _ -> ok
    end;
%% Command name check is case-insensitive (string:equal/4 with
%% ignore-case and no normalization).
validate({command_name, AllowedNames}, [Name | _]) ->
    NameMatches = fun(Allowed) -> string:equal(Allowed, Name, true, none) end,
    case lists:any(NameMatches, AllowedNames) of
        true -> ok;
        false -> {error, {invalid_command_name, Name}}
    end;
validate({command_name, _}, _) ->
    {error, invalid_command_name};
%% All field arguments (after the command name and key) must be known.
validate({allowed_fields, AllowedFields}, [_CmdName, _CmdKey | Args]) ->
    case [Arg || Arg <- Args, not lists:member(Arg, AllowedFields)] of
        [] -> ok;
        Unknown -> {error, {unknown_fields, Unknown}}
    end;
validate({allowed_fields, _}, _) ->
    ok;
%% At least one of the required fields must be requested.
validate({required_field_one_of, Required}, [_CmdName, _CmdKey | Args]) ->
    IsRequested = fun(Field) -> lists:member(Field, Args) end,
    case lists:any(IsRequested, Required) of
        true -> ok;
        false -> {error, {missing_required_field, Required}}
    end;
validate({required_field_one_of, Required}, _) ->
    {error, {missing_required_field, Required}}.

View File

@ -118,54 +118,51 @@ authenticate(
parse_config(
#{
cmd := Cmd,
cmd := CmdStr,
password_hash_algorithm := Algorithm
} = Config
) ->
try
NCmd = parse_cmd(Cmd),
ok = emqx_authn_password_hashing:init(Algorithm),
ok = emqx_authn_utils:ensure_apps_started(Algorithm),
State = maps:with([password_hash_algorithm, salt_position], Config),
{Config, State#{cmd => NCmd}}
catch
error:{unsupported_cmd, _Cmd} ->
{error, {unsupported_cmd, Cmd}};
error:missing_password_hash ->
{error, missing_password_hash};
error:{unsupported_fields, Fields} ->
{error, {unsupported_fields, Fields}}
case parse_cmd(CmdStr) of
{ok, Cmd} ->
ok = emqx_authn_password_hashing:init(Algorithm),
ok = emqx_authn_utils:ensure_apps_started(Algorithm),
State = maps:with([password_hash_algorithm, salt_position], Config),
{Config, State#{cmd => Cmd}};
{error, _} = Error ->
Error
end.
%% Only support HGET and HMGET
parse_cmd(Cmd) ->
case string:tokens(Cmd, " ") of
[Command, Key, Field | Fields] when Command =:= "HGET" orelse Command =:= "HMGET" ->
NFields = [Field | Fields],
check_fields(NFields),
KeyTemplate = emqx_authn_utils:parse_str(list_to_binary(Key)),
{Command, KeyTemplate, NFields};
_ ->
error({unsupported_cmd, Cmd})
%% Split the raw Redis command string and turn it into the internal
%% {CommandName, KeyTemplate, Fields} form, validating it on the way.
%% Returns {ok, Template} or a propagated {error, Reason}.
parse_cmd(CmdStr) ->
    case emqx_redis_command:split(CmdStr) of
        {error, _} = Error ->
            Error;
        {ok, Parsed} ->
            case validate_cmd(Parsed) of
                ok ->
                    [CommandName, Key | Fields] = Parsed,
                    %% the key may contain ${...} placeholders
                    {ok, {CommandName, emqx_authn_utils:parse_str(Key), Fields}};
                {error, _} = Error ->
                    Error
            end
    end.
check_fields(Fields) ->
HasPassHash = lists:member("password_hash", Fields) orelse lists:member("password", Fields),
KnownFields = ["password_hash", "password", "salt", "is_superuser"],
UnknownFields = [F || F <- Fields, not lists:member(F, KnownFields)],
case {HasPassHash, UnknownFields} of
{true, []} -> ok;
{true, _} -> error({unsupported_fields, UnknownFields});
{false, _} -> error(missing_password_hash)
end.
%% Validate the split Redis command used for authentication lookups:
%% only hget/hmget (case-insensitive), only the listed hash fields,
%% and at least one of password_hash/password must be requested.
%% Returns ok | {error, Reason}.
validate_cmd(Cmd) ->
    emqx_auth_redis_validations:validate_command(
        [
            not_empty,
            {command_name, [<<"hget">>, <<"hmget">>]},
            {allowed_fields, [<<"password_hash">>, <<"password">>, <<"salt">>, <<"is_superuser">>]},
            {required_field_one_of, [<<"password_hash">>, <<"password">>]}
        ],
        Cmd
    ).
%% Pair up the requested field names with the values returned by
%% Redis, dropping fields whose value is undefined. A single non-list
%% value (e.g. from HGET) is treated as a one-element result set.
%% Fields and Values must have equal length (lists:zip/2).
merge(Fields, Value) when not is_list(Value) ->
    merge(Fields, [Value]);
merge(Fields, Values) ->
    lists:foldl(
        fun
            ({_Field, undefined}, Acc) -> Acc;
            ({Field, Val}, Acc) -> Acc#{Field => Val}
        end,
        #{},
        lists:zip(Fields, Values)
    ).

View File

@ -85,7 +85,7 @@ common_fields() ->
{password_hash_algorithm, fun emqx_authn_password_hashing:type_ro/1}
] ++ emqx_authn_schema:common_fields().
cmd(type) -> string();
cmd(type) -> binary();
cmd(desc) -> ?DESC(?FUNCTION_NAME);
cmd(required) -> true;
cmd(_) -> undefined.

View File

@ -35,27 +35,25 @@
-compile(nowarn_export_all).
-endif.
-define(PLACEHOLDERS, [
?PH_CERT_CN_NAME,
?PH_CERT_SUBJECT,
?PH_PEERHOST,
?PH_CLIENTID,
?PH_USERNAME
-define(ALLOWED_VARS, [
?VAR_CERT_CN_NAME,
?VAR_CERT_SUBJECT,
?VAR_PEERHOST,
?VAR_CLIENTID,
?VAR_USERNAME
]).
description() ->
"AuthZ with Redis".
create(#{cmd := CmdStr} = Source) ->
Cmd = tokens(CmdStr),
CmdTemplate = parse_cmd(CmdStr),
ResourceId = emqx_authz_utils:make_resource_id(?MODULE),
CmdTemplate = emqx_authz_utils:parse_deep(Cmd, ?PLACEHOLDERS),
{ok, _Data} = emqx_authz_utils:create_resource(ResourceId, emqx_redis, Source),
Source#{annotations => #{id => ResourceId}, cmd_template => CmdTemplate}.
update(#{cmd := CmdStr} = Source) ->
Cmd = tokens(CmdStr),
CmdTemplate = emqx_authz_utils:parse_deep(Cmd, ?PLACEHOLDERS),
CmdTemplate = parse_cmd(CmdStr),
case emqx_authz_utils:update_resource(emqx_redis, Source) of
{error, Reason} ->
error({load_config_error, Reason});
@ -131,9 +129,28 @@ compile_rule(RuleBin, TopicFilterRaw) ->
error(Reason)
end.
tokens(Query) ->
Tokens = binary:split(Query, <<" ">>, [global]),
[Token || Token <- Tokens, size(Token) > 0].
%% Split and validate the configured Redis command, then compile it
%% into a deep template with the allowed ${...} placeholders.
%% Raises {invalid_redis_cmd, _, _} on any problem (config-time error).
parse_cmd(Query) ->
    case emqx_redis_command:split(Query) of
        {error, Reason} ->
            error({invalid_redis_cmd, Reason, Query});
        {ok, Cmd} ->
            ok = validate_cmd(Cmd),
            emqx_authz_utils:parse_deep(Cmd, ?ALLOWED_VARS)
    end.
%% Validate the split Redis command for the authz source: it must be
%% non-empty and one of hmget/hgetall (case-insensitive). Raises
%% {invalid_redis_cmd, _, _} on failure, returns ok otherwise.
validate_cmd(Cmd) ->
    Checks = [
        not_empty,
        {command_name, [<<"hmget">>, <<"hgetall">>]}
    ],
    case emqx_auth_redis_validations:validate_command(Checks, Cmd) of
        ok -> ok;
        {error, Reason} -> error({invalid_redis_cmd, Reason, Cmd})
    end.
parse_rule(<<"publish">>) ->
#{<<"action">> => <<"publish">>};

View File

@ -336,7 +336,22 @@ user_seeds() ->
config_params => #{},
result => {ok, #{is_superuser => true}}
},
#{
data => #{
password_hash => <<"plainsalt">>,
salt => <<"salt">>,
is_superuser => <<"1">>
},
credentials => #{
username => <<"plain">>,
password => <<"plain">>
},
key => <<"mqtt_user:plain">>,
config_params => #{
<<"cmd">> => <<"HmGeT mqtt_user:${username} password_hash salt is_superuser">>
},
result => {ok, #{is_superuser => true}}
},
#{
data => #{
password_hash => <<"9b4d0c43d206d48279e69b9ad7132e22">>,

View File

@ -112,7 +112,9 @@ t_create_invalid_config(_Config) ->
).
t_redis_error(_Config) ->
ok = setup_config(#{<<"cmd">> => <<"INVALID COMMAND">>}),
q([<<"SET">>, <<"notahash">>, <<"stringvalue">>]),
ok = setup_config(#{<<"cmd">> => <<"HGETALL notahash">>}),
ClientInfo = emqx_authz_test_lib:base_client_info(),
@ -121,6 +123,24 @@ t_redis_error(_Config) ->
emqx_access_control:authorize(ClientInfo, ?AUTHZ_SUBSCRIBE, <<"a">>)
).
%% Only HMGET/HGETALL commands are accepted for the Redis authz
%% source; anything else must be rejected at config-update time.
t_invalid_command(_Config) ->
    Config = raw_redis_authz_config(),
    %% HGET is not in the allowed command list -> rejected
    ?assertMatch(
        {error, _},
        emqx_authz:update(?CMD_REPLACE, [Config#{<<"cmd">> => <<"HGET key">>}])
    ),
    %% HGETALL is allowed -> accepted
    ?assertMatch(
        {ok, _},
        emqx_authz:update(?CMD_REPLACE, [Config#{<<"cmd">> => <<"HGETALL key">>}])
    ),
    %% also rejected when replacing the single redis source
    ?assertMatch(
        {error, _},
        emqx_authz:update({?CMD_REPLACE, redis}, Config#{<<"cmd">> => <<"HGET key">>})
    ).
%%------------------------------------------------------------------------------
%% Cases
%%------------------------------------------------------------------------------

View File

@ -62,19 +62,20 @@
%% Data backup
-export([
import_config/1
import_config/1,
%% exported for emqx_bridge_v2
import_config/4
]).
-export([query_opts/1]).
-define(EGRESS_DIR_BRIDGES(T),
T == webhook;
T == mysql;
T == gcp_pubsub;
T == influxdb_api_v1;
T == influxdb_api_v2;
%% TODO: rename this to `kafka_producer' after alias support is
%% added to hocon; keeping this as just `kafka' for backwards
%% compatibility.
T == kafka;
T == kafka_producer;
T == redis_single;
T == redis_sentinel;
T == redis_cluster;
@ -190,39 +191,50 @@ unload_hook() ->
on_message_publish(Message = #message{topic = Topic, flags = Flags}) ->
case maps:get(sys, Flags, false) of
false ->
{Msg, _} = emqx_rule_events:eventmsg_publish(Message),
send_to_matched_egress_bridges(Topic, Msg);
send_to_matched_egress_bridges(Topic, Message);
true ->
ok
end,
{ok, Message}.
send_to_matched_egress_bridges(Topic, Msg) ->
MatchedBridgeIds = get_matched_egress_bridges(Topic),
lists:foreach(
fun(Id) ->
try send_message(Id, Msg) of
{error, Reason} ->
?SLOG(error, #{
msg => "send_message_to_bridge_failed",
bridge => Id,
error => Reason
});
_ ->
ok
catch
Err:Reason:ST ->
?SLOG(error, #{
msg => "send_message_to_bridge_exception",
bridge => Id,
error => Err,
reason => Reason,
stacktrace => ST
})
end
end,
MatchedBridgeIds
).
%% Forward a published message to every egress bridge whose topic
%% filter matches Topic. The rule-event conversion is performed only
%% when at least one bridge matched, so the common no-bridge case
%% stays cheap.
send_to_matched_egress_bridges(Topic, Message) ->
    case get_matched_egress_bridges(Topic) of
        [] ->
            ok;
        Ids ->
            {Msg, _} = emqx_rule_events:eventmsg_publish(Message),
            send_to_matched_egress_bridges_loop(Topic, Msg, Ids)
    end.
%% Deliver Msg to each matched bridge in turn. Failures (error
%% returns, throws, and other exceptions) are logged — with secrets
%% redacted — but never abort delivery to the remaining bridges.
send_to_matched_egress_bridges_loop(_Topic, _Msg, []) ->
    ok;
send_to_matched_egress_bridges_loop(Topic, Msg, [Id | Ids]) ->
    try send_message(Id, Msg) of
        {error, Reason} ->
            ?SLOG(error, #{
                msg => "send_message_to_bridge_failed",
                bridge => Id,
                error => Reason
            });
        _ ->
            ok
    catch
        %% throws are expected control-flow errors: log reason only
        throw:Reason ->
            ?SLOG(error, #{
                msg => "send_message_to_bridge_exception",
                bridge => Id,
                reason => emqx_utils:redact(Reason)
            });
        %% unexpected exceptions: log class and redacted stacktrace too
        Err:Reason:ST ->
            ?SLOG(error, #{
                msg => "send_message_to_bridge_exception",
                bridge => Id,
                error => Err,
                reason => emqx_utils:redact(Reason),
                stacktrace => emqx_utils:redact(ST)
            })
    end,
    send_to_matched_egress_bridges_loop(Topic, Msg, Ids).
send_message(BridgeId, Message) ->
{BridgeType, BridgeName} = emqx_bridge_resource:parse_bridge_id(BridgeId),
@ -277,30 +289,40 @@ post_config_update([?ROOT_KEY], _Req, NewConf, OldConf, _AppEnv) ->
Result.
list() ->
maps:fold(
fun(Type, NameAndConf, Bridges) ->
maps:fold(
fun(Name, RawConf, Acc) ->
case lookup(Type, Name, RawConf) of
{error, not_found} -> Acc;
{ok, Res} -> [Res | Acc]
end
end,
Bridges,
NameAndConf
)
end,
[],
emqx:get_raw_config([bridges], #{})
).
BridgeV1Bridges =
maps:fold(
fun(Type, NameAndConf, Bridges) ->
maps:fold(
fun(Name, RawConf, Acc) ->
case lookup(Type, Name, RawConf) of
{error, not_found} -> Acc;
{ok, Res} -> [Res | Acc]
end
end,
Bridges,
NameAndConf
)
end,
[],
emqx:get_raw_config([bridges], #{})
),
BridgeV2Bridges =
emqx_bridge_v2:list_and_transform_to_bridge_v1(),
BridgeV1Bridges ++ BridgeV2Bridges.
%%BridgeV2Bridges = emqx_bridge_v2:list().
lookup(Id) ->
{Type, Name} = emqx_bridge_resource:parse_bridge_id(Id),
lookup(Type, Name).
lookup(Type, Name) ->
RawConf = emqx:get_raw_config([bridges, Type, Name], #{}),
lookup(Type, Name, RawConf).
case emqx_bridge_v2:is_bridge_v2_type(Type) of
true ->
emqx_bridge_v2:lookup_and_transform_to_bridge_v1(Type, Name);
false ->
RawConf = emqx:get_raw_config([bridges, Type, Name], #{}),
lookup(Type, Name, RawConf)
end.
lookup(Type, Name, RawConf) ->
case emqx_resource:get_instance(emqx_bridge_resource:resource_id(Type, Name)) of
@ -316,7 +338,18 @@ lookup(Type, Name, RawConf) ->
end.
get_metrics(Type, Name) ->
emqx_resource:get_metrics(emqx_bridge_resource:resource_id(Type, Name)).
case emqx_bridge_v2:is_bridge_v2_type(Type) of
true ->
case emqx_bridge_v2:is_valid_bridge_v1(Type, Name) of
true ->
BridgeV2Type = emqx_bridge_v2:bridge_v2_type_to_connector_type(Type),
emqx_bridge_v2:get_metrics(BridgeV2Type, Name);
false ->
{error, not_bridge_v1_compatible}
end;
false ->
emqx_resource:get_metrics(emqx_bridge_resource:resource_id(Type, Name))
end.
maybe_upgrade(mqtt, Config) ->
emqx_bridge_compatible_config:maybe_upgrade(Config);
@ -325,55 +358,90 @@ maybe_upgrade(webhook, Config) ->
maybe_upgrade(_Other, Config) ->
Config.
disable_enable(Action, BridgeType, BridgeName) when
disable_enable(Action, BridgeType0, BridgeName) when
Action =:= disable; Action =:= enable
->
emqx_conf:update(
config_key_path() ++ [BridgeType, BridgeName],
{Action, BridgeType, BridgeName},
#{override_to => cluster}
).
BridgeType = upgrade_type(BridgeType0),
case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of
true ->
emqx_bridge_v2:bridge_v1_enable_disable(Action, BridgeType, BridgeName);
false ->
emqx_conf:update(
config_key_path() ++ [BridgeType, BridgeName],
{Action, BridgeType, BridgeName},
#{override_to => cluster}
)
end.
create(BridgeType, BridgeName, RawConf) ->
create(BridgeType0, BridgeName, RawConf) ->
BridgeType = upgrade_type(BridgeType0),
?SLOG(debug, #{
bridge_action => create,
bridge_type => BridgeType,
bridge_name => BridgeName,
bridge_raw_config => emqx_utils:redact(RawConf)
}),
emqx_conf:update(
emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
RawConf,
#{override_to => cluster}
).
case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of
true ->
emqx_bridge_v2:split_bridge_v1_config_and_create(BridgeType, BridgeName, RawConf);
false ->
emqx_conf:update(
emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
RawConf,
#{override_to => cluster}
)
end.
remove(BridgeType, BridgeName) ->
%% NOTE: This function can cause broken references but it is only called from
%% test cases.
-spec remove(atom() | binary(), binary()) -> ok | {error, any()}.
remove(BridgeType0, BridgeName) ->
BridgeType = upgrade_type(BridgeType0),
?SLOG(debug, #{
bridge_action => remove,
bridge_type => BridgeType,
bridge_name => BridgeName
}),
emqx_conf:remove(
emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
#{override_to => cluster}
).
case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of
true ->
emqx_bridge_v2:remove(BridgeType, BridgeName);
false ->
remove_v1(BridgeType, BridgeName)
end.
check_deps_and_remove(BridgeType, BridgeName, RemoveDeps) ->
BridgeId = emqx_bridge_resource:bridge_id(BridgeType, BridgeName),
%% NOTE: This violates the design: Rule depends on data-bridge but not vice versa.
case emqx_rule_engine:get_rule_ids_by_action(BridgeId) of
[] ->
%% Remove a (v1) bridge from the cluster config, normalizing the
%% emqx_conf:remove/2 result to ok | {error, Reason}.
remove_v1(Type0, Name) ->
    Type = upgrade_type(Type0),
    KeyPath = emqx_bridge:config_key_path() ++ [Type, Name],
    case emqx_conf:remove(KeyPath, #{override_to => cluster}) of
        {ok, _} ->
            ok;
        {error, _} = Error ->
            Error
    end.
check_deps_and_remove(BridgeType0, BridgeName, RemoveDeps) ->
BridgeType = upgrade_type(BridgeType0),
case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of
true ->
emqx_bridge_v2:bridge_v1_check_deps_and_remove(
BridgeType,
BridgeName,
RemoveDeps
);
false ->
do_check_deps_and_remove(BridgeType, BridgeName, RemoveDeps)
end.
do_check_deps_and_remove(BridgeType, BridgeName, RemoveDeps) ->
case emqx_bridge_lib:maybe_withdraw_rule_action(BridgeType, BridgeName, RemoveDeps) of
ok ->
remove(BridgeType, BridgeName);
RuleIds when RemoveDeps =:= false ->
{error, {rules_deps_on_this_bridge, RuleIds}};
RuleIds when RemoveDeps =:= true ->
lists:foreach(
fun(R) ->
emqx_rule_engine:ensure_action_removed(R, BridgeId)
end,
RuleIds
),
remove(BridgeType, BridgeName)
{error, Reason} ->
{error, Reason}
end.
%%----------------------------------------------------------------------------------------
@ -381,15 +449,18 @@ check_deps_and_remove(BridgeType, BridgeName, RemoveDeps) ->
%%----------------------------------------------------------------------------------------
import_config(RawConf) ->
RootKeyPath = config_key_path(),
BridgesConf = maps:get(<<"bridges">>, RawConf, #{}),
import_config(RawConf, <<"bridges">>, ?ROOT_KEY, config_key_path()).
%% Used in emqx_bridge_v2
import_config(RawConf, RawConfKey, RootKey, RootKeyPath) ->
BridgesConf = maps:get(RawConfKey, RawConf, #{}),
OldBridgesConf = emqx:get_raw_config(RootKeyPath, #{}),
MergedConf = merge_confs(OldBridgesConf, BridgesConf),
case emqx_conf:update(RootKeyPath, MergedConf, #{override_to => cluster}) of
{ok, #{raw_config := NewRawConf}} ->
{ok, #{root_key => ?ROOT_KEY, changed => changed_paths(OldBridgesConf, NewRawConf)}};
{ok, #{root_key => RootKey, changed => changed_paths(OldBridgesConf, NewRawConf)}};
Error ->
{error, #{root_key => ?ROOT_KEY, reason => Error}}
{error, #{root_key => RootKey, reason => Error}}
end.
merge_confs(OldConf, NewConf) ->
@ -505,6 +576,7 @@ flatten_confs(Conf0) ->
do_flatten_confs(Type, Conf0) ->
[{{Type, Name}, Conf} || {Name, Conf} <- maps:to_list(Conf0)].
%% TODO: create a topic index for this
get_matched_egress_bridges(Topic) ->
Bridges = emqx:get_config([bridges], #{}),
maps:fold(
@ -600,3 +672,6 @@ validate_bridge_name(BridgeName0) ->
to_bin(A) when is_atom(A) -> atom_to_binary(A, utf8);
to_bin(B) when is_binary(B) -> B.
%% Map a (possibly legacy) bridge type name to its current one,
%% delegating to emqx_bridge_lib.
upgrade_type(Type) ->
    emqx_bridge_lib:upgrade_type(Type).

View File

@ -387,6 +387,7 @@ schema("/bridges/:id/enable/:enable") ->
responses =>
#{
204 => <<"Success">>,
400 => error_schema('BAD_REQUEST', non_compat_bridge_msg()),
404 => error_schema('NOT_FOUND', "Bridge not found or invalid operation"),
503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable")
}
@ -456,10 +457,13 @@ schema("/bridges_probe") ->
}
}.
'/bridges'(post, #{body := #{<<"type">> := BridgeType, <<"name">> := BridgeName} = Conf0}) ->
'/bridges'(post, #{body := #{<<"type">> := BridgeType0, <<"name">> := BridgeName} = Conf0}) ->
BridgeType = upgrade_type(BridgeType0),
case emqx_bridge:lookup(BridgeType, BridgeName) of
{ok, _} ->
?BAD_REQUEST('ALREADY_EXISTS', <<"bridge already exists">>);
{error, not_bridge_v1_compatible} ->
?BAD_REQUEST('ALREADY_EXISTS', non_compat_bridge_msg());
{error, not_found} ->
Conf = filter_out_request_body(Conf0),
create_bridge(BridgeType, BridgeName, Conf)
@ -485,12 +489,14 @@ schema("/bridges_probe") ->
?TRY_PARSE_ID(
Id,
case emqx_bridge:lookup(BridgeType, BridgeName) of
{ok, _} ->
RawConf = emqx:get_raw_config([bridges, BridgeType, BridgeName], #{}),
{ok, #{raw_config := RawConf}} ->
%% TODO will the maybe_upgrade step done by emqx_bridge:lookup cause any problems
Conf = deobfuscate(Conf1, RawConf),
update_bridge(BridgeType, BridgeName, Conf);
{error, not_found} ->
?BRIDGE_NOT_FOUND(BridgeType, BridgeName)
?BRIDGE_NOT_FOUND(BridgeType, BridgeName);
{error, not_bridge_v1_compatible} ->
?BAD_REQUEST('ALREADY_EXISTS', non_compat_bridge_msg())
end
);
'/bridges/:id'(delete, #{bindings := #{id := Id}, query_string := Qs}) ->
@ -498,27 +504,33 @@ schema("/bridges_probe") ->
Id,
case emqx_bridge:lookup(BridgeType, BridgeName) of
{ok, _} ->
AlsoDeleteActs =
AlsoDelete =
case maps:get(<<"also_delete_dep_actions">>, Qs, <<"false">>) of
<<"true">> -> true;
true -> true;
_ -> false
<<"true">> -> [rule_actions, connector];
true -> [rule_actions, connector];
_ -> [connector]
end,
case emqx_bridge:check_deps_and_remove(BridgeType, BridgeName, AlsoDeleteActs) of
{ok, _} ->
case emqx_bridge:check_deps_and_remove(BridgeType, BridgeName, AlsoDelete) of
ok ->
?NO_CONTENT;
{error, {rules_deps_on_this_bridge, RuleIds}} ->
?BAD_REQUEST(
{<<"Cannot delete bridge while active rules are defined for this bridge">>,
RuleIds}
);
{error, #{
reason := rules_depending_on_this_bridge,
rule_ids := RuleIds
}} ->
RulesStr = [[" ", I] || I <- RuleIds],
Msg = bin([
"Cannot delete bridge while active rules are depending on it:", RulesStr
]),
?BAD_REQUEST(Msg);
{error, timeout} ->
?SERVICE_UNAVAILABLE(<<"request timeout">>);
{error, Reason} ->
?INTERNAL_ERROR(Reason)
end;
{error, not_found} ->
?BRIDGE_NOT_FOUND(BridgeType, BridgeName)
?BRIDGE_NOT_FOUND(BridgeType, BridgeName);
{error, not_bridge_v1_compatible} ->
?BAD_REQUEST(non_compat_bridge_msg())
end
).
@ -528,20 +540,26 @@ schema("/bridges_probe") ->
'/bridges/:id/metrics/reset'(put, #{bindings := #{id := Id}}) ->
?TRY_PARSE_ID(
Id,
begin
ok = emqx_bridge_resource:reset_metrics(
emqx_bridge_resource:resource_id(BridgeType, BridgeName)
),
?NO_CONTENT
case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of
true ->
BridgeV2Type = emqx_bridge_v2:bridge_v2_type_to_connector_type(BridgeType),
ok = emqx_bridge_v2:reset_metrics(BridgeV2Type, BridgeName),
?NO_CONTENT;
false ->
ok = emqx_bridge_resource:reset_metrics(
emqx_bridge_resource:resource_id(BridgeType, BridgeName)
),
?NO_CONTENT
end
).
'/bridges_probe'(post, Request) ->
RequestMeta = #{module => ?MODULE, method => post, path => "/bridges_probe"},
case emqx_dashboard_swagger:filter_check_request_and_translate_body(Request, RequestMeta) of
{ok, #{body := #{<<"type">> := ConnType} = Params}} ->
{ok, #{body := #{<<"type">> := BridgeType} = Params}} ->
Params1 = maybe_deobfuscate_bridge_probe(Params),
case emqx_bridge_resource:create_dry_run(ConnType, maps:remove(<<"type">>, Params1)) of
Params2 = maps:remove(<<"type">>, Params1),
case emqx_bridge_resource:create_dry_run(BridgeType, Params2) of
ok ->
?NO_CONTENT;
{error, #{kind := validation_error} = Reason0} ->
@ -560,10 +578,12 @@ schema("/bridges_probe") ->
redact(BadRequest)
end.
maybe_deobfuscate_bridge_probe(#{<<"type">> := BridgeType, <<"name">> := BridgeName} = Params) ->
maybe_deobfuscate_bridge_probe(#{<<"type">> := BridgeType0, <<"name">> := BridgeName} = Params) ->
BridgeType = upgrade_type(BridgeType0),
case emqx_bridge:lookup(BridgeType, BridgeName) of
{ok, _} ->
RawConf = emqx:get_raw_config([bridges, BridgeType, BridgeName], #{}),
{ok, #{raw_config := RawConf}} ->
%% TODO check if RawConf obtained above is compatible with the commented out code below
%% RawConf = emqx:get_raw_config([bridges, BridgeType, BridgeName], #{}),
deobfuscate(Params, RawConf);
_ ->
%% A bridge may be probed before it's created, so not finding it here is fine
@ -589,6 +609,8 @@ lookup_from_all_nodes(BridgeType, BridgeName, SuccCode) ->
{SuccCode, format_bridge_info([R || {ok, R} <- Results])};
{ok, [{error, not_found} | _]} ->
?BRIDGE_NOT_FOUND(BridgeType, BridgeName);
{ok, [{error, not_bridge_v1_compatible} | _]} ->
?NOT_FOUND(non_compat_bridge_msg());
{error, Reason} ->
?INTERNAL_ERROR(Reason)
end.
@ -603,9 +625,20 @@ create_bridge(BridgeType, BridgeName, Conf) ->
create_or_update_bridge(BridgeType, BridgeName, Conf, 201).
update_bridge(BridgeType, BridgeName, Conf) ->
create_or_update_bridge(BridgeType, BridgeName, Conf, 200).
case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of
true ->
case emqx_bridge_v2:is_valid_bridge_v1(BridgeType, BridgeName) of
true ->
create_or_update_bridge(BridgeType, BridgeName, Conf, 200);
false ->
?NOT_FOUND(non_compat_bridge_msg())
end;
false ->
create_or_update_bridge(BridgeType, BridgeName, Conf, 200)
end.
create_or_update_bridge(BridgeType, BridgeName, Conf, HttpStatusCode) ->
create_or_update_bridge(BridgeType0, BridgeName, Conf, HttpStatusCode) ->
BridgeType = upgrade_type(BridgeType0),
case emqx_bridge:create(BridgeType, BridgeName, Conf) of
{ok, _} ->
lookup_from_all_nodes(BridgeType, BridgeName, HttpStatusCode);
@ -615,7 +648,8 @@ create_or_update_bridge(BridgeType, BridgeName, Conf, HttpStatusCode) ->
?BAD_REQUEST(map_to_json(redact(Reason)))
end.
get_metrics_from_local_node(BridgeType, BridgeName) ->
get_metrics_from_local_node(BridgeType0, BridgeName) ->
BridgeType = upgrade_type(BridgeType0),
format_metrics(emqx_bridge:get_metrics(BridgeType, BridgeName)).
'/bridges/:id/enable/:enable'(put, #{bindings := #{id := Id, enable := Enable}}) ->
@ -634,6 +668,10 @@ get_metrics_from_local_node(BridgeType, BridgeName) ->
?SERVICE_UNAVAILABLE(<<"request timeout">>);
{error, timeout} ->
?SERVICE_UNAVAILABLE(<<"request timeout">>);
{error, not_bridge_v1_compatible} ->
?BAD_REQUEST(non_compat_bridge_msg());
{error, bridge_not_found} ->
?BRIDGE_NOT_FOUND(BridgeType, BridgeName);
{error, Reason} ->
?INTERNAL_ERROR(Reason)
end
@ -650,7 +688,7 @@ get_metrics_from_local_node(BridgeType, BridgeName) ->
invalid ->
?NOT_FOUND(<<"Invalid operation: ", Op/binary>>);
OperFunc ->
try is_enabled_bridge(BridgeType, BridgeName) of
try is_bridge_enabled(BridgeType, BridgeName) of
false ->
?BRIDGE_NOT_ENABLED;
true ->
@ -673,7 +711,7 @@ get_metrics_from_local_node(BridgeType, BridgeName) ->
invalid ->
?NOT_FOUND(<<"Invalid operation: ", Op/binary>>);
OperFunc ->
try is_enabled_bridge(BridgeType, BridgeName) of
try is_bridge_enabled(BridgeType, BridgeName) of
false ->
?BRIDGE_NOT_ENABLED;
true ->
@ -692,7 +730,14 @@ get_metrics_from_local_node(BridgeType, BridgeName) ->
end
).
is_enabled_bridge(BridgeType, BridgeName) ->
is_bridge_enabled(BridgeType, BridgeName) ->
case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of
true -> is_bridge_enabled_v2(BridgeType, BridgeName);
false -> is_bridge_enabled_v1(BridgeType, BridgeName)
end.
is_bridge_enabled_v1(BridgeType, BridgeName) ->
%% we read from the translated config because the defaults are populated here.
try emqx:get_config([bridges, BridgeType, binary_to_existing_atom(BridgeName)]) of
ConfMap ->
maps:get(enable, ConfMap, false)
@ -705,6 +750,20 @@ is_enabled_bridge(BridgeType, BridgeName) ->
throw(not_found)
end.
%% Check the 'enable' flag of a v2 action identified by its v1 type name.
%% NOTE(review): defaults to true when the key is absent, while the v1
%% variant above defaults to false — confirm this asymmetry is intended.
%% Throws 'not_found' when the action is not present in config.
is_bridge_enabled_v2(BridgeV1Type, BridgeName) ->
    BridgeV2Type = emqx_bridge_v2:bridge_v1_type_to_bridge_v2_type(BridgeV1Type),
    try emqx:get_config([actions, BridgeV2Type, binary_to_existing_atom(BridgeName)]) of
        ConfMap ->
            maps:get(enable, ConfMap, true)
    catch
        error:{config_not_found, _} ->
            throw(not_found);
        error:badarg ->
            %% catch non-existing atom,
            %% a non-existing atom means it is not available in config PT storage.
            throw(not_found)
    end.
node_operation_func(<<"restart">>) -> restart_bridge_to_node;
node_operation_func(<<"start">>) -> start_bridge_to_node;
node_operation_func(<<"stop">>) -> stop_bridge_to_node;
@ -837,11 +896,18 @@ format_resource(
},
Node
) ->
RawConfFull = fill_defaults(Type, RawConf),
RawConfFull =
case emqx_bridge_v2:is_bridge_v2_type(Type) of
true ->
%% The defaults are already filled in
downgrade_raw_conf(Type, RawConf);
false ->
fill_defaults(Type, RawConf)
end,
redact(
maps:merge(
RawConfFull#{
type => Type,
type => downgrade_type(Type),
name => maps:get(<<"name">>, RawConf, BridgeName),
node => Node
},
@ -1012,7 +1078,7 @@ call_operation(NodeOrAll, OperFunc, Args = [_Nodes, BridgeType, BridgeName]) ->
?NOT_FOUND(<<"Node not found: ", (atom_to_binary(Node))/binary>>);
{error, {unhealthy_target, Message}} ->
?BAD_REQUEST(Message);
{error, Reason} when not is_tuple(Reason); element(1, Reason) =/= 'exit' ->
{error, Reason} ->
?BAD_REQUEST(redact(Reason))
end.
@ -1048,10 +1114,10 @@ maybe_unwrap({error, not_implemented}) ->
maybe_unwrap(RpcMulticallResult) ->
emqx_rpc:unwrap_erpc(RpcMulticallResult).
supported_versions(start_bridge_to_node) -> [2, 3, 4];
supported_versions(start_bridges_to_all_nodes) -> [2, 3, 4];
supported_versions(get_metrics_from_all_nodes) -> [4];
supported_versions(_Call) -> [1, 2, 3, 4].
supported_versions(start_bridge_to_node) -> [2, 3, 4, 5];
supported_versions(start_bridges_to_all_nodes) -> [2, 3, 4, 5];
supported_versions(get_metrics_from_all_nodes) -> [4, 5];
supported_versions(_Call) -> [1, 2, 3, 4, 5].
redact(Term) ->
emqx_utils:redact(Term).
@ -1089,3 +1155,28 @@ map_to_json(M0) ->
M2 = maps:without([value, <<"value">>], M1),
emqx_utils_json:encode(M2)
end.
%% Error message returned when a v1-style API call hits a bridge that
%% exists only as a (non v1-compatible) Bridge V2 configuration.
non_compat_bridge_msg() ->
    <<"bridge already exists as non Bridge V1 compatible Bridge V2 bridge">>.

%% Thin delegates to the shared type-alias helpers ('kafka' <-> 'kafka_producer').
upgrade_type(Type) ->
    emqx_bridge_lib:upgrade_type(Type).

downgrade_type(Type) ->
    emqx_bridge_lib:downgrade_type(Type).
%% TODO: move it to callback
%% Present a v2 raw config in the v1 shape: for Kafka-style producer types
%% the v2 "parameters" section corresponds to the v1 "kafka" section.
downgrade_raw_conf(kafka_producer, RawConf) ->
    rename(<<"parameters">>, <<"kafka">>, RawConf);
downgrade_raw_conf(azure_event_hub_producer, RawConf) ->
    rename(<<"parameters">>, <<"kafka">>, RawConf);
downgrade_raw_conf(_Type, RawConf) ->
    RawConf.
%% Move the value stored under OldKey to NewKey; a map without OldKey is
%% returned unchanged.
rename(OldKey, NewKey, Map) ->
    case Map of
        #{OldKey := Value} ->
            maps:remove(OldKey, maps:put(NewKey, Value, Map));
        #{} ->
            Map
    end.

View File

@ -18,7 +18,6 @@
-behaviour(application).
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-export([start/2, stop/1]).
-export([
@ -33,6 +32,7 @@ start(_StartType, _StartArgs) ->
{ok, Sup} = emqx_bridge_sup:start_link(),
ok = ensure_enterprise_schema_loaded(),
ok = emqx_bridge:load(),
ok = emqx_bridge_v2:load(),
ok = emqx_bridge:load_hook(),
ok = emqx_config_handler:add_handler(?LEAF_NODE_HDLR_PATH, ?MODULE),
ok = emqx_config_handler:add_handler(?TOP_LELVE_HDLR_PATH, emqx_bridge),
@ -43,6 +43,7 @@ stop(_State) ->
emqx_conf:remove_handler(?LEAF_NODE_HDLR_PATH),
emqx_conf:remove_handler(?TOP_LELVE_HDLR_PATH),
ok = emqx_bridge:unload(),
ok = emqx_bridge_v2:unload(),
ok.
-if(?EMQX_RELEASE_EDITION == ee).
@ -56,7 +57,7 @@ ensure_enterprise_schema_loaded() ->
%% NOTE: We depends on the `emqx_bridge:pre_config_update/3` to restart/stop the
%% underlying resources.
pre_config_update(_, {_Oper, _, _}, undefined) ->
pre_config_update(_, {_Oper, _Type, _Name}, undefined) ->
{error, bridge_not_found};
pre_config_update(_, {Oper, _Type, _Name}, OldConfig) ->
%% to save the 'enable' to the config files

View File

@ -0,0 +1,89 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_bridge_lib).
-export([
maybe_withdraw_rule_action/3,
upgrade_type/1,
downgrade_type/1
]).
%% @doc A bridge can be used as a rule action.
%% The bridge-ID in rule-engine's world is the action-ID.
%% This function is to remove a bridge (action) from all rules
%% using it if the `rule_actions' is included in `DeleteDeps' list
maybe_withdraw_rule_action(BridgeType, BridgeName, DeleteDeps) ->
    %% Rules may reference either the legacy or the current type spelling,
    %% hence the list of external ids.
    ShouldDeleteActions = lists:member(rule_actions, DeleteDeps),
    maybe_withdraw_rule_action_loop(external_ids(BridgeType, BridgeName), ShouldDeleteActions).
%% Walk every external bridge id: when no rule references it, move on;
%% when rules reference it and deletion was requested, strip the action
%% from each rule; otherwise stop with an error naming the blocking rules.
maybe_withdraw_rule_action_loop([], _DeleteActions) ->
    ok;
maybe_withdraw_rule_action_loop([BridgeId | Rest], DeleteActions) ->
    case emqx_rule_engine:get_rule_ids_by_action(BridgeId) of
        [] ->
            maybe_withdraw_rule_action_loop(Rest, DeleteActions);
        RuleIds when DeleteActions ->
            RemoveFromRule = fun(RuleId) ->
                emqx_rule_engine:ensure_action_removed(RuleId, BridgeId)
            end,
            ok = lists:foreach(RemoveFromRule, RuleIds),
            maybe_withdraw_rule_action_loop(Rest, DeleteActions);
        RuleIds ->
            {error, #{
                reason => rules_depending_on_this_bridge,
                bridge_id => BridgeId,
                rule_ids => RuleIds
            }}
    end.
%% @doc The Kafka producer bridge type was renamed from 'kafka' to
%% 'kafka_producer' in 5.3.1; map the legacy spelling (atom or binary)
%% to the current one, leaving every other type untouched.
upgrade_type(kafka) -> kafka_producer;
upgrade_type(<<"kafka">>) -> <<"kafka_producer">>;
upgrade_type(Type) -> Type.
%% @doc Inverse of upgrade_type/1: map the post-5.3.1 'kafka_producer'
%% type name (atom or binary) back to its legacy 'kafka' spelling.
downgrade_type(kafka_producer) -> kafka;
downgrade_type(<<"kafka_producer">>) -> <<"kafka">>;
downgrade_type(Type) -> Type.
%% A rule action may reference a bridge by its legacy type name
%% (e.g. 'kafka' instead of 'kafka_producer'), so when a legacy alias
%% exists, produce the external id for both spellings (legacy first).
external_ids(Type, Name) ->
    Spellings =
        case downgrade_type(Type) of
            Type -> [Type];
            Legacy -> [Legacy, Type]
        end,
    [external_id(T, Name) || T <- Spellings].
%% Build the external id ("<type>:<name>") under which rule actions
%% refer to a bridge_v2.
external_id(BridgeType, BridgeName) ->
    iolist_to_binary([bin(BridgeType), $:, bin(BridgeName)]).

%% Coerce an atom or binary to a binary.
bin(Value) when is_binary(Value) -> Value;
bin(Value) when is_atom(Value) -> atom_to_binary(Value, utf8).

View File

@ -80,7 +80,17 @@ bridge_impl_module(_BridgeType) -> undefined.
-endif.
resource_id(BridgeId) when is_binary(BridgeId) ->
<<"bridge:", BridgeId/binary>>.
case binary:split(BridgeId, <<":">>) of
[Type, _Name] ->
case emqx_bridge_v2:is_bridge_v2_type(Type) of
true ->
emqx_bridge_v2:bridge_v1_id_to_connector_resource_id(BridgeId);
false ->
<<"bridge:", BridgeId/binary>>
end;
_ ->
invalid_data(<<"should be of pattern {type}:{name}, but got ", BridgeId/binary>>)
end.
resource_id(BridgeType, BridgeName) ->
BridgeId = bridge_id(BridgeType, BridgeName),
@ -92,19 +102,15 @@ bridge_id(BridgeType, BridgeName) ->
<<Type/binary, ":", Name/binary>>.
parse_bridge_id(BridgeId) ->
parse_bridge_id(BridgeId, #{atom_name => true}).
parse_bridge_id(bin(BridgeId), #{atom_name => true}).
-spec parse_bridge_id(list() | binary() | atom(), #{atom_name => boolean()}) ->
-spec parse_bridge_id(binary() | atom(), #{atom_name => boolean()}) ->
{atom(), atom() | binary()}.
parse_bridge_id(<<"bridge:", ID/binary>>, Opts) ->
parse_bridge_id(ID, Opts);
parse_bridge_id(BridgeId, Opts) ->
case string:split(bin(BridgeId), ":", all) of
[Type, Name] ->
{to_type_atom(Type), validate_name(Name, Opts)};
_ ->
invalid_data(
<<"should be of pattern {type}:{name}, but got ", BridgeId/binary>>
)
end.
{Type, Name} = emqx_resource:parse_resource_id(BridgeId, Opts),
{emqx_bridge_lib:upgrade_type(Type), Name}.
bridge_hookpoint(BridgeId) ->
<<"$bridges/", (bin(BridgeId))/binary>>.
@ -114,56 +120,48 @@ bridge_hookpoint_to_bridge_id(?BRIDGE_HOOKPOINT(BridgeId)) ->
bridge_hookpoint_to_bridge_id(_) ->
{error, bad_bridge_hookpoint}.
validate_name(Name0, Opts) ->
Name = unicode:characters_to_list(Name0, utf8),
case is_list(Name) andalso Name =/= [] of
true ->
case lists:all(fun is_id_char/1, Name) of
true ->
case maps:get(atom_name, Opts, true) of
% NOTE
% Rule may be created before bridge, thus not `list_to_existing_atom/1`,
% also it is infrequent user input anyway.
true -> list_to_atom(Name);
false -> Name0
end;
false ->
invalid_data(<<"bad name: ", Name0/binary>>)
end;
false ->
invalid_data(<<"only 0-9a-zA-Z_-. is allowed in name: ", Name0/binary>>)
end.
-spec invalid_data(binary()) -> no_return().
invalid_data(Reason) -> throw(#{kind => validation_error, reason => Reason}).
is_id_char(C) when C >= $0 andalso C =< $9 -> true;
is_id_char(C) when C >= $a andalso C =< $z -> true;
is_id_char(C) when C >= $A andalso C =< $Z -> true;
is_id_char($_) -> true;
is_id_char($-) -> true;
is_id_char($.) -> true;
is_id_char(_) -> false.
to_type_atom(Type) ->
try
erlang:binary_to_existing_atom(Type, utf8)
catch
_:_ ->
invalid_data(<<"unknown bridge type: ", Type/binary>>)
reset_metrics(ResourceId) ->
%% TODO we should not create atoms here
{Type, Name} = parse_bridge_id(ResourceId),
case emqx_bridge_v2:is_bridge_v2_type(Type) of
false ->
emqx_resource:reset_metrics(ResourceId);
true ->
case emqx_bridge_v2:is_valid_bridge_v1(Type, Name) of
true ->
BridgeV2Type = emqx_bridge_v2:bridge_v2_type_to_connector_type(Type),
emqx_bridge_v2:reset_metrics(BridgeV2Type, Name);
false ->
{error, not_bridge_v1_compatible}
end
end.
reset_metrics(ResourceId) ->
emqx_resource:reset_metrics(ResourceId).
restart(Type, Name) ->
emqx_resource:restart(resource_id(Type, Name)).
case emqx_bridge_v2:is_bridge_v2_type(Type) of
false ->
emqx_resource:restart(resource_id(Type, Name));
true ->
emqx_bridge_v2:bridge_v1_restart(Type, Name)
end.
stop(Type, Name) ->
emqx_resource:stop(resource_id(Type, Name)).
case emqx_bridge_v2:is_bridge_v2_type(Type) of
false ->
emqx_resource:stop(resource_id(Type, Name));
true ->
emqx_bridge_v2:bridge_v1_stop(Type, Name)
end.
start(Type, Name) ->
emqx_resource:start(resource_id(Type, Name)).
case emqx_bridge_v2:is_bridge_v2_type(Type) of
false ->
emqx_resource:start(resource_id(Type, Name));
true ->
emqx_bridge_v2:bridge_v1_start(Type, Name)
end.
create(BridgeId, Conf) ->
{BridgeType, BridgeName} = parse_bridge_id(BridgeId),
@ -257,7 +255,16 @@ recreate(Type, Name, Conf0, Opts) ->
parse_opts(Conf, Opts)
).
create_dry_run(Type, Conf0) ->
create_dry_run(Type0, Conf0) ->
Type = emqx_bridge_lib:upgrade_type(Type0),
case emqx_bridge_v2:is_bridge_v2_type(Type) of
false ->
create_dry_run_bridge_v1(Type, Conf0);
true ->
emqx_bridge_v2:bridge_v1_create_dry_run(Type, Conf0)
end.
create_dry_run_bridge_v1(Type, Conf0) ->
TmpName = iolist_to_binary([?TEST_ID_PREFIX, emqx_utils:gen_id(8)]),
TmpPath = emqx_utils:safe_filename(TmpName),
%% Already typechecked, no need to catch errors
@ -297,6 +304,7 @@ remove(Type, Name) ->
%% just for perform_bridge_changes/1
remove(Type, Name, _Conf, _Opts) ->
%% TODO we need to handle bridge_v2 here
?SLOG(info, #{msg => "remove_bridge", type => Type, name => Name}),
emqx_resource:remove_local(resource_id(Type, Name)).

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,807 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_bridge_v2_api).
-behaviour(minirest_api).
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("emqx_utils/include/emqx_utils_api.hrl").
-import(hoconsc, [mk/2, array/1, enum/1]).
-import(emqx_utils, [redact/1]).
%% Swagger specs from hocon schema
-export([
api_spec/0,
paths/0,
schema/1,
namespace/0
]).
%% API callbacks
-export([
'/actions'/2,
'/actions/:id'/2,
'/actions/:id/enable/:enable'/2,
'/actions/:id/:operation'/2,
'/nodes/:node/actions/:id/:operation'/2,
'/actions_probe'/2
]).
%% BpAPI
-export([lookup_from_local_node/2]).
-define(BRIDGE_NOT_FOUND(BRIDGE_TYPE, BRIDGE_NAME),
?NOT_FOUND(
<<"Bridge lookup failed: bridge named '", (bin(BRIDGE_NAME))/binary, "' of type ",
(bin(BRIDGE_TYPE))/binary, " does not exist.">>
)
).
-define(BRIDGE_NOT_ENABLED,
?BAD_REQUEST(<<"Forbidden operation, bridge not enabled">>)
).
-define(TRY_PARSE_ID(ID, EXPR),
try emqx_bridge_resource:parse_bridge_id(Id, #{atom_name => false}) of
{BridgeType, BridgeName} ->
EXPR
catch
throw:#{reason := Reason} ->
?NOT_FOUND(<<"Invalid bridge ID, ", Reason/binary>>)
end
).
%% Swagger namespace; groups these endpoints under "actions".
namespace() -> "actions".

%% minirest callback: build the OpenAPI spec from this module's schema/1,
%% with request-body validation enabled (check_schema => true).
api_spec() ->
    emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}).

%% minirest callback: HTTP paths served by this module.
paths() ->
    [
        "/actions",
        "/actions/:id",
        "/actions/:id/enable/:enable",
        "/actions/:id/:operation",
        "/nodes/:node/actions/:id/:operation",
        "/actions_probe"
    ].
%% Normalize (Code(s), Message) into a swagger error-response schema:
%% a single atom code is wrapped in a list and a charlist message is
%% converted to a binary before delegating to emqx_dashboard_swagger.
error_schema(Code, Message) when is_atom(Code) ->
    error_schema([Code], Message);
error_schema(Codes, Message) when is_list(Message) ->
    error_schema(Codes, list_to_binary(Message));
error_schema(Codes, Message) when is_list(Codes) andalso is_binary(Message) ->
    emqx_dashboard_swagger:error_codes(Codes, Message).
%% Response-body schema for GET endpoints, with per-type examples attached.
get_response_body_schema() ->
    emqx_dashboard_swagger:schema_with_examples(
        emqx_bridge_v2_schema:get_response(),
        bridge_info_examples(get)
    ).

%% Example payloads for the given HTTP method. The empty map is a
%% placeholder for community-edition examples; enterprise examples (when
%% available) are merged on top.
bridge_info_examples(Method) ->
    maps:merge(
        #{},
        emqx_enterprise_bridge_examples(Method)
    ).
%% Strip the example wrappers, returning only the config map of each
%% example for the given method.
bridge_info_array_example(Method) ->
    lists:map(
        fun(#{value := Example}) -> Example end,
        maps:values(bridge_info_examples(Method))
    ).
%% Enterprise builds provide per-bridge examples; community builds have none.
-if(?EMQX_RELEASE_EDITION == ee).
emqx_enterprise_bridge_examples(Method) ->
    emqx_bridge_v2_enterprise:examples(Method).
-else.
emqx_enterprise_bridge_examples(_Method) -> #{}.
-endif.
%% Swagger parameter definitions shared by the schema/1 clauses below.

%% Path parameter: the bridge id, "{type}:{name}".
param_path_id() ->
    {id,
        mk(
            binary(),
            #{
                in => path,
                required => true,
                example => <<"webhook:webhook_example">>,
                desc => ?DESC("desc_param_path_id")
            }
        )}.

%% Query parameter: whether DELETE should also remove dependent rule actions.
param_qs_delete_cascade() ->
    {also_delete_dep_actions,
        mk(
            boolean(),
            #{
                in => query,
                required => false,
                default => false,
                desc => ?DESC("desc_qs_also_delete_dep_actions")
            }
        )}.

%% Path parameter: cluster-wide operation (only 'start' is supported).
param_path_operation_cluster() ->
    {operation,
        mk(
            enum([start]),
            #{
                in => path,
                required => true,
                example => <<"start">>,
                desc => ?DESC("desc_param_path_operation_cluster")
            }
        )}.

%% Path parameter: per-node operation (only 'start' is supported).
param_path_operation_on_node() ->
    {operation,
        mk(
            enum([start]),
            #{
                in => path,
                required => true,
                example => <<"start">>,
                desc => ?DESC("desc_param_path_operation_on_node")
            }
        )}.

%% Path parameter: target node name.
param_path_node() ->
    {node,
        mk(
            binary(),
            #{
                in => path,
                required => true,
                example => <<"emqx@127.0.0.1">>,
                desc => ?DESC("desc_param_path_node")
            }
        )}.

%% Path parameter: desired enable state for the enable/disable endpoint.
param_path_enable() ->
    {enable,
        mk(
            boolean(),
            #{
                in => path,
                required => true,
                desc => ?DESC("desc_param_path_enable"),
                example => true
            }
        )}.
%% minirest schema callback: one OpenAPI operation definition per path
%% returned by paths/0.
schema("/actions") ->
    #{
        'operationId' => '/actions',
        get => #{
            tags => [<<"actions">>],
            summary => <<"List bridges">>,
            description => ?DESC("desc_api1"),
            responses => #{
                200 => emqx_dashboard_swagger:schema_with_example(
                    array(emqx_bridge_v2_schema:get_response()),
                    bridge_info_array_example(get)
                )
            }
        },
        post => #{
            tags => [<<"actions">>],
            summary => <<"Create bridge">>,
            description => ?DESC("desc_api2"),
            'requestBody' => emqx_dashboard_swagger:schema_with_examples(
                emqx_bridge_v2_schema:post_request(),
                bridge_info_examples(post)
            ),
            responses => #{
                201 => get_response_body_schema(),
                400 => error_schema('ALREADY_EXISTS', "Bridge already exists")
            }
        }
    };
schema("/actions/:id") ->
    #{
        'operationId' => '/actions/:id',
        get => #{
            tags => [<<"actions">>],
            summary => <<"Get bridge">>,
            description => ?DESC("desc_api3"),
            parameters => [param_path_id()],
            responses => #{
                200 => get_response_body_schema(),
                404 => error_schema('NOT_FOUND', "Bridge not found")
            }
        },
        put => #{
            tags => [<<"actions">>],
            summary => <<"Update bridge">>,
            description => ?DESC("desc_api4"),
            parameters => [param_path_id()],
            'requestBody' => emqx_dashboard_swagger:schema_with_examples(
                emqx_bridge_v2_schema:put_request(),
                bridge_info_examples(put)
            ),
            responses => #{
                200 => get_response_body_schema(),
                404 => error_schema('NOT_FOUND', "Bridge not found"),
                400 => error_schema('BAD_REQUEST', "Update bridge failed")
            }
        },
        delete => #{
            tags => [<<"actions">>],
            summary => <<"Delete bridge">>,
            description => ?DESC("desc_api5"),
            parameters => [param_path_id(), param_qs_delete_cascade()],
            responses => #{
                204 => <<"Bridge deleted">>,
                400 => error_schema(
                    'BAD_REQUEST',
                    "Cannot delete bridge while active rules are defined for this bridge"
                ),
                404 => error_schema('NOT_FOUND', "Bridge not found"),
                503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable")
            }
        }
    };
schema("/actions/:id/enable/:enable") ->
    #{
        'operationId' => '/actions/:id/enable/:enable',
        put =>
            #{
                tags => [<<"actions">>],
                summary => <<"Enable or disable bridge">>,
                %% NOTE(review): this clause uses 'desc' where the others use
                %% 'description' — confirm both keys are honored downstream.
                desc => ?DESC("desc_enable_bridge"),
                parameters => [param_path_id(), param_path_enable()],
                responses =>
                    #{
                        204 => <<"Success">>,
                        404 => error_schema(
                            'NOT_FOUND', "Bridge not found or invalid operation"
                        ),
                        503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable")
                    }
            }
    };
schema("/actions/:id/:operation") ->
    #{
        'operationId' => '/actions/:id/:operation',
        post => #{
            tags => [<<"actions">>],
            summary => <<"Manually start a bridge">>,
            description => ?DESC("desc_api7"),
            parameters => [
                param_path_id(),
                param_path_operation_cluster()
            ],
            responses => #{
                204 => <<"Operation success">>,
                400 => error_schema(
                    'BAD_REQUEST', "Problem with configuration of external service"
                ),
                404 => error_schema('NOT_FOUND', "Bridge not found or invalid operation"),
                501 => error_schema('NOT_IMPLEMENTED', "Not Implemented"),
                503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable")
            }
        }
    };
schema("/nodes/:node/actions/:id/:operation") ->
    #{
        'operationId' => '/nodes/:node/actions/:id/:operation',
        post => #{
            tags => [<<"actions">>],
            summary => <<"Manually start a bridge on a given node">>,
            description => ?DESC("desc_api8"),
            parameters => [
                param_path_node(),
                param_path_id(),
                param_path_operation_on_node()
            ],
            responses => #{
                204 => <<"Operation success">>,
                400 => error_schema(
                    'BAD_REQUEST',
                    "Problem with configuration of external service or bridge not enabled"
                ),
                404 => error_schema(
                    'NOT_FOUND', "Bridge or node not found or invalid operation"
                ),
                501 => error_schema('NOT_IMPLEMENTED', "Not Implemented"),
                503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable")
            }
        }
    };
schema("/actions_probe") ->
    #{
        'operationId' => '/actions_probe',
        post => #{
            tags => [<<"actions">>],
            desc => ?DESC("desc_api9"),
            summary => <<"Test creating bridge">>,
            'requestBody' => emqx_dashboard_swagger:schema_with_examples(
                emqx_bridge_v2_schema:post_request(),
                bridge_info_examples(post)
            ),
            responses => #{
                204 => <<"Test bridge OK">>,
                400 => error_schema(['TEST_FAILED'], "bridge test failed")
            }
        }
    }.
%% POST /actions: create the action unless one with the same type+name
%% already exists.
'/actions'(post, #{body := #{<<"type">> := BridgeType, <<"name">> := BridgeName} = Conf0}) ->
    case emqx_bridge_v2:lookup(BridgeType, BridgeName) of
        {ok, _} ->
            ?BAD_REQUEST('ALREADY_EXISTS', <<"bridge already exists">>);
        {error, not_found} ->
            Conf = filter_out_request_body(Conf0),
            create_bridge(BridgeType, BridgeName, Conf)
    end;
%% GET /actions: list actions from every running node and merge the
%% per-node views into one entry per bridge.
'/actions'(get, _Params) ->
    Nodes = mria:running_nodes(),
    NodeReplies = emqx_bridge_proto_v5:v2_list_bridges_on_nodes(Nodes),
    case is_ok(NodeReplies) of
        {ok, NodeBridges} ->
            AllBridges = [
                [format_resource(Data, Node) || Data <- Bridges]
             || {Node, Bridges} <- lists:zip(Nodes, NodeBridges)
            ],
            ?OK(zip_bridges(AllBridges));
        {error, Reason} ->
            ?INTERNAL_ERROR(Reason)
    end.
%% GET /actions/:id: cluster-wide lookup of one action.
'/actions/:id'(get, #{bindings := #{id := Id}}) ->
    ?TRY_PARSE_ID(Id, lookup_from_all_nodes(BridgeType, BridgeName, 200));
%% PUT /actions/:id: update an existing action; redacted secrets in the
%% request body are restored from the stored raw config first.
'/actions/:id'(put, #{bindings := #{id := Id}, body := Conf0}) ->
    Conf1 = filter_out_request_body(Conf0),
    ?TRY_PARSE_ID(
        Id,
        case emqx_bridge_v2:lookup(BridgeType, BridgeName) of
            {ok, _} ->
                %% NOTE(review): reads the raw config from the [bridges, ...]
                %% root although v2 actions are configured under [actions, ...]
                %% — confirm this is the intended source.
                RawConf = emqx:get_raw_config([bridges, BridgeType, BridgeName], #{}),
                Conf = deobfuscate(Conf1, RawConf),
                update_bridge(BridgeType, BridgeName, Conf);
            {error, not_found} ->
                ?BRIDGE_NOT_FOUND(BridgeType, BridgeName)
        end
    );
%% DELETE /actions/:id: remove the action; with ?also_delete_dep_actions=true
%% the rule actions depending on it are removed as well, otherwise a
%% dependency error is reported.
'/actions/:id'(delete, #{bindings := #{id := Id}, query_string := Qs}) ->
    ?TRY_PARSE_ID(
        Id,
        case emqx_bridge_v2:lookup(BridgeType, BridgeName) of
            {ok, _} ->
                AlsoDeleteActions =
                    case maps:get(<<"also_delete_dep_actions">>, Qs, <<"false">>) of
                        <<"true">> -> true;
                        true -> true;
                        _ -> false
                    end,
                case
                    emqx_bridge_v2:check_deps_and_remove(BridgeType, BridgeName, AlsoDeleteActions)
                of
                    ok ->
                        ?NO_CONTENT;
                    {error, #{
                        reason := rules_depending_on_this_bridge,
                        rule_ids := RuleIds
                    }} ->
                        RuleIdLists = [binary_to_list(iolist_to_binary(X)) || X <- RuleIds],
                        RulesStr = string:join(RuleIdLists, ", "),
                        Msg = io_lib:format(
                            "Cannot delete bridge while active rules are depending on it: ~s\n"
                            "Append ?also_delete_dep_actions=true to the request URL to delete "
                            "rule actions that depend on this bridge as well.",
                            [RulesStr]
                        ),
                        ?BAD_REQUEST(iolist_to_binary(Msg));
                    {error, timeout} ->
                        ?SERVICE_UNAVAILABLE(<<"request timeout">>);
                    {error, Reason} ->
                        ?INTERNAL_ERROR(Reason)
                end;
            {error, not_found} ->
                ?BRIDGE_NOT_FOUND(BridgeType, BridgeName)
        end
    ).
%% PUT /actions/:id/enable/:enable: flip the action's 'enable' flag.
'/actions/:id/enable/:enable'(put, #{bindings := #{id := Id, enable := Enable}}) ->
    ?TRY_PARSE_ID(
        Id,
        case emqx_bridge_v2:disable_enable(enable_func(Enable), BridgeType, BridgeName) of
            {ok, _} ->
                ?NO_CONTENT;
            {error, {pre_config_update, _, bridge_not_found}} ->
                ?BRIDGE_NOT_FOUND(BridgeType, BridgeName);
            {error, {_, _, timeout}} ->
                ?SERVICE_UNAVAILABLE(<<"request timeout">>);
            {error, timeout} ->
                ?SERVICE_UNAVAILABLE(<<"request timeout">>);
            {error, Reason} ->
                ?INTERNAL_ERROR(Reason)
        end
    ).

%% POST /actions/:id/:operation: run the operation on all running nodes,
%% provided the action is enabled.
'/actions/:id/:operation'(post, #{
    bindings :=
        #{id := Id, operation := Op}
}) ->
    ?TRY_PARSE_ID(
        Id,
        begin
            OperFunc = operation_func(all, Op),
            Nodes = mria:running_nodes(),
            call_operation_if_enabled(all, OperFunc, [Nodes, BridgeType, BridgeName])
        end
    ).

%% POST /nodes/:node/actions/:id/:operation: same, targeting one node.
%% The node name must already exist as an atom (no atom creation from input).
'/nodes/:node/actions/:id/:operation'(post, #{
    bindings :=
        #{id := Id, operation := Op, node := Node}
}) ->
    ?TRY_PARSE_ID(
        Id,
        case emqx_utils:safe_to_existing_atom(Node, utf8) of
            {ok, TargetNode} ->
                OperFunc = operation_func(TargetNode, Op),
                call_operation_if_enabled(TargetNode, OperFunc, [TargetNode, BridgeType, BridgeName]);
            {error, _} ->
                ?NOT_FOUND(<<"Invalid node name: ", Node/binary>>)
        end
    ).
%% POST /actions_probe: validate the request body against the schema and
%% dry-run the bridge config without persisting anything. 204 on success,
%% 400 with 'TEST_FAILED' otherwise.
'/actions_probe'(post, Request) ->
    RequestMeta = #{module => ?MODULE, method => post, path => "/actions_probe"},
    case emqx_dashboard_swagger:filter_check_request_and_translate_body(Request, RequestMeta) of
        %% Renamed ConnType -> BridgeType: the value is the bridge (action)
        %% type, matching the naming used by the v1 bridges API.
        {ok, #{body := #{<<"type">> := BridgeType} = Params}} ->
            %% Restore redacted secrets from the stored config (if any)
            %% before running the probe.
            Params1 = maybe_deobfuscate_bridge_probe(Params),
            Params2 = maps:remove(<<"type">>, Params1),
            case emqx_bridge_v2:create_dry_run(BridgeType, Params2) of
                ok ->
                    ?NO_CONTENT;
                {error, #{kind := validation_error} = Reason0} ->
                    Reason = redact(Reason0),
                    ?BAD_REQUEST('TEST_FAILED', map_to_json(Reason));
                %% Guard filters out 'exit' tuples; anything else is reported.
                {error, Reason0} when not is_tuple(Reason0); element(1, Reason0) =/= 'exit' ->
                    Reason1 =
                        case Reason0 of
                            {unhealthy_target, Message} -> Message;
                            _ -> Reason0
                        end,
                    Reason = redact(Reason1),
                    ?BAD_REQUEST('TEST_FAILED', Reason)
            end;
        BadRequest ->
            redact(BadRequest)
    end.
%% If the probe refers to an existing bridge, restore redacted secret
%% fields in Params from the stored raw config so the dry run uses real
%% credentials.
%% NOTE(review): looks up via emqx_bridge (v1) although this module serves
%% v2 actions — confirm that is the intended source for the raw config.
maybe_deobfuscate_bridge_probe(#{<<"type">> := BridgeType, <<"name">> := BridgeName} = Params) ->
    case emqx_bridge:lookup(BridgeType, BridgeName) of
        {ok, #{raw_config := RawConf}} ->
            %% TODO check if RawConf obtained above is compatible with the commented out code below
            %% RawConf = emqx:get_raw_config([bridges, BridgeType, BridgeName], #{}),
            deobfuscate(Params, RawConf);
        _ ->
            %% A bridge may be probed before it's created, so not finding it here is fine
            Params
    end;
maybe_deobfuscate_bridge_probe(Params) ->
    Params.
%%% API helpers
%% Collapse a (possibly list of) result(s) into a single outcome:
%% a bare ok / {ok, _} / {error, _} passes through; for a list, return
%% {ok, Values} when every element succeeded, otherwise the first failure.
is_ok(ok) ->
    ok;
is_ok({ok, _} = Ok) ->
    Ok;
is_ok({error, _} = Error) ->
    Error;
is_ok(Results) ->
    IsFailure = fun
        (ok) -> false;
        ({ok, _}) -> false;
        (_) -> true
    end,
    case lists:filter(IsFailure, Results) of
        [] -> {ok, [Value || {ok, Value} <- Results]};
        [FirstFailure | _] -> FirstFailure
    end.
%% Merge redacted values back into NewConf from OldConf: keys absent from
%% OldConf keep the new value; nested maps recurse; a redacted leaf is
%% replaced by the previously stored value.
deobfuscate(NewConf, OldConf) ->
    maps:map(
        fun(Key, NewVal) ->
            case maps:find(Key, OldConf) of
                error ->
                    NewVal;
                {ok, OldVal} when is_map(NewVal), is_map(OldVal) ->
                    deobfuscate(NewVal, OldVal);
                {ok, OldVal} ->
                    case emqx_utils:is_redacted(Key, NewVal) of
                        true -> OldVal;
                        _ -> NewVal
                    end
            end
        end,
        NewConf
    ).
%% bridge helpers
%% Query every running node for the bridge and merge the per-node results;
%% SuccCode is the HTTP status code used on success.
lookup_from_all_nodes(BridgeType, BridgeName, SuccCode) ->
    Nodes = mria:running_nodes(),
    case is_ok(emqx_bridge_proto_v5:v2_lookup_from_all_nodes(Nodes, BridgeType, BridgeName)) of
        {ok, [{ok, _} | _] = Results} ->
            {SuccCode, format_bridge_info([R || {ok, R} <- Results])};
        {ok, [{error, not_found} | _]} ->
            ?BRIDGE_NOT_FOUND(BridgeType, BridgeName);
        {error, Reason} ->
            ?INTERNAL_ERROR(Reason)
    end.

%% Map an HTTP operation to its bpapi function name; only 'start' is
%% supported (cluster-wide or single-node variant).
operation_func(all, start) -> v2_start_bridge_to_all_nodes;
operation_func(_Node, start) -> v2_start_bridge_to_node.
%% Run the operation only when the bridge is enabled; a missing bridge
%% (thrown as 'not_found' by the predicate) maps to a 404 response.
call_operation_if_enabled(NodeOrAll, OperFunc, [Nodes, BridgeType, BridgeName] = Args) ->
    try is_enabled_bridge(BridgeType, BridgeName) of
        true ->
            call_operation(NodeOrAll, OperFunc, Args);
        false ->
            ?BRIDGE_NOT_ENABLED
    catch
        throw:not_found ->
            ?BRIDGE_NOT_FOUND(BridgeType, BridgeName)
    end.
%% Predicate: the <<"enable">> flag from the bridge's raw config (default
%% false when absent). Throws 'not_found' when the bridge does not exist,
%% including when the name atom was never created (it then cannot exist
%% in config PT storage either).
is_enabled_bridge(BridgeType, BridgeName) ->
    try emqx_bridge_v2:lookup(BridgeType, binary_to_existing_atom(BridgeName)) of
        {ok, #{raw_config := ConfMap}} ->
            maps:get(<<"enable">>, ConfMap, false);
        {error, not_found} ->
            throw(not_found)
    catch
        error:badarg ->
            %% catch non-existing atom,
            %% a non-existing atom means it is not available in config PT storage.
            throw(not_found)
    end.

%% Dispatch the bpapi call and translate its outcome to an HTTP response.
call_operation(NodeOrAll, OperFunc, Args = [_Nodes, BridgeType, BridgeName]) ->
    case is_ok(do_bpapi_call(NodeOrAll, OperFunc, Args)) of
        Ok when Ok =:= ok; is_tuple(Ok), element(1, Ok) =:= ok ->
            ?NO_CONTENT;
        {error, not_implemented} ->
            ?NOT_IMPLEMENTED;
        {error, timeout} ->
            ?BAD_REQUEST(<<"Request timeout">>);
        {error, {start_pool_failed, Name, Reason}} ->
            Msg = bin(
                io_lib:format("Failed to start ~p pool for reason ~p", [Name, redact(Reason)])
            ),
            ?BAD_REQUEST(Msg);
        {error, not_found} ->
            %% The bridge exists locally but not on the target node: report
            %% the inconsistency and answer 503 rather than 404.
            BridgeId = emqx_bridge_resource:bridge_id(BridgeType, BridgeName),
            ?SLOG(warning, #{
                msg => "bridge_inconsistent_in_cluster_for_call_operation",
                reason => not_found,
                type => BridgeType,
                name => BridgeName,
                bridge => BridgeId
            }),
            ?SERVICE_UNAVAILABLE(<<"Bridge not found on remote node: ", BridgeId/binary>>);
        {error, {node_not_found, Node}} ->
            ?NOT_FOUND(<<"Node not found: ", (atom_to_binary(Node))/binary>>);
        {error, Reason} ->
            ?BAD_REQUEST(redact(Reason))
    end.
%% Dispatch a bpapi call either to the whole cluster (`all') or to a
%% single node.  Single-node calls are rejected early when the node is
%% not in the running cluster; cluster-wide results are unwrapped from
%% the erpc multicall envelope.
do_bpapi_call(all, Call, Args) ->
    ClusterVsn = emqx_bpapi:supported_version(emqx_bridge),
    maybe_unwrap(do_bpapi_call_vsn(ClusterVsn, Call, Args));
do_bpapi_call(Node, Call, Args) ->
    case lists:member(Node, mria:running_nodes()) of
        false ->
            {error, {node_not_found, Node}};
        true ->
            NodeVsn = emqx_bpapi:supported_version(Node, emqx_bridge),
            do_bpapi_call_vsn(NodeVsn, Call, Args)
    end.
%% Invoke the protocol call only when the negotiated bpapi version
%% supports it; otherwise report not_implemented.
do_bpapi_call_vsn(Version, Call, Args) ->
    case is_supported_version(Version, Call) of
        false ->
            {error, not_implemented};
        true ->
            apply(emqx_bridge_proto_v5, Call, Args)
    end.
%% A call is supported when the negotiated bpapi version is among the
%% versions this module can speak for that call.
is_supported_version(Version, Call) ->
    Supported = supported_versions(Call),
    lists:member(Version, Supported).

%% Every bpapi call used by this API was introduced in protocol version 5.
supported_versions(_Call) -> [5].
%% Pass a not_implemented error through untouched; anything else is an
%% erpc multicall result that gets unwrapped into plain values.
maybe_unwrap({error, not_implemented} = NotImplemented) ->
    NotImplemented;
maybe_unwrap(ErpcResult) ->
    emqx_rpc:unwrap_erpc(ErpcResult).
%% Using the first node's bridge list as the reference, gather each
%% bridge's per-node entries and format them.  The final reverse keeps
%% the same (reversed) ordering the previous fold-based version produced.
zip_bridges([BridgesFirstNode | _] = BridgesAllNodes) ->
    Formatted = lists:map(
        fun(#{type := Type, name := Name}) ->
            format_bridge_info(pick_bridges_by_id(Type, Name, BridgesAllNodes))
        end,
        BridgesFirstNode
    ),
    lists:reverse(Formatted).
%% Collect the entry for bridge {Type, Name} from each node's bridge list.
%% Nodes that do not know the bridge are skipped, with a warning logged
%% about the cluster inconsistency.
pick_bridges_by_id(Type, Name, BridgesAllNodes) ->
    lists:foldl(
        fun(BridgesOneNode, Acc) ->
            case
                [
                    Bridge
                 || Bridge = #{type := Type0, name := Name0} <- BridgesOneNode,
                    Type0 == Type,
                    Name0 == Name
                ]
            of
                [BridgeInfo] ->
                    [BridgeInfo | Acc];
                [] ->
                    %% The node did not report this bridge at all.
                    ?SLOG(warning, #{
                        msg => "bridge_inconsistent_in_cluster",
                        reason => not_found,
                        type => Type,
                        name => Name,
                        bridge => emqx_bridge_resource:bridge_id(Type, Name)
                    }),
                    Acc
            end
        end,
        [],
        BridgesAllNodes
    ).
%% Merge per-node bridge entries into a single API object: the first
%% node's entry (minus its node field) plus the aggregated status and the
%% per-node status list, with secrets redacted.
format_bridge_info([FirstBridge | _] = Bridges) ->
    PerNode = node_status(Bridges),
    Base = maps:remove(node, FirstBridge),
    redact(Base#{
        node_status => PerNode,
        status => aggregate_status(PerNode)
    }).
%% Project each bridge entry down to its node identity and health fields.
node_status(Bridges) ->
    lists:map(
        fun(Bridge) -> maps:with([node, status, status_reason], Bridge) end,
        Bridges
    ).
%% Collapse per-node statuses into one value: if every node reports the
%% same status, that status wins; otherwise the bridge is `inconsistent'.
%% A node entry without a status is treated as `connecting'.
aggregate_status([First | _] = AllStatus) ->
    Reference = maps:get(status, First, connecting),
    AllSame = lists:all(fun(#{status := S}) -> S == Reference end, AllStatus),
    case AllSame of
        true -> Reference;
        false -> inconsistent
    end.
%% Look the bridge up on this node only and format it for the API;
%% lookup errors are passed through unchanged.
lookup_from_local_node(BridgeType, BridgeName) ->
    case emqx_bridge_v2:lookup(BridgeType, BridgeName) of
        {ok, Bridge} -> {ok, format_resource(Bridge, node())};
        Other -> Other
    end.
%% resource
%% Turn a bridge lookup result into the API representation: the raw
%% config annotated with type/name/node, overlaid with the formatted
%% runtime resource data, and finally redacted.  The display name prefers
%% the <<"name">> stored in the raw config over the lookup name.
format_resource(
    #{
        type := Type,
        name := Name,
        raw_config := RawConf,
        resource_data := ResourceData
    },
    Node
) ->
    DisplayName = maps:get(<<"name">>, RawConf, Name),
    Base = RawConf#{type => Type, name => DisplayName, node => Node},
    redact(maps:merge(Base, format_resource_data(ResourceData))).
%% Keep only the runtime fields the API exposes (status, error), and
%% rename a non-empty `error' to `status_reason' in human-readable form.
format_resource_data(ResData) ->
    Relevant = maps:with([status, error], ResData),
    maps:fold(fun format_resource_data/3, #{}, Relevant).

format_resource_data(error, undefined, Acc) ->
    %% No error: the key is simply omitted from the response.
    Acc;
format_resource_data(error, Error, Acc) ->
    Acc#{status_reason => emqx_utils:readable_error_msg(Error)};
format_resource_data(Key, Value, Acc) ->
    Acc#{Key => Value}.
%% Create a bridge; replies with HTTP 201 on success.
create_bridge(BridgeType, BridgeName, Conf) ->
    create_or_update_bridge(BridgeType, BridgeName, Conf, 201).
%% Update an existing bridge; replies with HTTP 200 on success.
update_bridge(BridgeType, BridgeName, Conf) ->
    create_or_update_bridge(BridgeType, BridgeName, Conf, 200).
%% Validate the type and name taken from the request path before touching
%% the config.  Validation failures are thrown by the emqx_resource
%% validators and turned into a 400 response; only when both checks pass
%% do we proceed with the actual create/update.
create_or_update_bridge(BridgeType, BridgeName, Conf, HttpStatusCode) ->
    try
        is_binary(BridgeType) andalso emqx_resource:validate_type(BridgeType),
        ok = emqx_resource:validate_name(BridgeName)
    of
        _ ->
            do_create_or_update_bridge(BridgeType, BridgeName, Conf, HttpStatusCode)
    catch
        throw:Error ->
            ?BAD_REQUEST(map_to_json(Error))
    end.
%% Apply the config change and reply with the cluster-wide view of the
%% resulting bridge.  Config-update hook failures expose only the inner
%% reason; all error reasons are redacted before serialization.
do_create_or_update_bridge(BridgeType, BridgeName, Conf, HttpStatusCode) ->
    case emqx_bridge_v2:create(BridgeType, BridgeName, Conf) of
        {ok, _} ->
            lookup_from_all_nodes(BridgeType, BridgeName, HttpStatusCode);
        {error, {Phase, _HandlerMod, Reason}} when
            Phase =:= pre_config_update orelse Phase =:= post_config_update
        ->
            ?BAD_REQUEST(map_to_json(redact(Reason)));
        {error, Reason} ->
            ?BAD_REQUEST(map_to_json(redact(Reason)))
    end.
%% Translate the boolean path segment of the enable endpoint into the
%% corresponding bridge operation atom.
enable_func(Enable) ->
    case Enable of
        true -> enable;
        false -> disable
    end.
%% Strip read-only / server-assigned fields from a request body so they
%% never end up in the persisted bridge config.
filter_out_request_body(Conf) ->
    ReadOnlyKeys = [
        <<"id">>,
        <<"type">>,
        <<"name">>,
        <<"status">>,
        <<"status_reason">>,
        <<"node_status">>,
        <<"node">>
    ],
    maps:without(ReadOnlyKeys, Conf).
%% general helpers
%% Coerce a string, atom or binary into a UTF-8 binary.
bin(Bin) when is_binary(Bin) -> Bin;
bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8);
bin(List) when is_list(List) -> list_to_binary(List).
%% Serialize a (possibly deeply nested) map to JSON.  Hocon validation
%% errors may carry non-serializable terms under `value' (e.g.
%% user_lookup_fun), so if encoding fails we retry without that key as a
%% best effort.
map_to_json(Map) ->
    Jsonable = emqx_utils_maps:jsonable_map(
        Map,
        fun(K, V) -> {K, emqx_utils_maps:binary_string(V)} end
    ),
    try
        emqx_utils_json:encode(Jsonable)
    catch
        error:_ ->
            emqx_utils_json:encode(maps:without([value, <<"value">>], Jsonable))
    end.

View File

@ -0,0 +1,179 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_bridge_proto_v5).
-behaviour(emqx_bpapi).
-export([
introduced_in/0,
list_bridges_on_nodes/1,
restart_bridge_to_node/3,
start_bridge_to_node/3,
stop_bridge_to_node/3,
lookup_from_all_nodes/3,
get_metrics_from_all_nodes/3,
restart_bridges_to_all_nodes/3,
start_bridges_to_all_nodes/3,
stop_bridges_to_all_nodes/3,
v2_start_bridge_to_node/3,
v2_start_bridge_to_all_nodes/3,
v2_list_bridges_on_nodes/1,
v2_lookup_from_all_nodes/3
]).
-include_lib("emqx/include/bpapi.hrl").
-define(TIMEOUT, 15000).
%% EMQX release in which this bpapi protocol version first appeared.
introduced_in() ->
    "5.3.1".
%% List v1 bridges on every given node via erpc multicall.
-spec list_bridges_on_nodes([node()]) ->
    emqx_rpc:erpc_multicall([emqx_resource:resource_data()]).
list_bridges_on_nodes(Nodes) ->
    erpc:multicall(Nodes, emqx_bridge, list, [], ?TIMEOUT).
%% Bridge type/name as accepted over the wire.
-type key() :: atom() | binary() | [byte()].
%% Restart a single v1 bridge on one node (classic rpc:call).
-spec restart_bridge_to_node(node(), key(), key()) ->
    term().
restart_bridge_to_node(Node, BridgeType, BridgeName) ->
    rpc:call(
        Node,
        emqx_bridge_resource,
        restart,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).
%% Start a single v1 bridge on one node.
-spec start_bridge_to_node(node(), key(), key()) ->
    term().
start_bridge_to_node(Node, BridgeType, BridgeName) ->
    rpc:call(
        Node,
        emqx_bridge_resource,
        start,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).
%% Stop a single v1 bridge on one node.
-spec stop_bridge_to_node(node(), key(), key()) ->
    term().
stop_bridge_to_node(Node, BridgeType, BridgeName) ->
    rpc:call(
        Node,
        emqx_bridge_resource,
        stop,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).
%% Restart a v1 bridge on every given node (erpc multicall).
-spec restart_bridges_to_all_nodes([node()], key(), key()) ->
    emqx_rpc:erpc_multicall().
restart_bridges_to_all_nodes(Nodes, BridgeType, BridgeName) ->
    erpc:multicall(
        Nodes,
        emqx_bridge_resource,
        restart,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).
%% Start a v1 bridge on every given node.
-spec start_bridges_to_all_nodes([node()], key(), key()) ->
    emqx_rpc:erpc_multicall().
start_bridges_to_all_nodes(Nodes, BridgeType, BridgeName) ->
    erpc:multicall(
        Nodes,
        emqx_bridge_resource,
        start,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).
%% Stop a v1 bridge on every given node.
-spec stop_bridges_to_all_nodes([node()], key(), key()) ->
    emqx_rpc:erpc_multicall().
stop_bridges_to_all_nodes(Nodes, BridgeType, BridgeName) ->
    erpc:multicall(
        Nodes,
        emqx_bridge_resource,
        stop,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).
%% Fetch each node's local view of a v1 bridge.
-spec lookup_from_all_nodes([node()], key(), key()) ->
    emqx_rpc:erpc_multicall().
lookup_from_all_nodes(Nodes, BridgeType, BridgeName) ->
    erpc:multicall(
        Nodes,
        emqx_bridge_api,
        lookup_from_local_node,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).
%% Fetch each node's local metrics for a v1 bridge.
-spec get_metrics_from_all_nodes([node()], key(), key()) ->
    emqx_rpc:erpc_multicall(emqx_metrics_worker:metrics()).
get_metrics_from_all_nodes(Nodes, BridgeType, BridgeName) ->
    erpc:multicall(
        Nodes,
        emqx_bridge_api,
        get_metrics_from_local_node,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).
%% V2 Calls
%% List v2 bridges (actions) on every given node.
-spec v2_list_bridges_on_nodes([node()]) ->
    emqx_rpc:erpc_multicall([emqx_resource:resource_data()]).
v2_list_bridges_on_nodes(Nodes) ->
    erpc:multicall(Nodes, emqx_bridge_v2, list, [], ?TIMEOUT).
%% Fetch each node's local view of a v2 bridge.
-spec v2_lookup_from_all_nodes([node()], key(), key()) ->
    emqx_rpc:erpc_multicall().
v2_lookup_from_all_nodes(Nodes, BridgeType, BridgeName) ->
    erpc:multicall(
        Nodes,
        emqx_bridge_v2_api,
        lookup_from_local_node,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).
%% Start a v2 bridge on every given node.
-spec v2_start_bridge_to_all_nodes([node()], key(), key()) ->
    emqx_rpc:erpc_multicall().
v2_start_bridge_to_all_nodes(Nodes, BridgeType, BridgeName) ->
    erpc:multicall(
        Nodes,
        emqx_bridge_v2,
        start,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).
%% Start a v2 bridge on a single node (classic rpc:call).
-spec v2_start_bridge_to_node(node(), key(), key()) ->
    term().
v2_start_bridge_to_node(Node, BridgeType, BridgeName) ->
    rpc:call(
        Node,
        emqx_bridge_v2,
        start,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).

View File

@ -23,8 +23,6 @@ api_schemas(Method) ->
api_ref(emqx_bridge_gcp_pubsub, <<"gcp_pubsub">>, Method ++ "_producer"),
api_ref(emqx_bridge_gcp_pubsub, <<"gcp_pubsub_consumer">>, Method ++ "_consumer"),
api_ref(emqx_bridge_kafka, <<"kafka_consumer">>, Method ++ "_consumer"),
%% TODO: rename this to `kafka_producer' after alias support is added
%% to hocon; keeping this as just `kafka' for backwards compatibility.
api_ref(emqx_bridge_kafka, <<"kafka">>, Method ++ "_producer"),
api_ref(emqx_bridge_cassandra, <<"cassandra">>, Method),
api_ref(emqx_bridge_mysql, <<"mysql">>, Method),
@ -95,11 +93,10 @@ examples(Method) ->
end,
lists:foldl(Fun, #{}, schema_modules()).
%% TODO: existing atom
resource_type(Type) when is_binary(Type) -> resource_type(binary_to_atom(Type, utf8));
resource_type(kafka_consumer) -> emqx_bridge_kafka_impl_consumer;
%% TODO: rename this to `kafka_producer' after alias support is added
%% to hocon; keeping this as just `kafka' for backwards compatibility.
resource_type(kafka) -> emqx_bridge_kafka_impl_producer;
resource_type(kafka_producer) -> emqx_bridge_kafka_impl_producer;
resource_type(cassandra) -> emqx_bridge_cassandra_connector;
resource_type(hstreamdb) -> emqx_bridge_hstreamdb_connector;
resource_type(gcp_pubsub) -> emqx_bridge_gcp_pubsub_impl_producer;
@ -235,13 +232,11 @@ mongodb_structs() ->
kafka_structs() ->
[
%% TODO: rename this to `kafka_producer' after alias support
%% is added to hocon; keeping this as just `kafka' for
%% backwards compatibility.
{kafka,
{kafka_producer,
mk(
hoconsc:map(name, ref(emqx_bridge_kafka, kafka_producer)),
#{
aliases => [kafka],
desc => <<"Kafka Producer Bridge Config">>,
required => false,
converter => fun kafka_producer_converter/2

View File

@ -0,0 +1,68 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_v2_enterprise).
-if(?EMQX_RELEASE_EDITION == ee).
-import(hoconsc, [mk/2, enum/1, ref/2]).
-export([
api_schemas/1,
examples/1,
fields/1
]).
%% Gather the bridge-v2 examples contributed by every enterprise schema
%% module for the given HTTP method, merged into a single map (later
%% modules win on key collisions, matching maps:merge/2 semantics).
examples(Method) ->
    lists:foldl(
        fun(Module, Acc0) ->
            lists:foldl(
                fun(Example, Acc1) -> maps:merge(Acc1, Example) end,
                Acc0,
                Module:bridge_v2_examples(Method)
            )
        end,
        #{},
        schema_modules()
    ).

%% Enterprise modules that provide bridge-v2 schemas and examples.
schema_modules() ->
    [
        emqx_bridge_kafka,
        emqx_bridge_azure_event_hub
    ].
%% Hocon fields for the `actions' config root.
fields(actions) ->
    action_structs().
%% One entry per enterprise action type: a named map of per-action
%% configs referencing the owning bridge module's schema.
action_structs() ->
    [
        {kafka_producer,
            mk(
                hoconsc:map(name, ref(emqx_bridge_kafka, kafka_producer_action)),
                #{
                    desc => <<"Kafka Producer Actions Config">>,
                    required => false
                }
            )},
        {azure_event_hub_producer,
            mk(
                hoconsc:map(name, ref(emqx_bridge_azure_event_hub, actions)),
                #{
                    desc => <<"Azure Event Hub Actions Config">>,
                    required => false
                }
            )}
    ].
%% API schema references for each enterprise action type, parameterized
%% by the HTTP method ("get" | "put" | "post").
api_schemas(Method) ->
    [
        api_ref(emqx_bridge_kafka, <<"kafka_producer">>, Method ++ "_bridge_v2"),
        api_ref(emqx_bridge_azure_event_hub, <<"azure_event_hub_producer">>, Method ++ "_bridge_v2")
    ].
%% Pair a type name with a hocon reference into the given module.
api_ref(Module, Type, Method) ->
    {Type, ref(Module, Method)}.
-else.
-endif.

View File

@ -0,0 +1,171 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_bridge_v2_schema).
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("eunit/include/eunit.hrl").
-import(hoconsc, [mk/2, ref/2]).
-export([roots/0, fields/1, desc/1, namespace/0, tags/0]).
-export([
get_response/0,
put_request/0,
post_request/0
]).
-export([enterprise_api_schemas/1]).
-if(?EMQX_RELEASE_EDITION == ee).
%% Delegate to the enterprise schema module when it exists; returns []
%% otherwise so CE-style builds still work.
enterprise_api_schemas(Method) ->
    %% We *must* do this to ensure the module is really loaded, especially when we use
    %% `call_hocon' from `nodetool' to generate initial configurations.
    _ = emqx_bridge_v2_enterprise:module_info(),
    case erlang:function_exported(emqx_bridge_v2_enterprise, api_schemas, 1) of
        true -> emqx_bridge_v2_enterprise:api_schemas(Method);
        false -> []
    end.
%% Same pattern for the `actions' field definitions.
enterprise_fields_actions() ->
    %% We *must* do this to ensure the module is really loaded, especially when we use
    %% `call_hocon' from `nodetool' to generate initial configurations.
    _ = emqx_bridge_v2_enterprise:module_info(),
    case erlang:function_exported(emqx_bridge_v2_enterprise, fields, 1) of
        true ->
            emqx_bridge_v2_enterprise:fields(actions);
        false ->
            []
    end.
-else.
%% Community edition: no enterprise action types.
enterprise_api_schemas(_Method) -> [].
enterprise_fields_actions() -> [].
-endif.
%%======================================================================================
%% For HTTP APIs
%% Schema for GET responses.
get_response() ->
    api_schema("get").
%% Schema for PUT request bodies.
put_request() ->
    api_schema("put").
%% Schema for POST request bodies.
post_request() ->
    api_schema("post").
%% Union over all known bridge types for the given method.
api_schema(Method) ->
    EE = ?MODULE:enterprise_api_schemas(Method),
    hoconsc:union(bridge_api_union(EE)).
%% Build a hocon union selector from {TypeName, SchemaRef} pairs.
%% The returned fun narrows the union to the single ref matching the
%% value's <<"type">> field, throws a readable error for unknown types,
%% and falls back to all members when no type field is present (or when
%% hocon asks for all members).
bridge_api_union(Refs) ->
    Index = maps:from_list(Refs),
    fun
        (all_union_members) ->
            maps:values(Index);
        ({value, V}) ->
            case V of
                #{<<"type">> := T} ->
                    case maps:get(T, Index, undefined) of
                        undefined ->
                            throw(#{
                                field_name => type,
                                value => T,
                                reason => <<"unknown bridge type">>
                            });
                        Ref ->
                            [Ref]
                    end;
                _ ->
                    maps:values(Index)
            end
    end.
%%======================================================================================
%% HOCON Schema Callbacks
%%======================================================================================
%% Hocon schema namespace for the v2 bridge (actions) config.
namespace() -> "actions".
%% OpenAPI tags for endpoints using this schema.
tags() ->
    [<<"Actions">>].
-dialyzer({nowarn_function, roots/0}).
%% Config root for `actions'.  When no action types are compiled in
%% (community edition), fall back to an untyped map so existing configs
%% still load.
roots() ->
    case fields(actions) of
        [] ->
            [
                {actions,
                    ?HOCON(hoconsc:map(name, typerefl:map()), #{importance => ?IMPORTANCE_LOW})}
            ];
        _ ->
            [{actions, ?HOCON(?R_REF(actions), #{importance => ?IMPORTANCE_LOW})}]
    end.
%% Field definitions for the `actions' root.  All action types come from
%% the enterprise schema module (empty on CE builds).  The previous
%% `[] ++ ...' prefix was a no-op and has been dropped.
fields(actions) ->
    enterprise_fields_actions().
%% Documentation text for the `actions' root; no other structs are
%% documented by this module.
desc(actions) ->
    ?DESC("desc_bridges_v2");
desc(_) ->
    undefined.
-ifdef(TEST).
-include_lib("hocon/include/hocon_types.hrl").
%% EUnit: every registered action schema must declare the common field
%% set (see common_field_names/0); throws the list of offenders so the
%% failure output names each incomplete schema.
schema_homogeneous_test() ->
    case
        lists:filtermap(
            fun({_Name, Schema}) ->
                is_bad_schema(Schema)
            end,
            fields(actions)
        )
    of
        [] ->
            ok;
        List ->
            throw(List)
    end.
%% Check one action schema (a map-of-refs hocon type) against the common
%% field names every action is expected to declare.  Returns `false' when
%% complete, or `{true, Diagnostics}' (shaped for lists:filtermap/2)
%% naming the schema module and the missing fields.
%% Fixes the misspelled `schema_modle' diagnostic key and the
%% `MissingFileds' variable name from the original.
is_bad_schema(#{type := ?MAP(_, ?R_REF(Module, TypeName))}) ->
    Fields = Module:fields(TypeName),
    ExpectedFieldNames = common_field_names(),
    MissingFields = lists:filter(
        fun(Name) -> lists:keyfind(Name, 1, Fields) =:= false end, ExpectedFieldNames
    ),
    case MissingFields of
        [] ->
            false;
        _ ->
            {true, #{
                schema_module => Module,
                type_name => TypeName,
                missing_fields => MissingFields
            }}
    end.
%% Field names every action schema is expected to declare.
common_field_names() ->
    [
        enable, description, local_topic, connector, resource_opts, parameters
    ].
-endif.

View File

@ -55,7 +55,7 @@ init_per_testcase(_TestCase, Config) ->
end_per_testcase(t_get_basic_usage_info_1, _Config) ->
lists:foreach(
fun({BridgeType, BridgeName}) ->
{ok, _} = emqx_bridge:remove(BridgeType, BridgeName)
ok = emqx_bridge:remove(BridgeType, BridgeName)
end,
[
{webhook, <<"basic_usage_info_webhook">>},

View File

@ -187,7 +187,7 @@ end_per_testcase(_, Config) ->
clear_resources() ->
lists:foreach(
fun(#{type := Type, name := Name}) ->
{ok, _} = emqx_bridge:remove(Type, Name)
ok = emqx_bridge:remove(Type, Name)
end,
emqx_bridge:list()
).

View File

@ -249,32 +249,42 @@ create_rule_and_action_http(BridgeType, RuleTopic, Config, Opts) ->
Error
end.
%% Build the query tag + payload for a test message: v2 bridges are
%% addressed by their bridge id, v1 bridges by the `send_message' tag.
make_message(Config, MakeMessageFun) ->
    BridgeType = ?config(bridge_type, Config),
    case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of
        true ->
            BridgeId = emqx_bridge_v2_testlib:bridge_id(Config),
            {BridgeId, MakeMessageFun()};
        false ->
            {send_message, MakeMessageFun()}
    end.
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
t_sync_query(Config, MakeMessageFun, IsSuccessCheck, TracePoint) ->
ResourceId = resource_id(Config),
?check_trace(
begin
?assertMatch({ok, _}, create_bridge_api(Config)),
ResourceId = resource_id(Config),
?retry(
_Sleep = 1_000,
_Attempts = 20,
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
),
Message = {send_message, MakeMessageFun()},
Message = make_message(Config, MakeMessageFun),
IsSuccessCheck(emqx_resource:simple_sync_query(ResourceId, Message)),
ok
end,
fun(Trace) ->
ResourceId = resource_id(Config),
?assertMatch([#{instance_id := ResourceId}], ?of_kind(TracePoint, Trace))
end
),
ok.
t_async_query(Config, MakeMessageFun, IsSuccessCheck, TracePoint) ->
ResourceId = resource_id(Config),
ReplyFun =
fun(Pid, Result) ->
Pid ! {result, Result}
@ -282,12 +292,13 @@ t_async_query(Config, MakeMessageFun, IsSuccessCheck, TracePoint) ->
?check_trace(
begin
?assertMatch({ok, _}, create_bridge_api(Config)),
ResourceId = resource_id(Config),
?retry(
_Sleep = 1_000,
_Attempts = 20,
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
),
Message = {send_message, MakeMessageFun()},
Message = make_message(Config, MakeMessageFun),
?assertMatch(
{ok, {ok, _}},
?wait_async_action(
@ -301,6 +312,7 @@ t_async_query(Config, MakeMessageFun, IsSuccessCheck, TracePoint) ->
ok
end,
fun(Trace) ->
ResourceId = resource_id(Config),
?assertMatch([#{instance_id := ResourceId}], ?of_kind(TracePoint, Trace))
end
),
@ -342,7 +354,6 @@ t_start_stop(Config, StopTracePoint) ->
t_start_stop(BridgeType, BridgeName, BridgeConfig, StopTracePoint).
t_start_stop(BridgeType, BridgeName, BridgeConfig, StopTracePoint) ->
ResourceId = emqx_bridge_resource:resource_id(BridgeType, BridgeName),
?check_trace(
begin
%% Check that the bridge probe API doesn't leak atoms.
@ -365,6 +376,7 @@ t_start_stop(BridgeType, BridgeName, BridgeConfig, StopTracePoint) ->
?assertEqual(AtomsBefore, AtomsAfter),
?assertMatch({ok, _}, emqx_bridge:create(BridgeType, BridgeName, BridgeConfig)),
ResourceId = emqx_bridge_resource:resource_id(BridgeType, BridgeName),
%% Since the connection process is async, we give it some time to
%% stabilize and avoid flakiness.
@ -428,6 +440,7 @@ t_start_stop(BridgeType, BridgeName, BridgeConfig, StopTracePoint) ->
ok
end,
fun(Trace) ->
ResourceId = emqx_bridge_resource:resource_id(BridgeType, BridgeName),
%% one for each probe, two for real
?assertMatch(
[_, _, #{instance_id := ResourceId}, #{instance_id := ResourceId}],
@ -445,9 +458,9 @@ t_on_get_status(Config, Opts) ->
ProxyPort = ?config(proxy_port, Config),
ProxyHost = ?config(proxy_host, Config),
ProxyName = ?config(proxy_name, Config),
ResourceId = resource_id(Config),
FailureStatus = maps:get(failure_status, Opts, disconnected),
?assertMatch({ok, _}, create_bridge(Config)),
ResourceId = resource_id(Config),
%% Since the connection process is async, we give it some time to
%% stabilize and avoid flakiness.
?retry(

View File

@ -0,0 +1,808 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_bridge_v1_compatibility_layer_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("typerefl/include/types.hrl").
-import(emqx_common_test_helpers, [on_exit/1]).
%%------------------------------------------------------------------------------
%% CT boilerplate
%%------------------------------------------------------------------------------
%% Run every exported t_* testcase in this module.
all() ->
    emqx_common_test_helpers:all(?MODULE).
%% Start the applications needed by the whole suite once.
init_per_suite(Config) ->
    Apps = emqx_cth_suite:start(
        app_specs(),
        #{work_dir => emqx_cth_suite:work_dir(Config)}
    ),
    emqx_mgmt_api_test_util:init_suite(),
    [{apps, Apps} | Config].
%% Tear down the management API helper and stop the suite applications.
end_per_suite(Config) ->
    Apps = ?config(apps, Config),
    emqx_mgmt_api_test_util:end_suite(),
    emqx_cth_suite:stop(Apps),
    ok.
%% Applications this suite boots, in dependency order.
app_specs() ->
    [
        emqx,
        emqx_conf,
        emqx_connector,
        emqx_bridge,
        emqx_rule_engine
    ].
init_per_testcase(_TestCase, Config) ->
    %% Setting up mocks for fake connector and bridge V2
    setup_mocks(),
    ets:new(fun_table_name(), [named_table, public]),
    %% Create a fake connector
    {ok, _} = emqx_connector:create(con_type(), con_name(), con_config()),
    [
        {mocked_mods, [
            emqx_connector_schema,
            emqx_connector_resource,
            emqx_bridge_v2
        ]}
        | Config
    ].
%% Per-testcase cleanup: drop the fun table, remove all bridges and
%% connectors, unload every mock and run registered janitor actions.
end_per_testcase(_TestCase, _Config) ->
    ets:delete(fun_table_name()),
    delete_all_bridges_and_connectors(),
    meck:unload(),
    emqx_common_test_helpers:call_janitor(),
    ok.
%%------------------------------------------------------------------------------
%% Helper fns
%%------------------------------------------------------------------------------
%% Install meck mocks so a fake connector type and a fake bridge-v2 type
%% behave like real ones for the compatibility-layer tests.  `catch
%% meck:new' tolerates a mock that is already installed.
setup_mocks() ->
    MeckOpts = [passthrough, no_link, no_history],
    catch meck:new(emqx_connector_schema, MeckOpts),
    meck:expect(emqx_connector_schema, fields, 1, con_schema()),
    meck:expect(emqx_connector_schema, connector_type_to_bridge_types, 1, [con_type()]),
    catch meck:new(emqx_connector_resource, MeckOpts),
    meck:expect(emqx_connector_resource, connector_to_resource_type, 1, con_mod()),
    catch meck:new(emqx_bridge_v2_schema, MeckOpts),
    meck:expect(emqx_bridge_v2_schema, fields, 1, bridge_schema()),
    catch meck:new(emqx_bridge_v2, MeckOpts),
    meck:expect(emqx_bridge_v2, bridge_v2_type_to_connector_type, 1, con_type()),
    meck:expect(emqx_bridge_v2, bridge_v1_type_to_bridge_v2_type, 1, bridge_type()),
    %% Recognize the fake type both as an atom and as a binary.
    IsBridgeV2TypeFun = fun(Type) ->
        BridgeV2Type = bridge_type(),
        BridgeV2TypeBin = bridge_type_bin(),
        case Type of
            BridgeV2Type -> true;
            BridgeV2TypeBin -> true;
            _ -> false
        end
    end,
    meck:expect(emqx_bridge_v2, is_bridge_v2_type, 1, IsBridgeV2TypeFun),
    %% NOTE(review): emqx_bridge_v2_schema was already mocked above; this
    %% second meck:new is redundant (harmless, swallowed by the catch).
    catch meck:new(emqx_bridge_v2_schema, MeckOpts),
    meck:expect(
        emqx_bridge_v2_schema,
        enterprise_api_schemas,
        1,
        fun(Method) -> [{bridge_type_bin(), hoconsc:ref(?MODULE, "api_" ++ Method)}] end
    ),
    ok.
%% Fixture constants: the fake connector implementation module.
con_mod() ->
    emqx_bridge_v2_test_connector.
%% Connector type mirrors the fake bridge type.
con_type() ->
    bridge_type().
%% Name under which the fake connector is created.
con_name() ->
    my_connector.
%% The fake bridge-v2 type used throughout the suite.
bridge_type() ->
    test_bridge_type.
bridge_type_bin() ->
    atom_to_binary(bridge_type(), utf8).
%% Hocon schema returned by the mocked emqx_connector_schema:fields/1.
con_schema() ->
    [
        {
            con_type(),
            hoconsc:mk(
                hoconsc:map(name, hoconsc:ref(?MODULE, "connector")),
                #{
                    desc => <<"Test Connector Config">>,
                    required => false
                }
            )
        }
    ].
%% Field definitions referenced by the mocked schemas above.
fields("connector") ->
    [
        {enable, hoconsc:mk(any(), #{})},
        {resource_opts, hoconsc:mk(map(), #{})}
    ];
fields("api_post") ->
    [
        {connector, hoconsc:mk(binary(), #{})},
        {name, hoconsc:mk(binary(), #{})},
        {type, hoconsc:mk(bridge_type(), #{})},
        {send_to, hoconsc:mk(atom(), #{})}
        | fields("connector")
    ].
%% Raw config for the fake connector.
con_config() ->
    #{
        <<"enable">> => true,
        <<"resource_opts">> => #{
            %% Set this to a low value to make the test run faster
            <<"health_check_interval">> => 100
        }
    }.
%% Hocon schema returned by the mocked emqx_bridge_v2_schema:fields/1.
bridge_schema() ->
    bridge_schema(_Opts = #{}).
bridge_schema(Opts) ->
    Type = maps:get(bridge_type, Opts, bridge_type()),
    [
        {
            Type,
            hoconsc:mk(
                hoconsc:map(name, typerefl:map()),
                #{
                    desc => <<"Test Bridge Config">>,
                    required => false
                }
            )
        }
    ].
%% Raw config for fake bridges created by the tests.
bridge_config() ->
    #{
        <<"connector">> => atom_to_binary(con_name()),
        <<"enable">> => true,
        <<"send_to">> => registered_process_name(),
        <<"resource_opts">> => #{
            <<"resume_interval">> => 100
        }
    }.
%% ETS table used by wrap_fun/unwrap_fun below.
fun_table_name() ->
    emqx_bridge_v1_compatibility_layer_SUITE_fun_table.
%% Name the test connector sends messages to.
registered_process_name() ->
    my_registered_process.
%% Remove every v2 bridge and connector, then reset the actions root.
delete_all_bridges_and_connectors() ->
    lists:foreach(
        fun(#{name := Name, type := Type}) ->
            ct:pal("removing bridge ~p", [{Type, Name}]),
            emqx_bridge_v2:remove(Type, Name)
        end,
        emqx_bridge_v2:list()
    ),
    lists:foreach(
        fun(#{name := Name, type := Type}) ->
            ct:pal("removing connector ~p", [{Type, Name}]),
            emqx_connector:remove(Type, Name)
        end,
        emqx_connector:list()
    ),
    update_root_config(#{}),
    ok.
%% Hocon does not support placing a fun in a config map so we replace it with a string
wrap_fun(Fun) ->
    UniqRef = make_ref(),
    UniqRefBin = term_to_binary(UniqRef),
    UniqRefStr = iolist_to_binary(base64:encode(UniqRefBin)),
    ets:insert(fun_table_name(), {UniqRefStr, Fun}),
    UniqRefStr.
%% Resolve a wrapped fun back from its string key.
unwrap_fun(UniqRefStr) ->
    ets:lookup_element(fun_table_name(), UniqRefStr, 2).
%% Overwrite the whole `actions' config root.
update_root_config(RootConf) ->
    emqx_conf:update([actions], RootConf, #{override_to => cluster}).
%% Remove every v1 bridge and any orphaned resource instances.
delete_all_bridges() ->
    lists:foreach(
        fun(#{name := Name, type := Type}) ->
            ok = emqx_bridge:remove(Type, Name)
        end,
        emqx_bridge:list()
    ),
    %% at some point during the tests, sometimes `emqx_bridge:list()'
    %% returns an empty list, but `emqx:get_config([bridges])' returns
    %% a bunch of orphan test bridges...
    lists:foreach(fun emqx_resource:remove/1, emqx_resource:list_instances()),
    emqx_config:put([bridges], #{}),
    ok.
%% Best-effort JSON decode: return the input unchanged if it isn't JSON.
maybe_json_decode(X) ->
    case emqx_utils_json:safe_decode(X, [return_maps]) of
        {ok, Decoded} -> Decoded;
        {error, _} -> X
    end.
%% Issue a management-API request and decode the response body; for error
%% responses the embedded <<"message">> is additionally decoded when it
%% itself contains JSON.
request(Method, Path, Params) ->
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    Opts = #{return_all => true},
    case emqx_mgmt_api_test_util:request_api(Method, Path, "", AuthHeader, Params, Opts) of
        {ok, {Status, Headers, Body0}} ->
            Body = maybe_json_decode(Body0),
            {ok, {Status, Headers, Body}};
        {error, {Status, Headers, Body0}} ->
            Body =
                case emqx_utils_json:safe_decode(Body0, [return_maps]) of
                    {ok, Decoded0 = #{<<"message">> := Msg0}} ->
                        Msg = maybe_json_decode(Msg0),
                        Decoded0#{<<"message">> := Msg};
                    {ok, Decoded0} ->
                        Decoded0;
                    {error, _} ->
                        Body0
                end,
            {error, {Status, Headers, Body}};
        Error ->
            Error
    end.
%% HTTP helpers: list/get through the v1 (/bridges) and v2 (/actions)
%% APIs; each logs the request and result via ct:pal.
list_bridges_http_api_v1() ->
    Path = emqx_mgmt_api_test_util:api_path(["bridges"]),
    ct:pal("list bridges (http v1)"),
    Res = request(get, Path, _Params = []),
    ct:pal("list bridges (http v1) result:\n ~p", [Res]),
    Res.
list_bridges_http_api_v2() ->
    Path = emqx_mgmt_api_test_util:api_path(["actions"]),
    ct:pal("list bridges (http v2)"),
    Res = request(get, Path, _Params = []),
    ct:pal("list bridges (http v2) result:\n ~p", [Res]),
    Res.
list_connectors_http() ->
    Path = emqx_mgmt_api_test_util:api_path(["connectors"]),
    ct:pal("list connectors"),
    Res = request(get, Path, _Params = []),
    ct:pal("list connectors result:\n ~p", [Res]),
    Res.
get_bridge_http_api_v1(Name) ->
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId]),
    ct:pal("get bridge (http v1) (~p)", [#{name => Name}]),
    Res = request(get, Path, _Params = []),
    ct:pal("get bridge (http v1) (~p) result:\n ~p", [#{name => Name}, Res]),
    Res.
get_bridge_http_api_v2(Name) ->
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["actions", BridgeId]),
    ct:pal("get bridge (http v2) (~p)", [#{name => Name}]),
    Res = request(get, Path, _Params = []),
    ct:pal("get bridge (http v2) (~p) result:\n ~p", [#{name => Name}, Res]),
    Res.
get_connector_http(Name) ->
    ConnectorId = emqx_connector_resource:connector_id(con_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["connectors", ConnectorId]),
    ct:pal("get connector (~p)", [#{name => Name, id => ConnectorId}]),
    Res = request(get, Path, _Params = []),
    ct:pal("get connector (~p) result:\n ~p", [#{name => Name}, Res]),
    Res.
%% HTTP helpers: create/update/delete through both API generations.
%% The v1 variants strip <<"connector">> because v1 bridges have no
%% connector field.
create_bridge_http_api_v1(Opts) ->
    Name = maps:get(name, Opts),
    Overrides = maps:get(overrides, Opts, #{}),
    BridgeConfig0 = emqx_utils_maps:deep_merge(bridge_config(), Overrides),
    BridgeConfig = maps:without([<<"connector">>], BridgeConfig0),
    Params = BridgeConfig#{<<"type">> => bridge_type_bin(), <<"name">> => Name},
    Path = emqx_mgmt_api_test_util:api_path(["bridges"]),
    ct:pal("creating bridge (http v1): ~p", [Params]),
    Res = request(post, Path, Params),
    ct:pal("bridge create (http v1) result:\n ~p", [Res]),
    Res.
create_bridge_http_api_v2(Opts) ->
    Name = maps:get(name, Opts),
    Overrides = maps:get(overrides, Opts, #{}),
    BridgeConfig = emqx_utils_maps:deep_merge(bridge_config(), Overrides),
    Params = BridgeConfig#{<<"type">> => bridge_type_bin(), <<"name">> => Name},
    Path = emqx_mgmt_api_test_util:api_path(["actions"]),
    ct:pal("creating bridge (http v2): ~p", [Params]),
    Res = request(post, Path, Params),
    ct:pal("bridge create (http v2) result:\n ~p", [Res]),
    Res.
update_bridge_http_api_v1(Opts) ->
    Name = maps:get(name, Opts),
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Overrides = maps:get(overrides, Opts, #{}),
    BridgeConfig0 = emqx_utils_maps:deep_merge(bridge_config(), Overrides),
    BridgeConfig = maps:without([<<"connector">>], BridgeConfig0),
    Params = BridgeConfig,
    Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId]),
    ct:pal("updating bridge (http v1): ~p", [Params]),
    Res = request(put, Path, Params),
    ct:pal("bridge update (http v1) result:\n ~p", [Res]),
    Res.
delete_bridge_http_api_v1(Opts) ->
    Name = maps:get(name, Opts),
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId]),
    ct:pal("deleting bridge (http v1)"),
    Res = request(delete, Path, _Params = []),
    ct:pal("bridge delete (http v1) result:\n ~p", [Res]),
    Res.
delete_bridge_http_api_v2(Opts) ->
    Name = maps:get(name, Opts),
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["actions", BridgeId]),
    ct:pal("deleting bridge (http v2)"),
    Res = request(delete, Path, _Params = []),
    ct:pal("bridge delete (http v2) result:\n ~p", [Res]),
    Res.
enable_bridge_http_api_v1(Name) ->
BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId, "enable", "true"]),
ct:pal("enabling bridge (http v1)"),
Res = request(put, Path, _Params = []),
ct:pal("bridge enable (http v1) result:\n ~p", [Res]),
Res.
enable_bridge_http_api_v2(Name) ->
BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
Path = emqx_mgmt_api_test_util:api_path(["actions", BridgeId, "enable", "true"]),
ct:pal("enabling bridge (http v2)"),
Res = request(put, Path, _Params = []),
ct:pal("bridge enable (http v2) result:\n ~p", [Res]),
Res.
%% Disables a bridge via the deprecated v1 (`/bridges/:id/enable/false') HTTP API.
disable_bridge_http_api_v1(Name) ->
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId, "enable", "false"]),
    ct:pal("disabling bridge (http v1)"),
    Res = request(put, Path, _Params = []),
    ct:pal("bridge disable (http v1) result:\n ~p", [Res]),
    Res.
%% Disables a bridge via the v2 (`/actions/:id/enable/false') HTTP API.
disable_bridge_http_api_v2(Name) ->
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["actions", BridgeId, "enable", "false"]),
    ct:pal("disabling bridge (http v2)"),
    Res = request(put, Path, _Params = []),
    ct:pal("bridge disable (http v2) result:\n ~p", [Res]),
    Res.
%% Invokes a cluster-wide bridge operation (e.g. `start' | `stop' | `restart',
%% given as an atom) through the deprecated v1 (`/bridges/:id/:op') HTTP API.
bridge_operation_http_api_v1(Name, Op0) ->
    Op = atom_to_list(Op0),
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId, Op]),
    ct:pal("bridge op ~p (http v1)", [Op]),
    Res = request(post, Path, _Params = []),
    ct:pal("bridge op ~p (http v1) result:\n ~p", [Op, Res]),
    Res.
%% Invokes a cluster-wide bridge operation through the v2
%% (`/actions/:id/:op') HTTP API.
bridge_operation_http_api_v2(Name, Op0) ->
    Op = atom_to_list(Op0),
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["actions", BridgeId, Op]),
    ct:pal("bridge op ~p (http v2)", [Op]),
    Res = request(post, Path, _Params = []),
    ct:pal("bridge op ~p (http v2) result:\n ~p", [Op, Res]),
    Res.
%% Invokes a per-node bridge operation (Op0 and Node0 are atoms) through the
%% deprecated v1 (`/nodes/:node/bridges/:id/:op') HTTP API.
bridge_node_operation_http_api_v1(Name, Node0, Op0) ->
    Op = atom_to_list(Op0),
    Node = atom_to_list(Node0),
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["nodes", Node, "bridges", BridgeId, Op]),
    ct:pal("bridge node op ~p (http v1)", [{Node, Op}]),
    Res = request(post, Path, _Params = []),
    ct:pal("bridge node op ~p (http v1) result:\n ~p", [{Node, Op}, Res]),
    Res.
%% Invokes a per-node bridge operation through the v2
%% (`/nodes/:node/actions/:id/:op') HTTP API.
bridge_node_operation_http_api_v2(Name, Node0, Op0) ->
    Op = atom_to_list(Op0),
    Node = atom_to_list(Node0),
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["nodes", Node, "actions", BridgeId, Op]),
    ct:pal("bridge node op ~p (http v2)", [{Node, Op}]),
    Res = request(post, Path, _Params = []),
    ct:pal("bridge node op ~p (http v2) result:\n ~p", [{Node, Op}, Res]),
    Res.
%% Looks the rule up in the rule engine and returns its `enable' flag.
%% Crashes with a badmatch when the rule does not exist.
is_rule_enabled(RuleId) ->
    {ok, #{enable := IsEnabled}} = emqx_rule_engine:get_rule(RuleId),
    IsEnabled.
%% Updates a rule via the HTTP API (`PUT /rules/:id') with the given Params map.
%% Returns the raw result of request/3.
update_rule_http(RuleId, Params) ->
    Path = emqx_mgmt_api_test_util:api_path(["rules", RuleId]),
    ct:pal("update rule ~p:\n ~p", [RuleId, Params]),
    Res = request(put, Path, Params),
    ct:pal("update rule ~p result:\n ~p", [RuleId, Res]),
    Res.
%% Convenience wrapper: enables a rule through the HTTP rule-update endpoint.
enable_rule_http(RuleId) ->
    update_rule_http(RuleId, #{<<"enable">> => true}).
%%------------------------------------------------------------------------------
%% Test cases
%%------------------------------------------------------------------------------
%% A bridge name longer than 255 bytes must be rejected with a 400 whose
%% message reason starts with "Name is too long".
t_name_too_long(_Config) ->
    LongName = list_to_binary(lists:duplicate(256, $a)),
    ?assertMatch(
        {error,
            {{_, 400, _}, _, #{<<"message">> := #{<<"reason">> := <<"Name is too long", _/binary>>}}}},
        create_bridge_http_api_v1(#{name => LongName})
    ),
    ok.
%% End-to-end compatibility scenario between the deprecated v1 (`/bridges')
%% and the v2 (`/actions') HTTP APIs:
%%   1. a bridge created via v1 is visible and operable from both APIs;
%%   2. once a second action shares its (auto-generated) connector, the bridge
%%      stops being representable in v1 (404/400 from all v1 endpoints);
%%   3. deleting the extra action restores v1 visibility;
%%   4. deleting via v1 also removes the auto-generated connector, leaving only
%%      the connector pre-created in the testcase init.
t_scenario_1(_Config) ->
    %% ===================================================================================
    %% Pre-conditions
    %% ===================================================================================
    ?assertMatch({ok, {{_, 200, _}, _, []}}, list_bridges_http_api_v1()),
    ?assertMatch({ok, {{_, 200, _}, _, []}}, list_bridges_http_api_v2()),
    %% created in the test case init
    ?assertMatch({ok, {{_, 200, _}, _, [#{}]}}, list_connectors_http()),
    {ok, {{_, 200, _}, _, [#{<<"name">> := PreexistentConnectorName}]}} = list_connectors_http(),
    %% ===================================================================================
    %% Create a single bridge v2. It should still be listed and functional when using v1
    %% APIs.
    %% ===================================================================================
    NameA = <<"bridgev2a">>,
    ?assertMatch(
        {ok, {{_, 201, _}, _, #{}}},
        create_bridge_http_api_v1(#{name => NameA})
    ),
    ?assertMatch({ok, {{_, 200, _}, _, [#{<<"name">> := NameA}]}}, list_bridges_http_api_v1()),
    ?assertMatch({ok, {{_, 200, _}, _, [#{<<"name">> := NameA}]}}, list_bridges_http_api_v2()),
    %% created a new one from the v1 API
    ?assertMatch({ok, {{_, 200, _}, _, [#{}, #{}]}}, list_connectors_http()),
    ?assertMatch({ok, {{_, 200, _}, _, #{<<"name">> := NameA}}}, get_bridge_http_api_v1(NameA)),
    ?assertMatch({ok, {{_, 200, _}, _, #{<<"name">> := NameA}}}, get_bridge_http_api_v2(NameA)),
    ?assertMatch({ok, {{_, 204, _}, _, _}}, disable_bridge_http_api_v1(NameA)),
    ?assertMatch({ok, {{_, 204, _}, _, _}}, enable_bridge_http_api_v1(NameA)),
    ?assertMatch({ok, {{_, 204, _}, _, _}}, disable_bridge_http_api_v2(NameA)),
    ?assertMatch({ok, {{_, 204, _}, _, _}}, enable_bridge_http_api_v2(NameA)),
    ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v1(NameA, stop)),
    ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v1(NameA, start)),
    ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v1(NameA, restart)),
    %% TODO: currently, only `start' op is supported by the v2 API.
    %% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameA, stop)),
    ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameA, start)),
    %% TODO: currently, only `start' op is supported by the v2 API.
    %% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameA, restart)),
    ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v1(NameA, node(), stop)),
    ?assertMatch(
        {ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v1(NameA, node(), start)
    ),
    ?assertMatch(
        {ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v1(NameA, node(), restart)
    ),
    %% TODO: currently, only `start' op is supported by the v2 API.
    %% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v2(NameA, stop)),
    ?assertMatch(
        {ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v2(NameA, node(), start)
    ),
    %% TODO: currently, only `start' op is supported by the v2 API.
    %% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v2(NameA, restart)),
    %% The v1-created bridge got an implicitly generated connector.
    {ok, {{_, 200, _}, _, #{<<"connector">> := GeneratedConnName}}} = get_bridge_http_api_v2(NameA),
    ?assertMatch(
        {ok, {{_, 200, _}, _, #{<<"name">> := GeneratedConnName}}},
        get_connector_http(GeneratedConnName)
    ),
    %% ===================================================================================
    %% Update the bridge using v1 API.
    %% ===================================================================================
    ?assertMatch(
        {ok, {{_, 200, _}, _, _}},
        update_bridge_http_api_v1(#{name => NameA})
    ),
    ?assertMatch({ok, {{_, 200, _}, _, [#{<<"name">> := NameA}]}}, list_bridges_http_api_v1()),
    ?assertMatch({ok, {{_, 200, _}, _, [#{<<"name">> := NameA}]}}, list_bridges_http_api_v2()),
    ?assertMatch({ok, {{_, 200, _}, _, [#{}, #{}]}}, list_connectors_http()),
    ?assertMatch({ok, {{_, 200, _}, _, #{<<"name">> := NameA}}}, get_bridge_http_api_v1(NameA)),
    ?assertMatch({ok, {{_, 200, _}, _, #{<<"name">> := NameA}}}, get_bridge_http_api_v2(NameA)),
    %% ===================================================================================
    %% Now create a new bridge_v2 pointing to the same connector. It should no longer be
    %% functions via v1 API, nor be listed in it. The new bridge must create a new
    %% channel, so that this bridge is no longer considered v1.
    %% ===================================================================================
    NameB = <<"bridgev2b">>,
    ?assertMatch(
        {ok, {{_, 201, _}, _, #{}}},
        create_bridge_http_api_v2(#{
            name => NameB, overrides => #{<<"connector">> => GeneratedConnName}
        })
    ),
    ?assertMatch({ok, {{_, 200, _}, _, []}}, list_bridges_http_api_v1()),
    ?assertMatch(
        {ok, {{_, 200, _}, _, [#{<<"name">> := _}, #{<<"name">> := _}]}}, list_bridges_http_api_v2()
    ),
    ?assertMatch({ok, {{_, 200, _}, _, [#{}, #{}]}}, list_connectors_http()),
    ?assertMatch({error, {{_, 404, _}, _, #{}}}, get_bridge_http_api_v1(NameA)),
    ?assertMatch({error, {{_, 404, _}, _, #{}}}, get_bridge_http_api_v1(NameB)),
    ?assertMatch({ok, {{_, 200, _}, _, #{<<"name">> := NameA}}}, get_bridge_http_api_v2(NameA)),
    ?assertMatch({ok, {{_, 200, _}, _, #{<<"name">> := NameB}}}, get_bridge_http_api_v2(NameB)),
    ?assertMatch(
        {ok, {{_, 200, _}, _, #{<<"name">> := GeneratedConnName}}},
        get_connector_http(GeneratedConnName)
    ),
    %% v1 operations must now be rejected for both names.
    ?assertMatch({error, {{_, 400, _}, _, _}}, disable_bridge_http_api_v1(NameA)),
    ?assertMatch({error, {{_, 400, _}, _, _}}, enable_bridge_http_api_v1(NameA)),
    ?assertMatch({error, {{_, 400, _}, _, _}}, disable_bridge_http_api_v1(NameB)),
    ?assertMatch({error, {{_, 400, _}, _, _}}, enable_bridge_http_api_v1(NameB)),
    ?assertMatch({ok, {{_, 204, _}, _, _}}, disable_bridge_http_api_v2(NameA)),
    ?assertMatch({ok, {{_, 204, _}, _, _}}, enable_bridge_http_api_v2(NameA)),
    ?assertMatch({error, {{_, 400, _}, _, _}}, bridge_operation_http_api_v1(NameA, stop)),
    ?assertMatch({error, {{_, 400, _}, _, _}}, bridge_operation_http_api_v1(NameA, start)),
    ?assertMatch({error, {{_, 400, _}, _, _}}, bridge_operation_http_api_v1(NameA, restart)),
    ?assertMatch({error, {{_, 400, _}, _, _}}, bridge_operation_http_api_v1(NameB, stop)),
    ?assertMatch({error, {{_, 400, _}, _, _}}, bridge_operation_http_api_v1(NameB, start)),
    ?assertMatch({error, {{_, 400, _}, _, _}}, bridge_operation_http_api_v1(NameB, restart)),
    %% TODO: currently, only `start' op is supported by the v2 API.
    %% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameA, stop)),
    ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameA, start)),
    %% TODO: currently, only `start' op is supported by the v2 API.
    %% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameA, restart)),
    %% TODO: currently, only `start' op is supported by the v2 API.
    %% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameB, stop)),
    ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameB, start)),
    %% TODO: currently, only `start' op is supported by the v2 API.
    %% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameB, restart)),
    ?assertMatch(
        {error, {{_, 400, _}, _, _}}, bridge_node_operation_http_api_v1(NameA, node(), stop)
    ),
    ?assertMatch(
        {error, {{_, 400, _}, _, _}}, bridge_node_operation_http_api_v1(NameA, node(), start)
    ),
    ?assertMatch(
        {error, {{_, 400, _}, _, _}}, bridge_node_operation_http_api_v1(NameA, node(), restart)
    ),
    ?assertMatch(
        {error, {{_, 400, _}, _, _}}, bridge_node_operation_http_api_v1(NameB, node(), stop)
    ),
    ?assertMatch(
        {error, {{_, 400, _}, _, _}}, bridge_node_operation_http_api_v1(NameB, node(), start)
    ),
    ?assertMatch(
        {error, {{_, 400, _}, _, _}}, bridge_node_operation_http_api_v1(NameB, node(), restart)
    ),
    %% TODO: currently, only `start' op is supported by the v2 API.
    %% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v2(NameA, stop)),
    %% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v2(NameB, stop)),
    ?assertMatch(
        {ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v2(NameA, node(), start)
    ),
    ?assertMatch(
        {ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v2(NameB, node(), start)
    ),
    %% TODO: currently, only `start' op is supported by the v2 API.
    %% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v2(NameA, restart)),
    %% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v2(NameB, restart)),
    %% ===================================================================================
    %% Try to delete the original bridge using V1. It should fail and its connector
    %% should be preserved.
    %% ===================================================================================
    ?assertMatch(
        {error, {{_, 400, _}, _, _}},
        delete_bridge_http_api_v1(#{name => NameA})
    ),
    ?assertMatch({ok, {{_, 200, _}, _, []}}, list_bridges_http_api_v1()),
    ?assertMatch(
        {ok, {{_, 200, _}, _, [#{<<"name">> := _}, #{<<"name">> := _}]}}, list_bridges_http_api_v2()
    ),
    ?assertMatch({ok, {{_, 200, _}, _, [#{}, #{}]}}, list_connectors_http()),
    ?assertMatch({error, {{_, 404, _}, _, #{}}}, get_bridge_http_api_v1(NameA)),
    ?assertMatch({error, {{_, 404, _}, _, #{}}}, get_bridge_http_api_v1(NameB)),
    ?assertMatch({ok, {{_, 200, _}, _, #{<<"name">> := NameA}}}, get_bridge_http_api_v2(NameA)),
    ?assertMatch({ok, {{_, 200, _}, _, #{<<"name">> := NameB}}}, get_bridge_http_api_v2(NameB)),
    ?assertMatch(
        {ok, {{_, 200, _}, _, #{<<"name">> := GeneratedConnName}}},
        get_connector_http(GeneratedConnName)
    ),
    %% ===================================================================================
    %% Delete the 2nd new bridge so it appears again in the V1 API.
    %% ===================================================================================
    ?assertMatch(
        {ok, {{_, 204, _}, _, _}},
        delete_bridge_http_api_v2(#{name => NameB})
    ),
    ?assertMatch({ok, {{_, 200, _}, _, [#{<<"name">> := NameA}]}}, list_bridges_http_api_v1()),
    ?assertMatch({ok, {{_, 200, _}, _, [#{<<"name">> := NameA}]}}, list_bridges_http_api_v2()),
    ?assertMatch({ok, {{_, 200, _}, _, [#{}, #{}]}}, list_connectors_http()),
    ?assertMatch({ok, {{_, 200, _}, _, #{<<"name">> := NameA}}}, get_bridge_http_api_v1(NameA)),
    ?assertMatch({ok, {{_, 200, _}, _, #{<<"name">> := NameA}}}, get_bridge_http_api_v2(NameA)),
    ?assertMatch(
        {ok, {{_, 200, _}, _, #{<<"name">> := GeneratedConnName}}},
        get_connector_http(GeneratedConnName)
    ),
    ?assertMatch({ok, {{_, 204, _}, _, _}}, disable_bridge_http_api_v1(NameA)),
    ?assertMatch({ok, {{_, 204, _}, _, _}}, enable_bridge_http_api_v1(NameA)),
    ?assertMatch({ok, {{_, 204, _}, _, _}}, disable_bridge_http_api_v2(NameA)),
    ?assertMatch({ok, {{_, 204, _}, _, _}}, enable_bridge_http_api_v2(NameA)),
    ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v1(NameA, stop)),
    ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v1(NameA, start)),
    ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v1(NameA, restart)),
    %% TODO: currently, only `start' op is supported by the v2 API.
    %% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameA, stop)),
    ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameA, start)),
    %% TODO: currently, only `start' op is supported by the v2 API.
    %% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameA, restart)),
    %% ===================================================================================
    %% Delete the last bridge using API v1. The generated connector should also be
    %% removed.
    %% ===================================================================================
    ?assertMatch(
        {ok, {{_, 204, _}, _, _}},
        delete_bridge_http_api_v1(#{name => NameA})
    ),
    ?assertMatch({ok, {{_, 200, _}, _, []}}, list_bridges_http_api_v1()),
    ?assertMatch({ok, {{_, 200, _}, _, []}}, list_bridges_http_api_v2()),
    %% only the pre-existing one should remain.
    ?assertMatch(
        {ok, {{_, 200, _}, _, [#{<<"name">> := PreexistentConnectorName}]}},
        list_connectors_http()
    ),
    ?assertMatch(
        {ok, {{_, 200, _}, _, #{<<"name">> := PreexistentConnectorName}}},
        get_connector_http(PreexistentConnectorName)
    ),
    ?assertMatch({error, {{_, 404, _}, _, _}}, get_bridge_http_api_v1(NameA)),
    ?assertMatch({error, {{_, 404, _}, _, _}}, get_bridge_http_api_v2(NameA)),
    ?assertMatch({error, {{_, 404, _}, _, _}}, get_connector_http(GeneratedConnName)),
    ?assertMatch({error, {{_, 404, _}, _, _}}, disable_bridge_http_api_v1(NameA)),
    ?assertMatch({error, {{_, 404, _}, _, _}}, enable_bridge_http_api_v1(NameA)),
    ?assertMatch({error, {{_, 404, _}, _, _}}, disable_bridge_http_api_v2(NameA)),
    ?assertMatch({error, {{_, 404, _}, _, _}}, enable_bridge_http_api_v2(NameA)),
    ?assertMatch({error, {{_, 404, _}, _, _}}, bridge_operation_http_api_v1(NameA, stop)),
    ?assertMatch({error, {{_, 404, _}, _, _}}, bridge_operation_http_api_v1(NameA, start)),
    ?assertMatch({error, {{_, 404, _}, _, _}}, bridge_operation_http_api_v1(NameA, restart)),
    %% TODO: currently, only `start' op is supported by the v2 API.
    %% ?assertMatch({error, {{_, 404, _}, _, _}}, bridge_operation_http_api_v2(NameA, stop)),
    ?assertMatch({error, {{_, 404, _}, _, _}}, bridge_operation_http_api_v2(NameA, start)),
    %% TODO: currently, only `start' op is supported by the v2 API.
    %% ?assertMatch({error, {{_, 404, _}, _, _}}, bridge_operation_http_api_v2(NameA, restart)),
    ok.
%% Rule/bridge interaction scenario: a rule referencing a non-existent bridge
%% can still be created and enabled; once the bridge exists, rules referencing
%% it (even mixed with still-missing bridges) can be created and enabled too.
t_scenario_2(Config) ->
    %% ===================================================================================
    %% Pre-conditions
    %% ===================================================================================
    ?assertMatch({ok, {{_, 200, _}, _, []}}, list_bridges_http_api_v1()),
    ?assertMatch({ok, {{_, 200, _}, _, []}}, list_bridges_http_api_v2()),
    %% created in the test case init
    ?assertMatch({ok, {{_, 200, _}, _, [#{}]}}, list_connectors_http()),
    {ok, {{_, 200, _}, _, [#{<<"name">> := _PreexistentConnectorName}]}} = list_connectors_http(),
    %% ===================================================================================
    %% Try to create a rule referencing a non-existent bridge. It succeeds, but it's
    %% implicitly disabled. Trying to update it later without creating the bridge should
    %% allow it to be enabled.
    %% ===================================================================================
    BridgeName = <<"scenario2">>,
    RuleTopic = <<"t/scenario2">>,
    {ok, #{<<"id">> := RuleId0}} =
        emqx_bridge_v2_testlib:create_rule_and_action_http(
            bridge_type(),
            RuleTopic,
            [
                {bridge_name, BridgeName}
                | Config
            ],
            #{overrides => #{enable => true}}
        ),
    ?assert(is_rule_enabled(RuleId0)),
    ?assertMatch({ok, {{_, 200, _}, _, _}}, enable_rule_http(RuleId0)),
    ?assert(is_rule_enabled(RuleId0)),
    %% ===================================================================================
    %% Now we create the bridge, and attempt to create a new enabled rule. It should
    %% start enabled. Also, updating the previous rule to enable it should work now.
    %% ===================================================================================
    ?assertMatch(
        {ok, {{_, 201, _}, _, #{}}},
        create_bridge_http_api_v1(#{name => BridgeName})
    ),
    {ok, #{<<"id">> := RuleId1}} =
        emqx_bridge_v2_testlib:create_rule_and_action_http(
            bridge_type(),
            RuleTopic,
            [
                {bridge_name, BridgeName}
                | Config
            ],
            #{overrides => #{enable => true}}
        ),
    ?assert(is_rule_enabled(RuleId0)),
    ?assert(is_rule_enabled(RuleId1)),
    ?assertMatch({ok, {{_, 200, _}, _, _}}, enable_rule_http(RuleId0)),
    ?assert(is_rule_enabled(RuleId0)),
    %% ===================================================================================
    %% Creating a rule with mixed existent/non-existent bridges should allow enabling it.
    %% ===================================================================================
    NonExistentBridgeName = <<"scenario2_not_created">>,
    {ok, #{<<"id">> := RuleId2}} =
        emqx_bridge_v2_testlib:create_rule_and_action_http(
            bridge_type(),
            RuleTopic,
            [
                {bridge_name, BridgeName}
                | Config
            ],
            #{
                overrides => #{
                    enable => true,
                    %% one action for the real bridge, one for a missing one
                    actions => [
                        emqx_bridge_resource:bridge_id(
                            bridge_type(),
                            BridgeName
                        ),
                        emqx_bridge_resource:bridge_id(
                            bridge_type(),
                            NonExistentBridgeName
                        )
                    ]
                }
            }
        ),
    ?assert(is_rule_enabled(RuleId2)),
    ?assertMatch({ok, {{_, 200, _}, _, _}}, enable_rule_http(RuleId2)),
    ?assert(is_rule_enabled(RuleId2)),
    ok.

View File

@ -0,0 +1,862 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_bridge_v2_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-import(emqx_common_test_helpers, [on_exit/1]).
%% Connector callback module used by the mocked connector resource.
con_mod() ->
    emqx_bridge_v2_test_connector.
%% Connector type; deliberately the same atom as the bridge type.
con_type() ->
    bridge_type().
%% Name of the connector created in init_per_testcase/2.
con_name() ->
    my_connector.
%% Resource id of the per-testcase connector.
connector_resource_id() ->
    emqx_connector_resource:resource_id(con_type(), con_name()).
%% Bridge (action) type used throughout this suite.
bridge_type() ->
    test_bridge_type.
%% Hocon schema fields for the mocked connector root: a single connector
%% type whose config is a free-form map (see setup_mocks/0).
con_schema() ->
    [
        {
            con_type(),
            hoconsc:mk(
                hoconsc:map(name, typerefl:map()),
                #{
                    desc => <<"Test Connector Config">>,
                    required => false
                }
            )
        }
    ].
%% Default configuration for the test connector.
con_config() ->
    #{
        <<"enable">> => true,
        <<"resource_opts">> => #{
            %% Set this to a low value to make the test run faster
            <<"health_check_interval">> => 100
        }
    }.
%% Hocon schema fields for the mocked bridge-v2 root, with default options.
bridge_schema() ->
    bridge_schema(_Opts = #{}).
%% Same, but Opts may override the bridge type via the `bridge_type' key.
bridge_schema(Opts) ->
    Type = maps:get(bridge_type, Opts, bridge_type()),
    [
        {
            Type,
            hoconsc:mk(
                hoconsc:map(name, typerefl:map()),
                #{
                    desc => <<"Test Bridge Config">>,
                    required => false
                }
            )
        }
    ].
%% Default bridge config: points at the per-testcase connector and tells the
%% test connector to forward messages to the registered test process.
bridge_config() ->
    #{
        <<"connector">> => atom_to_binary(con_name()),
        <<"enable">> => true,
        <<"send_to">> => registered_process_name(),
        <<"resource_opts">> => #{
            <<"resume_interval">> => 100
        }
    }.
%% ETS table used by wrap_fun/1 / unwrap_fun/1 to smuggle funs through configs.
fun_table_name() ->
    emqx_bridge_v2_SUITE_fun_table.
%% Name the test process registers under so the test connector can reach it.
registered_process_name() ->
    my_registered_process.
%% CT callback: run every `t_*' function in this module.
all() ->
    emqx_common_test_helpers:all(?MODULE).
%% Applications required by this suite, in start order.
%% Delegates to app_specs/0: the list used to be duplicated verbatim in both
%% functions, which risked the two copies drifting apart.
start_apps() ->
    app_specs().
%% Installs meck mocks so that the fake connector/bridge types defined above
%% are recognized by the schema and resource-resolution code paths.
%% The `catch meck:new(...)' calls tolerate a module already being mocked
%% from a previous test case.
setup_mocks() ->
    MeckOpts = [passthrough, no_link, no_history, non_strict],
    catch meck:new(emqx_connector_schema, MeckOpts),
    meck:expect(emqx_connector_schema, fields, 1, con_schema()),
    catch meck:new(emqx_connector_resource, MeckOpts),
    meck:expect(emqx_connector_resource, connector_to_resource_type, 1, con_mod()),
    catch meck:new(emqx_bridge_v2_schema, MeckOpts),
    meck:expect(emqx_bridge_v2_schema, fields, 1, bridge_schema()),
    catch meck:new(emqx_bridge_v2, MeckOpts),
    BridgeType = bridge_type(),
    BridgeTypeBin = atom_to_binary(BridgeType),
    %% Accept the type both as an atom and as a binary, as callers vary.
    meck:expect(
        emqx_bridge_v2,
        bridge_v2_type_to_connector_type,
        fun(Type) when Type =:= BridgeType; Type =:= BridgeTypeBin -> con_type() end
    ),
    meck:expect(emqx_bridge_v2, bridge_v1_type_to_bridge_v2_type, 1, bridge_type()),
    meck:expect(emqx_bridge_v2, is_bridge_v2_type, fun(Type) -> Type =:= BridgeType end),
    ok.
%% CT callback: boot the required applications once for the whole suite.
init_per_suite(Config) ->
    Apps = emqx_cth_suite:start(
        app_specs(),
        #{work_dir => emqx_cth_suite:work_dir(Config)}
    ),
    [{apps, Apps} | Config].
%% CT callback: stop the applications started in init_per_suite/1.
end_per_suite(Config) ->
    Apps = ?config(apps, Config),
    emqx_cth_suite:stop(Apps),
    ok.
%% Application specs for emqx_cth_suite:start/2, in start order.
app_specs() ->
    [
        emqx,
        emqx_conf,
        emqx_connector,
        emqx_bridge,
        emqx_rule_engine
    ].
%% CT callback: install mocks, create the fun table and a fake connector for
%% each test case. The mocked modules are recorded in Config for reference.
init_per_testcase(_TestCase, Config) ->
    %% Setting up mocks for fake connector and bridge V2
    setup_mocks(),
    ets:new(fun_table_name(), [named_table, public]),
    %% Create a fake connector
    {ok, _} = emqx_connector:create(con_type(), con_name(), con_config()),
    [
        {mocked_mods, [
            emqx_connector_schema,
            emqx_connector_resource,
            emqx_bridge_v2
        ]}
        | Config
    ].
%% CT callback: tear down everything the test case (or init) created.
end_per_testcase(_TestCase, _Config) ->
    ets:delete(fun_table_name()),
    delete_all_bridges_and_connectors(),
    meck:unload(),
    emqx_common_test_helpers:call_janitor(),
    ok.
%% Removes every bridge (first, since bridges reference connectors) and every
%% connector, then resets the `actions' root config to an empty map.
delete_all_bridges_and_connectors() ->
    lists:foreach(
        fun(#{name := Name, type := Type}) ->
            ct:pal("removing bridge ~p", [{Type, Name}]),
            emqx_bridge_v2:remove(Type, Name)
        end,
        emqx_bridge_v2:list()
    ),
    lists:foreach(
        fun(#{name := Name, type := Type}) ->
            ct:pal("removing connector ~p", [{Type, Name}]),
            emqx_connector:remove(Type, Name)
        end,
        emqx_connector:list()
    ),
    update_root_config(#{}),
    ok.
%% Hocon does not support placing a fun in a config map so we replace it with a string:
%% the fun is stashed in the `fun_table_name()' ETS table under a unique base64
%% key, which is what gets embedded in the config. `unwrap_fun/1' resolves the
%% key back to the fun.
wrap_fun(Fun) ->
    UniqRef = make_ref(),
    UniqRefBin = term_to_binary(UniqRef),
    %% base64:encode/1 already returns a binary, so the previous
    %% iolist_to_binary/1 wrapper was a redundant copy.
    UniqRefStr = base64:encode(UniqRefBin),
    ets:insert(fun_table_name(), {UniqRefStr, Fun}),
    UniqRefStr.
%% Resolves a key produced by wrap_fun/1 back to the stored fun.
unwrap_fun(UniqRefStr) ->
    ets:lookup_element(fun_table_name(), UniqRefStr, 2).
%% Replaces the whole `actions' root config (cluster-wide override).
update_root_config(RootConf) ->
    emqx_conf:update([actions], RootConf, #{override_to => cluster}).
%% Replaces the whole `connectors' root config (cluster-wide override).
update_root_connectors_config(RootConf) ->
    emqx_conf:update([connectors], RootConf, #{override_to => cluster}).
%% A bridge v2 can be created and then removed without error.
t_create_remove(_) ->
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, bridge_config()),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
    ok.
%% emqx_bridge_v2:list/0 tracks creations and removals exactly.
t_list(_) ->
    [] = emqx_bridge_v2:list(),
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, bridge_config()),
    1 = length(emqx_bridge_v2:list()),
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge2, bridge_config()),
    2 = length(emqx_bridge_v2:list()),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
    1 = length(emqx_bridge_v2:list()),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge2),
    0 = length(emqx_bridge_v2:list()),
    ok.
%% A dry-run creation with the default config succeeds.
t_create_dry_run(_) ->
    ok = emqx_bridge_v2:create_dry_run(bridge_type(), bridge_config()).
%% Dry-run creation fails when `on_add_channel' either returns `{error, _}'
%% or throws; both cases must surface as `{error, _}'.
t_create_dry_run_fail_add_channel(_) ->
    Msg = <<"Failed to add channel">>,
    OnAddChannel1 = wrap_fun(fun() ->
        {error, Msg}
    end),
    Conf1 = (bridge_config())#{on_add_channel_fun => OnAddChannel1},
    {error, _} = emqx_bridge_v2:create_dry_run(bridge_type(), Conf1),
    OnAddChannel2 = wrap_fun(fun() ->
        throw(Msg)
    end),
    Conf2 = (bridge_config())#{on_add_channel_fun => OnAddChannel2},
    {error, _} = emqx_bridge_v2:create_dry_run(bridge_type(), Conf2),
    ok.
%% Dry-run creation fails when `on_get_channel_status' either returns
%% `{error, _}' or throws; both cases must surface as `{error, _}'.
t_create_dry_run_fail_get_channel_status(_) ->
    %% Fixed message text: it previously read "Failed to add channel",
    %% a copy-paste leftover from the add-channel test above. No assertion
    %% inspects the reason's content, only the `{error, _}' shape.
    Msg = <<"Failed to get channel status">>,
    Fun1 = wrap_fun(fun() ->
        {error, Msg}
    end),
    Conf1 = (bridge_config())#{on_get_channel_status_fun => Fun1},
    {error, _} = emqx_bridge_v2:create_dry_run(bridge_type(), Conf1),
    Fun2 = wrap_fun(fun() ->
        throw(Msg)
    end),
    Conf2 = (bridge_config())#{on_get_channel_status_fun => Fun2},
    {error, _} = emqx_bridge_v2:create_dry_run(bridge_type(), Conf2),
    ok.
%% Dry-run creation fails when the referenced connector does not exist.
t_create_dry_run_connector_does_not_exist(_) ->
    BridgeConf = (bridge_config())#{<<"connector">> => <<"connector_does_not_exist">>},
    {error, _} = emqx_bridge_v2:create_dry_run(bridge_type(), BridgeConf).
%% A bridge is representable in the v1 API only while it is the sole channel
%% on its connector; with two channels it stops being a valid v1 bridge.
t_is_valid_bridge_v1(_) ->
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, bridge_config()),
    true = emqx_bridge_v2:is_valid_bridge_v1(bridge_v1_type, my_test_bridge),
    %% Add another channel/bridge to the connector
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge_2, bridge_config()),
    false = emqx_bridge_v2:is_valid_bridge_v1(bridge_v1_type, my_test_bridge),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
    true = emqx_bridge_v2:is_valid_bridge_v1(bridge_v1_type, my_test_bridge_2),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge_2),
    %% Non existing bridge is a valid Bridge V1
    true = emqx_bridge_v2:is_valid_bridge_v1(bridge_v1_type, my_test_bridge),
    ok.
%% A manual health check on a healthy bridge reports `connected' and no error.
t_manual_health_check(_) ->
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, bridge_config()),
    %% Run a health check for the bridge
    #{error := undefined, status := connected} = emqx_bridge_v2:health_check(
        bridge_type(), my_test_bridge
    ),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
    ok.
%% A thrown term from `on_get_channel_status' is reported as the error with
%% status `disconnected'.
t_manual_health_check_exception(_) ->
    Conf = (bridge_config())#{
        <<"on_get_channel_status_fun">> => wrap_fun(fun() -> throw(my_error) end)
    },
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, Conf),
    %% Run a health check for the bridge
    #{error := my_error, status := disconnected} = emqx_bridge_v2:health_check(
        bridge_type(), my_test_bridge
    ),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
    ok.
%% An `error/1' exception from `on_get_channel_status' also yields
%% `disconnected' (the reported error term is implementation-defined).
t_manual_health_check_exception_error(_) ->
    Conf = (bridge_config())#{
        <<"on_get_channel_status_fun">> => wrap_fun(fun() -> error(my_error) end)
    },
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, Conf),
    %% Run a health check for the bridge
    #{error := _, status := disconnected} = emqx_bridge_v2:health_check(
        bridge_type(), my_test_bridge
    ),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
    ok.
%% An `{error, Reason}' return from `on_get_channel_status' yields
%% `disconnected' with Reason as the reported error.
t_manual_health_check_error(_) ->
    Conf = (bridge_config())#{
        <<"on_get_channel_status_fun">> => wrap_fun(fun() -> {error, my_error} end)
    },
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, Conf),
    %% Run a health check for the bridge
    #{error := my_error, status := disconnected} = emqx_bridge_v2:health_check(
        bridge_type(), my_test_bridge
    ),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
    ok.
%% A message sent through the bridge is delivered to the process registered
%% under registered_process_name() (the test connector's `send_to' target).
t_send_message(_) ->
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, bridge_config()),
    %% Register name for this process
    register(registered_process_name(), self()),
    _ = emqx_bridge_v2:send_message(bridge_type(), my_test_bridge, <<"my_msg">>, #{}),
    receive
        <<"my_msg">> ->
            ok
    after 10000 ->
        ct:fail("Failed to receive message")
    end,
    unregister(registered_process_name()),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge).
%% A rule whose action is the bridge forwards published MQTT messages to the
%% bridge, which delivers them to the registered test process.
t_send_message_through_rule(_) ->
    BridgeName = my_test_bridge,
    {ok, _} = emqx_bridge_v2:create(bridge_type(), BridgeName, bridge_config()),
    %% Create a rule to send message to the bridge
    {ok, _} = emqx_rule_engine:create_rule(
        #{
            sql => <<"select * from \"t/a\"">>,
            id => atom_to_binary(?FUNCTION_NAME),
            %% action id format: <bridge_type>:<bridge_name>
            actions => [
                <<
                    (atom_to_binary(bridge_type()))/binary,
                    ":",
                    (atom_to_binary(BridgeName))/binary
                >>
            ],
            description => <<"bridge_v2 test rule">>
        }
    ),
    %% Register name for this process
    register(registered_process_name(), self()),
    %% Send message to the topic
    ClientId = atom_to_binary(?FUNCTION_NAME),
    Payload = <<"hello">>,
    Msg = emqx_message:make(ClientId, 0, <<"t/a">>, Payload),
    emqx:publish(Msg),
    receive
        #{payload := Payload} ->
            ok
    after 10000 ->
        ct:fail("Failed to receive message")
    end,
    unregister(registered_process_name()),
    ok = emqx_rule_engine:delete_rule(atom_to_binary(?FUNCTION_NAME)),
    ok = emqx_bridge_v2:remove(bridge_type(), BridgeName),
    ok.
%% A bridge configured with a `local_topic' picks up messages published to
%% that topic directly, without a rule.
t_send_message_through_local_topic(_) ->
    %% Bridge configuration with local topic
    BridgeName = my_test_bridge,
    TopicName = <<"t/b">>,
    BridgeConfig = (bridge_config())#{
        <<"local_topic">> => TopicName
    },
    {ok, _} = emqx_bridge_v2:create(bridge_type(), BridgeName, BridgeConfig),
    %% Register name for this process
    register(registered_process_name(), self()),
    %% Send message to the topic
    ClientId = atom_to_binary(?FUNCTION_NAME),
    Payload = <<"hej">>,
    Msg = emqx_message:make(ClientId, 0, TopicName, Payload),
    emqx:publish(Msg),
    receive
        #{payload := Payload} ->
            ok
    after 10000 ->
        ct:fail("Failed to receive message")
    end,
    unregister(registered_process_name()),
    ok = emqx_bridge_v2:remove(bridge_type(), BridgeName),
    ok.
%% While the channel health check returns an error, sends are not delivered;
%% once the check reports `connected' again, delivery resumes.
t_send_message_unhealthy_channel(_) ->
    OnGetStatusResponseETS = ets:new(on_get_status_response_ets, [public]),
    ets:insert(OnGetStatusResponseETS, {status_value, {error, my_error}}),
    %% The channel status fun reads its reply from ETS so the test can flip it.
    OnGetStatusFun = wrap_fun(fun() ->
        ets:lookup_element(OnGetStatusResponseETS, status_value, 2)
    end),
    Conf = (bridge_config())#{<<"on_get_channel_status_fun">> => OnGetStatusFun},
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, Conf),
    %% Register name for this process
    register(registered_process_name(), self()),
    _ = emqx_bridge_v2:send_message(bridge_type(), my_test_bridge, <<"my_msg">>, #{timeout => 1}),
    receive
        Any ->
            ct:pal("Received message: ~p", [Any]),
            ct:fail("Should not get message here")
    after 1 ->
        ok
    end,
    %% Sending should work again after the channel is healthy
    ets:insert(OnGetStatusResponseETS, {status_value, connected}),
    _ = emqx_bridge_v2:send_message(
        bridge_type(),
        my_test_bridge,
        <<"my_msg">>,
        #{}
    ),
    receive
        <<"my_msg">> ->
            ok
    after 10000 ->
        ct:fail("Failed to receive message")
    end,
    unregister(registered_process_name()),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge).
%% While the *connector* is unhealthy (`connecting'), sends are not delivered
%% and an alarm is raised; once it reports `connected', delivery resumes and
%% the alarm clears.
t_send_message_unhealthy_connector(_) ->
    ResponseETS = ets:new(response_ets, [public]),
    ets:insert(ResponseETS, {on_start_value, conf}),
    ets:insert(ResponseETS, {on_get_status_value, connecting}),
    %% on_start echoes the config unless the test installs a different reply.
    OnStartFun = wrap_fun(fun(Conf) ->
        case ets:lookup_element(ResponseETS, on_start_value, 2) of
            conf ->
                {ok, Conf};
            V ->
                V
        end
    end),
    %% Connector status is read from ETS so the test can flip it at runtime.
    OnGetStatusFun = wrap_fun(fun() ->
        ets:lookup_element(ResponseETS, on_get_status_value, 2)
    end),
    ConConfig = emqx_utils_maps:deep_merge(con_config(), #{
        <<"on_start_fun">> => OnStartFun,
        <<"on_get_status_fun">> => OnGetStatusFun,
        <<"resource_opts">> => #{<<"start_timeout">> => 100}
    }),
    ConName = ?FUNCTION_NAME,
    {ok, _} = emqx_connector:create(con_type(), ConName, ConConfig),
    BridgeConf = (bridge_config())#{
        <<"connector">> => atom_to_binary(ConName)
    },
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, BridgeConf),
    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    %% Test that sending does not work when the connector is unhealthy (connecting)
    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    register(registered_process_name(), self()),
    _ = emqx_bridge_v2:send_message(bridge_type(), my_test_bridge, <<"my_msg">>, #{timeout => 100}),
    receive
        Any ->
            ct:pal("Received message: ~p", [Any]),
            ct:fail("Should not get message here")
    after 10 ->
        ok
    end,
    %% We should have one alarm
    1 = get_bridge_v2_alarm_cnt(),
    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    %% Test that sending works again when the connector is healthy (connected)
    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    ets:insert(ResponseETS, {on_get_status_value, connected}),
    _ = emqx_bridge_v2:send_message(bridge_type(), my_test_bridge, <<"my_msg">>, #{timeout => 1000}),
    receive
        <<"my_msg">> ->
            ok
    after 1000 ->
        ct:fail("Failed to receive message")
    end,
    %% The alarm should be gone at this point
    0 = get_bridge_v2_alarm_cnt(),
    unregister(registered_process_name()),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
    ok = emqx_connector:remove(con_type(), ConName),
    ets:delete(ResponseETS),
    ok.
%% Checks that a connector status round trip connected -> connecting ->
%% connected does NOT restart (remove/re-add) its channels, while a trip
%% through `disconnected` DOES trigger a channel re-add.  Channel re-adds
%% are counted via the mocked on_add_channel callback.
t_connector_connected_to_connecting_to_connected_no_channel_restart(_) ->
    %% Mocked connector callbacks read their replies from this ETS table.
    ResponseETS = ets:new(response_ets, [public]),
    ets:insert(ResponseETS, {on_start_value, conf}),
    ets:insert(ResponseETS, {on_get_status_value, connected}),
    OnStartFun = wrap_fun(fun(Conf) ->
        case ets:lookup_element(ResponseETS, on_start_value, 2) of
            conf ->
                {ok, Conf};
            V ->
                V
        end
    end),
    OnGetStatusFun = wrap_fun(fun() ->
        ets:lookup_element(ResponseETS, on_get_status_value, 2)
    end),
    %% Counts how many times the channel has been added to the connector.
    OnAddChannelCntr = counters:new(1, []),
    OnAddChannelFun = wrap_fun(fun(_InstId, ConnectorState, _ChannelId, _ChannelConfig) ->
        counters:add(OnAddChannelCntr, 1, 1),
        {ok, ConnectorState}
    end),
    ConConfig = emqx_utils_maps:deep_merge(con_config(), #{
        <<"on_start_fun">> => OnStartFun,
        <<"on_get_status_fun">> => OnGetStatusFun,
        <<"on_add_channel_fun">> => OnAddChannelFun,
        <<"resource_opts">> => #{<<"start_timeout">> => 100}
    }),
    ConName = ?FUNCTION_NAME,
    {ok, _} = emqx_connector:create(con_type(), ConName, ConConfig),
    BridgeConf = (bridge_config())#{
        <<"connector">> => atom_to_binary(ConName)
    },
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, BridgeConf),
    %% Wait until on_add_channel_fun is called at least once
    wait_until(fun() ->
        counters:get(OnAddChannelCntr, 1) =:= 1
    end),
    1 = counters:get(OnAddChannelCntr, 1),
    %% We change the status of the connector
    ets:insert(ResponseETS, {on_get_status_value, connecting}),
    %% Wait until the status is changed
    wait_until(fun() ->
        {ok, BridgeData} = emqx_bridge_v2:lookup(bridge_type(), my_test_bridge),
        maps:get(status, BridgeData) =:= connecting
    end),
    {ok, BridgeData1} = emqx_bridge_v2:lookup(bridge_type(), my_test_bridge),
    ct:pal("Bridge V2 status changed to: ~p", [maps:get(status, BridgeData1)]),
    %% We change the status again back to connected
    ets:insert(ResponseETS, {on_get_status_value, connected}),
    %% Wait until the status is connected again
    wait_until(fun() ->
        {ok, BridgeData2} = emqx_bridge_v2:lookup(bridge_type(), my_test_bridge),
        maps:get(status, BridgeData2) =:= connected
    end),
    %% On add channel should not have been called again
    1 = counters:get(OnAddChannelCntr, 1),
    %% We change the status to an error
    ets:insert(ResponseETS, {on_get_status_value, {error, my_error}}),
    %% Wait until the status is changed
    wait_until(fun() ->
        {ok, BridgeData2} = emqx_bridge_v2:lookup(bridge_type(), my_test_bridge),
        maps:get(status, BridgeData2) =:= disconnected
    end),
    %% Now we go back to connected
    ets:insert(ResponseETS, {on_get_status_value, connected}),
    wait_until(fun() ->
        {ok, BridgeData2} = emqx_bridge_v2:lookup(bridge_type(), my_test_bridge),
        maps:get(status, BridgeData2) =:= connected
    end),
    %% Now the channel should have been removed and added again
    wait_until(fun() ->
        counters:get(OnAddChannelCntr, 1) =:= 2
    end),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
    ok = emqx_connector:remove(con_type(), ConName),
    ets:delete(ResponseETS),
    ok.
%% Checks that an action whose channel reports an error status raises
%% exactly one alarm, and that removing the action clears it.
t_unhealthy_channel_alarm(_) ->
    FailingStatusFun = wrap_fun(fun() -> {error, my_error} end),
    BridgeConf = maps:put(
        <<"on_get_channel_status_fun">>, FailingStatusFun, bridge_config()
    ),
    0 = get_bridge_v2_alarm_cnt(),
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, BridgeConf),
    1 = get_bridge_v2_alarm_cnt(),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
    0 = get_bridge_v2_alarm_cnt(),
    ok.
%% Number of currently activated alarms whose (binary) name mentions
%% "action" — i.e. alarms raised for bridge v2 actions.
get_bridge_v2_alarm_cnt() ->
    ActiveAlarms = emqx_alarm:get_alarms(activated),
    IsActionAlarm = fun
        (#{name := Name}) when is_binary(Name) ->
            nomatch =/= string:find(Name, "action");
        (_) ->
            false
    end,
    length([Alarm || Alarm <- ActiveAlarms, IsActionAlarm(Alarm)]).
%% Checks that loading a root `actions` config which references an unknown
%% connector is rejected, both when updating an existing action and when
%% adding a new one.
t_load_no_matching_connector(_Config) ->
    Conf = bridge_config(),
    BridgeTypeBin = atom_to_binary(bridge_type()),
    BridgeNameBin0 = <<"my_test_bridge_update">>,
    ?assertMatch({ok, _}, emqx_bridge_v2:create(bridge_type(), BridgeNameBin0, Conf)),
    %% updating to invalid reference
    RootConf0 = #{
        BridgeTypeBin =>
            #{BridgeNameBin0 => Conf#{<<"connector">> := <<"unknown">>}}
    },
    ?assertMatch(
        {error,
            {post_config_update, _HandlerMod, #{
                bridge_name := my_test_bridge_update,
                connector_name := <<"unknown">>,
                bridge_type := _,
                reason := "connector_not_found_or_wrong_type"
            }}},
        update_root_config(RootConf0)
    ),
    %% creating new with invalid reference
    BridgeNameBin1 = <<"my_test_bridge_new">>,
    RootConf1 = #{
        BridgeTypeBin =>
            #{BridgeNameBin1 => Conf#{<<"connector">> := <<"unknown">>}}
    },
    ?assertMatch(
        {error,
            {post_config_update, _HandlerMod, #{
                bridge_name := my_test_bridge_new,
                connector_name := <<"unknown">>,
                bridge_type := _,
                reason := "connector_not_found_or_wrong_type"
            }}},
        update_root_config(RootConf1)
    ),
    ok.
%% tests root config handler post config update hook
%% Exercises create, update and delete of an action through a full root
%% `actions` config replacement, verifying the runtime state after each.
t_load_config_success(_Config) ->
    Conf = bridge_config(),
    BridgeType = bridge_type(),
    BridgeTypeBin = atom_to_binary(BridgeType),
    BridgeName = my_test_bridge_root,
    BridgeNameBin = atom_to_binary(BridgeName),
    %% pre-condition
    ?assertEqual(#{}, emqx_config:get([actions])),
    %% create
    RootConf0 = #{BridgeTypeBin => #{BridgeNameBin => Conf}},
    ?assertMatch(
        {ok, _},
        update_root_config(RootConf0)
    ),
    ?assertMatch(
        {ok, #{
            type := BridgeType,
            name := BridgeName,
            raw_config := #{},
            resource_data := #{}
        }},
        emqx_bridge_v2:lookup(BridgeType, BridgeName)
    ),
    %% update
    RootConf1 = #{BridgeTypeBin => #{BridgeNameBin => Conf#{<<"some_key">> => <<"new_value">>}}},
    ?assertMatch(
        {ok, _},
        update_root_config(RootConf1)
    ),
    ?assertMatch(
        {ok, #{
            type := BridgeType,
            name := BridgeName,
            raw_config := #{<<"some_key">> := <<"new_value">>},
            resource_data := #{}
        }},
        emqx_bridge_v2:lookup(BridgeType, BridgeName)
    ),
    %% delete
    RootConf2 = #{},
    ?assertMatch(
        {ok, _},
        update_root_config(RootConf2)
    ),
    ?assertMatch(
        {error, not_found},
        emqx_bridge_v2:lookup(BridgeType, BridgeName)
    ),
    ok.
%% Checks that creating an action referencing a connector that does not
%% exist is rejected by the post_config_update hook.
t_create_no_matching_connector(_Config) ->
    BadConf = maps:merge(bridge_config(), #{<<"connector">> => <<"wrong_connector_name">>}),
    ?assertMatch(
        {error,
            {post_config_update, _HandlerMod, #{
                bridge_name := _,
                connector_name := _,
                bridge_type := _,
                reason := "connector_not_found_or_wrong_type"
            }}},
        emqx_bridge_v2:create(bridge_type(), my_test_bridge, BadConf)
    ),
    ok.
%% Checks that creating an action whose type has no matching connector is
%% rejected.  The bridge schema is mocked so that `wrong_type` is accepted
%% as an action type even though no connector of that type exists.
t_create_wrong_connector_type(_Config) ->
    meck:expect(
        emqx_bridge_v2_schema,
        fields,
        1,
        bridge_schema(#{bridge_type => wrong_type})
    ),
    Conf = bridge_config(),
    ?assertMatch(
        {error,
            {post_config_update, _HandlerMod, #{
                bridge_name := _,
                connector_name := _,
                bridge_type := wrong_type,
                reason := "connector_not_found_or_wrong_type"
            }}},
        emqx_bridge_v2:create(wrong_type, my_test_bridge, Conf)
    ),
    ok.
%% Checks that updating an existing action to reference a connector that
%% does not exist is rejected by the post_config_update hook.
t_update_connector_not_found(_Config) ->
    OkConf = bridge_config(),
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, OkConf),
    BrokenConf = maps:put(<<"connector">>, <<"wrong_connector_name">>, OkConf),
    ?assertMatch(
        {error,
            {post_config_update, _HandlerMod, #{
                bridge_name := _,
                connector_name := _,
                bridge_type := _,
                reason := "connector_not_found_or_wrong_type"
            }}},
        emqx_bridge_v2:create(bridge_type(), my_test_bridge, BrokenConf)
    ),
    ok.
%% Connector removal must be refused while a referencing action still has
%% active channels (exercises the connector post_config_update hook; we
%% test it here because we also need actions).
t_remove_single_connector_being_referenced_with_active_channels(_Config) ->
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, bridge_config()),
    RemoveResult = emqx_connector:remove(con_type(), con_name()),
    ?assertMatch(
        {error, {post_config_update, _HandlerMod, {active_channels, [_ | _]}}},
        RemoveResult
    ),
    ok.
%% Checks that a connector CAN be removed while referenced by an action,
%% provided the action reports no active channels (on_get_channels is
%% mocked to return []).
t_remove_single_connector_being_referenced_without_active_channels(_Config) ->
    %% we test the connector post config update here because we also need bridges.
    Conf = bridge_config(),
    BridgeName = my_test_bridge,
    ?assertMatch({ok, _}, emqx_bridge_v2:create(bridge_type(), BridgeName, Conf)),
    emqx_common_test_helpers:with_mock(
        emqx_bridge_v2_test_connector,
        on_get_channels,
        fun(_ResId) -> [] end,
        fun() ->
            ?assertMatch(ok, emqx_connector:remove(con_type(), con_name())),
            %% we no longer have connector data if this happens...
            ?assertMatch(
                {ok, #{resource_data := #{}}},
                emqx_bridge_v2:lookup(bridge_type(), BridgeName)
            ),
            ok
        end
    ),
    ok.
%% Wiping the whole connectors root config must fail while any connector
%% still has active channels.
t_remove_multiple_connectors_being_referenced_with_channels(_Config) ->
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, bridge_config()),
    ?assertMatch(
        {error,
            {post_config_update, _HandlerMod, #{
                reason := "connector_has_active_channels",
                type := _,
                connector_name := _,
                active_channels := [_ | _]
            }}},
        update_root_connectors_config(#{})
    ),
    ok.
%% Checks that wiping the whole connectors root config succeeds when the
%% referencing actions report no active channels (on_get_channels mocked
%% to return []).
t_remove_multiple_connectors_being_referenced_without_channels(_Config) ->
    Conf = bridge_config(),
    BridgeName = my_test_bridge,
    ?assertMatch({ok, _}, emqx_bridge_v2:create(bridge_type(), BridgeName, Conf)),
    emqx_common_test_helpers:with_mock(
        emqx_bridge_v2_test_connector,
        on_get_channels,
        fun(_ResId) -> [] end,
        fun() ->
            ?assertMatch(
                {ok, _},
                update_root_connectors_config(#{})
            ),
            %% we no longer have connector data if this happens...
            ?assertMatch(
                {ok, #{resource_data := #{}}},
                emqx_bridge_v2:lookup(bridge_type(), BridgeName)
            ),
            ok
        end
    ),
    ok.
%% Checks emqx_bridge_v2:start/2 behavior when the connector's
%% on_add_channel callback fails: creation still succeeds but the action
%% is disconnected, start/2 returns an error, and once the callback
%% succeeds again start/2 returns ok.
t_start_operation_when_on_add_channel_gives_error(_Config) ->
    Conf = bridge_config(),
    BridgeName = my_test_bridge,
    emqx_common_test_helpers:with_mock(
        emqx_bridge_v2_test_connector,
        on_add_channel,
        fun(_, _, _ResId, _Channel) -> {error, <<"some_error">>} end,
        fun() ->
            %% We can create the bridge even though on_add_channel returns an error
            ?assertMatch({ok, _}, emqx_bridge_v2:create(bridge_type(), BridgeName, Conf)),
            ?assertMatch(
                #{
                    status := disconnected,
                    error := <<"some_error">>
                },
                emqx_bridge_v2:health_check(bridge_type(), BridgeName)
            ),
            ?assertMatch(
                {ok, #{
                    status := disconnected,
                    error := <<"some_error">>
                }},
                emqx_bridge_v2:lookup(bridge_type(), BridgeName)
            ),
            %% emqx_bridge_v2:start/2 should return ok if the bridge is connected
            %% after the start, and an error otherwise
            ?assertMatch({error, _}, emqx_bridge_v2:start(bridge_type(), BridgeName)),
            %% Let us change on_add_channel to be successful and try again
            ok = meck:expect(
                emqx_bridge_v2_test_connector,
                on_add_channel,
                fun(_, _, _ResId, _Channel) -> {ok, #{}} end
            ),
            ?assertMatch(ok, emqx_bridge_v2:start(bridge_type(), BridgeName))
        end
    ),
    ok.
%% Helper Functions
%% Poll `Fun` (a zero-arity boolean fun) every 100 ms until it returns
%% true, failing the testcase once the time budget (default 5 s) is spent.
wait_until(Fun) ->
    wait_until(Fun, 5000).

wait_until(Fun, TimeLeft) when TimeLeft >= 0 ->
    PollInterval = 100,
    case Fun() of
        true ->
            ok;
        false ->
            timer:sleep(PollInterval),
            %% Note: time spent inside Fun() is not counted against the budget.
            wait_until(Fun, TimeLeft - PollInterval)
    end;
wait_until(_Fun, _TimeLeft) ->
    ct:fail("Wait until event did not happen").

View File

@ -0,0 +1,966 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_bridge_v2_api_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-import(emqx_mgmt_api_test_util, [uri/1]).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/test_macros.hrl").
%% Root path of the actions HTTP API under test.
-define(ROOT, "actions").
%% Default connector name used by most testcases.
-define(CONNECTOR_NAME, <<"my_connector">>).
%% Minimal common fields shared by connector and action configurations.
-define(RESOURCE(NAME, TYPE), #{
    <<"enable">> => true,
    %<<"ssl">> => #{<<"enable">> => false},
    <<"type">> => TYPE,
    <<"name">> => NAME
}).
%% Kafka producer is currently the only type with a bridge_v2
%% implementation, so it is used throughout this suite.
-define(CONNECTOR_TYPE_STR, "kafka_producer").
-define(CONNECTOR_TYPE, <<?CONNECTOR_TYPE_STR>>).
-define(KAFKA_BOOTSTRAP_HOST, <<"127.0.0.1:9092">>).
%% Full Kafka connector config as accepted by the connectors API.
-define(KAFKA_CONNECTOR(Name, BootstrapHosts), ?RESOURCE(Name, ?CONNECTOR_TYPE)#{
    <<"authentication">> => <<"none">>,
    <<"bootstrap_hosts">> => BootstrapHosts,
    <<"connect_timeout">> => <<"5s">>,
    <<"metadata_request_timeout">> => <<"5s">>,
    <<"min_metadata_refresh_interval">> => <<"3s">>,
    <<"socket_opts">> =>
        #{
            <<"nodelay">> => true,
            <<"recbuf">> => <<"1024KB">>,
            <<"sndbuf">> => <<"1024KB">>,
            <<"tcp_keepalive">> => <<"none">>
        }
}).
-define(CONNECTOR(Name), ?KAFKA_CONNECTOR(Name, ?KAFKA_BOOTSTRAP_HOST)).
-define(CONNECTOR, ?CONNECTOR(?CONNECTOR_NAME)).
%% Default action name: the name of the running testcase.
-define(BRIDGE_NAME, (atom_to_binary(?FUNCTION_NAME))).
-define(BRIDGE_TYPE_STR, "kafka_producer").
-define(BRIDGE_TYPE, <<?BRIDGE_TYPE_STR>>).
%% Full Kafka action config referencing connector `Connector`.
-define(KAFKA_BRIDGE(Name, Connector), ?RESOURCE(Name, ?BRIDGE_TYPE)#{
    <<"connector">> => Connector,
    <<"kafka">> => #{
        <<"buffer">> => #{
            <<"memory_overload_protection">> => true,
            <<"mode">> => <<"hybrid">>,
            <<"per_partition_limit">> => <<"2GB">>,
            <<"segment_bytes">> => <<"100MB">>
        },
        <<"compression">> => <<"no_compression">>,
        <<"kafka_ext_headers">> => [
            #{
                <<"kafka_ext_header_key">> => <<"clientid">>,
                <<"kafka_ext_header_value">> => <<"${clientid}">>
            },
            #{
                <<"kafka_ext_header_key">> => <<"topic">>,
                <<"kafka_ext_header_value">> => <<"${topic}">>
            }
        ],
        <<"kafka_header_value_encode_mode">> => <<"none">>,
        <<"kafka_headers">> => <<"${pub_props}">>,
        <<"max_batch_bytes">> => <<"896KB">>,
        <<"max_inflight">> => 10,
        <<"message">> => #{
            <<"key">> => <<"${.clientid}">>,
            <<"timestamp">> => <<"${.timestamp}">>,
            <<"value">> => <<"${.}">>
        },
        <<"partition_count_refresh_interval">> => <<"60s">>,
        <<"partition_strategy">> => <<"random">>,
        <<"required_acks">> => <<"all_isr">>,
        <<"topic">> => <<"kafka-topic">>
    },
    <<"local_topic">> => <<"mqtt/local/topic">>,
    <<"resource_opts">> => #{
        <<"health_check_interval">> => <<"32s">>
    }
}).
-define(KAFKA_BRIDGE(Name), ?KAFKA_BRIDGE(Name, ?CONNECTOR_NAME)).
%% Same as ?KAFKA_BRIDGE, but without the read-only `name`/`type` fields,
%% as required by the update (PUT) API.
-define(KAFKA_BRIDGE_UPDATE(Name, Connector),
    maps:without([<<"name">>, <<"type">>], ?KAFKA_BRIDGE(Name, Connector))
).
-define(KAFKA_BRIDGE_UPDATE(Name), ?KAFKA_BRIDGE_UPDATE(Name, ?CONNECTOR_NAME)).
%% -define(BRIDGE_TYPE_MQTT, <<"mqtt">>).
%% -define(MQTT_BRIDGE(SERVER, NAME), ?BRIDGE(NAME, ?BRIDGE_TYPE_MQTT)#{
%% <<"server">> => SERVER,
%% <<"username">> => <<"user1">>,
%% <<"password">> => <<"">>,
%% <<"proto_ver">> => <<"v5">>,
%% <<"egress">> => #{
%% <<"remote">> => #{
%% <<"topic">> => <<"emqx/${topic}">>,
%% <<"qos">> => <<"${qos}">>,
%% <<"retain">> => false
%% }
%% }
%% }).
%% -define(MQTT_BRIDGE(SERVER), ?MQTT_BRIDGE(SERVER, <<"mqtt_egress_test_bridge">>)).
%% -define(BRIDGE_TYPE_HTTP, <<"kafka">>).
%% -define(HTTP_BRIDGE(URL, NAME), ?BRIDGE(NAME, ?BRIDGE_TYPE_HTTP)#{
%% <<"url">> => URL,
%% <<"local_topic">> => <<"emqx_webhook/#">>,
%% <<"method">> => <<"post">>,
%% <<"body">> => <<"${payload}">>,
%% <<"headers">> => #{
%% % NOTE
%% % The Pascal-Case is important here.
%% % The reason is kinda ridiculous: `emqx_bridge_resource:create_dry_run/2` converts
%% % bridge config keys into atoms, and the atom 'Content-Type' exists in the ERTS
%% % when this happens (while the 'content-type' does not).
%% <<"Content-Type">> => <<"application/json">>
%% }
%% }).
%% -define(HTTP_BRIDGE(URL), ?HTTP_BRIDGE(URL, ?BRIDGE_NAME)).
%% -define(URL(PORT, PATH),
%% list_to_binary(
%% io_lib:format(
%% "http://localhost:~s/~s",
%% [integer_to_list(PORT), PATH]
%% )
%% )
%% ).
%% Applications started on every node used by the suite.
-define(APPSPECS, [
    emqx_conf,
    emqx,
    emqx_auth,
    emqx_management,
    emqx_connector,
    {emqx_bridge, "actions {}"},
    {emqx_rule_engine, "rule_engine { rules {} }"}
]).
%% Dashboard (HTTP API) app spec; only started on the primary node.
-define(APPSPEC_DASHBOARD,
    {emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"}
).
-if(?EMQX_RELEASE_EDITION == ee).
%% For now we got only kafka implementing `bridge_v2` and that is enterprise only.
all() ->
    [
        {group, single},
        %{group, cluster_later_join},
        {group, cluster}
    ].
-else.
%% Community edition: no bridge_v2 implementations available, run nothing.
all() ->
    [].
-endif.
%% Test groups: `single` runs everything on one node; `cluster` runs the
%% cluster-safe subset on a two-node cluster.
groups() ->
    AllTCs = emqx_common_test_helpers:all(?MODULE),
    %% Testcases that only make sense on a single node.
    SingleOnlyTests = [
        t_bridges_probe
    ],
    ClusterLaterJoinOnlyTCs = [
        % t_cluster_later_join_metrics
    ],
    [
        {single, [], AllTCs -- ClusterLaterJoinOnlyTCs},
        {cluster_later_join, [], ClusterLaterJoinOnlyTCs},
        {cluster, [], (AllTCs -- SingleOnlyTests) -- ClusterLaterJoinOnlyTCs}
    ].
%% Per-testcase time limit.
suite() ->
    [{timetrap, {seconds, 60}}].
%% All setup happens per group / per testcase, so nothing to do here.
init_per_suite(Config) ->
    Config.
end_per_suite(_Config) ->
    ok.
%% The `cluster` group boots a two-node cluster; all other groups start
%% the application stack (plus dashboard) on the current node.
init_per_group(cluster = Name, Config) ->
    Nodes = [NodePrimary | _] = mk_cluster(Name, Config),
    init_api([{group, Name}, {cluster_nodes, Nodes}, {node, NodePrimary} | Config]);
%% init_per_group(cluster_later_join = Name, Config) ->
%%     Nodes = [NodePrimary | _] = mk_cluster(Name, Config, #{join_to => undefined}),
%%     init_api([{group, Name}, {cluster_nodes, Nodes}, {node, NodePrimary} | Config]);
init_per_group(Name, Config) ->
    WorkDir = filename:join(?config(priv_dir, Config), Name),
    Apps = emqx_cth_suite:start(?APPSPECS ++ [?APPSPEC_DASHBOARD], #{work_dir => WorkDir}),
    init_api([{group, single}, {group_apps, Apps}, {node, node()} | Config]).
%% Create a default API app/key on the API node and stash the key in the
%% config so the request helpers can authenticate.
init_api(Config) ->
    {ok, ApiKey} = erpc:call(
        ?config(node, Config), emqx_common_test_http, create_default_app, []
    ),
    [{api_key, ApiKey} | Config].
%% Boot a two-node test cluster; only the first node runs the dashboard
%% (HTTP API) application.
mk_cluster(Name, Config) ->
    mk_cluster(Name, Config, #{}).
mk_cluster(Name, Config, Opts) ->
    Node1Apps = ?APPSPECS ++ [?APPSPEC_DASHBOARD],
    Node2Apps = ?APPSPECS,
    emqx_cth_cluster:start(
        [
            {emqx_bridge_v2_api_SUITE_1, Opts#{role => core, apps => Node1Apps}},
            {emqx_bridge_v2_api_SUITE_2, Opts#{role => core, apps => Node2Apps}}
        ],
        #{work_dir => filename:join(?config(priv_dir, Config), Name)}
    ).
%% Tear down the test cluster for cluster groups; otherwise stop the
%% locally started applications.
end_per_group(cluster, Config) ->
    ok = emqx_cth_cluster:stop(?config(cluster_nodes, Config));
end_per_group(cluster_later_join, Config) ->
    ok = emqx_cth_cluster:stop(?config(cluster_nodes, Config));
end_per_group(_Group, Config) ->
    emqx_cth_suite:stop(?config(group_apps, Config)),
    ok.
%% Install the connector mocks (on every node for cluster runs) and create
%% the default connector that the actions in this suite reference.
init_per_testcase(_TestCase, Config) ->
    case ?config(cluster_nodes, Config) of
        undefined ->
            init_mocks();
        Nodes ->
            [erpc:call(Node, ?MODULE, init_mocks, []) || Node <- Nodes]
    end,
    {ok, 201, _} = request(post, uri(["connectors"]), ?CONNECTOR, Config),
    Config.
%% Remove all actions and connectors, then unload the mocks on every node
%% involved in the testcase.
end_per_testcase(_TestCase, Config) ->
    Node = ?config(node, Config),
    ok = erpc:call(Node, fun clear_resources/0),
    case ?config(cluster_nodes, Config) of
        undefined ->
            meck:unload();
        ClusterNodes ->
            [erpc:call(ClusterNode, meck, unload, []) || ClusterNode <- ClusterNodes]
    end,
    ok = emqx_common_test_helpers:call_janitor(),
    ok.
%% Dummy connector module used in place of the real Kafka implementation.
-define(CONNECTOR_IMPL, emqx_bridge_v2_dummy_connector).
%% Replace the connector resource implementation with a mock whose
%% callbacks succeed by default; connectors named `bad_*` get a state that
%% reports `connecting` forever.  Returns the names of the mocked modules.
init_mocks() ->
    meck:new(emqx_connector_ee_schema, [passthrough, no_link]),
    meck:expect(emqx_connector_ee_schema, resource_type, 1, ?CONNECTOR_IMPL),
    meck:new(?CONNECTOR_IMPL, [non_strict, no_link]),
    meck:expect(?CONNECTOR_IMPL, callback_mode, 0, async_if_possible),
    meck:expect(
        ?CONNECTOR_IMPL,
        on_start,
        fun
            (<<"connector:", ?CONNECTOR_TYPE_STR, ":bad_", _/binary>>, _C) ->
                {ok, bad_connector_state};
            (_I, _C) ->
                {ok, connector_state}
        end
    ),
    meck:expect(?CONNECTOR_IMPL, on_stop, 2, ok),
    meck:expect(
        ?CONNECTOR_IMPL,
        on_get_status,
        fun
            (_, bad_connector_state) -> connecting;
            (_, _) -> connected
        end
    ),
    meck:expect(?CONNECTOR_IMPL, on_add_channel, 4, {ok, connector_state}),
    meck:expect(?CONNECTOR_IMPL, on_remove_channel, 3, {ok, connector_state}),
    meck:expect(?CONNECTOR_IMPL, on_get_channel_status, 3, connected),
    ok = meck:expect(?CONNECTOR_IMPL, on_get_channels, fun(ResId) ->
        emqx_bridge_v2:get_channels_for_connector(ResId)
    end),
    [?CONNECTOR_IMPL, emqx_connector_ee_schema].
%% Remove every existing action first, then every connector (actions must
%% go first since connectors with active channels refuse removal).
clear_resources() ->
    RemoveBridge = fun(#{type := Type, name := Name}) ->
        ok = emqx_bridge_v2:remove(Type, Name)
    end,
    RemoveConnector = fun(#{type := Type, name := Name}) ->
        ok = emqx_connector:remove(Type, Name)
    end,
    lists:foreach(RemoveBridge, emqx_bridge_v2:list()),
    lists:foreach(RemoveConnector, emqx_connector:list()).
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
%% We have to pretend testing a kafka bridge since at this point that's the
%% only one that's implemented.
%% End-to-end CRUD lifecycle of an action through the HTTP API: create,
%% list, get, update (including invalid connector references), delete,
%% plus error handling for unknown/invalid IDs and names.
t_bridges_lifecycle(Config) ->
    %% assert there are no bridges at first
    {ok, 200, []} = request_json(get, uri([?ROOT]), Config),
    {ok, 404, _} = request(get, uri([?ROOT, "foo"]), Config),
    {ok, 404, _} = request(get, uri([?ROOT, "kafka_producer:foo"]), Config),
    %% need a var for patterns below
    BridgeName = ?BRIDGE_NAME,
    ?assertMatch(
        {ok, 201, #{
            <<"type">> := ?BRIDGE_TYPE,
            <<"name">> := BridgeName,
            <<"enable">> := true,
            <<"status">> := <<"connected">>,
            <<"node_status">> := [_ | _],
            <<"connector">> := ?CONNECTOR_NAME,
            <<"kafka">> := #{},
            <<"local_topic">> := _,
            <<"resource_opts">> := _
        }},
        request_json(
            post,
            uri([?ROOT]),
            ?KAFKA_BRIDGE(?BRIDGE_NAME),
            Config
        )
    ),
    %% list all bridges, assert bridge is in it
    ?assertMatch(
        {ok, 200, [
            #{
                <<"type">> := ?BRIDGE_TYPE,
                <<"name">> := BridgeName,
                <<"enable">> := true,
                <<"status">> := _,
                <<"node_status">> := [_ | _]
            }
        ]},
        request_json(get, uri([?ROOT]), Config)
    ),
    %% list all bridges, assert bridge is in it
    %% NOTE(review): this repeats the listing assertion above verbatim;
    %% presumably a copy-paste leftover.
    ?assertMatch(
        {ok, 200, [
            #{
                <<"type">> := ?BRIDGE_TYPE,
                <<"name">> := BridgeName,
                <<"enable">> := true,
                <<"status">> := _,
                <<"node_status">> := [_ | _]
            }
        ]},
        request_json(get, uri([?ROOT]), Config)
    ),
    %% get the bridge by id
    BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME),
    ?assertMatch(
        {ok, 200, #{
            <<"type">> := ?BRIDGE_TYPE,
            <<"name">> := BridgeName,
            <<"enable">> := true,
            <<"status">> := _,
            <<"node_status">> := [_ | _]
        }},
        request_json(get, uri([?ROOT, BridgeID]), Config)
    ),
    %% unknown operation names on the bridge id are rejected
    ?assertMatch(
        {ok, 400, #{
            <<"code">> := <<"BAD_REQUEST">>,
            <<"message">> := _
        }},
        request_json(post, uri([?ROOT, BridgeID, "brababbel"]), Config)
    ),
    %% update bridge config
    {ok, 201, _} = request(post, uri(["connectors"]), ?CONNECTOR(<<"foobla">>), Config),
    ?assertMatch(
        {ok, 200, #{
            <<"type">> := ?BRIDGE_TYPE,
            <<"name">> := BridgeName,
            <<"connector">> := <<"foobla">>,
            <<"enable">> := true,
            <<"status">> := _,
            <<"node_status">> := [_ | _]
        }},
        request_json(
            put,
            uri([?ROOT, BridgeID]),
            ?KAFKA_BRIDGE_UPDATE(?BRIDGE_NAME, <<"foobla">>),
            Config
        )
    ),
    %% update bridge with unknown connector name
    {ok, 400, #{
        <<"code">> := <<"BAD_REQUEST">>,
        <<"message">> := Message1
    }} =
        request_json(
            put,
            uri([?ROOT, BridgeID]),
            ?KAFKA_BRIDGE_UPDATE(?BRIDGE_NAME, <<"does_not_exist">>),
            Config
        ),
    ?assertMatch(
        #{<<"reason">> := <<"connector_not_found_or_wrong_type">>},
        emqx_utils_json:decode(Message1)
    ),
    %% update bridge with connector of wrong type
    {ok, 201, _} =
        request(
            post,
            uri(["connectors"]),
            (?CONNECTOR(<<"foobla2">>))#{
                <<"type">> => <<"azure_event_hub_producer">>,
                <<"authentication">> => #{
                    <<"username">> => <<"emqxuser">>,
                    <<"password">> => <<"topSecret">>,
                    <<"mechanism">> => <<"plain">>
                },
                <<"ssl">> => #{
                    <<"enable">> => true,
                    <<"server_name_indication">> => <<"auto">>,
                    <<"verify">> => <<"verify_none">>,
                    <<"versions">> => [<<"tlsv1.3">>, <<"tlsv1.2">>]
                }
            },
            Config
        ),
    {ok, 400, #{
        <<"code">> := <<"BAD_REQUEST">>,
        <<"message">> := Message2
    }} =
        request_json(
            put,
            uri([?ROOT, BridgeID]),
            ?KAFKA_BRIDGE_UPDATE(?BRIDGE_NAME, <<"foobla2">>),
            Config
        ),
    ?assertMatch(
        #{<<"reason">> := <<"connector_not_found_or_wrong_type">>},
        emqx_utils_json:decode(Message2)
    ),
    %% delete the bridge
    {ok, 204, <<>>} = request(delete, uri([?ROOT, BridgeID]), Config),
    {ok, 200, []} = request_json(get, uri([?ROOT]), Config),
    %% try create with unknown connector name
    {ok, 400, #{
        <<"code">> := <<"BAD_REQUEST">>,
        <<"message">> := Message3
    }} =
        request_json(
            post,
            uri([?ROOT]),
            ?KAFKA_BRIDGE(?BRIDGE_NAME, <<"does_not_exist">>),
            Config
        ),
    ?assertMatch(
        #{<<"reason">> := <<"connector_not_found_or_wrong_type">>},
        emqx_utils_json:decode(Message3)
    ),
    %% try create bridge with connector of wrong type
    {ok, 400, #{
        <<"code">> := <<"BAD_REQUEST">>,
        <<"message">> := Message4
    }} =
        request_json(
            post,
            uri([?ROOT]),
            ?KAFKA_BRIDGE(?BRIDGE_NAME, <<"foobla2">>),
            Config
        ),
    ?assertMatch(
        #{<<"reason">> := <<"connector_not_found_or_wrong_type">>},
        emqx_utils_json:decode(Message4)
    ),
    %% make sure nothing has been created above
    {ok, 200, []} = request_json(get, uri([?ROOT]), Config),
    %% update a deleted bridge returns an error
    ?assertMatch(
        {ok, 404, #{
            <<"code">> := <<"NOT_FOUND">>,
            <<"message">> := _
        }},
        request_json(
            put,
            uri([?ROOT, BridgeID]),
            ?KAFKA_BRIDGE_UPDATE(?BRIDGE_NAME),
            Config
        )
    ),
    %% deleting a non-existing bridge should result in an error
    ?assertMatch(
        {ok, 404, #{
            <<"code">> := <<"NOT_FOUND">>,
            <<"message">> := _
        }},
        request_json(delete, uri([?ROOT, BridgeID]), Config)
    ),
    %% try delete unknown bridge id
    ?assertMatch(
        {ok, 404, #{
            <<"code">> := <<"NOT_FOUND">>,
            <<"message">> := <<"Invalid bridge ID", _/binary>>
        }},
        request_json(delete, uri([?ROOT, "foo"]), Config)
    ),
    %% Try create bridge with bad characters as name
    {ok, 400, _} = request(post, uri([?ROOT]), ?KAFKA_BRIDGE(<<"隋达"/utf8>>), Config),
    {ok, 400, _} = request(post, uri([?ROOT]), ?KAFKA_BRIDGE(<<"a.b">>), Config),
    ok.
%% Start requests addressed to nonexistent nodes must return 404.
t_start_bridge_unknown_node(Config) ->
    UnknownNodeUri = uri(["nodes", "thisbetterbenotanatomyet", ?ROOT, "kafka_producer:foo", start]),
    UndefinedNodeUri = uri(["nodes", "undefined", ?ROOT, "kafka_producer:foo", start]),
    {ok, 404, _} = request(post, UnknownNodeUri, Config),
    {ok, 404, _} = request(post, UndefinedNodeUri, Config).
%% Start-operation lifecycle addressed to a single node.
t_start_bridge_node(Config) ->
    do_start_bridge(node, Config).
%% Start-operation lifecycle addressed to the whole cluster.
t_start_bridge_cluster(Config) ->
    do_start_bridge(cluster, Config).
%% Shared body for the start-operation tests; `TestType` (node | cluster)
%% selects whether the start request targets one node or the whole cluster.
do_start_bridge(TestType, Config) ->
    %% assert there are no bridges at first
    {ok, 200, []} = request_json(get, uri([?ROOT]), Config),
    Name = atom_to_binary(TestType),
    ?assertMatch(
        {ok, 201, #{
            <<"type">> := ?BRIDGE_TYPE,
            <<"name">> := Name,
            <<"enable">> := true,
            <<"status">> := <<"connected">>,
            <<"node_status">> := [_ | _]
        }},
        request_json(
            post,
            uri([?ROOT]),
            ?KAFKA_BRIDGE(Name),
            Config
        )
    ),
    BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name),
    %% start again
    {ok, 204, <<>>} = request(post, {operation, TestType, start, BridgeID}, Config),
    ?assertMatch(
        {ok, 200, #{<<"status">> := <<"connected">>}},
        request_json(get, uri([?ROOT, BridgeID]), Config)
    ),
    %% start a started bridge
    {ok, 204, <<>>} = request(post, {operation, TestType, start, BridgeID}, Config),
    ?assertMatch(
        {ok, 200, #{<<"status">> := <<"connected">>}},
        request_json(get, uri([?ROOT, BridgeID]), Config)
    ),
    %% unknown operation names are rejected
    {ok, 400, _} = request(post, {operation, TestType, invalidop, BridgeID}, Config),
    %% Make start bridge fail
    expect_on_all_nodes(
        ?CONNECTOR_IMPL,
        on_add_channel,
        fun(_, _, _ResId, _Channel) -> {error, <<"my_error">>} end,
        Config
    ),
    %% Restart the connector so the failing on_add_channel takes effect.
    connector_operation(Config, ?BRIDGE_TYPE, ?CONNECTOR_NAME, stop),
    connector_operation(Config, ?BRIDGE_TYPE, ?CONNECTOR_NAME, start),
    {ok, 400, _} = request(post, {operation, TestType, start, BridgeID}, Config),
    %% Make start bridge succeed
    expect_on_all_nodes(
        ?CONNECTOR_IMPL,
        on_add_channel,
        fun(_, _, _ResId, _Channel) -> {ok, connector_state} end,
        Config
    ),
    %% try to start again
    {ok, 204, <<>>} = request(post, {operation, TestType, start, BridgeID}, Config),
    %% delete the bridge
    {ok, 204, <<>>} = request(delete, uri([?ROOT, BridgeID]), Config),
    {ok, 200, []} = request_json(get, uri([?ROOT]), Config),
    %% Fail parse-id check
    {ok, 404, _} = request(post, {operation, TestType, start, <<"wreckbook_fugazi">>}, Config),
    %% Looks ok but doesn't exist
    {ok, 404, _} = request(post, {operation, TestType, start, <<"webhook:cptn_hook">>}, Config),
    ok.
%% Install a meck expectation either locally (single-node run) or on every
%% node of the test cluster.
expect_on_all_nodes(Mod, Function, Fun, Config) ->
    case ?config(cluster_nodes, Config) of
        undefined ->
            ok = meck:expect(Mod, Function, Fun);
        Nodes ->
            lists:foreach(
                fun(Node) -> erpc:call(Node, meck, expect, [Mod, Function, Fun]) end,
                Nodes
            )
    end,
    ok.
%% Run a connector resource operation (e.g. start/stop) either locally or
%% on the relevant node(s) when running in a cluster group.
connector_operation(Config, ConnectorType, ConnectorName, OperationName) ->
    case ?config(group, Config) of
        cluster ->
            case ?config(cluster_nodes, Config) of
                undefined ->
                    Node = ?config(node, Config),
                    ok = rpc:call(
                        Node,
                        emqx_connector_resource,
                        OperationName,
                        [ConnectorType, ConnectorName],
                        500
                    );
                Nodes ->
                    erpc:multicall(
                        Nodes,
                        emqx_connector_resource,
                        OperationName,
                        [ConnectorType, ConnectorName],
                        500
                    )
            end;
        _ ->
            ok = emqx_connector_resource:OperationName(ConnectorType, ConnectorName)
    end.
%% t_start_stop_inconsistent_bridge_node(Config) ->
%% start_stop_inconsistent_bridge(node, Config).
%% t_start_stop_inconsistent_bridge_cluster(Config) ->
%% start_stop_inconsistent_bridge(cluster, Config).
%% start_stop_inconsistent_bridge(Type, Config) ->
%% Node = ?config(node, Config),
%% erpc:call(Node, fun() ->
%% meck:new(emqx_bridge_resource, [passthrough, no_link]),
%% meck:expect(
%% emqx_bridge_resource,
%% stop,
%% fun
%% (_, <<"bridge_not_found">>) -> {error, not_found};
%% (BridgeType, Name) -> meck:passthrough([BridgeType, Name])
%% end
%% )
%% end),
%% emqx_common_test_helpers:on_exit(fun() ->
%% erpc:call(Node, fun() ->
%% meck:unload([emqx_bridge_resource])
%% end)
%% end),
%% {ok, 201, _Bridge} = request(
%% post,
%% uri([?ROOT]),
%% ?KAFKA_BRIDGE(<<"bridge_not_found">>),
%% Config
%% ),
%% {ok, 503, _} = request(
%% post, {operation, Type, stop, <<"kafka:bridge_not_found">>}, Config
%% ).
%% [TODO] This is a mess, need to clarify what the actual behavior needs to be
%% like.
%% t_enable_disable_bridges(Config) ->
%% %% assert we there's no bridges at first
%% {ok, 200, []} = request_json(get, uri([?ROOT]), Config),
%% Name = ?BRIDGE_NAME,
%% ?assertMatch(
%% {ok, 201, #{
%% <<"type">> := ?BRIDGE_TYPE,
%% <<"name">> := Name,
%% <<"enable">> := true,
%% <<"status">> := <<"connected">>,
%% <<"node_status">> := [_ | _]
%% }},
%% request_json(
%% post,
%% uri([?ROOT]),
%% ?KAFKA_BRIDGE(Name),
%% Config
%% )
%% ),
%% BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name),
%% %% disable it
%% meck:expect(?CONNECTOR_IMPL, on_get_channel_status, 3, connecting),
%% {ok, 204, <<>>} = request(put, enable_path(false, BridgeID), Config),
%% ?assertMatch(
%% {ok, 200, #{<<"status">> := <<"stopped">>}},
%% request_json(get, uri([?ROOT, BridgeID]), Config)
%% ),
%% %% enable again
%% meck:expect(?CONNECTOR_IMPL, on_get_channel_status, 3, connected),
%% {ok, 204, <<>>} = request(put, enable_path(true, BridgeID), Config),
%% ?assertMatch(
%% {ok, 200, #{<<"status">> := <<"connected">>}},
%% request_json(get, uri([?ROOT, BridgeID]), Config)
%% ),
%% %% enable an already started bridge
%% {ok, 204, <<>>} = request(put, enable_path(true, BridgeID), Config),
%% ?assertMatch(
%% {ok, 200, #{<<"status">> := <<"connected">>}},
%% request_json(get, uri([?ROOT, BridgeID]), Config)
%% ),
%% %% disable it again
%% {ok, 204, <<>>} = request(put, enable_path(false, BridgeID), Config),
%% %% bad param
%% {ok, 404, _} = request(put, enable_path(foo, BridgeID), Config),
%% {ok, 404, _} = request(put, enable_path(true, "foo"), Config),
%% {ok, 404, _} = request(put, enable_path(true, "webhook:foo"), Config),
%% {ok, 400, Res} = request(post, {operation, node, start, BridgeID}, <<>>, fun json/1, Config),
%% ?assertEqual(
%% #{
%% <<"code">> => <<"BAD_REQUEST">>,
%% <<"message">> => <<"Forbidden operation, bridge not enabled">>
%% },
%% Res
%% ),
%% {ok, 400, Res} = request(
%% post, {operation, cluster, start, BridgeID}, <<>>, fun json/1, Config
%% ),
%% %% enable a stopped bridge
%% {ok, 204, <<>>} = request(put, enable_path(true, BridgeID), Config),
%% ?assertMatch(
%% {ok, 200, #{<<"status">> := <<"connected">>}},
%% request_json(get, uri([?ROOT, BridgeID]), Config)
%% ),
%% %% delete the bridge
%% {ok, 204, <<>>} = request(delete, uri([?ROOT, BridgeID]), Config),
%% {ok, 200, []} = request_json(get, uri([?ROOT]), Config).
%% Verifies the dry-run probe endpoint (`POST /actions_probe'):
%% a successful probe returns 204 with an empty body, probing the same
%% name twice is fine (no real resource is created), a failing connector
%% start yields 400/TEST_FAILED, and an unknown bridge type is rejected
%% with 400/BAD_REQUEST.
t_bridges_probe(Config) ->
    {ok, 204, <<>>} = request(
        post,
        uri(["actions_probe"]),
        ?KAFKA_BRIDGE(?BRIDGE_NAME),
        Config
    ),
    %% second time with same name is ok since no real bridge created
    {ok, 204, <<>>} = request(
        post,
        uri(["actions_probe"]),
        ?KAFKA_BRIDGE(?BRIDGE_NAME),
        Config
    ),
    %% Make the mocked connector fail to start, so the probe reports a
    %% test failure instead of succeeding.
    meck:expect(?CONNECTOR_IMPL, on_start, 2, {error, on_start_error}),
    ?assertMatch(
        {ok, 400, #{
            <<"code">> := <<"TEST_FAILED">>,
            <<"message">> := _
        }},
        request_json(
            post,
            uri(["actions_probe"]),
            ?KAFKA_BRIDGE(<<"broken_bridge">>, <<"brokenhost:1234">>),
            Config
        )
    ),
    %% Restore a working on_start; an unknown bridge type must still be
    %% rejected at validation time.
    meck:expect(?CONNECTOR_IMPL, on_start, 2, {ok, bridge_state}),
    ?assertMatch(
        {ok, 400, #{<<"code">> := <<"BAD_REQUEST">>}},
        request_json(
            post,
            uri(["actions_probe"]),
            ?RESOURCE(<<"broken_bridge">>, <<"unknown_type">>),
            Config
        )
    ),
    ok.
%% Verifies how deleting an action interacts with rules that reference
%% it: a plain DELETE is rejected (400) while a rule still uses the
%% action, and DELETE with `also_delete_dep_actions=true' also removes
%% the action reference from the rule.
t_cascade_delete_actions(Config) ->
    %% assert that there are no bridges at first
    {ok, 200, []} = request_json(get, uri([?ROOT]), Config),
    %% then we add a bridge, using POST
    %% POST /actions/ will create a bridge
    BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME),
    {ok, 201, _} = request(
        post,
        uri([?ROOT]),
        ?KAFKA_BRIDGE(?BRIDGE_NAME),
        Config
    ),
    {ok, 201, #{<<"id">> := RuleId}} = request_json(
        post,
        uri(["rules"]),
        #{
            <<"name">> => <<"t_http_crud_apis">>,
            <<"enable">> => true,
            <<"actions">> => [BridgeID],
            <<"sql">> => <<"SELECT * from \"t\"">>
        },
        Config
    ),
    %% delete the bridge will also delete the actions from the rules
    {ok, 204, _} = request(
        delete,
        uri([?ROOT, BridgeID]) ++ "?also_delete_dep_actions=true",
        Config
    ),
    {ok, 200, []} = request_json(get, uri([?ROOT]), Config),
    ?assertMatch(
        {ok, 200, #{<<"actions">> := []}},
        request_json(get, uri(["rules", RuleId]), Config)
    ),
    {ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), Config),
    %% re-create bridge and rule to exercise the non-cascading path
    {ok, 201, _} = request(
        post,
        uri([?ROOT]),
        ?KAFKA_BRIDGE(?BRIDGE_NAME),
        Config
    ),
    %% Capture the rule id so the rule can be removed during cleanup.
    %% Previously this response was discarded, leaking the rule into
    %% subsequent test cases.
    {ok, 201, #{<<"id">> := RuleId2}} = request_json(
        post,
        uri(["rules"]),
        #{
            <<"name">> => <<"t_http_crud_apis">>,
            <<"enable">> => true,
            <<"actions">> => [BridgeID],
            <<"sql">> => <<"SELECT * from \"t\"">>
        },
        Config
    ),
    %% deleting without `also_delete_dep_actions' must fail while a rule
    %% still references the action
    {ok, 400, _} = request(
        delete,
        uri([?ROOT, BridgeID]),
        Config
    ),
    {ok, 200, [_]} = request_json(get, uri([?ROOT]), Config),
    %% Cleanup
    {ok, 204, <<>>} = request(delete, uri(["rules", RuleId2]), Config),
    {ok, 204, _} = request(
        delete,
        uri([?ROOT, BridgeID]) ++ "?also_delete_dep_actions=true",
        Config
    ),
    {ok, 200, []} = request_json(get, uri([?ROOT]), Config).
%%% helpers
%% Open a passive, binary-mode TCP listen socket on an OS-assigned port.
%% Returns {Port, Sock} on success, or {error, Reason} for any listen
%% failure other than eaddrinuse (eaddrinuse falls through and crashes
%% with case_clause — an ephemeral port should never already be in use).
listen_on_random_port() ->
    Opts = [
        binary,
        {active, false},
        {packet, raw},
        {reuseaddr, true},
        {backlog, 1000}
    ],
    Result = gen_tcp:listen(0, Opts),
    case Result of
        {ok, ListenSock} ->
            {ok, AssignedPort} = inet:port(ListenSock),
            {AssignedPort, ListenSock};
        {error, Reason} when Reason /= eaddrinuse ->
            {error, Reason}
    end.
%% Perform an HTTP API request with an empty body.
request(Method, URL, Config) ->
    request(Method, URL, [], Config).
%% Perform an HTTP API request. The URL may be given directly, or as an
%% `{operation, Type, Op, BridgeID}' tuple which is first expanded into
%% a node-local or cluster-wide operation path.
request(Method, {operation, Type, Op, BridgeID}, Body, Config) ->
    URL = operation_path(Type, Op, BridgeID, Config),
    request(Method, URL, Body, Config);
request(Method, URL, Body, Config) ->
    %% authenticate with the API key stored in the CT config
    AuthHeader = emqx_common_test_http:auth_header(?config(api_key, Config)),
    Opts = #{compatible_mode => true, httpc_req_opts => [{body_format, binary}]},
    emqx_mgmt_api_test_util:request_api(Method, URL, [], AuthHeader, Body, Opts).
%% Perform a request and run the raw response body through `Decoder'.
%% A decoder failure ({error, _}) is propagated as-is; otherwise the
%% decoded term replaces the raw body in the {ok, Code, Body} triple.
%% Any non-{ok, _, _} request result is passed through unchanged.
request(Method, URL, Body, Decoder, Config) ->
    Raw = request(Method, URL, Body, Config),
    case Raw of
        {ok, StatusCode, RespBody} ->
            Decoded = Decoder(RespBody),
            case Decoded of
                {error, _} = DecodeError ->
                    DecodeError;
                Term ->
                    {ok, StatusCode, Term}
            end;
        Failure ->
            Failure
    end.
%% Convenience wrappers: perform a request and decode the response body
%% as JSON (maps).
request_json(Method, URLLike, Config) ->
    request(Method, URLLike, [], fun json/1, Config).
request_json(Method, URLLike, Body, Config) ->
    request(Method, URLLike, Body, fun json/1, Config).
%% Build the REST path for a bridge operation, either targeted at one
%% node (`/nodes/<node>/<root>/<id>/<op>') or at the whole cluster
%% (`/<root>/<id>/<op>').
operation_path(node, Oper, BridgeID, Config) ->
    uri(["nodes", ?config(node, Config), ?ROOT, BridgeID, Oper]);
operation_path(cluster, Oper, BridgeID, _Config) ->
    uri([?ROOT, BridgeID, Oper]).
%% Build the REST path for enabling/disabling a bridge.
enable_path(Enable, BridgeID) ->
    uri([?ROOT, BridgeID, "enable", Enable]).
%% Publish an MQTT message on the remote test node.
publish_message(Topic, Body, Config) ->
    Node = ?config(node, Config),
    erpc:call(Node, emqx, publish, [emqx_message:make(Topic, Body)]).
%% Update EMQX config at `Path' on the test node.
update_config(Path, Value, Config) ->
    Node = ?config(node, Config),
    erpc:call(Node, emqx, update_config, [Path, Value]).
%% Read the raw (unvalidated) EMQX config at `Path' from the test node.
get_raw_config(Path, Config) ->
    Node = ?config(node, Config),
    erpc:call(Node, emqx, get_raw_config, [Path]).
%% Add an authentication user on the test node.
add_user_auth(Chain, AuthenticatorID, User, Config) ->
    Node = ?config(node, Config),
    erpc:call(Node, emqx_authentication, add_user, [Chain, AuthenticatorID, User]).
%% Delete an authentication user on the test node.
delete_user_auth(Chain, AuthenticatorID, User, Config) ->
    Node = ?config(node, Config),
    erpc:call(Node, emqx_authentication, delete_user, [Chain, AuthenticatorID, User]).
%% Coerce a binary or charlist into a charlist.
str(S) when is_binary(S) -> binary_to_list(S);
str(S) when is_list(S) -> S.
%% Decode a JSON binary into maps; on failure, log the payload to the
%% CT log and return the `{error, Reason}' tuple for the caller to
%% pattern-match on.
json(B) when is_binary(B) ->
    case emqx_utils_json:safe_decode(B, [return_maps]) of
        {ok, Term} ->
            Term;
        {error, Reason} = Error ->
            ct:pal("Failed to decode json: ~p~n~p", [Reason, B]),
            Error
    end.

View File

@ -0,0 +1,31 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
%% this module is only intended to be mocked
-module(emqx_bridge_v2_dummy_connector).
-export([
    callback_mode/0,
    on_start/2,
    on_stop/2,
    on_add_channel/4,
    on_get_channel_status/3
]).
%% Every callback crashes on purpose: tests are expected to replace
%% them with meck expectations, so reaching one of these bodies means
%% the mock was not installed.
callback_mode() -> error(unexpected).
on_start(_, _) -> error(unexpected).
on_stop(_, _) -> error(unexpected).
on_add_channel(_, _, _, _) -> error(unexpected).
on_get_channel_status(_, _, _) -> error(unexpected).

View File

@ -0,0 +1,137 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
%% A scriptable connector used by bridge-v2 test suites. Callbacks look
%% for a fun reference in their config/state (resolved through
%% emqx_bridge_v2_SUITE:unwrap_fun/1) and delegate to it, falling back
%% to a simple default implementation otherwise.
-module(emqx_bridge_v2_test_connector).
-behaviour(emqx_resource).
-export([
    query_mode/1,
    callback_mode/0,
    on_start/2,
    on_stop/2,
    on_query/3,
    on_query_async/4,
    on_get_status/2,
    on_add_channel/4,
    on_remove_channel/3,
    on_get_channels/1,
    on_get_channel_status/3
]).
%% Queries are always dispatched synchronously.
query_mode(_Config) ->
    sync.
callback_mode() ->
    always_sync.
%% If the config carries an `on_start_fun', delegate to it; otherwise
%% start with an empty state map.
on_start(
    _InstId,
    #{on_start_fun := FunRef} = Conf
) ->
    Fun = emqx_bridge_v2_SUITE:unwrap_fun(FunRef),
    Fun(Conf);
on_start(_InstId, _Config) ->
    {ok, #{}}.
%% Channel installation. First clause: a hook in the channel config is
%% called with no arguments. Second clause: a hook in the connector
%% state receives all four arguments. Default clause: record the
%% channel config under `channels' in the state map.
on_add_channel(
    _InstId,
    _State,
    _ChannelId,
    #{on_add_channel_fun := FunRef}
) ->
    Fun = emqx_bridge_v2_SUITE:unwrap_fun(FunRef),
    Fun();
on_add_channel(
    InstId,
    #{on_add_channel_fun := FunRef} = ConnectorState,
    ChannelId,
    ChannelConfig
) ->
    Fun = emqx_bridge_v2_SUITE:unwrap_fun(FunRef),
    Fun(InstId, ConnectorState, ChannelId, ChannelConfig);
on_add_channel(
    _InstId,
    State,
    ChannelId,
    ChannelConfig
) ->
    Channels = maps:get(channels, State, #{}),
    NewChannels = maps:put(ChannelId, ChannelConfig, Channels),
    NewState = maps:put(channels, NewChannels, State),
    {ok, NewState}.
on_stop(_InstanceId, _State) ->
    ok.
%% Drop the channel entry from the state map (no-op if absent).
on_remove_channel(
    _InstId,
    State,
    ChannelId
) ->
    Channels = maps:get(channels, State, #{}),
    NewChannels = maps:remove(ChannelId, Channels),
    NewState = maps:put(channels, NewChannels, State),
    {ok, NewState}.
%% Deliver the message to the pid stored under `send_to' in the channel
%% state; crashes (badmap on `not_found') if the channel was never added.
on_query(
    _InstId,
    {ChannelId, Message},
    ConnectorState
) ->
    Channels = maps:get(channels, ConnectorState, #{}),
    %% Lookup the channel
    ChannelState = maps:get(ChannelId, Channels, not_found),
    SendTo = maps:get(send_to, ChannelState),
    SendTo ! Message,
    ok.
on_get_channels(ResId) ->
    emqx_bridge_v2:get_channels_for_connector(ResId).
%% Async queries are not supported by this test connector.
on_query_async(
    _InstId,
    {_MessageTag, _Message},
    _AsyncReplyFn,
    _ConnectorState
) ->
    throw(not_implemented).
%% Health check: delegate to `on_get_status_fun' if present, otherwise
%% always report connected.
on_get_status(
    _InstId,
    #{on_get_status_fun := FunRef}
) ->
    Fun = emqx_bridge_v2_SUITE:unwrap_fun(FunRef),
    Fun();
on_get_status(
    _InstId,
    _State
) ->
    connected.
%% Per-channel health check: delegate to `on_get_channel_status_fun'
%% from the stored channel state if present, otherwise connected.
on_get_channel_status(
    _ResId,
    ChannelId,
    State
) ->
    Channels = maps:get(channels, State, #{}),
    ChannelState = maps:get(ChannelId, Channels, #{}),
    case ChannelState of
        #{on_get_channel_status_fun := FunRef} ->
            Fun = emqx_bridge_v2_SUITE:unwrap_fun(FunRef),
            Fun();
        _ ->
            connected
    end.

View File

@ -0,0 +1,516 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_v2_testlib).
-compile(nowarn_export_all).
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-import(emqx_common_test_helpers, [on_exit/1]).
%% ct setup helpers
%% Stash the list of applications the suite needs so the group/suite
%% teardown helpers can start and stop them later.
init_per_suite(Config, Apps) ->
    [{start_apps, Apps} | Config].
%% Tear down everything started for the suite: bridges and connectors,
%% the management API, and the applications (in reverse start order).
end_per_suite(Config) ->
    delete_all_bridges_and_connectors(),
    emqx_mgmt_api_test_util:end_suite(),
    ok = emqx_common_test_helpers:stop_apps([emqx_conf]),
    ok = emqx_connector_test_helpers:stop_apps(lists:reverse(?config(start_apps, Config))),
    _ = application:stop(emqx_connector),
    ok.
%% Reset the (toxiproxy) proxy, start the required applications and the
%% management API, and store common per-group settings in the CT config.
init_per_group(TestGroup, BridgeType, Config) ->
    ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"),
    ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")),
    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
    application:load(emqx_bridge),
    ok = emqx_common_test_helpers:start_apps([emqx_conf]),
    ok = emqx_connector_test_helpers:start_apps(?config(start_apps, Config)),
    {ok, _} = application:ensure_all_started(emqx_connector),
    emqx_mgmt_api_test_util:init_suite(),
    %% unique topic suffix per group run to avoid cross-run interference
    UniqueNum = integer_to_binary(erlang:unique_integer([positive])),
    MQTTTopic = <<"mqtt/topic/abc", UniqueNum/binary>>,
    [
        {proxy_host, ProxyHost},
        {proxy_port, ProxyPort},
        {mqtt_topic, MQTTTopic},
        {test_group, TestGroup},
        {bridge_type, BridgeType}
        | Config
    ].
%% Reset proxy state after the group; bridges are cleaned per testcase.
end_per_group(Config) ->
    ProxyHost = ?config(proxy_host, Config),
    ProxyPort = ?config(proxy_port, Config),
    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
    % delete_all_bridges(),
    ok.
%% Common per-testcase setup: wipe all bridges and connectors, derive a
%% unique topic for the case, and have `BridgeConfigCb' produce the
%% bridge name/config-string/config under test. Starts snabbkaffe
%% tracing and arms a 60s timetrap.
init_per_testcase(TestCase, Config0, BridgeConfigCb) ->
    ct:timetrap(timer:seconds(60)),
    delete_all_bridges_and_connectors(),
    UniqueNum = integer_to_binary(erlang:unique_integer()),
    BridgeTopic =
        <<
            (atom_to_binary(TestCase))/binary,
            UniqueNum/binary
        >>,
    TestGroup = ?config(test_group, Config0),
    Config = [{bridge_topic, BridgeTopic} | Config0],
    {Name, ConfigString, BridgeConfig} = BridgeConfigCb(
        TestCase, TestGroup, Config
    ),
    ok = snabbkaffe:start_trace(),
    [
        {bridge_name, Name},
        {bridge_config_string, ConfigString},
        {bridge_config, BridgeConfig}
        | Config
    ].
%% Common per-testcase teardown: reset the proxy, run the janitor's
%% registered cleanup callbacks and stop tracing — unless the case
%% flagged itself as not applicable.
end_per_testcase(_Testcase, Config) ->
    case proplists:get_bool(skip_does_not_apply, Config) of
        true ->
            ok;
        false ->
            ProxyHost = ?config(proxy_host, Config),
            ProxyPort = ?config(proxy_port, Config),
            emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
            %% in CI, apparently this needs more time since the
            %% machines struggle with all the containers running...
            emqx_common_test_helpers:call_janitor(60_000),
            ok = snabbkaffe:stop(),
            ok
    end.
%% Remove every bridge and then every connector (same order as before;
%% bridges are removed first).
delete_all_bridges_and_connectors() ->
    ok = delete_all_bridges(),
    ok = delete_all_connectors().
%% Remove all bridge-v2 actions currently known to the node.
delete_all_bridges() ->
    RemoveBridge = fun(#{type := Type, name := Name}) ->
        emqx_bridge_v2:remove(Type, Name)
    end,
    lists:foreach(RemoveBridge, emqx_bridge_v2:list()).
%% Remove all connectors currently known to the node.
delete_all_connectors() ->
    RemoveConnector = fun(#{type := Type, name := Name}) ->
        emqx_connector:remove(Type, Name)
    end,
    lists:foreach(RemoveConnector, emqx_connector:list()).
%% test helpers
%% Parse a HOCON config string, validate it against the bridge schema,
%% and return the inner config map for the given bridge type and name.
parse_and_check(BridgeType, BridgeName, ConfigString) ->
    {ok, RawConf} = hocon:binary(ConfigString, #{format => map}),
    hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}),
    #{<<"bridges">> := #{BridgeType := #{BridgeName := BridgeConfig}}} = RawConf,
    BridgeConfig.
%% Build the fully-qualified bridge-v2 channel id
%% (`action:<bridge-id>:<connector-resource-id>') for the bridge under
%% test. NOTE(review): the connector component is derived from the
%% *bridge* type/name, which assumes the backing connector shares that
%% name — confirm against emqx_bridge_resource before reusing elsewhere.
bridge_id(Config) ->
    BridgeType = ?config(bridge_type, Config),
    BridgeName = ?config(bridge_name, Config),
    BridgeId = emqx_bridge_resource:bridge_id(BridgeType, BridgeName),
    ConnectorId = emqx_bridge_resource:resource_id(BridgeType, BridgeName),
    <<"action:", BridgeId/binary, ":", ConnectorId/binary>>.
%% Resource id of the bridge under test.
resource_id(Config) ->
    BridgeType = ?config(bridge_type, Config),
    BridgeName = ?config(bridge_name, Config),
    emqx_bridge_resource:resource_id(BridgeType, BridgeName).
create_bridge(Config) ->
    create_bridge(Config, _Overrides = #{}).
%% Create the connector and then the bridge (v2) directly through the
%% Erlang API, deep-merging `Overrides' into the configured bridge
%% config from the CT config.
create_bridge(Config, Overrides) ->
    BridgeType = ?config(bridge_type, Config),
    BridgeName = ?config(bridge_name, Config),
    BridgeConfig0 = ?config(bridge_config, Config),
    BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides),
    ConnectorName = ?config(connector_name, Config),
    ConnectorType = ?config(connector_type, Config),
    ConnectorConfig = ?config(connector_config, Config),
    {ok, _} =
        emqx_connector:create(ConnectorType, ConnectorName, ConnectorConfig),
    ct:pal("creating bridge with config: ~p", [BridgeConfig]),
    emqx_bridge_v2:create(BridgeType, BridgeName, BridgeConfig).
create_bridge_api(Config) ->
    create_bridge_api(Config, _Overrides = #{}).
%% Create the connector via the Erlang API, then create the bridge
%% (action) through the HTTP API. Returns {ok, {Status, Headers, Map}}
%% with the body decoded as JSON, or the request error as-is.
create_bridge_api(Config, Overrides) ->
    BridgeType = ?config(bridge_type, Config),
    BridgeName = ?config(bridge_name, Config),
    BridgeConfig0 = ?config(bridge_config, Config),
    BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides),
    ConnectorName = ?config(connector_name, Config),
    ConnectorType = ?config(connector_type, Config),
    ConnectorConfig = ?config(connector_config, Config),
    {ok, _Connector} =
        emqx_connector:create(ConnectorType, ConnectorName, ConnectorConfig),
    Params = BridgeConfig#{<<"type">> => BridgeType, <<"name">> => BridgeName},
    Path = emqx_mgmt_api_test_util:api_path(["actions"]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    Opts = #{return_all => true},
    ct:pal("creating bridge (via http): ~p", [Params]),
    Res =
        case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of
            {ok, {Status, Headers, Body0}} ->
                {ok, {Status, Headers, emqx_utils_json:decode(Body0, [return_maps])}};
            Error ->
                Error
        end,
    ct:pal("bridge create result: ~p", [Res]),
    Res.
update_bridge_api(Config) ->
    update_bridge_api(Config, _Overrides = #{}).
%% Update the bridge (action) via the HTTP API (PUT), deep-merging
%% `Overrides' into the configured bridge config. Returns {ok, Map}
%% with the decoded JSON body, or the request error as-is.
update_bridge_api(Config, Overrides) ->
    BridgeType = ?config(bridge_type, Config),
    Name = ?config(bridge_name, Config),
    BridgeConfig0 = ?config(bridge_config, Config),
    BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides),
    BridgeId = emqx_bridge_resource:bridge_id(BridgeType, Name),
    Path = emqx_mgmt_api_test_util:api_path(["actions", BridgeId]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    Opts = #{return_all => true},
    ct:pal("updating bridge (via http): ~p", [BridgeConfig]),
    Res =
        case emqx_mgmt_api_test_util:request_api(put, Path, "", AuthHeader, BridgeConfig, Opts) of
            {ok, {_Status, _Headers, Body0}} -> {ok, emqx_utils_json:decode(Body0, [return_maps])};
            Error -> Error
        end,
    ct:pal("bridge update result: ~p", [Res]),
    Res.
%% Invoke an operation (e.g. "start") on a bridge via the HTTP API.
%% A 204 response is returned with its raw (empty) body; other success
%% and error bodies are decoded as JSON.
op_bridge_api(Op, BridgeType, BridgeName) ->
    BridgeId = emqx_bridge_resource:bridge_id(BridgeType, BridgeName),
    Path = emqx_mgmt_api_test_util:api_path(["actions", BridgeId, Op]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    Opts = #{return_all => true},
    ct:pal("calling bridge ~p (via http): ~p", [BridgeId, Op]),
    Res =
        case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, "", Opts) of
            {ok, {Status = {_, 204, _}, Headers, Body}} ->
                {ok, {Status, Headers, Body}};
            {ok, {Status, Headers, Body}} ->
                {ok, {Status, Headers, emqx_utils_json:decode(Body, [return_maps])}};
            {error, {Status, Headers, Body}} ->
                %% NOTE(review): assumes every error body is valid JSON;
                %% a non-JSON body would crash here.
                {error, {Status, Headers, emqx_utils_json:decode(Body, [return_maps])}};
            Error ->
                Error
        end,
    ct:pal("bridge op result: ~p", [Res]),
    Res.
probe_bridge_api(Config) ->
    probe_bridge_api(Config, _Overrides = #{}).
probe_bridge_api(Config, Overrides) ->
    BridgeType = ?config(bridge_type, Config),
    BridgeName = ?config(bridge_name, Config),
    BridgeConfig0 = ?config(bridge_config, Config),
    BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides),
    probe_bridge_api(BridgeType, BridgeName, BridgeConfig).
%% Dry-run the bridge config via `POST /actions_probe'; only a 204
%% response counts as success, anything else is returned as the error.
probe_bridge_api(BridgeType, BridgeName, BridgeConfig) ->
    Params = BridgeConfig#{<<"type">> => BridgeType, <<"name">> => BridgeName},
    Path = emqx_mgmt_api_test_util:api_path(["actions_probe"]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    Opts = #{return_all => true},
    ct:pal("probing bridge (via http): ~p", [Params]),
    Res =
        case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of
            {ok, {{_, 204, _}, _Headers, _Body0} = Res0} -> {ok, Res0};
            Error -> Error
        end,
    ct:pal("bridge probe result: ~p", [Res]),
    Res.
%% Best-effort JSON decoding of an HTTP error body. If the decoded body
%% carries a `message' field that is itself JSON-encoded, that field is
%% decoded as well; any decode failure returns the input unchanged at
%% that level.
try_decode_error(RawBody) ->
    DecodeMessage =
        fun(Body) ->
            case Body of
                #{<<"message">> := RawMsg} ->
                    case emqx_utils_json:safe_decode(RawMsg, [return_maps]) of
                        {ok, DecodedMsg} -> Body#{<<"message">> := DecodedMsg};
                        {error, _} -> Body
                    end;
                _ ->
                    Body
            end
        end,
    case emqx_utils_json:safe_decode(RawBody, [return_maps]) of
        {ok, Decoded} -> DecodeMessage(Decoded);
        {error, _} -> RawBody
    end.
create_rule_and_action_http(BridgeType, RuleTopic, Config) ->
    create_rule_and_action_http(BridgeType, RuleTopic, Config, _Opts = #{}).
%% Create a rule whose action is the bridge under test, via the HTTP
%% API. Supported `Opts': `sql' (overrides the default SELECT on
%% `RuleTopic') and `overrides' (deep-merged into the rule params).
%% On success, registers an on_exit cleanup that deletes the rule.
create_rule_and_action_http(BridgeType, RuleTopic, Config, Opts) ->
    BridgeName = ?config(bridge_name, Config),
    BridgeId = emqx_bridge_resource:bridge_id(BridgeType, BridgeName),
    SQL = maps:get(sql, Opts, <<"SELECT * FROM \"", RuleTopic/binary, "\"">>),
    Params0 = #{
        enable => true,
        sql => SQL,
        actions => [BridgeId]
    },
    Overrides = maps:get(overrides, Opts, #{}),
    Params = emqx_utils_maps:deep_merge(Params0, Overrides),
    Path = emqx_mgmt_api_test_util:api_path(["rules"]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    ct:pal("rule action params: ~p", [Params]),
    case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of
        {ok, Res0} ->
            Res = #{<<"id">> := RuleId} = emqx_utils_json:decode(Res0, [return_maps]),
            on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end),
            {ok, Res};
        Error ->
            Error
    end.
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
%% Shared testcase: create the bridge, wait until it is connected, send
%% one message through `emqx_resource:simple_sync_query/2' and validate
%% the result with `IsSuccessCheck'; finally assert that `TracePoint'
%% fired for this resource instance.
t_sync_query(Config, MakeMessageFun, IsSuccessCheck, TracePoint) ->
    ?check_trace(
        begin
            ?assertMatch({ok, _}, create_bridge_api(Config)),
            ResourceId = resource_id(Config),
            %% connection is async; poll until healthy to avoid flakiness
            ?retry(
                _Sleep = 1_000,
                _Attempts = 20,
                ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
            ),
            BridgeId = bridge_id(Config),
            Message = {BridgeId, MakeMessageFun()},
            IsSuccessCheck(emqx_resource:simple_sync_query(ResourceId, Message)),
            ok
        end,
        fun(Trace) ->
            ResourceId = resource_id(Config),
            ?assertMatch([#{instance_id := ResourceId}], ?of_kind(TracePoint, Trace))
        end
    ),
    ok.
%% Shared testcase: like t_sync_query/4 but uses the async query API
%% with a reply fun that posts the result back to the test process.
t_async_query(Config, MakeMessageFun, IsSuccessCheck, TracePoint) ->
    ReplyFun =
        fun(Pid, Result) ->
            Pid ! {result, Result}
        end,
    ?check_trace(
        begin
            ?assertMatch({ok, _}, create_bridge_api(Config)),
            ResourceId = resource_id(Config),
            %% connection is async; poll until healthy to avoid flakiness
            ?retry(
                _Sleep = 1_000,
                _Attempts = 20,
                ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
            ),
            BridgeId = bridge_id(Config),
            Message = {BridgeId, MakeMessageFun()},
            ?assertMatch(
                {ok, {ok, _}},
                ?wait_async_action(
                    emqx_resource:query(ResourceId, Message, #{
                        async_reply_fun => {ReplyFun, [self()]}
                    }),
                    #{?snk_kind := TracePoint, instance_id := ResourceId},
                    5_000
                )
            ),
            ok
        end,
        fun(Trace) ->
            ResourceId = resource_id(Config),
            ?assertMatch([#{instance_id := ResourceId}], ?of_kind(TracePoint, Trace))
        end
    ),
    %% the reply fun forwards the result here; fail loudly on timeout
    receive
        {result, Result} -> IsSuccessCheck(Result)
    after 5_000 ->
        throw(timeout)
    end,
    ok.
%% Shared testcase: the bridge can be created and then updated via the
%% HTTP API. NOTE(review): the update is performed twice with the same
%% config — presumably to check the update path is idempotent; confirm
%% before collapsing into a single call.
t_create_via_http(Config) ->
    ?check_trace(
        begin
            ?assertMatch({ok, _}, create_bridge_api(Config)),
            %% lightweight matrix testing some configs
            ?assertMatch(
                {ok, _},
                update_bridge_api(
                    Config
                )
            ),
            ?assertMatch(
                {ok, _},
                update_bridge_api(
                    Config
                )
            ),
            ok
        end,
        []
    ),
    ok.
%% Shared testcase covering the bridge lifecycle: probing does not leak
%% atoms, the bridge connects after creation, `start' on an already
%% started bridge is a 204 no-op, and disabling the connector stops the
%% resource (observed via `StopTracePoint').
t_start_stop(Config, StopTracePoint) ->
    BridgeType = ?config(bridge_type, Config),
    BridgeName = ?config(bridge_name, Config),
    BridgeConfig = ?config(bridge_config, Config),
    ConnectorName = ?config(connector_name, Config),
    ConnectorType = ?config(connector_type, Config),
    ConnectorConfig = ?config(connector_config, Config),
    ?assertMatch(
        {ok, _},
        emqx_connector:create(ConnectorType, ConnectorName, ConnectorConfig)
    ),
    ?check_trace(
        begin
            ProbeRes0 = probe_bridge_api(
                BridgeType,
                BridgeName,
                BridgeConfig
            ),
            ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0),
            %% Check that the bridge probe API doesn't leak atoms.
            AtomsBefore = erlang:system_info(atom_count),
            %% Probe again; shouldn't have created more atoms.
            ProbeRes1 = probe_bridge_api(
                BridgeType,
                BridgeName,
                BridgeConfig
            ),
            ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1),
            AtomsAfter = erlang:system_info(atom_count),
            ?assertEqual(AtomsBefore, AtomsAfter),
            ?assertMatch({ok, _}, emqx_bridge_v2:create(BridgeType, BridgeName, BridgeConfig)),
            ResourceId = emqx_bridge_resource:resource_id(BridgeType, BridgeName),
            %% Since the connection process is async, we give it some time to
            %% stabilize and avoid flakiness.
            ?retry(
                _Sleep = 1_000,
                _Attempts = 20,
                ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
            ),
            %% `start` bridge to trigger `already_started`
            ?assertMatch(
                {ok, {{_, 204, _}, _Headers, []}},
                emqx_bridge_v2_testlib:op_bridge_api("start", BridgeType, BridgeName)
            ),
            ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)),
            %% Not supported anymore
            %% ?assertMatch(
            %%     {{ok, _}, {ok, _}},
            %%     ?wait_async_action(
            %%         emqx_bridge_v2_testlib:op_bridge_api("stop", BridgeType, BridgeName),
            %%         #{?snk_kind := StopTracePoint},
            %%         5_000
            %%     )
            %% ),
            %% ?assertEqual(
            %%     {error, resource_is_stopped}, emqx_resource_manager:health_check(ResourceId)
            %% ),
            %% ?assertMatch(
            %%     {ok, {{_, 204, _}, _Headers, []}},
            %%     emqx_bridge_v2_testlib:op_bridge_api("stop", BridgeType, BridgeName)
            %% ),
            %% ?assertEqual(
            %%     {error, resource_is_stopped}, emqx_resource_manager:health_check(ResourceId)
            %% ),
            %% ?assertMatch(
            %%     {ok, {{_, 204, _}, _Headers, []}},
            %%     emqx_bridge_v2_testlib:op_bridge_api("start", BridgeType, BridgeName)
            %% ),
            ?retry(
                _Sleep = 1_000,
                _Attempts = 20,
                ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
            ),
            %% Disable the connector, which will also stop it.
            ?assertMatch(
                {{ok, _}, {ok, _}},
                ?wait_async_action(
                    emqx_connector:disable_enable(disable, ConnectorType, ConnectorName),
                    #{?snk_kind := StopTracePoint},
                    5_000
                )
            ),
            ok
        end,
        fun(Trace) ->
            ResourceId = emqx_bridge_resource:resource_id(BridgeType, BridgeName),
            %% one for each probe, one for real
            ?assertMatch(
                [_, _, #{instance_id := ResourceId}],
                ?of_kind(StopTracePoint, Trace)
            ),
            ok
        end
    ),
    ok.
t_on_get_status(Config) ->
    t_on_get_status(Config, _Opts = #{}).
%% Shared testcase: the bridge reports `connected' after creation,
%% reports `failure_status' (default `disconnected') while the proxy
%% cuts the connection, and recovers to `connected' afterwards.
t_on_get_status(Config, Opts) ->
    ProxyPort = ?config(proxy_port, Config),
    ProxyHost = ?config(proxy_host, Config),
    ProxyName = ?config(proxy_name, Config),
    FailureStatus = maps:get(failure_status, Opts, disconnected),
    ?assertMatch({ok, _}, create_bridge(Config)),
    ResourceId = resource_id(Config),
    %% Since the connection process is async, we give it some time to
    %% stabilize and avoid flakiness.
    ?retry(
        _Sleep = 1_000,
        _Attempts = 20,
        ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
    ),
    %% cut the connection via the proxy and expect the failure status
    emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
        ct:sleep(500),
        ?retry(
            _Interval0 = 200,
            _Attempts0 = 10,
            ?assertEqual({ok, FailureStatus}, emqx_resource_manager:health_check(ResourceId))
        )
    end),
    %% Check that it recovers itself.
    ?retry(
        _Sleep = 1_000,
        _Attempts = 20,
        ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
    ),
    ok.

View File

@ -1,6 +1,6 @@
%% -*- mode: erlang; -*-
{erl_opts, [debug_info]}.
{deps, [ {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.7.7"}}}
{deps, [ {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.8.0"}}}
, {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.3"}}}
, {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.0"}}}
, {brod, {git, "https://github.com/kafka4beam/brod.git", {tag, "3.16.8"}}}

View File

@ -1,6 +1,6 @@
{application, emqx_bridge_azure_event_hub, [
{description, "EMQX Enterprise Azure Event Hub Bridge"},
{vsn, "0.1.2"},
{vsn, "0.1.3"},
{registered, []},
{applications, [
kernel,

View File

@ -7,7 +7,7 @@
-include_lib("hocon/include/hoconsc.hrl").
-behaviour(hocon_schema).
-behaviour(emqx_bridge_resource).
-behaviour(emqx_connector_resource).
%% `hocon_schema' API
-export([
@ -18,14 +18,22 @@
]).
%% emqx_bridge_enterprise "unofficial" API
-export([conn_bridge_examples/1]).
-export([
bridge_v2_examples/1,
conn_bridge_examples/1,
connector_examples/1
]).
%% emqx_connector_resource behaviour callbacks
-export([connector_config/1]).
-export([producer_converter/2, host_opts/0]).
-import(hoconsc, [mk/2, enum/1, ref/2]).
-define(AEH_CONNECTOR_TYPE, azure_event_hub_producer).
-define(AEH_CONNECTOR_TYPE_BIN, <<"azure_event_hub_producer">>).
%%-------------------------------------------------------------------------------------------------
%% `hocon_schema' API
%%-------------------------------------------------------------------------------------------------
@ -34,12 +42,50 @@ namespace() -> "bridge_azure_event_hub".
roots() -> ["config_producer"].
fields("put_connector") ->
Fields = override(
emqx_bridge_kafka:fields("put_connector"),
connector_overrides()
),
override_documentations(Fields);
fields("get_connector") ->
emqx_bridge_schema:status_fields() ++
fields("post_connector");
fields("post_connector") ->
Fields = override(
emqx_bridge_kafka:fields("post_connector"),
connector_overrides()
),
override_documentations(Fields);
fields("put_bridge_v2") ->
Fields = override(
emqx_bridge_kafka:fields("put_bridge_v2"),
bridge_v2_overrides()
),
override_documentations(Fields);
fields("get_bridge_v2") ->
emqx_bridge_schema:status_fields() ++
fields("post_bridge_v2");
fields("post_bridge_v2") ->
Fields = override(
emqx_bridge_kafka:fields("post_bridge_v2"),
bridge_v2_overrides()
),
override_documentations(Fields);
fields("post_producer") ->
Fields = override(
emqx_bridge_kafka:fields("post_producer"),
producer_overrides()
),
override_documentations(Fields);
fields("config_bridge_v2") ->
fields(actions);
fields("config_connector") ->
Fields = override(
emqx_bridge_kafka:fields("config_connector"),
connector_overrides()
),
override_documentations(Fields);
fields("config_producer") ->
Fields = override(
emqx_bridge_kafka:fields(kafka_producer),
@ -52,9 +98,9 @@ fields(auth_username_password) ->
auth_overrides()
),
override_documentations(Fields);
fields("ssl_client_opts") ->
fields(ssl_client_opts) ->
Fields = override(
emqx_schema:fields("ssl_client_opts"),
emqx_bridge_kafka:ssl_client_opts_fields(),
ssl_overrides()
),
override_documentations(Fields);
@ -68,19 +114,36 @@ fields(kafka_message) ->
Fields0 = emqx_bridge_kafka:fields(kafka_message),
Fields = proplists:delete(timestamp, Fields0),
override_documentations(Fields);
fields(actions) ->
Fields =
override(
emqx_bridge_kafka:producer_opts(),
bridge_v2_overrides()
) ++
[
{enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
{connector,
mk(binary(), #{
desc => ?DESC(emqx_connector_schema, "connector_field"), required => true
})},
{description, emqx_schema:description_schema()}
],
override_documentations(Fields);
fields(Method) ->
Fields = emqx_bridge_kafka:fields(Method),
override_documentations(Fields).
desc("config") ->
?DESC("desc_config");
desc("config_connector") ->
?DESC("desc_config");
desc("config_producer") ->
?DESC("desc_config");
desc("ssl_client_opts") ->
emqx_schema:desc("ssl_client_opts");
desc("get_producer") ->
desc("get_" ++ Type) when Type == "producer"; Type == "connector"; Type == "bridge_v2" ->
["Configuration for Azure Event Hub using `GET` method."];
desc("put_producer") ->
desc("put_" ++ Type) when Type == "producer"; Type == "connector"; Type == "bridge_v2" ->
["Configuration for Azure Event Hub using `PUT` method."];
desc("post_producer") ->
desc("post_" ++ Type) when Type == "producer"; Type == "connector"; Type == "bridge_v2" ->
["Configuration for Azure Event Hub using `POST` method."];
desc(Name) ->
lists:member(Name, struct_names()) orelse throw({missing_desc, Name}),
@ -90,7 +153,29 @@ struct_names() ->
[
auth_username_password,
kafka_message,
producer_kafka_opts
producer_kafka_opts,
actions,
ssl_client_opts
].
bridge_v2_examples(Method) ->
[
#{
?AEH_CONNECTOR_TYPE_BIN => #{
summary => <<"Azure Event Hub Bridge v2">>,
value => values({Method, bridge_v2})
}
}
].
connector_examples(Method) ->
[
#{
?AEH_CONNECTOR_TYPE_BIN => #{
summary => <<"Azure Event Hub Connector">>,
value => values({Method, connector})
}
}
].
conn_bridge_examples(Method) ->
@ -104,11 +189,65 @@ conn_bridge_examples(Method) ->
].
values({get, AEHType}) ->
values({post, AEHType});
values({post, AEHType}) ->
maps:merge(values(common_config), values(AEHType));
values({put, AEHType}) ->
values({post, AEHType});
maps:merge(
#{
status => <<"connected">>,
node_status => [
#{
node => <<"emqx@localhost">>,
status => <<"connected">>
}
]
},
values({post, AEHType})
);
values({post, bridge_v2}) ->
maps:merge(
values(producer),
#{
enable => true,
connector => <<"my_azure_event_hub_producer_connector">>,
name => <<"my_azure_event_hub_producer_bridge">>,
type => ?AEH_CONNECTOR_TYPE_BIN
}
);
values({post, connector}) ->
maps:merge(
values(common_config),
#{
name => <<"my_azure_event_hub_producer_connector">>,
type => ?AEH_CONNECTOR_TYPE_BIN,
ssl => #{
enable => true,
server_name_indication => <<"auto">>,
verify => <<"verify_none">>,
versions => [<<"tlsv1.3">>, <<"tlsv1.2">>]
}
}
);
values({post, producer}) ->
maps:merge(
#{
name => <<"my_azure_event_hub_producer">>,
type => <<"azure_event_hub_producer">>
},
maps:merge(
values(common_config),
values(producer)
)
);
values({put, connector}) ->
values(common_config);
values({put, bridge_v2}) ->
maps:merge(
values(producer),
#{
enable => true,
connector => <<"my_azure_event_hub_producer_connector">>
}
);
values({put, producer}) ->
values({post, producer});
values(common_config) ->
#{
authentication => #{
@ -128,12 +267,11 @@ values(common_config) ->
};
values(producer) ->
#{
kafka => #{
parameters => #{
topic => <<"topic">>,
message => #{
key => <<"${.clientid}">>,
value => <<"${.}">>,
timestamp => <<"${.timestamp}">>
value => <<"${.}">>
},
max_batch_bytes => <<"896KB">>,
partition_strategy => <<"random">>,
@ -163,7 +301,7 @@ values(producer) ->
}.
%%-------------------------------------------------------------------------------------------------
%% `emqx_bridge_resource' API
%% `emqx_connector_resource' API
%%-------------------------------------------------------------------------------------------------
connector_config(Config) ->
@ -182,6 +320,43 @@ connector_config(Config) ->
ref(Name) ->
hoconsc:ref(?MODULE, Name).
%% Schema field overrides applied on top of the shared Kafka connector
%% schema when it is reused for the Azure Event Hub (AEH) connector.
connector_overrides() ->
    #{
        %% AEH authentication is narrowed to username/password and made
        %% mandatory.
        authentication =>
            mk(
                ref(auth_username_password),
                #{
                    default => #{},
                    required => true,
                    desc => ?DESC("authentication")
                }
            ),
        bootstrap_hosts =>
            mk(
                binary(),
                #{
                    required => true,
                    validator => emqx_schema:servers_validator(
                        host_opts(), _Required = true
                    )
                }
            ),
        %% Azure must use SSL, so the field is required and enabled by
        %% default (plain Kafka has SSL disabled by default).
        ssl => mk(
            ref(ssl_client_opts),
            #{
                required => true,
                default => #{<<"enable">> => true}
            }
        ),
        type => mk(
            ?AEH_CONNECTOR_TYPE,
            #{
                required => true,
                desc => ?DESC("connector_type")
            }
        )
    }.
producer_overrides() ->
#{
authentication =>
@ -203,15 +378,40 @@ producer_overrides() ->
)
}
),
%% NOTE: field 'kafka' is renamed to 'parameters' since e5.3.1
%% We will keep 'kafka' for backward compatibility.
%% TODO: delete this override when we upgrade bridge schema json to 0.2.0
%% See emqx_conf:bridge_schema_json/0
kafka =>
mk(ref(producer_kafka_opts), #{
required => true,
validator => fun emqx_bridge_kafka:producer_strategy_key_validator/1
}),
ssl => mk(ref("ssl_client_opts"), #{default => #{<<"enable">> => true}}),
parameters =>
mk(ref(producer_kafka_opts), #{
required => true,
validator => fun emqx_bridge_kafka:producer_strategy_key_validator/1
}),
ssl => mk(ref(ssl_client_opts), #{default => #{<<"enable">> => true}}),
type => mk(azure_event_hub_producer, #{required => true})
}.
%% Schema field overrides for the bridge-v2 (action) flavor of the
%% Azure Event Hub producer.
bridge_v2_overrides() ->
    #{
        parameters =>
            mk(ref(producer_kafka_opts), #{
                required => true,
                validator => fun emqx_bridge_kafka:producer_strategy_key_validator/1
            }),
        %% Azure must use SSL, so it defaults to enabled here.
        ssl => mk(ref(ssl_client_opts), #{default => #{<<"enable">> => true}}),
        type => mk(
            ?AEH_CONNECTOR_TYPE,
            #{
                required => true,
                desc => ?DESC("bridge_v2_type")
            }
        )
    }.
auth_overrides() ->
#{
mechanism =>
@ -228,19 +428,11 @@ auth_overrides() ->
})
}.
%% Kafka has SSL disabled by default
%% Azure must use SSL
ssl_overrides() ->
#{
%% FIXME: change this once the config option is defined
%% "cacerts" => mk(boolean(), #{default => true}),
"enable" => mk(true, #{default => true}),
"server_name_indication" =>
mk(
hoconsc:union([disable, auto, string()]),
#{
example => auto,
default => <<"auto">>
}
)
"enable" => mk(true, #{default => true})
}.
kafka_producer_overrides() ->

View File

@ -13,7 +13,6 @@
-define(BRIDGE_TYPE, azure_event_hub_producer).
-define(BRIDGE_TYPE_BIN, <<"azure_event_hub_producer">>).
-define(KAFKA_BRIDGE_TYPE, kafka).
-define(APPS, [emqx_resource, emqx_bridge, emqx_rule_engine]).
-import(emqx_common_test_helpers, [on_exit/1]).
@ -41,6 +40,7 @@ init_per_suite(Config) ->
emqx_resource,
emqx_bridge_azure_event_hub,
emqx_bridge,
emqx_rule_engine,
{emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"}
],
#{work_dir => ?config(priv_dir, Config)}
@ -281,8 +281,6 @@ t_sync_query(Config) ->
t_same_name_azure_kafka_bridges(AehConfig) ->
ConfigKafka = lists:keyreplace(bridge_type, 1, AehConfig, {bridge_type, ?KAFKA_BRIDGE_TYPE}),
BridgeName = ?config(bridge_name, AehConfig),
AehResourceId = emqx_bridge_testlib:resource_id(AehConfig),
KafkaResourceId = emqx_bridge_testlib:resource_id(ConfigKafka),
TracePoint = emqx_bridge_kafka_impl_producer_sync_query,
%% creates the AEH bridge and check it's working
ok = emqx_bridge_testlib:t_sync_query(
@ -293,6 +291,8 @@ t_same_name_azure_kafka_bridges(AehConfig) ->
),
%% then creates a Kafka bridge with the same name and deletes it after creation
ok = emqx_bridge_testlib:t_create_via_http(ConfigKafka),
AehResourceId = emqx_bridge_testlib:resource_id(AehConfig),
KafkaResourceId = emqx_bridge_testlib:resource_id(ConfigKafka),
%% check that both bridges are healthy
?assertEqual({ok, connected}, emqx_resource_manager:health_check(AehResourceId)),
?assertEqual({ok, connected}, emqx_resource_manager:health_check(KafkaResourceId)),
@ -307,7 +307,8 @@ t_same_name_azure_kafka_bridges(AehConfig) ->
% check that AEH bridge is still working
?check_trace(
begin
Message = {send_message, make_message()},
BridgeId = emqx_bridge_v2_testlib:bridge_id(AehConfig),
Message = {BridgeId, make_message()},
?assertEqual(ok, emqx_resource:simple_sync_query(AehResourceId, Message)),
ok
end,

View File

@ -0,0 +1,343 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_azure_event_hub_v2_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-define(BRIDGE_TYPE, azure_event_hub_producer).
-define(BRIDGE_TYPE_BIN, <<"azure_event_hub_producer">>).
-define(CONNECTOR_TYPE, azure_event_hub_producer).
-define(CONNECTOR_TYPE_BIN, <<"azure_event_hub_producer">>).
-define(KAFKA_BRIDGE_TYPE, kafka_producer).
-import(emqx_common_test_helpers, [on_exit/1]).
%%------------------------------------------------------------------------------
%% CT boilerplate
%%------------------------------------------------------------------------------
%% Run every exported `t_*' function in this module as a test case.
all() ->
    emqx_common_test_helpers:all(?MODULE).
%% Suite setup: starts the required EMQX applications if the Kafka
%% SASL/SSL endpoint (behind toxiproxy) is reachable; otherwise skips
%% the suite — or fails hard in CI, where Kafka is expected to be up.
init_per_suite(Config) ->
    KafkaHost = os:getenv("KAFKA_SASL_SSL_HOST", "toxiproxy.emqx.net"),
    KafkaPort = list_to_integer(os:getenv("KAFKA_SASL_SSL_PORT", "9295")),
    ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"),
    ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")),
    ProxyName = "kafka_sasl_ssl",
    %% Drop any toxics left behind by a previous run.
    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
    case emqx_common_test_helpers:is_tcp_server_available(KafkaHost, KafkaPort) of
        true ->
            Apps = emqx_cth_suite:start(
                [
                    emqx_conf,
                    emqx,
                    emqx_management,
                    emqx_resource,
                    emqx_bridge_azure_event_hub,
                    emqx_bridge,
                    emqx_rule_engine,
                    {emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"}
                ],
                #{work_dir => ?config(priv_dir, Config)}
            ),
            %% API credentials used by the REST-based testlib helpers.
            {ok, Api} = emqx_common_test_http:create_default_app(),
            [
                {tc_apps, Apps},
                {api, Api},
                {proxy_name, ProxyName},
                {proxy_host, ProxyHost},
                {proxy_port, ProxyPort},
                {kafka_host, KafkaHost},
                {kafka_port, KafkaPort}
                | Config
            ];
        false ->
            case os:getenv("IS_CI") of
                "yes" ->
                    %% In CI an unreachable Kafka is an infra failure, not a skip.
                    throw(no_kafka);
                _ ->
                    {skip, no_kafka}
            end
    end.
%% Suite teardown: stop the applications started by init_per_suite/1.
end_per_suite(Config) ->
    _ = emqx_cth_suite:stop(?config(tc_apps, Config)),
    ok.
%% Per-testcase setup; every case shares the same common initialization.
init_per_testcase(TestCase, Config) ->
    common_init_per_testcase(TestCase, Config).
%% Shared per-testcase setup: wipes any existing bridges/connectors,
%% builds fresh connector and bridge configs named after the test case
%% (with a unique suffix), pre-creates the Kafka topic, and starts the
%% snabbkaffe trace.
common_init_per_testcase(TestCase, Config) ->
    ct:timetrap(timer:seconds(60)),
    emqx_bridge_v2_testlib:delete_all_bridges_and_connectors(),
    emqx_config:delete_override_conf_files(),
    UniqueNum = integer_to_binary(erlang:unique_integer()),
    Name = iolist_to_binary([atom_to_binary(TestCase), UniqueNum]),
    KafkaHost = ?config(kafka_host, Config),
    KafkaPort = ?config(kafka_port, Config),
    %% The topic name mirrors the unique bridge/connector name.
    KafkaTopic = Name,
    ConnectorConfig = connector_config(Name, KafkaHost, KafkaPort),
    {BridgeConfig, ExtraConfig} = bridge_config(Name, Name, KafkaTopic),
    ensure_topic(Config, KafkaTopic, _Opts = #{}),
    ok = snabbkaffe:start_trace(),
    ExtraConfig ++
        [
            {connector_type, ?CONNECTOR_TYPE},
            {connector_name, Name},
            {connector_config, ConnectorConfig},
            {bridge_type, ?BRIDGE_TYPE},
            {bridge_name, Name},
            {bridge_config, BridgeConfig}
            | Config
        ].
%% Per-testcase teardown: unless the case flagged itself as not
%% applicable, reset toxiproxy, remove all bridges/connectors, run the
%% janitor callbacks, and stop the snabbkaffe trace.
end_per_testcase(_Testcase, Config) ->
    case proplists:get_bool(skip_does_not_apply, Config) of
        true ->
            ok;
        false ->
            ProxyHost = ?config(proxy_host, Config),
            ProxyPort = ?config(proxy_port, Config),
            emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
            emqx_bridge_v2_testlib:delete_all_bridges_and_connectors(),
            emqx_common_test_helpers:call_janitor(60_000),
            ok = snabbkaffe:stop(),
            ok
    end.
%%------------------------------------------------------------------------------
%% Helper fns
%%------------------------------------------------------------------------------
%% Builds the raw (binary-keyed) connector config for an AEH producer
%% connector pointing at the given Kafka SASL/SSL endpoint, round-trips
%% it through HOCON, and checks it against the connector schema.
connector_config(Name, KafkaHost, KafkaPort) ->
    InnerConfigMap0 =
        #{
            <<"enable">> => true,
            <<"bootstrap_hosts">> => iolist_to_binary([KafkaHost, ":", integer_to_binary(KafkaPort)]),
            <<"authentication">> =>
                #{
                    <<"mechanism">> => <<"plain">>,
                    <<"username">> => <<"emqxuser">>,
                    <<"password">> => <<"password">>
                },
            <<"connect_timeout">> => <<"5s">>,
            <<"socket_opts">> =>
                #{
                    <<"nodelay">> => true,
                    <<"recbuf">> => <<"1024KB">>,
                    <<"sndbuf">> => <<"1024KB">>,
                    <<"tcp_keepalive">> => <<"none">>
                },
            %% TLS client config; cert/key files come from the shared secret dir.
            <<"ssl">> =>
                #{
                    <<"cacertfile">> => shared_secret(client_cacertfile),
                    <<"certfile">> => shared_secret(client_certfile),
                    <<"keyfile">> => shared_secret(client_keyfile),
                    <<"ciphers">> => [],
                    <<"depth">> => 10,
                    <<"enable">> => true,
                    <<"hibernate_after">> => <<"5s">>,
                    <<"log_level">> => <<"notice">>,
                    <<"reuse_sessions">> => true,
                    <<"secure_renegotiate">> => true,
                    <<"server_name_indication">> => <<"disable">>,
                    %% currently, it seems our CI kafka certs fail peer verification
                    <<"verify">> => <<"verify_none">>,
                    <<"versions">> => [<<"tlsv1.3">>, <<"tlsv1.2">>]
                }
        },
    InnerConfigMap = serde_roundtrip(InnerConfigMap0),
    parse_and_check_connector_config(InnerConfigMap, Name).
%% Validates InnerConfigMap against the connector schema (wrapped under
%% connectors.<type>.<name>), logs the parsed result, and returns the
%% input map unchanged.
parse_and_check_connector_config(InnerConfigMap, Name) ->
    Type = ?CONNECTOR_TYPE_BIN,
    RawConf = #{<<"connectors">> => #{Type => #{Name => InnerConfigMap}}},
    CheckOpts = #{required => false, atom_key => false},
    #{<<"connectors">> := #{Type := #{Name := Checked}}} =
        hocon_tconf:check_plain(emqx_connector_schema, RawConf, CheckOpts),
    ct:pal("parsed config: ~p", [Checked]),
    InnerConfigMap.
%% Builds the raw bridge (action) config for the AEH producer, wired to
%% the given connector and Kafka topic.  Returns the schema-checked map
%% together with extra CT config entries ({kafka_topic, Topic}).
bridge_config(Name, ConnectorId, KafkaTopic) ->
    InnerConfigMap0 =
        #{
            <<"enable">> => true,
            <<"connector">> => ConnectorId,
            <<"kafka">> =>
                #{
                    <<"buffer">> =>
                        #{
                            <<"memory_overload_protection">> => true,
                            <<"mode">> => <<"memory">>,
                            <<"per_partition_limit">> => <<"2GB">>,
                            <<"segment_bytes">> => <<"100MB">>
                        },
                    <<"compression">> => <<"no_compression">>,
                    <<"kafka_header_value_encode_mode">> => <<"none">>,
                    <<"max_batch_bytes">> => <<"896KB">>,
                    <<"max_inflight">> => <<"10">>,
                    %% Message templates: clientid as key, whole event as value.
                    <<"message">> =>
                        #{
                            <<"key">> => <<"${.clientid}">>,
                            <<"value">> => <<"${.}">>
                        },
                    <<"partition_count_refresh_interval">> => <<"60s">>,
                    <<"partition_strategy">> => <<"random">>,
                    <<"query_mode">> => <<"async">>,
                    <<"required_acks">> => <<"all_isr">>,
                    <<"sync_query_timeout">> => <<"5s">>,
                    <<"topic">> => KafkaTopic
                },
            <<"local_topic">> => <<"t/aeh">>
        },
    InnerConfigMap = serde_roundtrip(InnerConfigMap0),
    ExtraConfig =
        [{kafka_topic, KafkaTopic}],
    {parse_and_check_bridge_config(InnerConfigMap, Name), ExtraConfig}.
%% Sanity check: render the config map to HOCON text and parse it back,
%% proving it serializes correctly; returns the re-parsed map.
serde_roundtrip(ConfigMap0) ->
    Rendered = hocon_pp:do(ConfigMap0, #{}),
    {ok, ConfigMap} = hocon:binary(Rendered),
    ConfigMap.
%% Validates InnerConfigMap against the bridge-v2 schema (wrapped under
%% bridges.<type>.<name>); the check result is discarded and the input
%% map is returned unchanged.
parse_and_check_bridge_config(InnerConfigMap, Name) ->
    Type = ?BRIDGE_TYPE_BIN,
    RawConf = #{<<"bridges">> => #{Type => #{Name => InnerConfigMap}}},
    CheckOpts = #{required => false, atom_key => false},
    _ = hocon_tconf:check_plain(emqx_bridge_v2_schema, RawConf, CheckOpts),
    InnerConfigMap.
%% Directory holding the TLS certs/keys shared with the test broker;
%% defaults to the docker-compose mount point.
shared_secret_path() ->
    os:getenv("CI_SHARED_SECRET_PATH", "/var/lib/secret").
%% Absolute path of a named credential file inside the shared secret dir.
shared_secret(client_keyfile) ->
    filename:join(shared_secret_path(), "client.key");
shared_secret(client_certfile) ->
    filename:join(shared_secret_path(), "client.crt");
shared_secret(client_cacertfile) ->
    filename:join(shared_secret_path(), "ca.crt");
shared_secret(rig_keytab) ->
    filename:join(shared_secret_path(), "rig.keytab").
%% Ensures the Kafka topic exists (3 partitions unless overridden via
%% Opts), connecting with the same SASL/SSL credentials the bridge
%% uses; an already-existing topic is treated as success.
ensure_topic(Config, KafkaTopic, Opts) ->
    KafkaHost = ?config(kafka_host, Config),
    KafkaPort = ?config(kafka_port, Config),
    NumPartitions = maps:get(num_partitions, Opts, 3),
    Endpoints = [{KafkaHost, KafkaPort}],
    TopicConfigs = [
        #{
            name => KafkaTopic,
            num_partitions => NumPartitions,
            replication_factor => 1,
            assignments => [],
            configs => []
        }
    ],
    RequestConfig = #{timeout => 5_000},
    ConnConfig =
        #{
            ssl => emqx_tls_lib:to_client_opts(
                #{
                    keyfile => shared_secret(client_keyfile),
                    certfile => shared_secret(client_certfile),
                    cacertfile => shared_secret(client_cacertfile),
                    verify => verify_none,
                    enable => true
                }
            ),
            sasl => {plain, <<"emqxuser">>, <<"password">>}
        },
    case brod:create_topics(Endpoints, TopicConfigs, RequestConfig, ConnConfig) of
        ok -> ok;
        {error, topic_already_exists} -> ok
    end.
%% Builds a one-off message map with a unique clientid/timestamp pair
%% and a random hex payload.
make_message() ->
    Now = erlang:unique_integer(),
    #{
        clientid => integer_to_binary(Now),
        payload => emqx_guid:to_hexstr(emqx_guid:gen()),
        timestamp => Now
    }.
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
%% Starting and stopping the bridge works; the stop is observed via the
%% kafka_producer_stopped trace point.
t_start_stop(Config) ->
    emqx_bridge_v2_testlib:t_start_stop(Config, kafka_producer_stopped),
    ok.
%% The bridge can be created through the REST API.
t_create_via_http(Config) ->
    emqx_bridge_v2_testlib:t_create_via_http(Config),
    ok.
%% Health status is reported correctly; `connecting' is the expected
%% status while the connection is disrupted.
t_on_get_status(Config) ->
    emqx_bridge_v2_testlib:t_on_get_status(Config, #{failure_status => connecting}),
    ok.
%% A synchronous query through the bridge succeeds; delivery is
%% confirmed at the Kafka producer sync-query trace point.
t_sync_query(Config) ->
    ok = emqx_bridge_v2_testlib:t_sync_query(
        Config,
        fun make_message/0,
        fun(Res) -> ?assertEqual(ok, Res) end,
        emqx_bridge_kafka_impl_producer_sync_query
    ),
    ok.
%% An AEH bridge and a Kafka bridge may share the same name: disabling
%% the Kafka one must not affect the AEH one.
t_same_name_azure_kafka_bridges(Config) ->
    BridgeName = ?config(bridge_name, Config),
    TracePoint = emqx_bridge_kafka_impl_producer_sync_query,
    %% creates the AEH bridge and check it's working
    ok = emqx_bridge_v2_testlib:t_sync_query(
        Config,
        fun make_message/0,
        fun(Res) -> ?assertEqual(ok, Res) end,
        TracePoint
    ),
    %% then create a Kafka bridge with the same name and delete it after creation
    ConfigKafka0 = lists:keyreplace(bridge_type, 1, Config, {bridge_type, ?KAFKA_BRIDGE_TYPE}),
    ConfigKafka = lists:keyreplace(
        connector_type, 1, ConfigKafka0, {connector_type, ?KAFKA_BRIDGE_TYPE}
    ),
    ok = emqx_bridge_v2_testlib:t_create_via_http(ConfigKafka),
    AehResourceId = emqx_bridge_v2_testlib:resource_id(Config),
    KafkaResourceId = emqx_bridge_v2_testlib:resource_id(ConfigKafka),
    %% check that both bridges are healthy
    ?assertEqual({ok, connected}, emqx_resource_manager:health_check(AehResourceId)),
    ?assertEqual({ok, connected}, emqx_resource_manager:health_check(KafkaResourceId)),
    %% disable the Kafka connector and wait until its producer stops
    ?assertMatch(
        {{ok, _}, {ok, _}},
        ?wait_async_action(
            emqx_connector:disable_enable(disable, ?KAFKA_BRIDGE_TYPE, BridgeName),
            #{?snk_kind := kafka_producer_stopped},
            5_000
        )
    ),
    % check that AEH bridge is still working
    ?check_trace(
        begin
            BridgeId = emqx_bridge_v2_testlib:bridge_id(Config),
            Message = {BridgeId, make_message()},
            ?assertEqual(ok, emqx_resource:simple_sync_query(AehResourceId, Message)),
            ok
        end,
        fun(Trace) ->
            %% the sync-query trace must come from the AEH resource instance
            ?assertMatch([#{instance_id := AehResourceId}], ?of_kind(TracePoint, Trace))
        end
    ),
    ok.

View File

@ -177,8 +177,7 @@ make_bridge(Config) ->
delete_bridge() ->
Type = <<"clickhouse">>,
Name = atom_to_binary(?MODULE),
{ok, _} = emqx_bridge:remove(Type, Name),
ok.
ok = emqx_bridge:remove(Type, Name).
reset_table(Config) ->
ClickhouseConnection = proplists:get_value(clickhouse_connection, Config),

View File

@ -222,13 +222,8 @@ encode_payload(State, Selected) ->
OrderingKey = render_key(OrderingKeyTemplate, Selected),
Attributes = proc_attributes(AttributesTemplate, Selected),
Payload0 = #{data => base64:encode(Data)},
Payload1 = put_if(Payload0, attributes, Attributes, map_size(Attributes) > 0),
put_if(Payload1, 'orderingKey', OrderingKey, OrderingKey =/= <<>>).
put_if(Acc, K, V, true) ->
Acc#{K => V};
put_if(Acc, _K, _V, false) ->
Acc.
Payload1 = emqx_utils_maps:put_if(Payload0, attributes, Attributes, map_size(Attributes) > 0),
emqx_utils_maps:put_if(Payload1, 'orderingKey', OrderingKey, OrderingKey =/= <<>>).
-spec render_payload(emqx_placeholder:tmpl_token(), map()) -> binary().
render_payload([] = _Template, Selected) ->

View File

@ -891,7 +891,7 @@ t_start_stop(Config) ->
{ok, _} = snabbkaffe:receive_events(SRef0),
?assertMatch({ok, connected}, emqx_resource_manager:health_check(ResourceId)),
?assertMatch({ok, _}, remove_bridge(Config)),
?assertMatch(ok, remove_bridge(Config)),
ok
end,
[

Some files were not shown because too many files have changed in this diff Show More