Merge pull request #11900 from id/1108-sync-r53

sync r53
This commit is contained in:
Ivan Dyachkov 2023-11-08 11:38:07 +01:00 committed by GitHub
commit 1e07ea0339
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
78 changed files with 2787 additions and 983 deletions

View File

@ -16,7 +16,7 @@ env:
jobs:
sanity-checks:
runs-on: ${{ fromJSON(github.repository_owner == 'emqx' && '["self-hosted","ephemeral","linux","x64"]' || '["ubuntu-22.04"]') }}
runs-on: ubuntu-22.04
container: "ghcr.io/emqx/emqx-builder/5.2-3:1.14.5-25.3.2-2-ubuntu22.04"
outputs:
ct-matrix: ${{ steps.matrix.outputs.ct-matrix }}
@ -24,8 +24,6 @@ jobs:
ct-docker: ${{ steps.matrix.outputs.ct-docker }}
version-emqx: ${{ steps.matrix.outputs.version-emqx }}
version-emqx-enterprise: ${{ steps.matrix.outputs.version-emqx-enterprise }}
runner_labels: ${{ github.repository_owner == 'emqx' && '["self-hosted","ephemeral","linux","x64"]' || '["ubuntu-22.04"]' }}
xl_runner_labels: ${{ github.repository_owner == 'emqx' && '["self-hosted","ephemeral-xl","linux","x64"]' || '["ubuntu-22.04"]' }}
builder: "ghcr.io/emqx/emqx-builder/5.2-3:1.14.5-25.3.2-2-ubuntu22.04"
builder_vsn: "5.2-3"
otp_vsn: "25.3.2-2"
@ -116,7 +114,7 @@ jobs:
echo "version-emqx-enterprise=$(./pkg-vsn.sh emqx-enterprise)" | tee -a $GITHUB_OUTPUT
compile:
runs-on: ${{ fromJSON(needs.sanity-checks.outputs.xl_runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral-xl","linux","x64"]') }}
container: ${{ needs.sanity-checks.outputs.builder }}
needs:
- sanity-checks
@ -155,7 +153,6 @@ jobs:
- compile
uses: ./.github/workflows/run_emqx_app_tests.yaml
with:
runner_labels: ${{ needs.sanity-checks.outputs.xl_runner_labels }}
builder: ${{ needs.sanity-checks.outputs.builder }}
before_ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.base.sha || github.event.before }}
after_ref: ${{ github.sha }}
@ -166,7 +163,6 @@ jobs:
- compile
uses: ./.github/workflows/run_test_cases.yaml
with:
runner_labels: ${{ needs.sanity-checks.outputs.runner_labels }}
builder: ${{ needs.sanity-checks.outputs.builder }}
ct-matrix: ${{ needs.sanity-checks.outputs.ct-matrix }}
ct-host: ${{ needs.sanity-checks.outputs.ct-host }}
@ -178,7 +174,6 @@ jobs:
- compile
uses: ./.github/workflows/static_checks.yaml
with:
runner_labels: ${{ needs.sanity-checks.outputs.runner_labels }}
builder: ${{ needs.sanity-checks.outputs.builder }}
ct-matrix: ${{ needs.sanity-checks.outputs.ct-matrix }}
@ -187,7 +182,6 @@ jobs:
- sanity-checks
uses: ./.github/workflows/build_slim_packages.yaml
with:
runner_labels: ${{ needs.sanity-checks.outputs.runner_labels }}
builder: ${{ needs.sanity-checks.outputs.builder }}
builder_vsn: ${{ needs.sanity-checks.outputs.builder_vsn }}
otp_vsn: ${{ needs.sanity-checks.outputs.otp_vsn }}
@ -198,7 +192,6 @@ jobs:
- sanity-checks
uses: ./.github/workflows/build_docker_for_test.yaml
with:
runner_labels: ${{ needs.sanity-checks.outputs.runner_labels }}
otp_vsn: ${{ needs.sanity-checks.outputs.otp_vsn }}
elixir_vsn: ${{ needs.sanity-checks.outputs.elixir_vsn }}
version-emqx: ${{ needs.sanity-checks.outputs.version-emqx }}
@ -209,8 +202,6 @@ jobs:
- sanity-checks
- build_slim_packages
uses: ./.github/workflows/spellcheck.yaml
with:
runner_labels: ${{ needs.sanity-checks.outputs.runner_labels }}
run_conf_tests:
needs:
@ -218,7 +209,6 @@ jobs:
- compile
uses: ./.github/workflows/run_conf_tests.yaml
with:
runner_labels: ${{ needs.sanity-checks.outputs.runner_labels }}
builder: ${{ needs.sanity-checks.outputs.builder }}
check_deps_integrity:
@ -226,7 +216,6 @@ jobs:
- sanity-checks
uses: ./.github/workflows/check_deps_integrity.yaml
with:
runner_labels: ${{ needs.sanity-checks.outputs.runner_labels }}
builder: ${{ needs.sanity-checks.outputs.builder }}
run_jmeter_tests:
@ -235,7 +224,6 @@ jobs:
- build_docker_for_test
uses: ./.github/workflows/run_jmeter_tests.yaml
with:
runner_labels: ${{ needs.sanity-checks.outputs.runner_labels }}
version-emqx: ${{ needs.sanity-checks.outputs.version-emqx }}
run_docker_tests:
@ -244,7 +232,6 @@ jobs:
- build_docker_for_test
uses: ./.github/workflows/run_docker_tests.yaml
with:
runner_labels: ${{ needs.sanity-checks.outputs.runner_labels }}
version-emqx: ${{ needs.sanity-checks.outputs.version-emqx }}
version-emqx-enterprise: ${{ needs.sanity-checks.outputs.version-emqx-enterprise }}
@ -254,6 +241,5 @@ jobs:
- build_docker_for_test
uses: ./.github/workflows/run_helm_tests.yaml
with:
runner_labels: ${{ needs.sanity-checks.outputs.runner_labels }}
version-emqx: ${{ needs.sanity-checks.outputs.version-emqx }}
version-emqx-enterprise: ${{ needs.sanity-checks.outputs.version-emqx-enterprise }}

View File

@ -19,7 +19,7 @@ env:
jobs:
prepare:
runs-on: ${{ fromJSON(github.repository_owner == 'emqx' && '["self-hosted","ephemeral","linux","x64"]' || '["ubuntu-22.04"]') }}
runs-on: ubuntu-22.04
container: 'ghcr.io/emqx/emqx-builder/5.2-3:1.14.5-25.3.2-2-ubuntu22.04'
outputs:
profile: ${{ steps.parse-git-ref.outputs.profile }}
@ -29,7 +29,6 @@ jobs:
ct-matrix: ${{ steps.matrix.outputs.ct-matrix }}
ct-host: ${{ steps.matrix.outputs.ct-host }}
ct-docker: ${{ steps.matrix.outputs.ct-docker }}
runner_labels: ${{ github.repository_owner == 'emqx' && '["self-hosted","ephemeral","linux","x64"]' || '["ubuntu-22.04"]' }}
builder: 'ghcr.io/emqx/emqx-builder/5.2-3:1.14.5-25.3.2-2-ubuntu22.04'
builder_vsn: '5.2-3'
otp_vsn: '25.3.2-2'
@ -108,7 +107,6 @@ jobs:
otp_vsn: ${{ needs.prepare.outputs.otp_vsn }}
elixir_vsn: ${{ needs.prepare.outputs.elixir_vsn }}
builder_vsn: ${{ needs.prepare.outputs.builder_vsn }}
runner_labels: ${{ needs.prepare.outputs.runner_labels }}
secrets: inherit
build_slim_packages:
@ -117,7 +115,6 @@ jobs:
- prepare
uses: ./.github/workflows/build_slim_packages.yaml
with:
runner_labels: ${{ needs.prepare.outputs.runner_labels }}
builder: ${{ needs.prepare.outputs.builder }}
builder_vsn: ${{ needs.prepare.outputs.builder_vsn }}
otp_vsn: ${{ needs.prepare.outputs.otp_vsn }}
@ -125,7 +122,7 @@ jobs:
compile:
if: needs.prepare.outputs.release != 'true'
runs-on: ${{ fromJSON(needs.prepare.outputs.runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
container: ${{ needs.prepare.outputs.builder }}
needs:
- prepare
@ -164,7 +161,6 @@ jobs:
- compile
uses: ./.github/workflows/run_emqx_app_tests.yaml
with:
runner_labels: ${{ needs.prepare.outputs.runner_labels }}
builder: ${{ needs.prepare.outputs.builder }}
before_ref: ${{ github.event.before }}
after_ref: ${{ github.sha }}
@ -176,7 +172,6 @@ jobs:
- compile
uses: ./.github/workflows/run_test_cases.yaml
with:
runner_labels: ${{ needs.prepare.outputs.runner_labels }}
builder: ${{ needs.prepare.outputs.builder }}
ct-matrix: ${{ needs.prepare.outputs.ct-matrix }}
ct-host: ${{ needs.prepare.outputs.ct-host }}
@ -189,7 +184,6 @@ jobs:
- compile
uses: ./.github/workflows/run_conf_tests.yaml
with:
runner_labels: ${{ needs.prepare.outputs.runner_labels }}
builder: ${{ needs.prepare.outputs.builder }}
static_checks:
@ -199,6 +193,5 @@ jobs:
- compile
uses: ./.github/workflows/static_checks.yaml
with:
runner_labels: ${{ needs.prepare.outputs.runner_labels }}
builder: ${{ needs.prepare.outputs.builder }}
ct-matrix: ${{ needs.prepare.outputs.ct-matrix }}

View File

@ -28,9 +28,6 @@ on:
builder_vsn:
required: true
type: string
runner_labels:
required: true
type: string
secrets:
DOCKER_HUB_USER:
required: true
@ -70,17 +67,13 @@ on:
required: false
type: string
default: '5.2-3'
runner_labels:
required: false
type: string
default: '["self-hosted","ephemeral","linux","x64"]'
permissions:
contents: read
jobs:
docker:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
strategy:
fail-fast: false

View File

@ -7,9 +7,6 @@ concurrency:
on:
workflow_call:
inputs:
runner_labels:
required: true
type: string
otp_vsn:
required: true
type: string
@ -28,7 +25,7 @@ permissions:
jobs:
docker:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
env:
EMQX_NAME: ${{ matrix.profile }}
PKG_VSN: ${{ startsWith(matrix.profile, 'emqx-enterprise') && inputs.version-emqx-enterprise || inputs.version-emqx }}

View File

@ -115,6 +115,7 @@ jobs:
with:
name: ${{ matrix.profile }}
path: _packages/${{ matrix.profile }}/
retention-days: 7
mac:
strategy:
@ -149,9 +150,10 @@ jobs:
with:
name: ${{ matrix.profile }}
path: _packages/${{ matrix.profile }}/
retention-days: 7
linux:
runs-on: ['self-hosted', 'ephemeral', 'linux', "${{ matrix.arch }}"]
runs-on: [self-hosted, ephemeral, linux, "${{ matrix.arch }}"]
# always run in builder container because the host might have the wrong OTP version etc.
# otherwise buildx.sh does not run docker if arch and os matches the target arch and os.
container:
@ -199,8 +201,6 @@ jobs:
shell: bash
steps:
- uses: AutoModality/action-clean@v1
- uses: actions/checkout@v3
with:
ref: ${{ github.event.inputs.ref }}
@ -246,6 +246,7 @@ jobs:
with:
name: ${{ matrix.profile }}
path: _packages/${{ matrix.profile }}/
retention-days: 7
publish_artifacts:
runs-on: ubuntu-latest

View File

@ -12,7 +12,7 @@ on:
jobs:
linux:
if: github.repository_owner == 'emqx'
runs-on: ['self-hosted', 'ephemeral', 'linux', "${{ matrix.arch }}"]
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
container:
image: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}"
@ -21,7 +21,6 @@ jobs:
matrix:
profile:
- ['emqx', 'master']
- ['emqx-enterprise', 'release-52']
- ['emqx-enterprise', 'release-53']
otp:
- 25.3.2-2
@ -77,6 +76,7 @@ jobs:
with:
name: ${{ matrix.profile[0] }}
path: _packages/${{ matrix.profile[0] }}/
retention-days: 7
- name: Send notification to Slack
uses: slackapi/slack-github-action@v1.23.0
if: failure()
@ -100,7 +100,6 @@ jobs:
otp:
- 25.3.2-2
os:
- macos-13
- macos-12-arm64
steps:

View File

@ -7,9 +7,6 @@ concurrency:
on:
workflow_call:
inputs:
runner_labels:
required: true
type: string
builder:
required: true
type: string
@ -27,10 +24,6 @@ on:
inputs:
ref:
required: false
runner_labels:
required: false
type: string
default: '["self-hosted","ephemeral", "linux", "x64"]'
builder:
required: false
type: string
@ -50,7 +43,7 @@ on:
jobs:
linux:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
env:
EMQX_NAME: ${{ matrix.profile[0] }}
@ -113,7 +106,6 @@ jobs:
otp:
- ${{ inputs.otp_vsn }}
os:
- macos-11
- macos-12-arm64
runs-on: ${{ matrix.os }}

View File

@ -3,9 +3,6 @@ name: Check integrity of rebar and mix dependencies
on:
workflow_call:
inputs:
runner_labels:
required: true
type: string
builder:
required: true
type: string
@ -15,7 +12,7 @@ permissions:
jobs:
check_deps_integrity:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
container: ${{ inputs.builder }}
steps:
- uses: actions/checkout@v3

View File

@ -14,7 +14,7 @@ permissions:
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
timeout-minutes: 360
permissions:
actions: read

View File

@ -17,7 +17,7 @@ permissions:
jobs:
rerun-failed-jobs:
if: github.repository_owner == 'emqx'
runs-on: ['self-hosted', 'linux', 'x64', 'ephemeral']
runs-on: ubuntu-22.04
permissions:
checks: read
actions: write

View File

@ -7,9 +7,6 @@ concurrency:
on:
workflow_call:
inputs:
runner_labels:
required: true
type: string
builder:
required: true
type: string
@ -19,7 +16,7 @@ permissions:
jobs:
run_conf_tests:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
container: ${{ inputs.builder }}
strategy:
fail-fast: false
@ -48,4 +45,4 @@ jobs:
with:
name: logs-${{ matrix.profile }}
path: _build/${{ matrix.profile }}/rel/emqx/logs
retention-days: 7

View File

@ -7,9 +7,6 @@ concurrency:
on:
workflow_call:
inputs:
runner_labels:
required: true
type: string
version-emqx:
required: true
type: string
@ -22,7 +19,7 @@ permissions:
jobs:
basic-tests:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
defaults:
run:
shell: bash
@ -66,7 +63,7 @@ jobs:
docker compose rm -fs
paho-mqtt-testing:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON('["self-hosted","ephemeral","linux","x64"]') || 'ubuntu-22.04' }}
defaults:
run:
shell: bash

View File

@ -10,9 +10,6 @@ concurrency:
on:
workflow_call:
inputs:
runner_labels:
required: true
type: string
builder:
required: true
type: string
@ -31,7 +28,7 @@ permissions:
jobs:
run_emqx_app_tests:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
container: ${{ inputs.builder }}
defaults:
@ -66,3 +63,4 @@ jobs:
with:
name: logs-emqx-app-tests
path: apps/emqx/_build/test/logs
retention-days: 7

View File

@ -7,9 +7,6 @@ concurrency:
on:
workflow_call:
inputs:
runner_labels:
required: true
type: string
version-emqx:
required: true
type: string
@ -22,7 +19,7 @@ permissions:
jobs:
helm_test:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON('["self-hosted","ephemeral","linux","x64"]') || 'ubuntu-22.04' }}
defaults:
run:
shell: bash

View File

@ -3,16 +3,13 @@ name: JMeter integration tests
on:
workflow_call:
inputs:
runner_labels:
required: true
type: string
version-emqx:
required: true
type: string
jobs:
jmeter_artifact:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
steps:
- name: Cache Jmeter
id: cache-jmeter
@ -39,9 +36,10 @@ jobs:
with:
name: apache-jmeter.tgz
path: /tmp/apache-jmeter.tgz
retention-days: 3
advanced_feat:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON('["self-hosted","ephemeral","linux","x64"]') || 'ubuntu-22.04' }}
strategy:
fail-fast: false
@ -90,9 +88,10 @@ jobs:
with:
name: jmeter_logs
path: ./jmeter_logs
retention-days: 3
pgsql_authn_authz:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON('["self-hosted","ephemeral","linux","x64"]') || 'ubuntu-22.04' }}
strategy:
fail-fast: false
@ -156,9 +155,10 @@ jobs:
with:
name: jmeter_logs
path: ./jmeter_logs
retention-days: 3
mysql_authn_authz:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON('["self-hosted","ephemeral","linux","x64"]') || 'ubuntu-22.04' }}
strategy:
fail-fast: false
@ -215,9 +215,10 @@ jobs:
with:
name: jmeter_logs
path: ./jmeter_logs
retention-days: 3
JWT_authn:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON('["self-hosted","ephemeral","linux","x64"]') || 'ubuntu-22.04' }}
strategy:
fail-fast: false
@ -266,9 +267,10 @@ jobs:
with:
name: jmeter_logs
path: ./jmeter_logs
retention-days: 3
built_in_database_authn_authz:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON('["self-hosted","ephemeral","linux","x64"]') || 'ubuntu-22.04' }}
strategy:
fail-fast: false
@ -309,3 +311,4 @@ jobs:
with:
name: jmeter_logs
path: ./jmeter_logs
retention-days: 3

View File

@ -7,9 +7,6 @@ concurrency:
on:
workflow_call:
inputs:
runner:
required: true
type: string
builder:
required: true
type: string
@ -19,7 +16,7 @@ permissions:
jobs:
relup_test_plan:
runs-on: ["${{ inputs.runner }}", 'linux', 'x64', 'ephemeral']
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
container: ${{ inputs.builder }}
outputs:
CUR_EE_VSN: ${{ steps.find-versions.outputs.CUR_EE_VSN }}
@ -57,12 +54,13 @@ jobs:
_packages
scripts
.ci
retention-days: 7
relup_test_run:
needs:
- relup_test_plan
if: needs.relup_test_plan.outputs.OLD_VERSIONS != '[]'
runs-on: ["${{ inputs.runner }}", 'linux', 'x64', 'ephemeral']
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
strategy:
fail-fast: false
matrix:
@ -120,3 +118,4 @@ jobs:
name: debug_data
path: |
lux_logs
retention-days: 3

View File

@ -7,9 +7,6 @@ concurrency:
on:
workflow_call:
inputs:
runner_labels:
required: true
type: string
builder:
required: true
type: string
@ -28,7 +25,7 @@ env:
jobs:
eunit_and_proper:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON('["self-hosted","ephemeral","linux","x64"]') || 'ubuntu-22.04' }}
name: "eunit_and_proper (${{ matrix.profile }})"
strategy:
fail-fast: false
@ -68,9 +65,10 @@ jobs:
with:
name: coverdata
path: _build/test/cover
retention-days: 7
ct_docker:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON('["self-hosted","ephemeral","linux","x64"]') || 'ubuntu-22.04' }}
name: "${{ matrix.app }}-${{ matrix.suitegroup }} (${{ matrix.profile }})"
strategy:
fail-fast: false
@ -111,6 +109,7 @@ jobs:
with:
name: coverdata
path: _build/test/cover
retention-days: 7
- name: compress logs
if: failure()
run: tar -czf logs.tar.gz _build/test/logs
@ -119,9 +118,10 @@ jobs:
with:
name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-${{ matrix.otp }}-sg${{ matrix.suitegroup }}
path: logs.tar.gz
retention-days: 7
ct:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON('["self-hosted","ephemeral","linux","x64"]') || 'ubuntu-22.04' }}
name: "${{ matrix.app }}-${{ matrix.suitegroup }} (${{ matrix.profile }})"
strategy:
fail-fast: false
@ -156,6 +156,7 @@ jobs:
name: coverdata
path: _build/test/cover
if-no-files-found: warn # do not fail if no coverdata found
retention-days: 7
- name: compress logs
if: failure()
run: tar -czf logs.tar.gz _build/test/logs
@ -164,13 +165,14 @@ jobs:
with:
name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-${{ matrix.otp }}-sg${{ matrix.suitegroup }}
path: logs.tar.gz
retention-days: 7
tests_passed:
needs:
- eunit_and_proper
- ct
- ct_docker
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ubuntu-22.04
strategy:
fail-fast: false
steps:
@ -181,7 +183,7 @@ jobs:
- eunit_and_proper
- ct
- ct_docker
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
container: ${{ inputs.builder }}
strategy:
fail-fast: false
@ -221,7 +223,7 @@ jobs:
# do this in a separate job
upload_coverdata:
needs: make_cover
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ubuntu-22.04
steps:
- name: Coveralls Finished
env:

View File

@ -6,10 +6,6 @@ concurrency:
on:
workflow_call:
inputs:
runner_labels:
required: true
type: string
permissions:
contents: read
@ -21,7 +17,7 @@ jobs:
profile:
- emqx
- emqx-enterprise
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
steps:
- uses: actions/download-artifact@v3
with:

View File

@ -14,7 +14,7 @@ permissions:
jobs:
stale:
if: github.repository_owner == 'emqx'
runs-on: ['self-hosted', 'linux', 'x64', 'ephemeral']
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
permissions:
issues: write
pull-requests: none

View File

@ -7,9 +7,6 @@ concurrency:
on:
workflow_call:
inputs:
runner_labels:
required: true
type: string
builder:
required: true
type: string
@ -25,7 +22,7 @@ permissions:
jobs:
static_checks:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON('["self-hosted","ephemeral","linux","x64"]') || 'ubuntu-22.04' }}
name: "static_checks (${{ matrix.profile }})"
strategy:
fail-fast: false

View File

@ -35,7 +35,7 @@
-define(EMQX_RELEASE_CE, "5.3.1-alpha.1").
%% Enterprise edition
-define(EMQX_RELEASE_EE, "5.3.1-alpha.2").
-define(EMQX_RELEASE_EE, "5.3.1-alpha.4").
%% The HTTP API version
-define(EMQX_API_VERSION, "5.0").

View File

@ -30,7 +30,7 @@
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.7"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.16"}}},
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.2.1"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.16"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.19"}}},
{emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.3"}}},
{pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
{recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},

View File

@ -703,7 +703,7 @@ atom(Bin) when is_binary(Bin), size(Bin) > 255 ->
erlang:throw(
iolist_to_binary(
io_lib:format(
"Name is is too long."
"Name is too long."
" Please provide a shorter name (<= 255 bytes)."
" The name that is too long: \"~s\"",
[Bin]

View File

@ -169,7 +169,11 @@
-export([namespace/0, roots/0, roots/1, fields/1, desc/1, tags/0]).
-export([conf_get/2, conf_get/3, keys/2, filter/1]).
-export([
server_ssl_opts_schema/2, client_ssl_opts_schema/1, ciphers_schema/1, tls_versions_schema/1
server_ssl_opts_schema/2,
client_ssl_opts_schema/1,
ciphers_schema/1,
tls_versions_schema/1,
description_schema/0
]).
-export([password_converter/2, bin_str_converter/2]).
-export([authz_fields/0]).
@ -3649,3 +3653,14 @@ default_mem_check_interval() ->
true -> <<"60s">>;
false -> disabled
end.
description_schema() ->
sc(
string(),
#{
default => <<"">>,
desc => ?DESC(description),
required => false,
importance => ?IMPORTANCE_LOW
}
).

View File

@ -191,45 +191,50 @@ unload_hook() ->
on_message_publish(Message = #message{topic = Topic, flags = Flags}) ->
case maps:get(sys, Flags, false) of
false ->
{Msg, _} = emqx_rule_events:eventmsg_publish(Message),
send_to_matched_egress_bridges(Topic, Msg);
send_to_matched_egress_bridges(Topic, Message);
true ->
ok
end,
{ok, Message}.
send_to_matched_egress_bridges(Topic, Msg) ->
MatchedBridgeIds = get_matched_egress_bridges(Topic),
lists:foreach(
fun(Id) ->
try send_message(Id, Msg) of
{error, Reason} ->
?SLOG(error, #{
msg => "send_message_to_bridge_failed",
bridge => Id,
error => Reason
});
_ ->
ok
catch
throw:Reason ->
?SLOG(error, #{
msg => "send_message_to_bridge_exception",
bridge => Id,
reason => emqx_utils:redact(Reason)
});
Err:Reason:ST ->
?SLOG(error, #{
msg => "send_message_to_bridge_exception",
bridge => Id,
error => Err,
reason => emqx_utils:redact(Reason),
stacktrace => emqx_utils:redact(ST)
})
end
end,
MatchedBridgeIds
).
send_to_matched_egress_bridges(Topic, Message) ->
case get_matched_egress_bridges(Topic) of
[] ->
ok;
Ids ->
{Msg, _} = emqx_rule_events:eventmsg_publish(Message),
send_to_matched_egress_bridges_loop(Topic, Msg, Ids)
end.
send_to_matched_egress_bridges_loop(_Topic, _Msg, []) ->
ok;
send_to_matched_egress_bridges_loop(Topic, Msg, [Id | Ids]) ->
try send_message(Id, Msg) of
{error, Reason} ->
?SLOG(error, #{
msg => "send_message_to_bridge_failed",
bridge => Id,
error => Reason
});
_ ->
ok
catch
throw:Reason ->
?SLOG(error, #{
msg => "send_message_to_bridge_exception",
bridge => Id,
reason => emqx_utils:redact(Reason)
});
Err:Reason:ST ->
?SLOG(error, #{
msg => "send_message_to_bridge_exception",
bridge => Id,
error => Err,
reason => emqx_utils:redact(Reason),
stacktrace => emqx_utils:redact(ST)
})
end,
send_to_matched_egress_bridges_loop(Topic, Msg, Ids).
send_message(BridgeId, Message) ->
{BridgeType, BridgeName} = emqx_bridge_resource:parse_bridge_id(BridgeId),
@ -571,6 +576,7 @@ flatten_confs(Conf0) ->
do_flatten_confs(Type, Conf0) ->
[{{Type, Name}, Conf} || {Name, Conf} <- maps:to_list(Conf0)].
%% TODO: create a topic index for this
get_matched_egress_bridges(Topic) ->
Bridges = emqx:get_config([bridges], #{}),
maps:fold(

View File

@ -387,6 +387,7 @@ schema("/bridges/:id/enable/:enable") ->
responses =>
#{
204 => <<"Success">>,
400 => error_schema('BAD_REQUEST', non_compat_bridge_msg()),
404 => error_schema('NOT_FOUND', "Bridge not found or invalid operation"),
503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable")
}
@ -507,7 +508,7 @@ schema("/bridges_probe") ->
case maps:get(<<"also_delete_dep_actions">>, Qs, <<"false">>) of
<<"true">> -> [rule_actions, connector];
true -> [rule_actions, connector];
_ -> []
_ -> [connector]
end,
case emqx_bridge:check_deps_and_remove(BridgeType, BridgeName, AlsoDelete) of
ok ->
@ -529,7 +530,7 @@ schema("/bridges_probe") ->
{error, not_found} ->
?BRIDGE_NOT_FOUND(BridgeType, BridgeName);
{error, not_bridge_v1_compatible} ->
?BAD_REQUEST('ALREADY_EXISTS', non_compat_bridge_msg())
?BAD_REQUEST(non_compat_bridge_msg())
end
).
@ -667,6 +668,10 @@ get_metrics_from_local_node(BridgeType0, BridgeName) ->
?SERVICE_UNAVAILABLE(<<"request timeout">>);
{error, timeout} ->
?SERVICE_UNAVAILABLE(<<"request timeout">>);
{error, not_bridge_v1_compatible} ->
?BAD_REQUEST(non_compat_bridge_msg());
{error, bridge_not_found} ->
?BRIDGE_NOT_FOUND(BridgeType, BridgeName);
{error, Reason} ->
?INTERNAL_ERROR(Reason)
end
@ -747,7 +752,7 @@ is_bridge_enabled_v1(BridgeType, BridgeName) ->
is_bridge_enabled_v2(BridgeV1Type, BridgeName) ->
BridgeV2Type = emqx_bridge_v2:bridge_v1_type_to_bridge_v2_type(BridgeV1Type),
try emqx:get_config([bridges_v2, BridgeV2Type, binary_to_existing_atom(BridgeName)]) of
try emqx:get_config([actions, BridgeV2Type, binary_to_existing_atom(BridgeName)]) of
ConfMap ->
maps:get(enable, ConfMap, true)
catch
@ -895,7 +900,7 @@ format_resource(
case emqx_bridge_v2:is_bridge_v2_type(Type) of
true ->
%% The defaults are already filled in
RawConf;
downgrade_raw_conf(Type, RawConf);
false ->
fill_defaults(Type, RawConf)
end,
@ -1073,7 +1078,7 @@ call_operation(NodeOrAll, OperFunc, Args = [_Nodes, BridgeType, BridgeName]) ->
?NOT_FOUND(<<"Node not found: ", (atom_to_binary(Node))/binary>>);
{error, {unhealthy_target, Message}} ->
?BAD_REQUEST(Message);
{error, Reason} when not is_tuple(Reason); element(1, Reason) =/= 'exit' ->
{error, Reason} ->
?BAD_REQUEST(redact(Reason))
end.
@ -1159,3 +1164,19 @@ upgrade_type(Type) ->
downgrade_type(Type) ->
emqx_bridge_lib:downgrade_type(Type).
%% TODO: move it to callback
downgrade_raw_conf(kafka_producer, RawConf) ->
rename(<<"parameters">>, <<"kafka">>, RawConf);
downgrade_raw_conf(azure_event_hub_producer, RawConf) ->
rename(<<"parameters">>, <<"kafka">>, RawConf);
downgrade_raw_conf(_Type, RawConf) ->
RawConf.
rename(OldKey, NewKey, Map) ->
case maps:find(OldKey, Map) of
{ok, Value} ->
maps:remove(OldKey, maps:put(NewKey, Value, Map));
error ->
Map
end.

View File

@ -102,21 +102,15 @@ bridge_id(BridgeType, BridgeName) ->
<<Type/binary, ":", Name/binary>>.
parse_bridge_id(BridgeId) ->
parse_bridge_id(BridgeId, #{atom_name => true}).
parse_bridge_id(bin(BridgeId), #{atom_name => true}).
-spec parse_bridge_id(list() | binary() | atom(), #{atom_name => boolean()}) ->
-spec parse_bridge_id(binary() | atom(), #{atom_name => boolean()}) ->
{atom(), atom() | binary()}.
parse_bridge_id(<<"bridge:", ID/binary>>, Opts) ->
parse_bridge_id(ID, Opts);
parse_bridge_id(BridgeId, Opts) ->
case string:split(bin(BridgeId), ":", all) of
[Type, Name] ->
{to_type_atom(Type), validate_name(Name, Opts)};
[Bridge, Type, Name] when Bridge =:= <<"bridge">>; Bridge =:= "bridge" ->
{to_type_atom(Type), validate_name(Name, Opts)};
_ ->
invalid_data(
<<"should be of pattern {type}:{name}, but got ", BridgeId/binary>>
)
end.
{Type, Name} = emqx_resource:parse_resource_id(BridgeId, Opts),
{emqx_bridge_lib:upgrade_type(Type), Name}.
bridge_hookpoint(BridgeId) ->
<<"$bridges/", (bin(BridgeId))/binary>>.
@ -126,48 +120,9 @@ bridge_hookpoint_to_bridge_id(?BRIDGE_HOOKPOINT(BridgeId)) ->
bridge_hookpoint_to_bridge_id(_) ->
{error, bad_bridge_hookpoint}.
validate_name(Name0, Opts) ->
Name = unicode:characters_to_list(Name0, utf8),
case is_list(Name) andalso Name =/= [] of
true ->
case lists:all(fun is_id_char/1, Name) of
true ->
case maps:get(atom_name, Opts, true) of
% NOTE
% Rule may be created before bridge, thus not `list_to_existing_atom/1`,
% also it is infrequent user input anyway.
true -> list_to_atom(Name);
false -> Name0
end;
false ->
invalid_data(<<"bad name: ", Name0/binary>>)
end;
false ->
invalid_data(<<"only 0-9a-zA-Z_-. is allowed in name: ", Name0/binary>>)
end.
-spec invalid_data(binary()) -> no_return().
invalid_data(Reason) -> throw(#{kind => validation_error, reason => Reason}).
is_id_char(C) when C >= $0 andalso C =< $9 -> true;
is_id_char(C) when C >= $a andalso C =< $z -> true;
is_id_char(C) when C >= $A andalso C =< $Z -> true;
is_id_char($_) -> true;
is_id_char($-) -> true;
is_id_char($.) -> true;
is_id_char(_) -> false.
to_type_atom(<<"kafka">>) ->
%% backward compatible
kafka_producer;
to_type_atom(Type) ->
try
erlang:binary_to_existing_atom(Type, utf8)
catch
_:_ ->
invalid_data(<<"unknown bridge type: ", Type/binary>>)
end.
reset_metrics(ResourceId) ->
%% TODO we should not create atoms here
{Type, Name} = parse_bridge_id(ResourceId),

View File

@ -24,7 +24,9 @@
-include_lib("emqx_resource/include/emqx_resource.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-define(ROOT_KEY, bridges_v2).
%% Note: this is strange right now, because it lives in `emqx_bridge_v2', but it shall be
%% refactored into a new module/application with appropriate name.
-define(ROOT_KEY, actions).
%% Loading and unloading config when EMQX starts and stops
-export([
@ -38,7 +40,11 @@
list/0,
lookup/2,
create/3,
remove/2
remove/2,
%% The following is the remove function that is called by the HTTP API
%% It also checks for rule action dependencies and optionally removes
%% them
check_deps_and_remove/3
]).
%% Operations
@ -173,20 +179,24 @@ lookup(Type, Name) ->
Channels = maps:get(added_channels, InstanceData, #{}),
BridgeV2Id = id(Type, Name, BridgeConnector),
ChannelStatus = maps:get(BridgeV2Id, Channels, undefined),
DisplayBridgeV2Status =
{DisplayBridgeV2Status, ErrorMsg} =
case ChannelStatus of
{error, undefined} -> <<"Unknown reason">>;
{error, Reason} -> emqx_utils:readable_error_msg(Reason);
connected -> <<"connected">>;
connecting -> <<"connecting">>;
Error -> emqx_utils:readable_error_msg(Error)
#{status := connected} ->
{connected, <<"">>};
#{status := Status, error := undefined} ->
{Status, <<"Unknown reason">>};
#{status := Status, error := Error} ->
{Status, emqx_utils:readable_error_msg(Error)};
undefined ->
{disconnected, <<"Pending installation">>}
end,
{ok, #{
type => Type,
name => Name,
raw_config => RawConf,
resource_data => InstanceData,
status => DisplayBridgeV2Status
status => DisplayBridgeV2Status,
error => ErrorMsg
}}
end.
@ -227,6 +237,25 @@ remove(BridgeType, BridgeName) ->
{error, Reason} -> {error, Reason}
end.
check_deps_and_remove(BridgeType, BridgeName, AlsoDeleteActions) ->
AlsoDelete =
case AlsoDeleteActions of
true -> [rule_actions];
false -> []
end,
case
emqx_bridge_lib:maybe_withdraw_rule_action(
BridgeType,
BridgeName,
AlsoDelete
)
of
ok ->
remove(BridgeType, BridgeName);
{error, Reason} ->
{error, Reason}
end.
%%--------------------------------------------------------------------
%% Helpers for CRUD API
%%--------------------------------------------------------------------
@ -407,39 +436,71 @@ disable_enable(Action, BridgeType, BridgeName) when
%% Manually start connector. This function can speed up reconnection when
%% waiting for auto reconnection. The function forwards the start request to
%% its connector.
%% its connector. Returns ok if the status of the bridge is connected after
%% starting the connector. Returns {error, Reason} if the status of the bridge
%% is something else than connected after starting the connector or if an
%% error occurred when the connector was started.
-spec start(term(), term()) -> ok | {error, Reason :: term()}.
start(BridgeV2Type, Name) ->
ConnectorOpFun = fun(ConnectorType, ConnectorName) ->
emqx_connector_resource:start(ConnectorType, ConnectorName)
end,
connector_operation_helper(BridgeV2Type, Name, ConnectorOpFun).
connector_operation_helper(BridgeV2Type, Name, ConnectorOpFun, true).
connector_operation_helper(BridgeV2Type, Name, ConnectorOpFun) ->
connector_operation_helper(BridgeV2Type, Name, ConnectorOpFun, DoHealthCheck) ->
connector_operation_helper_with_conf(
BridgeV2Type,
Name,
lookup_conf(BridgeV2Type, Name),
ConnectorOpFun
ConnectorOpFun,
DoHealthCheck
).
connector_operation_helper_with_conf(
_BridgeV2Type,
_Name,
{error, bridge_not_found} = Error,
_ConnectorOpFun
_ConnectorOpFun,
_DoHealthCheck
) ->
Error;
connector_operation_helper_with_conf(
_BridgeV2Type,
_Name,
#{enable := false},
_ConnectorOpFun
_ConnectorOpFun,
_DoHealthCheck
) ->
ok;
connector_operation_helper_with_conf(
BridgeV2Type,
Name,
#{connector := ConnectorName},
ConnectorOpFun
ConnectorOpFun,
DoHealthCheck
) ->
ConnectorType = connector_type(BridgeV2Type),
ConnectorOpFun(ConnectorType, ConnectorName).
ConnectorOpFunResult = ConnectorOpFun(ConnectorType, ConnectorName),
case {DoHealthCheck, ConnectorOpFunResult} of
{false, _} ->
ConnectorOpFunResult;
{true, {error, Reason}} ->
{error, Reason};
{true, ok} ->
case health_check(BridgeV2Type, Name) of
#{status := connected} ->
ok;
{error, Reason} ->
{error, Reason};
#{status := Status, error := Reason} ->
Msg = io_lib:format(
"Connector started but bridge (~s:~s) is not connected. "
"Bridge Status: ~p, Error: ~p",
[bin(BridgeV2Type), bin(Name), Status, Reason]
),
{error, iolist_to_binary(Msg)}
end
end.
reset_metrics(Type, Name) ->
reset_metrics_helper(Type, Name, lookup_conf(Type, Name)).
@ -476,16 +537,21 @@ do_send_msg_with_enabled_config(
BridgeType, BridgeName, Message, QueryOpts0, Config
) ->
QueryMode = get_query_mode(BridgeType, Config),
ConnectorName = maps:get(connector, Config),
ConnectorResId = emqx_connector_resource:resource_id(BridgeType, ConnectorName),
QueryOpts = maps:merge(
emqx_bridge:query_opts(Config),
QueryOpts0#{
query_mode => QueryMode,
query_mode_cache_override => false
connector_resource_id => ConnectorResId,
query_mode => QueryMode
}
),
BridgeV2Id = id(BridgeType, BridgeName),
emqx_resource:query(BridgeV2Id, {BridgeV2Id, Message}, QueryOpts).
-spec health_check(BridgeType :: term(), BridgeName :: term()) ->
#{status := term(), error := term()} | {error, Reason :: term()}.
health_check(BridgeType, BridgeName) ->
case lookup_conf(BridgeType, BridgeName) of
#{
@ -526,10 +592,10 @@ create_dry_run_helper(BridgeType, ConnectorRawConf, BridgeV2RawConf) ->
ConnectorId, ChannelTestId
),
case HealthCheckResult of
{error, Reason} ->
{error, Reason};
_ ->
ok
#{status := connected} ->
ok;
#{status := Status, error := Error} ->
{error, {Status, Error}}
end
end
end,
@ -538,7 +604,7 @@ create_dry_run_helper(BridgeType, ConnectorRawConf, BridgeV2RawConf) ->
create_dry_run(Type, Conf0) ->
Conf1 = maps:without([<<"name">>], Conf0),
TypeBin = bin(Type),
RawConf = #{<<"bridges_v2">> => #{TypeBin => #{<<"temp_name">> => Conf1}}},
RawConf = #{<<"actions">> => #{TypeBin => #{<<"temp_name">> => Conf1}}},
%% Check config
try
_ =
@ -679,7 +745,7 @@ parse_id(Id) ->
case binary:split(Id, <<":">>, [global]) of
[Type, Name] ->
{Type, Name};
[<<"bridge_v2">>, Type, Name | _] ->
[<<"action">>, Type, Name | _] ->
{Type, Name};
_X ->
error({error, iolist_to_binary(io_lib:format("Invalid id: ~p", [Id]))})
@ -723,7 +789,7 @@ id(BridgeType, BridgeName) ->
id(BridgeType, BridgeName, ConnectorName) ->
ConnectorType = bin(connector_type(BridgeType)),
<<"bridge_v2:", (bin(BridgeType))/binary, ":", (bin(BridgeName))/binary, ":connector:",
<<"action:", (bin(BridgeType))/binary, ":", (bin(BridgeName))/binary, ":connector:",
(bin(ConnectorType))/binary, ":", (bin(ConnectorName))/binary>>.
connector_type(Type) ->
@ -745,8 +811,8 @@ bridge_v2_type_to_connector_type(azure_event_hub_producer) ->
%%====================================================================
import_config(RawConf) ->
%% bridges v2 structure
emqx_bridge:import_config(RawConf, <<"bridges_v2">>, ?ROOT_KEY, config_key_path()).
%% actions structure
emqx_bridge:import_config(RawConf, <<"actions">>, ?ROOT_KEY, config_key_path()).
%%====================================================================
%% Config Update Handler API
@ -771,8 +837,8 @@ pre_config_update(_Path, Conf, _OldConfig) when is_map(Conf) ->
operation_to_enable(disable) -> false;
operation_to_enable(enable) -> true.
%% This top level handler will be triggered when the bridges_v2 path is updated
%% with calls to emqx_conf:update([bridges_v2], BridgesConf, #{}).
%% This top level handler will be triggered when the actions path is updated
%% with calls to emqx_conf:update([actions], BridgesConf, #{}).
%%
%% A public API that can trigger this is:
%% bin/emqx ctl conf load data/configs/cluster.hocon
@ -939,7 +1005,7 @@ unpack_bridge_conf(Type, PackedConf, TopLevelConf) ->
%% Check if the bridge can be converted to a valid bridge v1
%%
%% * The corresponding bridge v2 should exist
%% * The connector for the bridge v2 should have exactly on channel
%% * The connector for the bridge v2 should have exactly one channel
is_valid_bridge_v1(BridgeV1Type, BridgeName) ->
BridgeV2Type = ?MODULE:bridge_v1_type_to_bridge_v2_type(BridgeV1Type),
case lookup_conf(BridgeV2Type, BridgeName) of
@ -986,7 +1052,7 @@ list_and_transform_to_bridge_v1() ->
[B || B <- Bridges, B =/= not_bridge_v1_compatible_error()].
lookup_and_transform_to_bridge_v1(BridgeV1Type, Name) ->
case is_valid_bridge_v1(BridgeV1Type, Name) of
case ?MODULE:is_valid_bridge_v1(BridgeV1Type, Name) of
true ->
Type = ?MODULE:bridge_v1_type_to_bridge_v2_type(BridgeV1Type),
case lookup(Type, Name) of
@ -1024,7 +1090,7 @@ lookup_and_transform_to_bridge_v1_helper(
BridgeV2RawConfig2 = fill_defaults(
BridgeV2Type,
BridgeV2RawConfig1,
<<"bridges_v2">>,
<<"actions">>,
emqx_bridge_v2_schema
),
BridgeV1Config1 = maps:remove(<<"connector">>, BridgeV2RawConfig2),
@ -1032,6 +1098,7 @@ lookup_and_transform_to_bridge_v1_helper(
BridgeV1Tmp = maps:put(raw_config, BridgeV1Config2, BridgeV2),
BridgeV1 = maps:remove(status, BridgeV1Tmp),
BridgeV2Status = maps:get(status, BridgeV2, undefined),
BridgeV2Error = maps:get(error, BridgeV2, undefined),
ResourceData1 = maps:get(resource_data, BridgeV1, #{}),
%% Replace id in resouce data
BridgeV1Id = <<"bridge:", (bin(BridgeV1Type))/binary, ":", (bin(BridgeName))/binary>>,
@ -1040,12 +1107,12 @@ lookup_and_transform_to_bridge_v1_helper(
case ConnectorStatus of
connected ->
case BridgeV2Status of
<<"connected">> ->
connected ->
%% No need to modify the status
{ok, BridgeV1#{resource_data => ResourceData2}};
NotConnected ->
ResourceData3 = maps:put(status, connecting, ResourceData2),
ResourceData4 = maps:put(error, NotConnected, ResourceData3),
ResourceData3 = maps:put(status, NotConnected, ResourceData2),
ResourceData4 = maps:put(error, BridgeV2Error, ResourceData3),
BridgeV1Final = maps:put(resource_data, ResourceData4, BridgeV1),
{ok, BridgeV1Final}
end;
@ -1068,21 +1135,29 @@ split_bridge_v1_config_and_create(BridgeV1Type, BridgeName, RawConf) ->
case lookup_conf(BridgeV2Type, BridgeName) of
{error, _} ->
%% If the bridge v2 does not exist, it is a valid bridge v1
split_bridge_v1_config_and_create_helper(BridgeV1Type, BridgeName, RawConf);
PreviousRawConf = undefined,
split_bridge_v1_config_and_create_helper(
BridgeV1Type, BridgeName, RawConf, PreviousRawConf
);
_Conf ->
case is_valid_bridge_v1(BridgeV1Type, BridgeName) of
case ?MODULE:is_valid_bridge_v1(BridgeV1Type, BridgeName) of
true ->
%% Using remove + create as update, hence do not delete deps.
RemoveDeps = [],
PreviousRawConf = emqx:get_raw_config(
[?ROOT_KEY, BridgeV2Type, BridgeName], undefined
),
bridge_v1_check_deps_and_remove(BridgeV1Type, BridgeName, RemoveDeps),
split_bridge_v1_config_and_create_helper(BridgeV1Type, BridgeName, RawConf);
split_bridge_v1_config_and_create_helper(
BridgeV1Type, BridgeName, RawConf, PreviousRawConf
);
false ->
%% If the bridge v2 exists, it is not a valid bridge v1
{error, non_compatible_bridge_v2_exists}
end
end.
split_bridge_v1_config_and_create_helper(BridgeV1Type, BridgeName, RawConf) ->
split_bridge_v1_config_and_create_helper(BridgeV1Type, BridgeName, RawConf, PreviousRawConf) ->
#{
connector_type := ConnectorType,
connector_name := NewConnectorName,
@ -1091,16 +1166,14 @@ split_bridge_v1_config_and_create_helper(BridgeV1Type, BridgeName, RawConf) ->
bridge_v2_name := BridgeName,
bridge_v2_conf := NewBridgeV2RawConf
} =
split_and_validate_bridge_v1_config(BridgeV1Type, BridgeName, RawConf),
%% TODO should we really create an atom here?
ConnectorNameAtom = binary_to_atom(NewConnectorName),
case emqx_connector:create(ConnectorType, ConnectorNameAtom, NewConnectorRawConf) of
split_and_validate_bridge_v1_config(BridgeV1Type, BridgeName, RawConf, PreviousRawConf),
case emqx_connector:create(ConnectorType, NewConnectorName, NewConnectorRawConf) of
{ok, _} ->
case create(BridgeType, BridgeName, NewBridgeV2RawConf) of
{ok, _} = Result ->
Result;
{error, Reason1} ->
case emqx_connector:remove(ConnectorType, ConnectorNameAtom) of
case emqx_connector:remove(ConnectorType, NewConnectorName) of
ok ->
{error, Reason1};
{error, Reason2} ->
@ -1118,14 +1191,14 @@ split_bridge_v1_config_and_create_helper(BridgeV1Type, BridgeName, RawConf) ->
Error
end.
split_and_validate_bridge_v1_config(BridgeV1Type, BridgeName, RawConf) ->
split_and_validate_bridge_v1_config(BridgeV1Type, BridgeName, RawConf, PreviousRawConf) ->
%% Create fake global config for the transformation and then call
%% emqx_connector_schema:transform_bridges_v1_to_connectors_and_bridges_v2/1
%% `emqx_connector_schema:transform_bridges_v1_to_connectors_and_bridges_v2/1'
BridgeV2Type = ?MODULE:bridge_v1_type_to_bridge_v2_type(BridgeV1Type),
ConnectorType = connector_type(BridgeV2Type),
%% Needed so name confligts will ba avoided
%% Needed to avoid name conflicts
CurrentConnectorsConfig = emqx:get_raw_config([connectors], #{}),
FakeGlobalConfig = #{
FakeGlobalConfig0 = #{
<<"connectors">> => CurrentConnectorsConfig,
<<"bridges">> => #{
bin(BridgeV1Type) => #{
@ -1133,6 +1206,13 @@ split_and_validate_bridge_v1_config(BridgeV1Type, BridgeName, RawConf) ->
}
}
},
FakeGlobalConfig =
emqx_utils_maps:put_if(
FakeGlobalConfig0,
bin(?ROOT_KEY),
#{bin(BridgeV2Type) => #{bin(BridgeName) => PreviousRawConf}},
PreviousRawConf =/= undefined
),
Output = emqx_connector_schema:transform_bridges_v1_to_connectors_and_bridges_v2(
FakeGlobalConfig
),
@ -1145,34 +1225,21 @@ split_and_validate_bridge_v1_config(BridgeV1Type, BridgeName, RawConf) ->
],
Output
),
ConnectorsBefore =
maps:keys(
emqx_utils_maps:deep_get(
[
<<"connectors">>,
bin(ConnectorType)
],
FakeGlobalConfig,
#{}
)
),
ConnectorsAfter =
maps:keys(
emqx_utils_maps:deep_get(
[
<<"connectors">>,
bin(ConnectorType)
],
Output
)
),
[NewConnectorName] = ConnectorsAfter -- ConnectorsBefore,
ConnectorName = emqx_utils_maps:deep_get(
[
bin(?ROOT_KEY),
bin(BridgeV2Type),
bin(BridgeName),
<<"connector">>
],
Output
),
NewConnectorRawConf =
emqx_utils_maps:deep_get(
[
<<"connectors">>,
bin(ConnectorType),
bin(NewConnectorName)
bin(ConnectorName)
],
Output
),
@ -1180,10 +1247,10 @@ split_and_validate_bridge_v1_config(BridgeV1Type, BridgeName, RawConf) ->
NewFakeGlobalConfig = #{
<<"connectors">> => #{
bin(ConnectorType) => #{
bin(NewConnectorName) => NewConnectorRawConf
bin(ConnectorName) => NewConnectorRawConf
}
},
<<"bridges_v2">> => #{
<<"actions">> => #{
bin(BridgeV2Type) => #{
bin(BridgeName) => NewBridgeV2RawConf
}
@ -1199,7 +1266,7 @@ split_and_validate_bridge_v1_config(BridgeV1Type, BridgeName, RawConf) ->
_ ->
#{
connector_type => ConnectorType,
connector_name => NewConnectorName,
connector_name => ConnectorName,
connector_conf => NewConnectorRawConf,
bridge_v2_type => BridgeV2Type,
bridge_v2_name => BridgeName,
@ -1214,6 +1281,7 @@ split_and_validate_bridge_v1_config(BridgeV1Type, BridgeName, RawConf) ->
bridge_v1_create_dry_run(BridgeType, RawConfig0) ->
RawConf = maps:without([<<"name">>], RawConfig0),
TmpName = iolist_to_binary([?TEST_ID_PREFIX, emqx_utils:gen_id(8)]),
PreviousRawConf = undefined,
#{
connector_type := _ConnectorType,
connector_name := _NewConnectorName,
@ -1221,7 +1289,7 @@ bridge_v1_create_dry_run(BridgeType, RawConfig0) ->
bridge_v2_type := BridgeV2Type,
bridge_v2_name := _BridgeName,
bridge_v2_conf := BridgeV2RawConf
} = split_and_validate_bridge_v1_config(BridgeType, TmpName, RawConf),
} = split_and_validate_bridge_v1_config(BridgeType, TmpName, RawConf, PreviousRawConf),
create_dry_run_helper(BridgeV2Type, ConnectorRawConf, BridgeV2RawConf).
bridge_v1_check_deps_and_remove(BridgeV1Type, BridgeName, RemoveDeps) ->
@ -1336,28 +1404,30 @@ bridge_v1_restart(BridgeV1Type, Name) ->
ConnectorOpFun = fun(ConnectorType, ConnectorName) ->
emqx_connector_resource:restart(ConnectorType, ConnectorName)
end,
bridge_v1_operation_helper(BridgeV1Type, Name, ConnectorOpFun).
bridge_v1_operation_helper(BridgeV1Type, Name, ConnectorOpFun, true).
bridge_v1_stop(BridgeV1Type, Name) ->
ConnectorOpFun = fun(ConnectorType, ConnectorName) ->
emqx_connector_resource:stop(ConnectorType, ConnectorName)
end,
bridge_v1_operation_helper(BridgeV1Type, Name, ConnectorOpFun).
bridge_v1_operation_helper(BridgeV1Type, Name, ConnectorOpFun, false).
bridge_v1_start(BridgeV1Type, Name) ->
ConnectorOpFun = fun(ConnectorType, ConnectorName) ->
emqx_connector_resource:start(ConnectorType, ConnectorName)
end,
bridge_v1_operation_helper(BridgeV1Type, Name, ConnectorOpFun).
bridge_v1_operation_helper(BridgeV1Type, Name, ConnectorOpFun, true).
bridge_v1_operation_helper(BridgeV1Type, Name, ConnectorOpFun) ->
bridge_v1_operation_helper(BridgeV1Type, Name, ConnectorOpFun, DoHealthCheck) ->
BridgeV2Type = ?MODULE:bridge_v1_type_to_bridge_v2_type(BridgeV1Type),
case emqx_bridge_v2:is_valid_bridge_v1(BridgeV1Type, Name) of
true ->
connector_operation_helper_with_conf(
BridgeV2Type,
Name,
lookup_conf(BridgeV2Type, Name),
ConnectorOpFun
ConnectorOpFun,
DoHealthCheck
);
false ->
{error, not_bridge_v1_compatible}
@ -1373,10 +1443,10 @@ bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8).
extract_connector_id_from_bridge_v2_id(Id) ->
case binary:split(Id, <<":">>, [global]) of
[<<"bridge_v2">>, _Type, _Name, <<"connector">>, ConnectorType, ConnecorName] ->
[<<"action">>, _Type, _Name, <<"connector">>, ConnectorType, ConnecorName] ->
<<"connector:", ConnectorType/binary, ":", ConnecorName/binary>>;
_X ->
error({error, iolist_to_binary(io_lib:format("Invalid bridge V2 ID: ~p", [Id]))})
error({error, iolist_to_binary(io_lib:format("Invalid action ID: ~p", [Id]))})
end.
to_existing_atom(X) ->

View File

@ -35,12 +35,12 @@
%% API callbacks
-export([
'/bridges_v2'/2,
'/bridges_v2/:id'/2,
'/bridges_v2/:id/enable/:enable'/2,
'/bridges_v2/:id/:operation'/2,
'/nodes/:node/bridges_v2/:id/:operation'/2,
'/bridges_v2_probe'/2
'/actions'/2,
'/actions/:id'/2,
'/actions/:id/enable/:enable'/2,
'/actions/:id/:operation'/2,
'/nodes/:node/actions/:id/:operation'/2,
'/actions_probe'/2
]).
%% BpAPI
@ -67,19 +67,19 @@
end
).
namespace() -> "bridge_v2".
namespace() -> "actions".
api_spec() ->
emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}).
paths() ->
[
"/bridges_v2",
"/bridges_v2/:id",
"/bridges_v2/:id/enable/:enable",
"/bridges_v2/:id/:operation",
"/nodes/:node/bridges_v2/:id/:operation",
"/bridges_v2_probe"
"/actions",
"/actions/:id",
"/actions/:id/enable/:enable",
"/actions/:id/:operation",
"/nodes/:node/actions/:id/:operation",
"/actions_probe"
].
error_schema(Code, Message) when is_atom(Code) ->
@ -123,6 +123,18 @@ param_path_id() ->
}
)}.
param_qs_delete_cascade() ->
{also_delete_dep_actions,
mk(
boolean(),
#{
in => query,
required => false,
default => false,
desc => ?DESC("desc_qs_also_delete_dep_actions")
}
)}.
param_path_operation_cluster() ->
{operation,
mk(
@ -171,11 +183,11 @@ param_path_enable() ->
}
)}.
schema("/bridges_v2") ->
schema("/actions") ->
#{
'operationId' => '/bridges_v2',
'operationId' => '/actions',
get => #{
tags => [<<"bridges_v2">>],
tags => [<<"actions">>],
summary => <<"List bridges">>,
description => ?DESC("desc_api1"),
responses => #{
@ -186,7 +198,7 @@ schema("/bridges_v2") ->
}
},
post => #{
tags => [<<"bridges_v2">>],
tags => [<<"actions">>],
summary => <<"Create bridge">>,
description => ?DESC("desc_api2"),
'requestBody' => emqx_dashboard_swagger:schema_with_examples(
@ -199,11 +211,11 @@ schema("/bridges_v2") ->
}
}
};
schema("/bridges_v2/:id") ->
schema("/actions/:id") ->
#{
'operationId' => '/bridges_v2/:id',
'operationId' => '/actions/:id',
get => #{
tags => [<<"bridges_v2">>],
tags => [<<"actions">>],
summary => <<"Get bridge">>,
description => ?DESC("desc_api3"),
parameters => [param_path_id()],
@ -213,7 +225,7 @@ schema("/bridges_v2/:id") ->
}
},
put => #{
tags => [<<"bridges_v2">>],
tags => [<<"actions">>],
summary => <<"Update bridge">>,
description => ?DESC("desc_api4"),
parameters => [param_path_id()],
@ -228,10 +240,10 @@ schema("/bridges_v2/:id") ->
}
},
delete => #{
tags => [<<"bridges_v2">>],
tags => [<<"actions">>],
summary => <<"Delete bridge">>,
description => ?DESC("desc_api5"),
parameters => [param_path_id()],
parameters => [param_path_id(), param_qs_delete_cascade()],
responses => #{
204 => <<"Bridge deleted">>,
400 => error_schema(
@ -243,12 +255,12 @@ schema("/bridges_v2/:id") ->
}
}
};
schema("/bridges_v2/:id/enable/:enable") ->
schema("/actions/:id/enable/:enable") ->
#{
'operationId' => '/bridges_v2/:id/enable/:enable',
'operationId' => '/actions/:id/enable/:enable',
put =>
#{
tags => [<<"bridges_v2">>],
tags => [<<"actions">>],
summary => <<"Enable or disable bridge">>,
desc => ?DESC("desc_enable_bridge"),
parameters => [param_path_id(), param_path_enable()],
@ -262,11 +274,11 @@ schema("/bridges_v2/:id/enable/:enable") ->
}
}
};
schema("/bridges_v2/:id/:operation") ->
schema("/actions/:id/:operation") ->
#{
'operationId' => '/bridges_v2/:id/:operation',
'operationId' => '/actions/:id/:operation',
post => #{
tags => [<<"bridges_v2">>],
tags => [<<"actions">>],
summary => <<"Manually start a bridge">>,
description => ?DESC("desc_api7"),
parameters => [
@ -284,12 +296,12 @@ schema("/bridges_v2/:id/:operation") ->
}
}
};
schema("/nodes/:node/bridges_v2/:id/:operation") ->
schema("/nodes/:node/actions/:id/:operation") ->
#{
'operationId' => '/nodes/:node/bridges_v2/:id/:operation',
'operationId' => '/nodes/:node/actions/:id/:operation',
post => #{
tags => [<<"bridges_v2">>],
summary => <<"Manually start a bridge">>,
tags => [<<"actions">>],
summary => <<"Manually start a bridge on a given node">>,
description => ?DESC("desc_api8"),
parameters => [
param_path_node(),
@ -310,11 +322,11 @@ schema("/nodes/:node/bridges_v2/:id/:operation") ->
}
}
};
schema("/bridges_v2_probe") ->
schema("/actions_probe") ->
#{
'operationId' => '/bridges_v2_probe',
'operationId' => '/actions_probe',
post => #{
tags => [<<"bridges_v2">>],
tags => [<<"actions">>],
desc => ?DESC("desc_api9"),
summary => <<"Test creating bridge">>,
'requestBody' => emqx_dashboard_swagger:schema_with_examples(
@ -328,7 +340,7 @@ schema("/bridges_v2_probe") ->
}
}.
'/bridges_v2'(post, #{body := #{<<"type">> := BridgeType, <<"name">> := BridgeName} = Conf0}) ->
'/actions'(post, #{body := #{<<"type">> := BridgeType, <<"name">> := BridgeName} = Conf0}) ->
case emqx_bridge_v2:lookup(BridgeType, BridgeName) of
{ok, _} ->
?BAD_REQUEST('ALREADY_EXISTS', <<"bridge already exists">>);
@ -336,7 +348,7 @@ schema("/bridges_v2_probe") ->
Conf = filter_out_request_body(Conf0),
create_bridge(BridgeType, BridgeName, Conf)
end;
'/bridges_v2'(get, _Params) ->
'/actions'(get, _Params) ->
Nodes = mria:running_nodes(),
NodeReplies = emqx_bridge_proto_v5:v2_list_bridges_on_nodes(Nodes),
case is_ok(NodeReplies) of
@ -350,9 +362,9 @@ schema("/bridges_v2_probe") ->
?INTERNAL_ERROR(Reason)
end.
'/bridges_v2/:id'(get, #{bindings := #{id := Id}}) ->
'/actions/:id'(get, #{bindings := #{id := Id}}) ->
?TRY_PARSE_ID(Id, lookup_from_all_nodes(BridgeType, BridgeName, 200));
'/bridges_v2/:id'(put, #{bindings := #{id := Id}, body := Conf0}) ->
'/actions/:id'(put, #{bindings := #{id := Id}, body := Conf0}) ->
Conf1 = filter_out_request_body(Conf0),
?TRY_PARSE_ID(
Id,
@ -365,19 +377,35 @@ schema("/bridges_v2_probe") ->
?BRIDGE_NOT_FOUND(BridgeType, BridgeName)
end
);
'/bridges_v2/:id'(delete, #{bindings := #{id := Id}}) ->
'/actions/:id'(delete, #{bindings := #{id := Id}, query_string := Qs}) ->
?TRY_PARSE_ID(
Id,
case emqx_bridge_v2:lookup(BridgeType, BridgeName) of
{ok, _} ->
case emqx_bridge_v2:remove(BridgeType, BridgeName) of
AlsoDeleteActions =
case maps:get(<<"also_delete_dep_actions">>, Qs, <<"false">>) of
<<"true">> -> true;
true -> true;
_ -> false
end,
case
emqx_bridge_v2:check_deps_and_remove(BridgeType, BridgeName, AlsoDeleteActions)
of
ok ->
?NO_CONTENT;
{error, {active_channels, Channels}} ->
?BAD_REQUEST(
{<<"Cannot delete bridge while there are active channels defined for this bridge">>,
Channels}
);
{error, #{
reason := rules_depending_on_this_bridge,
rule_ids := RuleIds
}} ->
RuleIdLists = [binary_to_list(iolist_to_binary(X)) || X <- RuleIds],
RulesStr = string:join(RuleIdLists, ", "),
Msg = io_lib:format(
"Cannot delete bridge while active rules are depending on it: ~s\n"
"Append ?also_delete_dep_actions=true to the request URL to delete "
"rule actions that depend on this bridge as well.",
[RulesStr]
),
?BAD_REQUEST(iolist_to_binary(Msg));
{error, timeout} ->
?SERVICE_UNAVAILABLE(<<"request timeout">>);
{error, Reason} ->
@ -388,13 +416,13 @@ schema("/bridges_v2_probe") ->
end
).
'/bridges_v2/:id/enable/:enable'(put, #{bindings := #{id := Id, enable := Enable}}) ->
'/actions/:id/enable/:enable'(put, #{bindings := #{id := Id, enable := Enable}}) ->
?TRY_PARSE_ID(
Id,
case emqx_bridge_v2:disable_enable(enable_func(Enable), BridgeType, BridgeName) of
{ok, _} ->
?NO_CONTENT;
{error, {pre_config_update, _, not_found}} ->
{error, {pre_config_update, _, bridge_not_found}} ->
?BRIDGE_NOT_FOUND(BridgeType, BridgeName);
{error, {_, _, timeout}} ->
?SERVICE_UNAVAILABLE(<<"request timeout">>);
@ -405,7 +433,7 @@ schema("/bridges_v2_probe") ->
end
).
'/bridges_v2/:id/:operation'(post, #{
'/actions/:id/:operation'(post, #{
bindings :=
#{id := Id, operation := Op}
}) ->
@ -418,7 +446,7 @@ schema("/bridges_v2_probe") ->
end
).
'/nodes/:node/bridges_v2/:id/:operation'(post, #{
'/nodes/:node/actions/:id/:operation'(post, #{
bindings :=
#{id := Id, operation := Op, node := Node}
}) ->
@ -433,8 +461,8 @@ schema("/bridges_v2_probe") ->
end
).
'/bridges_v2_probe'(post, Request) ->
RequestMeta = #{module => ?MODULE, method => post, path => "/bridges_v2_probe"},
'/actions_probe'(post, Request) ->
RequestMeta = #{module => ?MODULE, method => post, path => "/actions_probe"},
case emqx_dashboard_swagger:filter_check_request_and_translate_body(Request, RequestMeta) of
{ok, #{body := #{<<"type">> := ConnType} = Params}} ->
Params1 = maybe_deobfuscate_bridge_probe(Params),
@ -578,9 +606,7 @@ call_operation(NodeOrAll, OperFunc, Args = [_Nodes, BridgeType, BridgeName]) ->
?SERVICE_UNAVAILABLE(<<"Bridge not found on remote node: ", BridgeId/binary>>);
{error, {node_not_found, Node}} ->
?NOT_FOUND(<<"Node not found: ", (atom_to_binary(Node))/binary>>);
{error, {unhealthy_target, Message}} ->
?BAD_REQUEST(Message);
{error, Reason} when not is_tuple(Reason); element(1, Reason) =/= 'exit' ->
{error, Reason} ->
?BAD_REQUEST(redact(Reason))
end.
@ -716,10 +742,31 @@ update_bridge(BridgeType, BridgeName, Conf) ->
create_or_update_bridge(BridgeType, BridgeName, Conf, 200).
create_or_update_bridge(BridgeType, BridgeName, Conf, HttpStatusCode) ->
Check =
try
is_binary(BridgeType) andalso emqx_resource:validate_type(BridgeType),
ok = emqx_resource:validate_name(BridgeName)
catch
throw:Error ->
?BAD_REQUEST(map_to_json(Error))
end,
case Check of
ok ->
do_create_or_update_bridge(BridgeType, BridgeName, Conf, HttpStatusCode);
BadRequest ->
BadRequest
end.
do_create_or_update_bridge(BridgeType, BridgeName, Conf, HttpStatusCode) ->
case emqx_bridge_v2:create(BridgeType, BridgeName, Conf) of
{ok, _} ->
lookup_from_all_nodes(BridgeType, BridgeName, HttpStatusCode);
{error, Reason} when is_map(Reason) ->
{error, {PreOrPostConfigUpdate, _HandlerMod, Reason}} when
PreOrPostConfigUpdate =:= pre_config_update;
PreOrPostConfigUpdate =:= post_config_update
->
?BAD_REQUEST(map_to_json(redact(Reason)));
{error, Reason} ->
?BAD_REQUEST(map_to_json(redact(Reason)))
end.

View File

@ -31,24 +31,24 @@ schema_modules() ->
emqx_bridge_azure_event_hub
].
fields(bridges_v2) ->
bridge_v2_structs().
fields(actions) ->
action_structs().
bridge_v2_structs() ->
action_structs() ->
[
{kafka_producer,
mk(
hoconsc:map(name, ref(emqx_bridge_kafka, kafka_producer_action)),
#{
desc => <<"Kafka Producer Bridge V2 Config">>,
desc => <<"Kafka Producer Actions Config">>,
required => false
}
)},
{azure_event_hub_producer,
mk(
hoconsc:map(name, ref(emqx_bridge_azure_event_hub, bridge_v2)),
hoconsc:map(name, ref(emqx_bridge_azure_event_hub, actions)),
#{
desc => <<"Azure Event Hub Bridge V2 Config">>,
desc => <<"Azure Event Hub Actions Config">>,
required => false
}
)}

View File

@ -18,6 +18,7 @@
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("eunit/include/eunit.hrl").
-import(hoconsc, [mk/2, ref/2]).
@ -29,6 +30,8 @@
post_request/0
]).
-export([enterprise_api_schemas/1]).
-if(?EMQX_RELEASE_EDITION == ee).
enterprise_api_schemas(Method) ->
%% We *must* do this to ensure the module is really loaded, especially when we use
@ -45,7 +48,7 @@ enterprise_fields_actions() ->
_ = emqx_bridge_v2_enterprise:module_info(),
case erlang:function_exported(emqx_bridge_v2_enterprise, fields, 1) of
true ->
emqx_bridge_v2_enterprise:fields(bridges_v2);
emqx_bridge_v2_enterprise:fields(actions);
false ->
[]
end.
@ -70,7 +73,7 @@ post_request() ->
api_schema("post").
api_schema(Method) ->
EE = enterprise_api_schemas(Method),
EE = ?MODULE:enterprise_api_schemas(Method),
hoconsc:union(bridge_api_union(EE)).
bridge_api_union(Refs) ->
@ -100,28 +103,69 @@ bridge_api_union(Refs) ->
%% HOCON Schema Callbacks
%%======================================================================================
namespace() -> "bridges_v2".
namespace() -> "actions".
tags() ->
[<<"Bridge V2">>].
[<<"Actions">>].
-dialyzer({nowarn_function, roots/0}).
roots() ->
case fields(bridges_v2) of
case fields(actions) of
[] ->
[
{bridges_v2,
{actions,
?HOCON(hoconsc:map(name, typerefl:map()), #{importance => ?IMPORTANCE_LOW})}
];
_ ->
[{bridges_v2, ?HOCON(?R_REF(bridges_v2), #{importance => ?IMPORTANCE_LOW})}]
[{actions, ?HOCON(?R_REF(actions), #{importance => ?IMPORTANCE_LOW})}]
end.
fields(bridges_v2) ->
fields(actions) ->
[] ++ enterprise_fields_actions().
desc(bridges_v2) ->
desc(actions) ->
?DESC("desc_bridges_v2");
desc(_) ->
undefined.
-ifdef(TEST).
-include_lib("hocon/include/hocon_types.hrl").
schema_homogeneous_test() ->
case
lists:filtermap(
fun({_Name, Schema}) ->
is_bad_schema(Schema)
end,
fields(actions)
)
of
[] ->
ok;
List ->
throw(List)
end.
is_bad_schema(#{type := ?MAP(_, ?R_REF(Module, TypeName))}) ->
Fields = Module:fields(TypeName),
ExpectedFieldNames = common_field_names(),
MissingFileds = lists:filter(
fun(Name) -> lists:keyfind(Name, 1, Fields) =:= false end, ExpectedFieldNames
),
case MissingFileds of
[] ->
false;
_ ->
{true, #{
schema_modle => Module,
type_name => TypeName,
missing_fields => MissingFileds
}}
end.
common_field_names() ->
[
enable, description, local_topic, connector, resource_opts, parameters
].
-endif.

View File

@ -0,0 +1,808 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_bridge_v1_compatibility_layer_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("typerefl/include/types.hrl").
-import(emqx_common_test_helpers, [on_exit/1]).
%%------------------------------------------------------------------------------
%% CT boilerplate
%%------------------------------------------------------------------------------
%% CT callback: run every exported t_* test case in this module.
all() ->
    emqx_common_test_helpers:all(?MODULE).
%% CT callback: start the applications this suite depends on (see
%% app_specs/0) and initialize the management API test helpers.
%% The started app handles are kept in Config for end_per_suite/1.
init_per_suite(Config) ->
    Apps = emqx_cth_suite:start(
        app_specs(),
        #{work_dir => emqx_cth_suite:work_dir(Config)}
    ),
    emqx_mgmt_api_test_util:init_suite(),
    [{apps, Apps} | Config].
%% CT callback: tear down the management API helpers and stop the
%% applications started in init_per_suite/1.
end_per_suite(Config) ->
    Apps = ?config(apps, Config),
    emqx_mgmt_api_test_util:end_suite(),
    emqx_cth_suite:stop(Apps),
    ok.
%% Applications required by this suite, in start order.
app_specs() ->
    [
        emqx,
        emqx_conf,
        emqx_connector,
        emqx_bridge,
        emqx_rule_engine
    ].
%% CT callback: install the meck mocks for the fake connector/bridge v2
%% machinery, create the ETS table used by wrap_fun/1, and create one
%% pre-existing connector that test cases can rely on.
init_per_testcase(_TestCase, Config) ->
    %% Setting up mocks for fake connector and bridge V2
    setup_mocks(),
    ets:new(fun_table_name(), [named_table, public]),
    %% Create a fake connector
    {ok, _} = emqx_connector:create(con_type(), con_name(), con_config()),
    [
        {mocked_mods, [
            emqx_connector_schema,
            emqx_connector_resource,
            emqx_bridge_v2
        ]}
        | Config
    ].
%% CT callback: undo everything init_per_testcase/2 set up — drop the
%% fun table, remove all bridges/connectors, unload all meck mocks and
%% run any on_exit/1 cleanups registered during the test.
end_per_testcase(_TestCase, _Config) ->
    ets:delete(fun_table_name()),
    delete_all_bridges_and_connectors(),
    meck:unload(),
    emqx_common_test_helpers:call_janitor(),
    ok.
%%------------------------------------------------------------------------------
%% Helper fns
%%------------------------------------------------------------------------------
%% Installs meck mocks so the generic bridge/connector machinery
%% resolves to this suite's fake connector module and bridge v2 type.
%% `catch meck:new/2' is used because a module may already be mocked
%% if a previous test case crashed before end_per_testcase/2 ran.
%% Fix: the original called meck:new(emqx_bridge_v2_schema, ...) twice;
%% one mock per module suffices, so the expectations are grouped here.
setup_mocks() ->
    MeckOpts = [passthrough, no_link, no_history],

    catch meck:new(emqx_connector_schema, MeckOpts),
    meck:expect(emqx_connector_schema, fields, 1, con_schema()),
    meck:expect(emqx_connector_schema, connector_type_to_bridge_types, 1, [con_type()]),

    catch meck:new(emqx_connector_resource, MeckOpts),
    meck:expect(emqx_connector_resource, connector_to_resource_type, 1, con_mod()),

    catch meck:new(emqx_bridge_v2_schema, MeckOpts),
    meck:expect(emqx_bridge_v2_schema, fields, 1, bridge_schema()),
    meck:expect(
        emqx_bridge_v2_schema,
        enterprise_api_schemas,
        1,
        fun(Method) -> [{bridge_type_bin(), hoconsc:ref(?MODULE, "api_" ++ Method)}] end
    ),

    catch meck:new(emqx_bridge_v2, MeckOpts),
    meck:expect(emqx_bridge_v2, bridge_v2_type_to_connector_type, 1, con_type()),
    meck:expect(emqx_bridge_v2, bridge_v1_type_to_bridge_v2_type, 1, bridge_type()),
    %% accept the bridge type both as an atom and as a binary, since
    %% HTTP API paths carry the type in binary form
    IsBridgeV2TypeFun = fun(Type) ->
        BridgeV2Type = bridge_type(),
        BridgeV2TypeBin = bridge_type_bin(),
        case Type of
            BridgeV2Type -> true;
            BridgeV2TypeBin -> true;
            _ -> false
        end
    end,
    meck:expect(emqx_bridge_v2, is_bridge_v2_type, 1, IsBridgeV2TypeFun),
    ok.
%% Resource module backing the mocked connector.
con_mod() ->
    emqx_bridge_v2_test_connector.
%% Connector type; reuses the bridge type so the v1 <-> v2 mapping is 1:1.
con_type() ->
    bridge_type().
%% Name of the pre-existing connector created in init_per_testcase/2.
con_name() ->
    my_connector.
%% Bridge (action) type used throughout the suite.
bridge_type() ->
    test_bridge_type.
%% Binary form of the bridge type, as used in HTTP payloads and paths.
bridge_type_bin() ->
    Type = bridge_type(),
    atom_to_binary(Type, utf8).
%% Hocon schema for the mocked connector root: a map of connector names
%% to the "connector" record defined by fields/1 in this module.
con_schema() ->
    [
        {
            con_type(),
            hoconsc:mk(
                hoconsc:map(name, hoconsc:ref(?MODULE, "connector")),
                #{
                    desc => <<"Test Connector Config">>,
                    required => false
                }
            )
        }
    ].
%% Hocon field definitions used by the mocked schemas:
%% "connector" — minimal connector config; "api_post" — the action
%% creation API body, extending "connector" with routing fields.
fields("connector") ->
    [
        {enable, hoconsc:mk(any(), #{})},
        {resource_opts, hoconsc:mk(map(), #{})}
    ];
fields("api_post") ->
    [
        {connector, hoconsc:mk(binary(), #{})},
        {name, hoconsc:mk(binary(), #{})},
        {type, hoconsc:mk(bridge_type(), #{})},
        {send_to, hoconsc:mk(atom(), #{})}
        | fields("connector")
    ].
%% Config for the pre-existing connector created in init_per_testcase/2.
con_config() ->
    #{
        <<"enable">> => true,
        <<"resource_opts">> => #{
            %% Set this to a low value to make the test run faster
            <<"health_check_interval">> => 100
        }
    }.
%% Hocon schema for the mocked bridge v2 root: a map of bridge names to
%% free-form maps. The bridge type defaults to bridge_type/0 but can be
%% overridden via the `bridge_type' option.
bridge_schema() ->
    bridge_schema(_Opts = #{}).

bridge_schema(Opts) ->
    Type = maps:get(bridge_type, Opts, bridge_type()),
    [
        {
            Type,
            hoconsc:mk(
                hoconsc:map(name, typerefl:map()),
                #{
                    desc => <<"Test Bridge Config">>,
                    required => false
                }
            )
        }
    ].
%% Base bridge config used by the create/update helpers; points at the
%% pre-existing connector and the suite's registered receiver process.
bridge_config() ->
    #{
        <<"connector">> => atom_to_binary(con_name()),
        <<"enable">> => true,
        <<"send_to">> => registered_process_name(),
        <<"resource_opts">> => #{
            <<"resume_interval">> => 100
        }
    }.
%% Name of the ETS table backing wrap_fun/1 / unwrap_fun/1.
fun_table_name() ->
    emqx_bridge_v1_compatibility_layer_SUITE_fun_table.
%% Registered name of the process that receives test bridge messages.
registered_process_name() ->
    my_registered_process.
%% Removes every bridge v2 and every connector, then resets the
%% `actions' root config to an empty map. Used between test cases to
%% restore a clean state.
delete_all_bridges_and_connectors() ->
    RemoveBridge = fun(#{name := Name, type := Type}) ->
        ct:pal("removing bridge ~p", [{Type, Name}]),
        emqx_bridge_v2:remove(Type, Name)
    end,
    RemoveConnector = fun(#{name := Name, type := Type}) ->
        ct:pal("removing connector ~p", [{Type, Name}]),
        emqx_connector:remove(Type, Name)
    end,
    lists:foreach(RemoveBridge, emqx_bridge_v2:list()),
    lists:foreach(RemoveConnector, emqx_connector:list()),
    update_root_config(#{}),
    ok.
%% Hocon does not support placing a fun in a config map, so we stash the
%% fun in an ETS table and put a unique base64 string key in the config
%% instead; unwrap_fun/1 performs the reverse lookup.
wrap_fun(Fun) ->
    UniqRef = make_ref(),
    UniqRefBin = term_to_binary(UniqRef),
    UniqRefStr = iolist_to_binary(base64:encode(UniqRefBin)),
    ets:insert(fun_table_name(), {UniqRefStr, Fun}),
    UniqRefStr.
%% Reverse of wrap_fun/1: look the fun back up by its string key.
unwrap_fun(UniqRefStr) ->
    ets:lookup_element(fun_table_name(), UniqRefStr, 2).
%% Replaces the whole `actions' config root with RootConf (cluster-wide).
update_root_config(RootConf) ->
    emqx_conf:update([actions], RootConf, #{override_to => cluster}).
%% Removes every v1 bridge, then force-cleans any leftover resource
%% instances and the `bridges' config root.
delete_all_bridges() ->
    lists:foreach(
        fun(#{name := Name, type := Type}) ->
            ok = emqx_bridge:remove(Type, Name)
        end,
        emqx_bridge:list()
    ),
    %% at some point during the tests, sometimes `emqx_bridge:list()'
    %% returns an empty list, but `emqx:get_config([bridges])' returns
    %% a bunch of orphan test bridges...
    lists:foreach(fun emqx_resource:remove/1, emqx_resource:list_instances()),
    emqx_config:put([bridges], #{}),
    ok.
%% Best-effort JSON decode: returns the decoded term (maps for objects),
%% or the input unchanged when it is not valid JSON.
maybe_json_decode(Payload) ->
    Result = emqx_utils_json:safe_decode(Payload, [return_maps]),
    case Result of
        {ok, Decoded} -> Decoded;
        {error, _Reason} -> Payload
    end.
%% Performs an authenticated management API request and normalizes the
%% response: bodies are JSON-decoded when possible, and for error
%% responses a nested `message' field is additionally decoded since the
%% API sometimes returns it as embedded JSON.
request(Method, Path, Params) ->
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    Opts = #{return_all => true},
    case emqx_mgmt_api_test_util:request_api(Method, Path, "", AuthHeader, Params, Opts) of
        {ok, {Status, Headers, Body0}} ->
            Body = maybe_json_decode(Body0),
            {ok, {Status, Headers, Body}};
        {error, {Status, Headers, Body0}} ->
            Body =
                case emqx_utils_json:safe_decode(Body0, [return_maps]) of
                    {ok, Decoded0 = #{<<"message">> := Msg0}} ->
                        Msg = maybe_json_decode(Msg0),
                        Decoded0#{<<"message">> := Msg};
                    {ok, Decoded0} ->
                        Decoded0;
                    {error, _} ->
                        Body0
                end,
            {error, {Status, Headers, Body}};
        Error ->
            Error
    end.
%% GET /bridges — list bridges via the v1 HTTP API.
list_bridges_http_api_v1() ->
    Path = emqx_mgmt_api_test_util:api_path(["bridges"]),
    ct:pal("list bridges (http v1)"),
    Res = request(get, Path, _Params = []),
    ct:pal("list bridges (http v1) result:\n  ~p", [Res]),
    Res.
%% GET /actions — list bridges via the v2 (actions) HTTP API.
list_bridges_http_api_v2() ->
    Path = emqx_mgmt_api_test_util:api_path(["actions"]),
    ct:pal("list bridges (http v2)"),
    Res = request(get, Path, _Params = []),
    ct:pal("list bridges (http v2) result:\n  ~p", [Res]),
    Res.
%% GET /connectors — list connectors via the HTTP API.
list_connectors_http() ->
    Path = emqx_mgmt_api_test_util:api_path(["connectors"]),
    ct:pal("list connectors"),
    Res = request(get, Path, _Params = []),
    ct:pal("list connectors result:\n  ~p", [Res]),
    Res.
%% GET /bridges/:id — fetch one bridge via the v1 HTTP API.
get_bridge_http_api_v1(Name) ->
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId]),
    ct:pal("get bridge (http v1) (~p)", [#{name => Name}]),
    Res = request(get, Path, _Params = []),
    ct:pal("get bridge (http v1) (~p) result:\n  ~p", [#{name => Name}, Res]),
    Res.
%% GET /actions/:id — fetch one bridge via the v2 HTTP API.
get_bridge_http_api_v2(Name) ->
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["actions", BridgeId]),
    ct:pal("get bridge (http v2) (~p)", [#{name => Name}]),
    Res = request(get, Path, _Params = []),
    ct:pal("get bridge (http v2) (~p) result:\n  ~p", [#{name => Name}, Res]),
    Res.
%% GET /connectors/:id — fetch one connector via the HTTP API.
get_connector_http(Name) ->
    ConnectorId = emqx_connector_resource:connector_id(con_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["connectors", ConnectorId]),
    ct:pal("get connector (~p)", [#{name => Name, id => ConnectorId}]),
    Res = request(get, Path, _Params = []),
    ct:pal("get connector (~p) result:\n  ~p", [#{name => Name}, Res]),
    Res.
%% POST /bridges — create a bridge via the v1 HTTP API. The v1 body
%% must not carry a `connector' key, so it is stripped from the base
%% config after applying `overrides'.
create_bridge_http_api_v1(Opts) ->
    Name = maps:get(name, Opts),
    Overrides = maps:get(overrides, Opts, #{}),
    BridgeConfig0 = emqx_utils_maps:deep_merge(bridge_config(), Overrides),
    BridgeConfig = maps:without([<<"connector">>], BridgeConfig0),
    Params = BridgeConfig#{<<"type">> => bridge_type_bin(), <<"name">> => Name},
    Path = emqx_mgmt_api_test_util:api_path(["bridges"]),
    ct:pal("creating bridge (http v1): ~p", [Params]),
    Res = request(post, Path, Params),
    ct:pal("bridge create (http v1) result:\n  ~p", [Res]),
    Res.
%% POST /actions — create a bridge via the v2 HTTP API (the `connector'
%% key is kept, unlike in the v1 helper).
create_bridge_http_api_v2(Opts) ->
    Name = maps:get(name, Opts),
    Overrides = maps:get(overrides, Opts, #{}),
    BridgeConfig = emqx_utils_maps:deep_merge(bridge_config(), Overrides),
    Params = BridgeConfig#{<<"type">> => bridge_type_bin(), <<"name">> => Name},
    Path = emqx_mgmt_api_test_util:api_path(["actions"]),
    ct:pal("creating bridge (http v2): ~p", [Params]),
    Res = request(post, Path, Params),
    ct:pal("bridge create (http v2) result:\n  ~p", [Res]),
    Res.
%% PUT /bridges/:id — update a bridge via the v1 HTTP API; like the v1
%% create helper, the `connector' key is stripped from the body.
update_bridge_http_api_v1(Opts) ->
    Name = maps:get(name, Opts),
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Overrides = maps:get(overrides, Opts, #{}),
    BridgeConfig0 = emqx_utils_maps:deep_merge(bridge_config(), Overrides),
    BridgeConfig = maps:without([<<"connector">>], BridgeConfig0),
    Params = BridgeConfig,
    Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId]),
    ct:pal("updating bridge (http v1): ~p", [Params]),
    Res = request(put, Path, Params),
    ct:pal("bridge update (http v1) result:\n  ~p", [Res]),
    Res.
%% DELETE /bridges/:id — delete a bridge via the v1 HTTP API.
delete_bridge_http_api_v1(Opts) ->
    Name = maps:get(name, Opts),
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId]),
    ct:pal("deleting bridge (http v1)"),
    Res = request(delete, Path, _Params = []),
    ct:pal("bridge delete (http v1) result:\n  ~p", [Res]),
    Res.
%% DELETE /actions/:id — delete a bridge via the v2 HTTP API.
delete_bridge_http_api_v2(Opts) ->
    Name = maps:get(name, Opts),
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["actions", BridgeId]),
    ct:pal("deleting bridge (http v2)"),
    Res = request(delete, Path, _Params = []),
    ct:pal("bridge delete (http v2) result:\n  ~p", [Res]),
    Res.
%% PUT /bridges/:id/enable/true — enable a bridge via the v1 HTTP API.
enable_bridge_http_api_v1(Name) ->
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId, "enable", "true"]),
    ct:pal("enabling bridge (http v1)"),
    Res = request(put, Path, _Params = []),
    ct:pal("bridge enable (http v1) result:\n  ~p", [Res]),
    Res.
%% PUT /actions/:id/enable/true — enable a bridge via the v2 HTTP API.
enable_bridge_http_api_v2(Name) ->
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["actions", BridgeId, "enable", "true"]),
    ct:pal("enabling bridge (http v2)"),
    Res = request(put, Path, _Params = []),
    ct:pal("bridge enable (http v2) result:\n  ~p", [Res]),
    Res.
%% PUT /bridges/:id/enable/false — disable a bridge via the v1 HTTP API.
disable_bridge_http_api_v1(Name) ->
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId, "enable", "false"]),
    ct:pal("disabling bridge (http v1)"),
    Res = request(put, Path, _Params = []),
    ct:pal("bridge disable (http v1) result:\n  ~p", [Res]),
    Res.
%% PUT /actions/:id/enable/false — disable a bridge via the v2 HTTP API.
disable_bridge_http_api_v2(Name) ->
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["actions", BridgeId, "enable", "false"]),
    ct:pal("disabling bridge (http v2)"),
    Res = request(put, Path, _Params = []),
    ct:pal("bridge disable (http v2) result:\n  ~p", [Res]),
    Res.
%% POST /bridges/:id/:op — run an operation (start/stop/restart) on a
%% bridge via the v1 HTTP API.
bridge_operation_http_api_v1(Name, Op0) ->
    Op = atom_to_list(Op0),
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId, Op]),
    ct:pal("bridge op ~p (http v1)", [Op]),
    Res = request(post, Path, _Params = []),
    ct:pal("bridge op ~p (http v1) result:\n  ~p", [Op, Res]),
    Res.
%% POST /actions/:id/:op — run an operation on a bridge via the v2 HTTP
%% API (only `start' is supported by v2 at the time of writing).
bridge_operation_http_api_v2(Name, Op0) ->
    Op = atom_to_list(Op0),
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["actions", BridgeId, Op]),
    ct:pal("bridge op ~p (http v2)", [Op]),
    Res = request(post, Path, _Params = []),
    ct:pal("bridge op ~p (http v2) result:\n  ~p", [Op, Res]),
    Res.
%% POST /nodes/:node/bridges/:id/:op — per-node bridge operation via the
%% v1 HTTP API.
bridge_node_operation_http_api_v1(Name, Node0, Op0) ->
    Op = atom_to_list(Op0),
    Node = atom_to_list(Node0),
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["nodes", Node, "bridges", BridgeId, Op]),
    ct:pal("bridge node op ~p (http v1)", [{Node, Op}]),
    Res = request(post, Path, _Params = []),
    ct:pal("bridge node op ~p (http v1) result:\n  ~p", [{Node, Op}, Res]),
    Res.
%% POST /nodes/:node/actions/:id/:op — per-node bridge operation via the
%% v2 HTTP API.
bridge_node_operation_http_api_v2(Name, Node0, Op0) ->
    Op = atom_to_list(Op0),
    Node = atom_to_list(Node0),
    BridgeId = emqx_bridge_resource:bridge_id(bridge_type(), Name),
    Path = emqx_mgmt_api_test_util:api_path(["nodes", Node, "actions", BridgeId, Op]),
    ct:pal("bridge node op ~p (http v2)", [{Node, Op}]),
    Res = request(post, Path, _Params = []),
    ct:pal("bridge node op ~p (http v2) result:\n  ~p", [{Node, Op}, Res]),
    Res.
%% Returns the `enable' flag of the rule; crashes if the rule is absent.
is_rule_enabled(RuleId) ->
    {ok, #{enable := Enable}} = emqx_rule_engine:get_rule(RuleId),
    Enable.
%% PUT /rules/:id — update a rule via the HTTP API.
update_rule_http(RuleId, Params) ->
    Path = emqx_mgmt_api_test_util:api_path(["rules", RuleId]),
    ct:pal("update rule ~p:\n  ~p", [RuleId, Params]),
    Res = request(put, Path, Params),
    ct:pal("update rule ~p result:\n  ~p", [RuleId, Res]),
    Res.
%% Convenience wrapper: set `enable => true' on a rule over HTTP.
enable_rule_http(RuleId) ->
    Params = #{<<"enable">> => true},
    update_rule_http(RuleId, Params).
%%------------------------------------------------------------------------------
%% Test cases
%%------------------------------------------------------------------------------
%% A bridge name longer than 255 characters must be rejected by the v1
%% create API with a 400 and a "Name is too long" message.
t_name_too_long(_Config) ->
    LongName = list_to_binary(lists:duplicate(256, $a)),
    ?assertMatch(
        {error,
            {{_, 400, _}, _, #{<<"message">> := #{<<"reason">> := <<"Name is too long", _/binary>>}}}},
        create_bridge_http_api_v1(#{name => LongName})
    ),
    ok.
t_scenario_1(_Config) ->
%% ===================================================================================
%% Pre-conditions
%% ===================================================================================
?assertMatch({ok, {{_, 200, _}, _, []}}, list_bridges_http_api_v1()),
?assertMatch({ok, {{_, 200, _}, _, []}}, list_bridges_http_api_v2()),
%% created in the test case init
?assertMatch({ok, {{_, 200, _}, _, [#{}]}}, list_connectors_http()),
{ok, {{_, 200, _}, _, [#{<<"name">> := PreexistentConnectorName}]}} = list_connectors_http(),
%% ===================================================================================
%% Create a single bridge v2. It should still be listed and functional when using v1
%% APIs.
%% ===================================================================================
NameA = <<"bridgev2a">>,
?assertMatch(
{ok, {{_, 201, _}, _, #{}}},
create_bridge_http_api_v1(#{name => NameA})
),
?assertMatch({ok, {{_, 200, _}, _, [#{<<"name">> := NameA}]}}, list_bridges_http_api_v1()),
?assertMatch({ok, {{_, 200, _}, _, [#{<<"name">> := NameA}]}}, list_bridges_http_api_v2()),
%% created a new one from the v1 API
?assertMatch({ok, {{_, 200, _}, _, [#{}, #{}]}}, list_connectors_http()),
?assertMatch({ok, {{_, 200, _}, _, #{<<"name">> := NameA}}}, get_bridge_http_api_v1(NameA)),
?assertMatch({ok, {{_, 200, _}, _, #{<<"name">> := NameA}}}, get_bridge_http_api_v2(NameA)),
?assertMatch({ok, {{_, 204, _}, _, _}}, disable_bridge_http_api_v1(NameA)),
?assertMatch({ok, {{_, 204, _}, _, _}}, enable_bridge_http_api_v1(NameA)),
?assertMatch({ok, {{_, 204, _}, _, _}}, disable_bridge_http_api_v2(NameA)),
?assertMatch({ok, {{_, 204, _}, _, _}}, enable_bridge_http_api_v2(NameA)),
?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v1(NameA, stop)),
?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v1(NameA, start)),
?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v1(NameA, restart)),
%% TODO: currently, only `start' op is supported by the v2 API.
%% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameA, stop)),
?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameA, start)),
%% TODO: currently, only `start' op is supported by the v2 API.
%% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameA, restart)),
?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v1(NameA, node(), stop)),
?assertMatch(
{ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v1(NameA, node(), start)
),
?assertMatch(
{ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v1(NameA, node(), restart)
),
%% TODO: currently, only `start' op is supported by the v2 API.
%% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v2(NameA, stop)),
?assertMatch(
{ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v2(NameA, node(), start)
),
%% TODO: currently, only `start' op is supported by the v2 API.
%% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v2(NameA, restart)),
{ok, {{_, 200, _}, _, #{<<"connector">> := GeneratedConnName}}} = get_bridge_http_api_v2(NameA),
?assertMatch(
{ok, {{_, 200, _}, _, #{<<"name">> := GeneratedConnName}}},
get_connector_http(GeneratedConnName)
),
%% ===================================================================================
%% Update the bridge using v1 API.
%% ===================================================================================
?assertMatch(
{ok, {{_, 200, _}, _, _}},
update_bridge_http_api_v1(#{name => NameA})
),
?assertMatch({ok, {{_, 200, _}, _, [#{<<"name">> := NameA}]}}, list_bridges_http_api_v1()),
?assertMatch({ok, {{_, 200, _}, _, [#{<<"name">> := NameA}]}}, list_bridges_http_api_v2()),
?assertMatch({ok, {{_, 200, _}, _, [#{}, #{}]}}, list_connectors_http()),
?assertMatch({ok, {{_, 200, _}, _, #{<<"name">> := NameA}}}, get_bridge_http_api_v1(NameA)),
?assertMatch({ok, {{_, 200, _}, _, #{<<"name">> := NameA}}}, get_bridge_http_api_v2(NameA)),
%% ===================================================================================
%% Now create a new bridge_v2 pointing to the same connector. It should no longer be
%% functions via v1 API, nor be listed in it. The new bridge must create a new
%% channel, so that this bridge is no longer considered v1.
%% ===================================================================================
NameB = <<"bridgev2b">>,
?assertMatch(
{ok, {{_, 201, _}, _, #{}}},
create_bridge_http_api_v2(#{
name => NameB, overrides => #{<<"connector">> => GeneratedConnName}
})
),
?assertMatch({ok, {{_, 200, _}, _, []}}, list_bridges_http_api_v1()),
?assertMatch(
{ok, {{_, 200, _}, _, [#{<<"name">> := _}, #{<<"name">> := _}]}}, list_bridges_http_api_v2()
),
?assertMatch({ok, {{_, 200, _}, _, [#{}, #{}]}}, list_connectors_http()),
?assertMatch({error, {{_, 404, _}, _, #{}}}, get_bridge_http_api_v1(NameA)),
?assertMatch({error, {{_, 404, _}, _, #{}}}, get_bridge_http_api_v1(NameB)),
?assertMatch({ok, {{_, 200, _}, _, #{<<"name">> := NameA}}}, get_bridge_http_api_v2(NameA)),
?assertMatch({ok, {{_, 200, _}, _, #{<<"name">> := NameB}}}, get_bridge_http_api_v2(NameB)),
?assertMatch(
{ok, {{_, 200, _}, _, #{<<"name">> := GeneratedConnName}}},
get_connector_http(GeneratedConnName)
),
?assertMatch({error, {{_, 400, _}, _, _}}, disable_bridge_http_api_v1(NameA)),
?assertMatch({error, {{_, 400, _}, _, _}}, enable_bridge_http_api_v1(NameA)),
?assertMatch({error, {{_, 400, _}, _, _}}, disable_bridge_http_api_v1(NameB)),
?assertMatch({error, {{_, 400, _}, _, _}}, enable_bridge_http_api_v1(NameB)),
?assertMatch({ok, {{_, 204, _}, _, _}}, disable_bridge_http_api_v2(NameA)),
?assertMatch({ok, {{_, 204, _}, _, _}}, enable_bridge_http_api_v2(NameA)),
?assertMatch({error, {{_, 400, _}, _, _}}, bridge_operation_http_api_v1(NameA, stop)),
?assertMatch({error, {{_, 400, _}, _, _}}, bridge_operation_http_api_v1(NameA, start)),
?assertMatch({error, {{_, 400, _}, _, _}}, bridge_operation_http_api_v1(NameA, restart)),
?assertMatch({error, {{_, 400, _}, _, _}}, bridge_operation_http_api_v1(NameB, stop)),
?assertMatch({error, {{_, 400, _}, _, _}}, bridge_operation_http_api_v1(NameB, start)),
?assertMatch({error, {{_, 400, _}, _, _}}, bridge_operation_http_api_v1(NameB, restart)),
%% TODO: currently, only `start' op is supported by the v2 API.
%% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameA, stop)),
?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameA, start)),
%% TODO: currently, only `start' op is supported by the v2 API.
%% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameA, restart)),
%% TODO: currently, only `start' op is supported by the v2 API.
%% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameB, stop)),
?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameB, start)),
%% TODO: currently, only `start' op is supported by the v2 API.
%% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameB, restart)),
?assertMatch(
{error, {{_, 400, _}, _, _}}, bridge_node_operation_http_api_v1(NameA, node(), stop)
),
?assertMatch(
{error, {{_, 400, _}, _, _}}, bridge_node_operation_http_api_v1(NameA, node(), start)
),
?assertMatch(
{error, {{_, 400, _}, _, _}}, bridge_node_operation_http_api_v1(NameA, node(), restart)
),
?assertMatch(
{error, {{_, 400, _}, _, _}}, bridge_node_operation_http_api_v1(NameB, node(), stop)
),
?assertMatch(
{error, {{_, 400, _}, _, _}}, bridge_node_operation_http_api_v1(NameB, node(), start)
),
?assertMatch(
{error, {{_, 400, _}, _, _}}, bridge_node_operation_http_api_v1(NameB, node(), restart)
),
%% TODO: currently, only `start' op is supported by the v2 API.
%% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v2(NameA, stop)),
%% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v2(NameB, stop)),
?assertMatch(
{ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v2(NameA, node(), start)
),
?assertMatch(
{ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v2(NameB, node(), start)
),
%% TODO: currently, only `start' op is supported by the v2 API.
%% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v2(NameA, restart)),
%% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_node_operation_http_api_v2(NameB, restart)),
%% ===================================================================================
%% Try to delete the original bridge using V1. It should fail and its connector
%% should be preserved.
%% ===================================================================================
?assertMatch(
{error, {{_, 400, _}, _, _}},
delete_bridge_http_api_v1(#{name => NameA})
),
?assertMatch({ok, {{_, 200, _}, _, []}}, list_bridges_http_api_v1()),
?assertMatch(
{ok, {{_, 200, _}, _, [#{<<"name">> := _}, #{<<"name">> := _}]}}, list_bridges_http_api_v2()
),
?assertMatch({ok, {{_, 200, _}, _, [#{}, #{}]}}, list_connectors_http()),
?assertMatch({error, {{_, 404, _}, _, #{}}}, get_bridge_http_api_v1(NameA)),
?assertMatch({error, {{_, 404, _}, _, #{}}}, get_bridge_http_api_v1(NameB)),
?assertMatch({ok, {{_, 200, _}, _, #{<<"name">> := NameA}}}, get_bridge_http_api_v2(NameA)),
?assertMatch({ok, {{_, 200, _}, _, #{<<"name">> := NameB}}}, get_bridge_http_api_v2(NameB)),
?assertMatch(
{ok, {{_, 200, _}, _, #{<<"name">> := GeneratedConnName}}},
get_connector_http(GeneratedConnName)
),
%% ===================================================================================
%% Delete the 2nd new bridge so it appears again in the V1 API.
%% ===================================================================================
?assertMatch(
{ok, {{_, 204, _}, _, _}},
delete_bridge_http_api_v2(#{name => NameB})
),
?assertMatch({ok, {{_, 200, _}, _, [#{<<"name">> := NameA}]}}, list_bridges_http_api_v1()),
?assertMatch({ok, {{_, 200, _}, _, [#{<<"name">> := NameA}]}}, list_bridges_http_api_v2()),
?assertMatch({ok, {{_, 200, _}, _, [#{}, #{}]}}, list_connectors_http()),
?assertMatch({ok, {{_, 200, _}, _, #{<<"name">> := NameA}}}, get_bridge_http_api_v1(NameA)),
?assertMatch({ok, {{_, 200, _}, _, #{<<"name">> := NameA}}}, get_bridge_http_api_v2(NameA)),
?assertMatch(
{ok, {{_, 200, _}, _, #{<<"name">> := GeneratedConnName}}},
get_connector_http(GeneratedConnName)
),
?assertMatch({ok, {{_, 204, _}, _, _}}, disable_bridge_http_api_v1(NameA)),
?assertMatch({ok, {{_, 204, _}, _, _}}, enable_bridge_http_api_v1(NameA)),
?assertMatch({ok, {{_, 204, _}, _, _}}, disable_bridge_http_api_v2(NameA)),
?assertMatch({ok, {{_, 204, _}, _, _}}, enable_bridge_http_api_v2(NameA)),
?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v1(NameA, stop)),
?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v1(NameA, start)),
?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v1(NameA, restart)),
%% TODO: currently, only `start' op is supported by the v2 API.
%% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameA, stop)),
?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameA, start)),
%% TODO: currently, only `start' op is supported by the v2 API.
%% ?assertMatch({ok, {{_, 204, _}, _, _}}, bridge_operation_http_api_v2(NameA, restart)),
%% ===================================================================================
%% Delete the last bridge using API v1. The generated connector should also be
%% removed.
%% ===================================================================================
?assertMatch(
{ok, {{_, 204, _}, _, _}},
delete_bridge_http_api_v1(#{name => NameA})
),
?assertMatch({ok, {{_, 200, _}, _, []}}, list_bridges_http_api_v1()),
?assertMatch({ok, {{_, 200, _}, _, []}}, list_bridges_http_api_v2()),
%% only the pre-existing one should remain.
?assertMatch(
{ok, {{_, 200, _}, _, [#{<<"name">> := PreexistentConnectorName}]}},
list_connectors_http()
),
?assertMatch(
{ok, {{_, 200, _}, _, #{<<"name">> := PreexistentConnectorName}}},
get_connector_http(PreexistentConnectorName)
),
?assertMatch({error, {{_, 404, _}, _, _}}, get_bridge_http_api_v1(NameA)),
?assertMatch({error, {{_, 404, _}, _, _}}, get_bridge_http_api_v2(NameA)),
?assertMatch({error, {{_, 404, _}, _, _}}, get_connector_http(GeneratedConnName)),
?assertMatch({error, {{_, 404, _}, _, _}}, disable_bridge_http_api_v1(NameA)),
?assertMatch({error, {{_, 404, _}, _, _}}, enable_bridge_http_api_v1(NameA)),
?assertMatch({error, {{_, 404, _}, _, _}}, disable_bridge_http_api_v2(NameA)),
?assertMatch({error, {{_, 404, _}, _, _}}, enable_bridge_http_api_v2(NameA)),
?assertMatch({error, {{_, 404, _}, _, _}}, bridge_operation_http_api_v1(NameA, stop)),
?assertMatch({error, {{_, 404, _}, _, _}}, bridge_operation_http_api_v1(NameA, start)),
?assertMatch({error, {{_, 404, _}, _, _}}, bridge_operation_http_api_v1(NameA, restart)),
%% TODO: currently, only `start' op is supported by the v2 API.
%% ?assertMatch({error, {{_, 404, _}, _, _}}, bridge_operation_http_api_v2(NameA, stop)),
?assertMatch({error, {{_, 404, _}, _, _}}, bridge_operation_http_api_v2(NameA, start)),
%% TODO: currently, only `start' op is supported by the v2 API.
%% ?assertMatch({error, {{_, 404, _}, _, _}}, bridge_operation_http_api_v2(NameA, restart)),
ok.
t_scenario_2(Config) ->
%% ===================================================================================
%% Pre-conditions
%% ===================================================================================
?assertMatch({ok, {{_, 200, _}, _, []}}, list_bridges_http_api_v1()),
?assertMatch({ok, {{_, 200, _}, _, []}}, list_bridges_http_api_v2()),
%% created in the test case init
?assertMatch({ok, {{_, 200, _}, _, [#{}]}}, list_connectors_http()),
{ok, {{_, 200, _}, _, [#{<<"name">> := _PreexistentConnectorName}]}} = list_connectors_http(),
%% ===================================================================================
%% Try to create a rule referencing a non-existent bridge. It succeeds, but it's
%% implicitly disabled. Trying to update it later without creating the bridge should
%% allow it to be enabled.
%% ===================================================================================
BridgeName = <<"scenario2">>,
RuleTopic = <<"t/scenario2">>,
{ok, #{<<"id">> := RuleId0}} =
emqx_bridge_v2_testlib:create_rule_and_action_http(
bridge_type(),
RuleTopic,
[
{bridge_name, BridgeName}
| Config
],
#{overrides => #{enable => true}}
),
?assert(is_rule_enabled(RuleId0)),
?assertMatch({ok, {{_, 200, _}, _, _}}, enable_rule_http(RuleId0)),
?assert(is_rule_enabled(RuleId0)),
%% ===================================================================================
%% Now we create the bridge, and attempt to create a new enabled rule. It should
%% start enabled. Also, updating the previous rule to enable it should work now.
%% ===================================================================================
?assertMatch(
{ok, {{_, 201, _}, _, #{}}},
create_bridge_http_api_v1(#{name => BridgeName})
),
{ok, #{<<"id">> := RuleId1}} =
emqx_bridge_v2_testlib:create_rule_and_action_http(
bridge_type(),
RuleTopic,
[
{bridge_name, BridgeName}
| Config
],
#{overrides => #{enable => true}}
),
?assert(is_rule_enabled(RuleId0)),
?assert(is_rule_enabled(RuleId1)),
?assertMatch({ok, {{_, 200, _}, _, _}}, enable_rule_http(RuleId0)),
?assert(is_rule_enabled(RuleId0)),
%% ===================================================================================
%% Creating a rule with mixed existent/non-existent bridges should allow enabling it.
%% ===================================================================================
NonExistentBridgeName = <<"scenario2_not_created">>,
{ok, #{<<"id">> := RuleId2}} =
emqx_bridge_v2_testlib:create_rule_and_action_http(
bridge_type(),
RuleTopic,
[
{bridge_name, BridgeName}
| Config
],
#{
overrides => #{
enable => true,
actions => [
emqx_bridge_resource:bridge_id(
bridge_type(),
BridgeName
),
emqx_bridge_resource:bridge_id(
bridge_type(),
NonExistentBridgeName
)
]
}
}
),
?assert(is_rule_enabled(RuleId2)),
?assertMatch({ok, {{_, 200, _}, _, _}}, enable_rule_http(RuleId2)),
?assert(is_rule_enabled(RuleId2)),
ok.

View File

@ -207,7 +207,7 @@ unwrap_fun(UniqRefStr) ->
ets:lookup_element(fun_table_name(), UniqRefStr, 2).
update_root_config(RootConf) ->
emqx_conf:update([bridges_v2], RootConf, #{override_to => cluster}).
emqx_conf:update([actions], RootConf, #{override_to => cluster}).
update_root_connectors_config(RootConf) ->
emqx_conf:update([connectors], RootConf, #{override_to => cluster}).
@ -238,12 +238,12 @@ t_create_dry_run_fail_add_channel(_) ->
{error, Msg}
end),
Conf1 = (bridge_config())#{on_add_channel_fun => OnAddChannel1},
{error, Msg} = emqx_bridge_v2:create_dry_run(bridge_type(), Conf1),
{error, _} = emqx_bridge_v2:create_dry_run(bridge_type(), Conf1),
OnAddChannel2 = wrap_fun(fun() ->
throw(Msg)
end),
Conf2 = (bridge_config())#{on_add_channel_fun => OnAddChannel2},
{error, Msg} = emqx_bridge_v2:create_dry_run(bridge_type(), Conf2),
{error, _} = emqx_bridge_v2:create_dry_run(bridge_type(), Conf2),
ok.
t_create_dry_run_fail_get_channel_status(_) ->
@ -252,7 +252,7 @@ t_create_dry_run_fail_get_channel_status(_) ->
{error, Msg}
end),
Conf1 = (bridge_config())#{on_get_channel_status_fun => Fun1},
{error, Msg} = emqx_bridge_v2:create_dry_run(bridge_type(), Conf1),
{error, _} = emqx_bridge_v2:create_dry_run(bridge_type(), Conf1),
Fun2 = wrap_fun(fun() ->
throw(Msg)
end),
@ -280,7 +280,9 @@ t_is_valid_bridge_v1(_) ->
t_manual_health_check(_) ->
{ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, bridge_config()),
%% Run a health check for the bridge
connected = emqx_bridge_v2:health_check(bridge_type(), my_test_bridge),
#{error := undefined, status := connected} = emqx_bridge_v2:health_check(
bridge_type(), my_test_bridge
),
ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
ok.
@ -290,7 +292,9 @@ t_manual_health_check_exception(_) ->
},
{ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, Conf),
%% Run a health check for the bridge
{error, _} = emqx_bridge_v2:health_check(bridge_type(), my_test_bridge),
#{error := my_error, status := disconnected} = emqx_bridge_v2:health_check(
bridge_type(), my_test_bridge
),
ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
ok.
@ -300,7 +304,9 @@ t_manual_health_check_exception_error(_) ->
},
{ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, Conf),
%% Run a health check for the bridge
{error, _} = emqx_bridge_v2:health_check(bridge_type(), my_test_bridge),
#{error := _, status := disconnected} = emqx_bridge_v2:health_check(
bridge_type(), my_test_bridge
),
ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
ok.
@ -310,7 +316,9 @@ t_manual_health_check_error(_) ->
},
{ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, Conf),
%% Run a health check for the bridge
{error, my_error} = emqx_bridge_v2:health_check(bridge_type(), my_test_bridge),
#{error := my_error, status := disconnected} = emqx_bridge_v2:health_check(
bridge_type(), my_test_bridge
),
ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
ok.
@ -484,6 +492,83 @@ t_send_message_unhealthy_connector(_) ->
ets:delete(ResponseETS),
ok.
t_connector_connected_to_connecting_to_connected_no_channel_restart(_) ->
ResponseETS = ets:new(response_ets, [public]),
ets:insert(ResponseETS, {on_start_value, conf}),
ets:insert(ResponseETS, {on_get_status_value, connected}),
OnStartFun = wrap_fun(fun(Conf) ->
case ets:lookup_element(ResponseETS, on_start_value, 2) of
conf ->
{ok, Conf};
V ->
V
end
end),
OnGetStatusFun = wrap_fun(fun() ->
ets:lookup_element(ResponseETS, on_get_status_value, 2)
end),
OnAddChannelCntr = counters:new(1, []),
OnAddChannelFun = wrap_fun(fun(_InstId, ConnectorState, _ChannelId, _ChannelConfig) ->
counters:add(OnAddChannelCntr, 1, 1),
{ok, ConnectorState}
end),
ConConfig = emqx_utils_maps:deep_merge(con_config(), #{
<<"on_start_fun">> => OnStartFun,
<<"on_get_status_fun">> => OnGetStatusFun,
<<"on_add_channel_fun">> => OnAddChannelFun,
<<"resource_opts">> => #{<<"start_timeout">> => 100}
}),
ConName = ?FUNCTION_NAME,
{ok, _} = emqx_connector:create(con_type(), ConName, ConConfig),
BridgeConf = (bridge_config())#{
<<"connector">> => atom_to_binary(ConName)
},
{ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, BridgeConf),
%% Wait until on_add_channel_fun is called at least once
wait_until(fun() ->
counters:get(OnAddChannelCntr, 1) =:= 1
end),
1 = counters:get(OnAddChannelCntr, 1),
%% We change the status of the connector
ets:insert(ResponseETS, {on_get_status_value, connecting}),
%% Wait until the status is changed
wait_until(fun() ->
{ok, BridgeData} = emqx_bridge_v2:lookup(bridge_type(), my_test_bridge),
maps:get(status, BridgeData) =:= connecting
end),
{ok, BridgeData1} = emqx_bridge_v2:lookup(bridge_type(), my_test_bridge),
ct:pal("Bridge V2 status changed to: ~p", [maps:get(status, BridgeData1)]),
%% We change the status again back to connected
ets:insert(ResponseETS, {on_get_status_value, connected}),
%% Wait until the status is connected again
wait_until(fun() ->
{ok, BridgeData2} = emqx_bridge_v2:lookup(bridge_type(), my_test_bridge),
maps:get(status, BridgeData2) =:= connected
end),
%% On add channel should not have been called again
1 = counters:get(OnAddChannelCntr, 1),
%% We change the status to an error
ets:insert(ResponseETS, {on_get_status_value, {error, my_error}}),
%% Wait until the status is changed
wait_until(fun() ->
{ok, BridgeData2} = emqx_bridge_v2:lookup(bridge_type(), my_test_bridge),
maps:get(status, BridgeData2) =:= disconnected
end),
%% Now we go back to connected
ets:insert(ResponseETS, {on_get_status_value, connected}),
wait_until(fun() ->
{ok, BridgeData2} = emqx_bridge_v2:lookup(bridge_type(), my_test_bridge),
maps:get(status, BridgeData2) =:= connected
end),
%% Now the channel should have been removed and added again
wait_until(fun() ->
counters:get(OnAddChannelCntr, 1) =:= 2
end),
ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
ok = emqx_connector:remove(con_type(), ConName),
ets:delete(ResponseETS),
ok.
t_unhealthy_channel_alarm(_) ->
Conf = (bridge_config())#{
<<"on_get_channel_status_fun">> =>
@ -499,7 +584,7 @@ t_unhealthy_channel_alarm(_) ->
get_bridge_v2_alarm_cnt() ->
Alarms = emqx_alarm:get_alarms(activated),
FilterFun = fun
(#{name := S}) when is_binary(S) -> string:find(S, "bridge_v2") =/= nomatch;
(#{name := S}) when is_binary(S) -> string:find(S, "action") =/= nomatch;
(_) -> false
end,
length(lists:filter(FilterFun, Alarms)).
@ -554,7 +639,7 @@ t_load_config_success(_Config) ->
BridgeNameBin = atom_to_binary(BridgeName),
%% pre-condition
?assertEqual(#{}, emqx_config:get([bridges_v2])),
?assertEqual(#{}, emqx_config:get([actions])),
%% create
RootConf0 = #{BridgeTypeBin => #{BridgeNameBin => Conf}},
@ -720,3 +805,58 @@ t_remove_multiple_connectors_being_referenced_without_channels(_Config) ->
end
),
ok.
t_start_operation_when_on_add_channel_gives_error(_Config) ->
Conf = bridge_config(),
BridgeName = my_test_bridge,
emqx_common_test_helpers:with_mock(
emqx_bridge_v2_test_connector,
on_add_channel,
fun(_, _, _ResId, _Channel) -> {error, <<"some_error">>} end,
fun() ->
%% We can crete the bridge event though on_add_channel returns error
?assertMatch({ok, _}, emqx_bridge_v2:create(bridge_type(), BridgeName, Conf)),
?assertMatch(
#{
status := disconnected,
error := <<"some_error">>
},
emqx_bridge_v2:health_check(bridge_type(), BridgeName)
),
?assertMatch(
{ok, #{
status := disconnected,
error := <<"some_error">>
}},
emqx_bridge_v2:lookup(bridge_type(), BridgeName)
),
%% emqx_bridge_v2:start/2 should return ok if bridge if connected after
%% start and otherwise and error
?assertMatch({error, _}, emqx_bridge_v2:start(bridge_type(), BridgeName)),
%% Let us change on_add_channel to be successful and try again
ok = meck:expect(
emqx_bridge_v2_test_connector,
on_add_channel,
fun(_, _, _ResId, _Channel) -> {ok, #{}} end
),
?assertMatch(ok, emqx_bridge_v2:start(bridge_type(), BridgeName))
end
),
ok.
%% Helper Functions
wait_until(Fun) ->
wait_until(Fun, 5000).
wait_until(Fun, Timeout) when Timeout >= 0 ->
case Fun() of
true ->
ok;
false ->
IdleTime = 100,
timer:sleep(IdleTime),
wait_until(Fun, Timeout - IdleTime)
end;
wait_until(_, _) ->
ct:fail("Wait until event did not happen").

View File

@ -24,7 +24,7 @@
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/test_macros.hrl").
-define(ROOT, "bridges_v2").
-define(ROOT, "actions").
-define(CONNECTOR_NAME, <<"my_connector">>).
@ -100,6 +100,11 @@
}).
-define(KAFKA_BRIDGE(Name), ?KAFKA_BRIDGE(Name, ?CONNECTOR_NAME)).
-define(KAFKA_BRIDGE_UPDATE(Name, Connector),
maps:without([<<"name">>, <<"type">>], ?KAFKA_BRIDGE(Name, Connector))
).
-define(KAFKA_BRIDGE_UPDATE(Name), ?KAFKA_BRIDGE_UPDATE(Name, ?CONNECTOR_NAME)).
%% -define(BRIDGE_TYPE_MQTT, <<"mqtt">>).
%% -define(MQTT_BRIDGE(SERVER, NAME), ?BRIDGE(NAME, ?BRIDGE_TYPE_MQTT)#{
%% <<"server">> => SERVER,
@ -147,7 +152,9 @@
emqx,
emqx_auth,
emqx_management,
{emqx_bridge, "bridges_v2 {}"}
emqx_connector,
{emqx_bridge, "actions {}"},
{emqx_rule_engine, "rule_engine { rules {} }"}
]).
-define(APPSPEC_DASHBOARD,
@ -214,8 +221,8 @@ mk_cluster(Name, Config, Opts) ->
Node2Apps = ?APPSPECS,
emqx_cth_cluster:start(
[
{emqx_bridge_api_SUITE_1, Opts#{role => core, apps => Node1Apps}},
{emqx_bridge_api_SUITE_2, Opts#{role => core, apps => Node2Apps}}
{emqx_bridge_v2_api_SUITE_1, Opts#{role => core, apps => Node1Apps}},
{emqx_bridge_v2_api_SUITE_2, Opts#{role => core, apps => Node2Apps}}
],
#{work_dir => filename:join(?config(priv_dir, Config), Name)}
).
@ -251,7 +258,7 @@ end_per_testcase(_TestCase, Config) ->
ok = emqx_common_test_helpers:call_janitor(),
ok.
-define(CONNECTOR_IMPL, dummy_connector_impl).
-define(CONNECTOR_IMPL, emqx_bridge_v2_dummy_connector).
init_mocks() ->
meck:new(emqx_connector_ee_schema, [passthrough, no_link]),
meck:expect(emqx_connector_ee_schema, resource_type, 1, ?CONNECTOR_IMPL),
@ -279,6 +286,9 @@ init_mocks() ->
meck:expect(?CONNECTOR_IMPL, on_add_channel, 4, {ok, connector_state}),
meck:expect(?CONNECTOR_IMPL, on_remove_channel, 3, {ok, connector_state}),
meck:expect(?CONNECTOR_IMPL, on_get_channel_status, 3, connected),
ok = meck:expect(?CONNECTOR_IMPL, on_get_channels, fun(ResId) ->
emqx_bridge_v2:get_channels_for_connector(ResId)
end),
[?CONNECTOR_IMPL, emqx_connector_ee_schema].
clear_resources() ->
@ -394,18 +404,102 @@ t_bridges_lifecycle(Config) ->
request_json(
put,
uri([?ROOT, BridgeID]),
maps:without(
[<<"type">>, <<"name">>],
?KAFKA_BRIDGE(?BRIDGE_NAME, <<"foobla">>)
),
?KAFKA_BRIDGE_UPDATE(?BRIDGE_NAME, <<"foobla">>),
Config
)
),
%% update bridge with unknown connector name
{ok, 400, #{
<<"code">> := <<"BAD_REQUEST">>,
<<"message">> := Message1
}} =
request_json(
put,
uri([?ROOT, BridgeID]),
?KAFKA_BRIDGE_UPDATE(?BRIDGE_NAME, <<"does_not_exist">>),
Config
),
?assertMatch(
#{<<"reason">> := <<"connector_not_found_or_wrong_type">>},
emqx_utils_json:decode(Message1)
),
%% update bridge with connector of wrong type
{ok, 201, _} =
request(
post,
uri(["connectors"]),
(?CONNECTOR(<<"foobla2">>))#{
<<"type">> => <<"azure_event_hub_producer">>,
<<"authentication">> => #{
<<"username">> => <<"emqxuser">>,
<<"password">> => <<"topSecret">>,
<<"mechanism">> => <<"plain">>
},
<<"ssl">> => #{
<<"enable">> => true,
<<"server_name_indication">> => <<"auto">>,
<<"verify">> => <<"verify_none">>,
<<"versions">> => [<<"tlsv1.3">>, <<"tlsv1.2">>]
}
},
Config
),
{ok, 400, #{
<<"code">> := <<"BAD_REQUEST">>,
<<"message">> := Message2
}} =
request_json(
put,
uri([?ROOT, BridgeID]),
?KAFKA_BRIDGE_UPDATE(?BRIDGE_NAME, <<"foobla2">>),
Config
),
?assertMatch(
#{<<"reason">> := <<"connector_not_found_or_wrong_type">>},
emqx_utils_json:decode(Message2)
),
%% delete the bridge
{ok, 204, <<>>} = request(delete, uri([?ROOT, BridgeID]), Config),
{ok, 200, []} = request_json(get, uri([?ROOT]), Config),
%% try create with unknown connector name
{ok, 400, #{
<<"code">> := <<"BAD_REQUEST">>,
<<"message">> := Message3
}} =
request_json(
post,
uri([?ROOT]),
?KAFKA_BRIDGE(?BRIDGE_NAME, <<"does_not_exist">>),
Config
),
?assertMatch(
#{<<"reason">> := <<"connector_not_found_or_wrong_type">>},
emqx_utils_json:decode(Message3)
),
%% try create bridge with connector of wrong type
{ok, 400, #{
<<"code">> := <<"BAD_REQUEST">>,
<<"message">> := Message4
}} =
request_json(
post,
uri([?ROOT]),
?KAFKA_BRIDGE(?BRIDGE_NAME, <<"foobla2">>),
Config
),
?assertMatch(
#{<<"reason">> := <<"connector_not_found_or_wrong_type">>},
emqx_utils_json:decode(Message4)
),
%% make sure nothing has been created above
{ok, 200, []} = request_json(get, uri([?ROOT]), Config),
%% update a deleted bridge returns an error
?assertMatch(
{ok, 404, #{
@ -415,15 +509,12 @@ t_bridges_lifecycle(Config) ->
request_json(
put,
uri([?ROOT, BridgeID]),
maps:without(
[<<"type">>, <<"name">>],
?KAFKA_BRIDGE(?BRIDGE_NAME)
),
?KAFKA_BRIDGE_UPDATE(?BRIDGE_NAME),
Config
)
),
%% Deleting a non-existing bridge should result in an error
%% deleting a non-existing bridge should result in an error
?assertMatch(
{ok, 404, #{
<<"code">> := <<"NOT_FOUND">>,
@ -443,6 +534,7 @@ t_bridges_lifecycle(Config) ->
%% Try create bridge with bad characters as name
{ok, 400, _} = request(post, uri([?ROOT]), ?KAFKA_BRIDGE(<<"隋达"/utf8>>), Config),
{ok, 400, _} = request(post, uri([?ROOT]), ?KAFKA_BRIDGE(<<"a.b">>), Config),
ok.
t_start_bridge_unknown_node(Config) ->
@ -503,6 +595,31 @@ do_start_bridge(TestType, Config) ->
{ok, 400, _} = request(post, {operation, TestType, invalidop, BridgeID}, Config),
%% Make start bridge fail
expect_on_all_nodes(
?CONNECTOR_IMPL,
on_add_channel,
fun(_, _, _ResId, _Channel) -> {error, <<"my_error">>} end,
Config
),
connector_operation(Config, ?BRIDGE_TYPE, ?CONNECTOR_NAME, stop),
connector_operation(Config, ?BRIDGE_TYPE, ?CONNECTOR_NAME, start),
{ok, 400, _} = request(post, {operation, TestType, start, BridgeID}, Config),
%% Make start bridge succeed
expect_on_all_nodes(
?CONNECTOR_IMPL,
on_add_channel,
fun(_, _, _ResId, _Channel) -> {ok, connector_state} end,
Config
),
%% try to start again
{ok, 204, <<>>} = request(post, {operation, TestType, start, BridgeID}, Config),
%% delete the bridge
{ok, 204, <<>>} = request(delete, uri([?ROOT, BridgeID]), Config),
{ok, 200, []} = request_json(get, uri([?ROOT]), Config),
@ -513,6 +630,41 @@ do_start_bridge(TestType, Config) ->
{ok, 404, _} = request(post, {operation, TestType, start, <<"webhook:cptn_hook">>}, Config),
ok.
expect_on_all_nodes(Mod, Function, Fun, Config) ->
case ?config(cluster_nodes, Config) of
undefined ->
ok = meck:expect(Mod, Function, Fun);
Nodes ->
[erpc:call(Node, meck, expect, [Mod, Function, Fun]) || Node <- Nodes]
end,
ok.
connector_operation(Config, ConnectorType, ConnectorName, OperationName) ->
case ?config(group, Config) of
cluster ->
case ?config(cluster_nodes, Config) of
undefined ->
Node = ?config(node, Config),
ok = rpc:call(
Node,
emqx_connector_resource,
OperationName,
[ConnectorType, ConnectorName],
500
);
Nodes ->
erpc:multicall(
Nodes,
emqx_connector_resource,
OperationName,
[ConnectorType, ConnectorName],
500
)
end;
_ ->
ok = emqx_connector_resource:OperationName(ConnectorType, ConnectorName)
end.
%% t_start_stop_inconsistent_bridge_node(Config) ->
%% start_stop_inconsistent_bridge(node, Config).
@ -626,7 +778,7 @@ do_start_bridge(TestType, Config) ->
t_bridges_probe(Config) ->
{ok, 204, <<>>} = request(
post,
uri(["bridges_v2_probe"]),
uri(["actions_probe"]),
?KAFKA_BRIDGE(?BRIDGE_NAME),
Config
),
@ -634,7 +786,7 @@ t_bridges_probe(Config) ->
%% second time with same name is ok since no real bridge created
{ok, 204, <<>>} = request(
post,
uri(["bridges_v2_probe"]),
uri(["actions_probe"]),
?KAFKA_BRIDGE(?BRIDGE_NAME),
Config
),
@ -648,7 +800,7 @@ t_bridges_probe(Config) ->
}},
request_json(
post,
uri(["bridges_v2_probe"]),
uri(["actions_probe"]),
?KAFKA_BRIDGE(<<"broken_bridge">>, <<"brokenhost:1234">>),
Config
)
@ -660,13 +812,80 @@ t_bridges_probe(Config) ->
{ok, 400, #{<<"code">> := <<"BAD_REQUEST">>}},
request_json(
post,
uri(["bridges_v2_probe"]),
uri(["actions_probe"]),
?RESOURCE(<<"broken_bridge">>, <<"unknown_type">>),
Config
)
),
ok.
t_cascade_delete_actions(Config) ->
%% assert we there's no bridges at first
{ok, 200, []} = request_json(get, uri([?ROOT]), Config),
%% then we add a a bridge, using POST
%% POST /actions/ will create a bridge
BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME),
{ok, 201, _} = request(
post,
uri([?ROOT]),
?KAFKA_BRIDGE(?BRIDGE_NAME),
Config
),
{ok, 201, #{<<"id">> := RuleId}} = request_json(
post,
uri(["rules"]),
#{
<<"name">> => <<"t_http_crud_apis">>,
<<"enable">> => true,
<<"actions">> => [BridgeID],
<<"sql">> => <<"SELECT * from \"t\"">>
},
Config
),
%% delete the bridge will also delete the actions from the rules
{ok, 204, _} = request(
delete,
uri([?ROOT, BridgeID]) ++ "?also_delete_dep_actions=true",
Config
),
{ok, 200, []} = request_json(get, uri([?ROOT]), Config),
?assertMatch(
{ok, 200, #{<<"actions">> := []}},
request_json(get, uri(["rules", RuleId]), Config)
),
{ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), Config),
{ok, 201, _} = request(
post,
uri([?ROOT]),
?KAFKA_BRIDGE(?BRIDGE_NAME),
Config
),
{ok, 201, _} = request(
post,
uri(["rules"]),
#{
<<"name">> => <<"t_http_crud_apis">>,
<<"enable">> => true,
<<"actions">> => [BridgeID],
<<"sql">> => <<"SELECT * from \"t\"">>
},
Config
),
{ok, 400, _} = request(
delete,
uri([?ROOT, BridgeID]),
Config
),
{ok, 200, [_]} = request_json(get, uri([?ROOT]), Config),
%% Cleanup
{ok, 204, _} = request(
delete,
uri([?ROOT, BridgeID]) ++ "?also_delete_dep_actions=true",
Config
),
{ok, 200, []} = request_json(get, uri([?ROOT]), Config).
%%% helpers
listen_on_random_port() ->
SockOpts = [binary, {active, false}, {packet, raw}, {reuseaddr, true}, {backlog, 1000}],

View File

@ -0,0 +1,31 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
%% this module is only intended to be mocked
-module(emqx_bridge_v2_dummy_connector).
-export([
callback_mode/0,
on_start/2,
on_stop/2,
on_add_channel/4,
on_get_channel_status/3
]).
callback_mode() -> error(unexpected).
on_start(_, _) -> error(unexpected).
on_stop(_, _) -> error(unexpected).
on_add_channel(_, _, _, _) -> error(unexpected).
on_get_channel_status(_, _, _) -> error(unexpected).

View File

@ -54,6 +54,14 @@ on_add_channel(
) ->
Fun = emqx_bridge_v2_SUITE:unwrap_fun(FunRef),
Fun();
on_add_channel(
InstId,
#{on_add_channel_fun := FunRef} = ConnectorState,
ChannelId,
ChannelConfig
) ->
Fun = emqx_bridge_v2_SUITE:unwrap_fun(FunRef),
Fun(InstId, ConnectorState, ChannelId, ChannelConfig);
on_add_channel(
_InstId,
State,
@ -118,8 +126,8 @@ on_get_channel_status(
ChannelId,
State
) ->
Channels = maps:get(channels, State),
ChannelState = maps:get(ChannelId, Channels),
Channels = maps:get(channels, State, #{}),
ChannelState = maps:get(ChannelId, Channels, #{}),
case ChannelState of
#{on_get_channel_status_fun := FunRef} ->
Fun = emqx_bridge_v2_SUITE:unwrap_fun(FunRef),

View File

@ -121,7 +121,7 @@ bridge_id(Config) ->
BridgeName = ?config(bridge_name, Config),
BridgeId = emqx_bridge_resource:bridge_id(BridgeType, BridgeName),
ConnectorId = emqx_bridge_resource:resource_id(BridgeType, BridgeName),
<<"bridge_v2:", BridgeId/binary, ":", ConnectorId/binary>>.
<<"action:", BridgeId/binary, ":", ConnectorId/binary>>.
resource_id(Config) ->
BridgeType = ?config(bridge_type, Config),
@ -161,7 +161,7 @@ create_bridge_api(Config, Overrides) ->
emqx_connector:create(ConnectorType, ConnectorName, ConnectorConfig),
Params = BridgeConfig#{<<"type">> => BridgeType, <<"name">> => BridgeName},
Path = emqx_mgmt_api_test_util:api_path(["bridges_v2"]),
Path = emqx_mgmt_api_test_util:api_path(["actions"]),
AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
Opts = #{return_all => true},
ct:pal("creating bridge (via http): ~p", [Params]),
@ -184,7 +184,7 @@ update_bridge_api(Config, Overrides) ->
BridgeConfig0 = ?config(bridge_config, Config),
BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides),
BridgeId = emqx_bridge_resource:bridge_id(BridgeType, Name),
Path = emqx_mgmt_api_test_util:api_path(["bridges_v2", BridgeId]),
Path = emqx_mgmt_api_test_util:api_path(["actions", BridgeId]),
AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
Opts = #{return_all => true},
ct:pal("updating bridge (via http): ~p", [BridgeConfig]),
@ -198,7 +198,7 @@ update_bridge_api(Config, Overrides) ->
op_bridge_api(Op, BridgeType, BridgeName) ->
BridgeId = emqx_bridge_resource:bridge_id(BridgeType, BridgeName),
Path = emqx_mgmt_api_test_util:api_path(["bridges_v2", BridgeId, Op]),
Path = emqx_mgmt_api_test_util:api_path(["actions", BridgeId, Op]),
AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
Opts = #{return_all => true},
ct:pal("calling bridge ~p (via http): ~p", [BridgeId, Op]),
@ -228,7 +228,7 @@ probe_bridge_api(Config, Overrides) ->
probe_bridge_api(BridgeType, BridgeName, BridgeConfig) ->
Params = BridgeConfig#{<<"type">> => BridgeType, <<"name">> => BridgeName},
Path = emqx_mgmt_api_test_util:api_path(["bridges_v2_probe"]),
Path = emqx_mgmt_api_test_util:api_path(["actions_probe"]),
AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
Opts = #{return_all => true},
ct:pal("probing bridge (via http): ~p", [Params]),
@ -260,11 +260,13 @@ create_rule_and_action_http(BridgeType, RuleTopic, Config, Opts) ->
BridgeName = ?config(bridge_name, Config),
BridgeId = emqx_bridge_resource:bridge_id(BridgeType, BridgeName),
SQL = maps:get(sql, Opts, <<"SELECT * FROM \"", RuleTopic/binary, "\"">>),
Params = #{
Params0 = #{
enable => true,
sql => SQL,
actions => [BridgeId]
},
Overrides = maps:get(overrides, Opts, #{}),
Params = emqx_utils_maps:deep_merge(Params0, Overrides),
Path = emqx_mgmt_api_test_util:api_path(["rules"]),
AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
ct:pal("rule action params: ~p", [Params]),

View File

@ -79,10 +79,10 @@ fields("post_producer") ->
),
override_documentations(Fields);
fields("config_bridge_v2") ->
fields(bridge_v2);
fields(actions);
fields("config_connector") ->
Fields = override(
emqx_bridge_kafka:fields(kafka_connector),
emqx_bridge_kafka:fields("config_connector"),
connector_overrides()
),
override_documentations(Fields);
@ -114,10 +114,10 @@ fields(kafka_message) ->
Fields0 = emqx_bridge_kafka:fields(kafka_message),
Fields = proplists:delete(timestamp, Fields0),
override_documentations(Fields);
fields(bridge_v2) ->
fields(actions) ->
Fields =
override(
emqx_bridge_kafka:fields(producer_opts),
emqx_bridge_kafka:producer_opts(),
bridge_v2_overrides()
) ++
[
@ -125,7 +125,8 @@ fields(bridge_v2) ->
{connector,
mk(binary(), #{
desc => ?DESC(emqx_connector_schema, "connector_field"), required => true
})}
})},
{description, emqx_schema:description_schema()}
],
override_documentations(Fields);
fields(Method) ->
@ -153,7 +154,7 @@ struct_names() ->
auth_username_password,
kafka_message,
producer_kafka_opts,
bridge_v2,
actions,
ssl_client_opts
].
@ -205,23 +206,48 @@ values({post, bridge_v2}) ->
values(producer),
#{
enable => true,
connector => <<"my_azure_event_hub_connector">>,
name => <<"my_azure_event_hub_bridge">>,
connector => <<"my_azure_event_hub_producer_connector">>,
name => <<"my_azure_event_hub_producer_bridge">>,
type => ?AEH_CONNECTOR_TYPE_BIN
}
);
values({post, AEHType}) ->
maps:merge(values(common_config), values(AEHType));
values({put, AEHType}) ->
values({post, AEHType});
values(connector) ->
values({post, connector}) ->
maps:merge(
values(common_config),
#{
name => <<"my_azure_event_hub_connector">>,
type => ?AEH_CONNECTOR_TYPE_BIN
name => <<"my_azure_event_hub_producer_connector">>,
type => ?AEH_CONNECTOR_TYPE_BIN,
ssl => #{
enable => true,
server_name_indication => <<"auto">>,
verify => <<"verify_none">>,
versions => [<<"tlsv1.3">>, <<"tlsv1.2">>]
}
}
);
values({post, producer}) ->
maps:merge(
#{
name => <<"my_azure_event_hub_producer">>,
type => <<"azure_event_hub_producer">>
},
maps:merge(
values(common_config),
values(producer)
)
);
values({put, connector}) ->
values(common_config);
values({put, bridge_v2}) ->
maps:merge(
values(producer),
#{
enable => true,
connector => <<"my_azure_event_hub_producer_connector">>
}
);
values({put, producer}) ->
values({post, producer});
values(common_config) ->
#{
authentication => #{
@ -232,23 +258,20 @@ values(common_config) ->
enable => true,
metadata_request_timeout => <<"4s">>,
min_metadata_refresh_interval => <<"3s">>,
name => <<"my_azure_event_hub_bridge">>,
socket_opts => #{
sndbuf => <<"1024KB">>,
recbuf => <<"1024KB">>,
nodelay => true,
tcp_keepalive => <<"none">>
},
type => <<"azure_event_hub_producer">>
}
};
values(producer) ->
#{
kafka => #{
parameters => #{
topic => <<"topic">>,
message => #{
key => <<"${.clientid}">>,
value => <<"${.}">>,
timestamp => <<"${.timestamp}">>
value => <<"${.}">>
},
max_batch_bytes => <<"896KB">>,
partition_strategy => <<"random">>,
@ -318,7 +341,13 @@ connector_overrides() ->
)
}
),
ssl => mk(ref(ssl_client_opts), #{default => #{<<"enable">> => true}}),
ssl => mk(
ref(ssl_client_opts),
#{
required => true,
default => #{<<"enable">> => true}
}
),
type => mk(
?AEH_CONNECTOR_TYPE,
#{
@ -349,18 +378,27 @@ producer_overrides() ->
)
}
),
%% NOTE: field 'kafka' is renamed to 'parameters' since e5.3.1
%% We will keep 'kafka' for backward compatibility.
%% TODO: delete this override when we upgrade bridge schema json to 0.2.0
%% See emqx_conf:bridge_schema_json/0
kafka =>
mk(ref(producer_kafka_opts), #{
required => true,
validator => fun emqx_bridge_kafka:producer_strategy_key_validator/1
}),
parameters =>
mk(ref(producer_kafka_opts), #{
required => true,
validator => fun emqx_bridge_kafka:producer_strategy_key_validator/1
}),
ssl => mk(ref(ssl_client_opts), #{default => #{<<"enable">> => true}}),
type => mk(azure_event_hub_producer, #{required => true})
}.
bridge_v2_overrides() ->
#{
kafka =>
parameters =>
mk(ref(producer_kafka_opts), #{
required => true,
validator => fun emqx_bridge_kafka:producer_strategy_key_validator/1

View File

@ -222,13 +222,8 @@ encode_payload(State, Selected) ->
OrderingKey = render_key(OrderingKeyTemplate, Selected),
Attributes = proc_attributes(AttributesTemplate, Selected),
Payload0 = #{data => base64:encode(Data)},
Payload1 = put_if(Payload0, attributes, Attributes, map_size(Attributes) > 0),
put_if(Payload1, 'orderingKey', OrderingKey, OrderingKey =/= <<>>).
put_if(Acc, K, V, true) ->
Acc#{K => V};
put_if(Acc, _K, _V, false) ->
Acc.
Payload1 = emqx_utils_maps:put_if(Payload0, attributes, Attributes, map_size(Attributes) > 0),
emqx_utils_maps:put_if(Payload1, 'orderingKey', OrderingKey, OrderingKey =/= <<>>).
-spec render_payload(emqx_placeholder:tmpl_token(), map()) -> binary().
render_payload([] = _Template, Selected) ->

View File

@ -28,23 +28,24 @@
fields/1,
desc/1,
host_opts/0,
ssl_client_opts_fields/0
ssl_client_opts_fields/0,
producer_opts/0
]).
-export([kafka_producer_converter/2, producer_strategy_key_validator/1]).
-export([
kafka_producer_converter/2,
producer_strategy_key_validator/1
]).
%% -------------------------------------------------------------------------------------------------
%% api
connector_examples(_Method) ->
connector_examples(Method) ->
[
#{
<<"kafka_producer">> => #{
summary => <<"Kafka Connector">>,
value => maps:merge(
#{name => <<"my_connector">>, type => <<"kafka_producer">>},
values(common_config)
)
summary => <<"Kafka Producer Connector">>,
value => values({Method, connector})
}
}
].
@ -53,7 +54,7 @@ bridge_v2_examples(Method) ->
[
#{
<<"kafka_producer">> => #{
summary => <<"Kafka Bridge v2">>,
summary => <<"Kafka Producer Action">>,
value => values({Method, bridge_v2_producer})
}
}
@ -88,23 +89,33 @@ values({get, KafkaType}) ->
},
values({post, KafkaType})
);
values({post, connector}) ->
maps:merge(
#{
name => <<"my_kafka_producer_connector">>,
type => <<"kafka_producer">>
},
values(common_config)
);
values({post, KafkaType}) ->
maps:merge(
#{
name => <<"my_kafka_bridge">>,
name => <<"my_kafka_producer_bridge">>,
type => <<"kafka_producer">>
},
values({put, KafkaType})
);
values({put, KafkaType}) when KafkaType =:= bridge_v2_producer ->
values(KafkaType);
values({put, bridge_v2_producer}) ->
values(bridge_v2_producer);
values({put, connector}) ->
values(common_config);
values({put, KafkaType}) ->
maps:merge(values(common_config), values(KafkaType));
values(bridge_v2_producer) ->
maps:merge(
#{
enable => true,
connector => <<"my_kafka_connector">>,
connector => <<"my_kafka_producer_connector">>,
resource_opts => #{
health_check_interval => "32s"
}
@ -244,64 +255,24 @@ fields("get_" ++ Type) ->
fields("config_bridge_v2") ->
fields(kafka_producer_action);
fields("config_connector") ->
fields(kafka_connector);
connector_config_fields();
fields("config_producer") ->
fields(kafka_producer);
fields("config_consumer") ->
fields(kafka_consumer);
fields(kafka_connector) ->
fields("config");
fields(kafka_producer) ->
fields("config") ++ fields(producer_opts);
connector_config_fields() ++ producer_opts();
fields(kafka_producer_action) ->
[
{enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
{connector,
mk(binary(), #{
desc => ?DESC(emqx_connector_schema, "connector_field"), required => true
})}
] ++ fields(producer_opts);
})},
{description, emqx_schema:description_schema()}
] ++ producer_opts();
fields(kafka_consumer) ->
fields("config") ++ fields(consumer_opts);
fields("config") ->
[
{enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
{bootstrap_hosts,
mk(
binary(),
#{
required => true,
desc => ?DESC(bootstrap_hosts),
validator => emqx_schema:servers_validator(
host_opts(), _Required = true
)
}
)},
{connect_timeout,
mk(emqx_schema:timeout_duration_ms(), #{
default => <<"5s">>,
desc => ?DESC(connect_timeout)
})},
{min_metadata_refresh_interval,
mk(
emqx_schema:timeout_duration_ms(),
#{
default => <<"3s">>,
desc => ?DESC(min_metadata_refresh_interval)
}
)},
{metadata_request_timeout,
mk(emqx_schema:timeout_duration_ms(), #{
default => <<"5s">>,
desc => ?DESC(metadata_request_timeout)
})},
{authentication,
mk(hoconsc:union([none, ref(auth_username_password), ref(auth_gssapi_kerberos)]), #{
default => none, desc => ?DESC("authentication")
})},
{socket_opts, mk(ref(socket_opts), #{required => false, desc => ?DESC(socket_opts)})},
{ssl, mk(ref(ssl_client_opts), #{})}
];
connector_config_fields() ++ fields(consumer_opts);
fields(ssl_client_opts) ->
ssl_client_opts_fields();
fields(auth_username_password) ->
@ -349,7 +320,7 @@ fields(socket_opts) ->
boolean(),
#{
default => true,
importance => ?IMPORTANCE_HIDDEN,
importance => ?IMPORTANCE_LOW,
desc => ?DESC(socket_nodelay)
}
)},
@ -360,20 +331,6 @@ fields(socket_opts) ->
validator => fun emqx_schema:validate_tcp_keepalive/1
})}
];
fields(producer_opts) ->
[
%% Note: there's an implicit convention in `emqx_bridge' that,
%% for egress bridges with this config, the published messages
%% will be forwarded to such bridges.
{local_topic, mk(binary(), #{required => false, desc => ?DESC(mqtt_topic)})},
{kafka,
mk(ref(producer_kafka_opts), #{
required => true,
desc => ?DESC(producer_kafka_opts),
validator => fun producer_strategy_key_validator/1
})},
{resource_opts, mk(ref(resource_opts), #{default => #{}, desc => ?DESC(resource_opts)})}
];
fields(producer_kafka_opts) ->
[
{topic, mk(string(), #{required => true, desc => ?DESC(kafka_topic)})},
@ -571,7 +528,7 @@ fields(resource_opts) ->
CreationOpts = emqx_resource_schema:create_opts(_Overrides = []),
lists:filter(fun({Field, _}) -> lists:member(Field, SupportedFields) end, CreationOpts).
desc("config") ->
desc("config_connector") ->
?DESC("desc_config");
desc(resource_opts) ->
?DESC(emqx_resource_schema, "resource_opts");
@ -590,34 +547,86 @@ desc("post_" ++ Type) when
desc(kafka_producer_action) ->
?DESC("kafka_producer_action");
desc(Name) ->
lists:member(Name, struct_names()) orelse throw({missing_desc, Name}),
?DESC(Name).
struct_names() ->
connector_config_fields() ->
[
auth_gssapi_kerberos,
auth_username_password,
kafka_message,
kafka_producer,
kafka_consumer,
producer_buffer,
producer_kafka_opts,
socket_opts,
producer_opts,
consumer_opts,
consumer_kafka_opts,
consumer_topic_mapping,
producer_kafka_ext_headers,
ssl_client_opts
{enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
{description, emqx_schema:description_schema()},
{bootstrap_hosts,
mk(
binary(),
#{
required => true,
desc => ?DESC(bootstrap_hosts),
validator => emqx_schema:servers_validator(
host_opts(), _Required = true
)
}
)},
{connect_timeout,
mk(emqx_schema:timeout_duration_ms(), #{
default => <<"5s">>,
desc => ?DESC(connect_timeout)
})},
{min_metadata_refresh_interval,
mk(
emqx_schema:timeout_duration_ms(),
#{
default => <<"3s">>,
desc => ?DESC(min_metadata_refresh_interval)
}
)},
{metadata_request_timeout,
mk(emqx_schema:timeout_duration_ms(), #{
default => <<"5s">>,
desc => ?DESC(metadata_request_timeout)
})},
{authentication,
mk(hoconsc:union([none, ref(auth_username_password), ref(auth_gssapi_kerberos)]), #{
default => none, desc => ?DESC("authentication")
})},
{socket_opts, mk(ref(socket_opts), #{required => false, desc => ?DESC(socket_opts)})},
{ssl, mk(ref(ssl_client_opts), #{})}
].
producer_opts() ->
[
%% Note: there's an implicit convention in `emqx_bridge' that,
%% for egress bridges with this config, the published messages
%% will be forwarded to such bridges.
{local_topic, mk(binary(), #{required => false, desc => ?DESC(mqtt_topic)})},
parameters_field(),
{resource_opts, mk(ref(resource_opts), #{default => #{}, desc => ?DESC(resource_opts)})}
].
%% Since e5.3.1, we want to rename the field 'kafka' to 'parameters'
%% Hoever we need to keep it backward compatible for generated schema json (version 0.1.0)
%% since schema is data for the 'schemas' API.
parameters_field() ->
{Name, Alias} =
case get(emqx_bridge_schema_version) of
<<"0.1.0">> ->
{kafka, parameters};
_ ->
{parameters, kafka}
end,
{Name,
mk(ref(producer_kafka_opts), #{
required => true,
aliases => [Alias],
desc => ?DESC(producer_kafka_opts),
validator => fun producer_strategy_key_validator/1
})}.
%% -------------------------------------------------------------------------------------------------
%% internal
type_field(BridgeV2Type) when BridgeV2Type =:= "connector"; BridgeV2Type =:= "bridge_v2" ->
{type, mk(enum([kafka_producer]), #{required => true, desc => ?DESC("desc_type")})};
type_field(_) ->
{type,
mk(enum([kafka_consumer, kafka, kafka_producer]), #{
%% 'kafka' is kept for backward compatibility
mk(enum([kafka, kafka_producer, kafka_consumer]), #{
required => true, desc => ?DESC("desc_type")
})}.
@ -632,17 +641,23 @@ kafka_producer_converter(undefined, _HoconOpts) ->
kafka_producer_converter(
#{<<"producer">> := OldOpts0, <<"bootstrap_hosts">> := _} = Config0, _HoconOpts
) ->
%% old schema
%% prior to e5.0.2
MQTTOpts = maps:get(<<"mqtt">>, OldOpts0, #{}),
LocalTopic = maps:get(<<"topic">>, MQTTOpts, undefined),
KafkaOpts = maps:get(<<"kafka">>, OldOpts0),
Config = maps:without([<<"producer">>], Config0),
case LocalTopic =:= undefined of
true ->
Config#{<<"kafka">> => KafkaOpts};
Config#{<<"parameters">> => KafkaOpts};
false ->
Config#{<<"kafka">> => KafkaOpts, <<"local_topic">> => LocalTopic}
Config#{<<"parameters">> => KafkaOpts, <<"local_topic">> => LocalTopic}
end;
kafka_producer_converter(
#{<<"kafka">> := _} = Config0, _HoconOpts
) ->
%% from e5.0.2 to e5.3.0
{KafkaOpts, Config} = maps:take(<<"kafka">>, Config0),
Config#{<<"parameters">> => KafkaOpts};
kafka_producer_converter(Config, _HoconOpts) ->
%% new schema
Config.

View File

@ -35,7 +35,7 @@
-define(kafka_client_id, kafka_client_id).
-define(kafka_producers, kafka_producers).
query_mode(#{kafka := #{query_mode := sync}}) ->
query_mode(#{parameters := #{query_mode := sync}}) ->
simple_sync_internal_buffer;
query_mode(_) ->
simple_async_internal_buffer.
@ -63,6 +63,11 @@ tr_config(_Key, Value) ->
%% @doc Config schema is defined in emqx_bridge_kafka.
on_start(InstId, Config) ->
?SLOG(debug, #{
msg => "kafka_client_starting",
instance_id => InstId,
config => emqx_utils:redact(Config)
}),
C = fun(Key) -> check_config(Key, Config) end,
Hosts = C(bootstrap_hosts),
ClientConfig = #{
@ -74,36 +79,8 @@ on_start(InstId, Config) ->
ssl => C(ssl)
},
ClientId = InstId,
ok = emqx_resource:allocate_resource(InstId, ?kafka_client_id, ClientId),
case wolff:ensure_supervised_client(ClientId, Hosts, ClientConfig) of
{ok, _} ->
case wolff_client_sup:find_client(ClientId) of
{ok, Pid} ->
case wolff_client:check_connectivity(Pid) of
ok ->
ok;
{error, Error} ->
deallocate_client(ClientId),
throw({failed_to_connect, Error})
end;
{error, Reason} ->
deallocate_client(ClientId),
throw({failed_to_find_created_client, Reason})
end,
?SLOG(info, #{
msg => "kafka_client_started",
instance_id => InstId,
kafka_hosts => Hosts
});
{error, Reason} ->
?SLOG(error, #{
msg => failed_to_start_kafka_client,
instance_id => InstId,
kafka_hosts => Hosts,
reason => Reason
}),
throw(failed_to_start_kafka_client)
end,
emqx_resource:allocate_resource(InstId, ?kafka_client_id, ClientId),
ok = ensure_client(ClientId, Hosts, ClientConfig),
%% Check if this is a dry run
{ok, #{
client_id => ClientId,
@ -134,7 +111,7 @@ create_producers_for_bridge_v2(
ClientId,
#{
bridge_type := BridgeType,
kafka := KafkaConfig
parameters := KafkaConfig
}
) ->
#{
@ -156,7 +133,7 @@ create_producers_for_bridge_v2(
end,
ok = check_topic_and_leader_connections(ClientId, KafkaTopic),
WolffProducerConfig = producers_config(
BridgeType, BridgeName, ClientId, KafkaConfig, IsDryRun, BridgeV2Id
BridgeType, BridgeName, KafkaConfig, IsDryRun, BridgeV2Id
),
case wolff:ensure_supervised_producers(ClientId, KafkaTopic, WolffProducerConfig) of
{ok, Producers} ->
@ -215,6 +192,32 @@ on_stop(InstanceId, _State) ->
?tp(kafka_producer_stopped, #{instance_id => InstanceId}),
ok.
ensure_client(ClientId, Hosts, ClientConfig) ->
case wolff_client_sup:find_client(ClientId) of
{ok, _Pid} ->
ok;
{error, no_such_client} ->
case wolff:ensure_supervised_client(ClientId, Hosts, ClientConfig) of
{ok, _} ->
?SLOG(info, #{
msg => "kafka_client_started",
client_id => ClientId,
kafka_hosts => Hosts
});
{error, Reason} ->
?SLOG(error, #{
msg => failed_to_start_kafka_client,
client_id => ClientId,
kafka_hosts => Hosts,
reason => Reason
}),
throw(failed_to_start_kafka_client)
end;
{error, Reason} ->
deallocate_client(ClientId),
throw({failed_to_find_created_client, Reason})
end.
deallocate_client(ClientId) ->
_ = with_log_at_error(
fun() -> wolff:stop_and_delete_supervised_client(ClientId) end,
@ -554,11 +557,8 @@ check_topic_status(ClientId, WolffClientPid, KafkaTopic) ->
ok ->
ok;
{error, unknown_topic_or_partition} ->
throw(#{
error => unknown_kafka_topic,
kafka_client_id => ClientId,
kafka_topic => KafkaTopic
});
Msg = iolist_to_binary([<<"Unknown topic or partition: ">>, KafkaTopic]),
throw({unhealthy_target, Msg});
{error, Reason} ->
throw(#{
error => failed_to_check_topic_status,
@ -573,7 +573,7 @@ ssl(#{enable := true} = SSL) ->
ssl(_) ->
false.
producers_config(BridgeType, BridgeName, ClientId, Input, IsDryRun, BridgeV2Id) ->
producers_config(BridgeType, BridgeName, Input, IsDryRun, BridgeV2Id) ->
#{
max_batch_bytes := MaxBatchBytes,
compression := Compression,
@ -596,8 +596,8 @@ producers_config(BridgeType, BridgeName, ClientId, Input, IsDryRun, BridgeV2Id)
{OffloadMode, ReplayqDir} =
case BufferMode of
memory -> {false, false};
disk -> {false, replayq_dir(ClientId)};
hybrid -> {true, replayq_dir(ClientId)}
disk -> {false, replayq_dir(BridgeType, BridgeName)};
hybrid -> {true, replayq_dir(BridgeType, BridgeName)}
end,
#{
name => make_producer_name(BridgeType, BridgeName, IsDryRun),
@ -620,8 +620,11 @@ producers_config(BridgeType, BridgeName, ClientId, Input, IsDryRun, BridgeV2Id)
partitioner(random) -> random;
partitioner(key_dispatch) -> first_key_dispatch.
replayq_dir(ClientId) ->
filename:join([emqx:data_dir(), "kafka", ClientId]).
replayq_dir(BridgeType, BridgeName) ->
DirName = iolist_to_binary([
emqx_bridge_lib:downgrade_type(BridgeType), ":", BridgeName, ":", atom_to_list(node())
]),
filename:join([emqx:data_dir(), "kafka", DirName]).
%% Producer name must be an atom which will be used as a ETS table name for
%% partition worker lookup.

View File

@ -698,6 +698,20 @@ create_bridge(Config, Overrides) ->
KafkaConfig = emqx_utils_maps:deep_merge(KafkaConfig0, Overrides),
emqx_bridge:create(Type, Name, KafkaConfig).
create_bridge_wait_for_balance(Config) ->
setup_group_subscriber_spy(self()),
try
Res = create_bridge(Config),
receive
{kafka_assignment, _, _} ->
Res
after 20_000 ->
ct:fail("timed out waiting for kafka assignment")
end
after
kill_group_subscriber_spy()
end.
delete_bridge(Config) ->
Type = ?BRIDGE_TYPE_BIN,
Name = ?config(kafka_name, Config),
@ -1020,31 +1034,37 @@ reconstruct_assignments_from_events(KafkaTopic, Events0, Acc0) ->
setup_group_subscriber_spy_fn() ->
TestPid = self(),
fun() ->
ok = meck:new(brod_group_subscriber_v2, [
passthrough, no_link, no_history, non_strict
]),
ok = meck:expect(
brod_group_subscriber_v2,
assignments_received,
fun(Pid, MemberId, GenerationId, TopicAssignments) ->
?tp(
kafka_assignment,
#{
node => node(),
pid => Pid,
member_id => MemberId,
generation_id => GenerationId,
topic_assignments => TopicAssignments
}
),
TestPid !
{kafka_assignment, node(), {Pid, MemberId, GenerationId, TopicAssignments}},
meck:passthrough([Pid, MemberId, GenerationId, TopicAssignments])
end
),
ok
setup_group_subscriber_spy(TestPid)
end.
setup_group_subscriber_spy(TestPid) ->
ok = meck:new(brod_group_subscriber_v2, [
passthrough, no_link, no_history, non_strict
]),
ok = meck:expect(
brod_group_subscriber_v2,
assignments_received,
fun(Pid, MemberId, GenerationId, TopicAssignments) ->
?tp(
kafka_assignment,
#{
node => node(),
pid => Pid,
member_id => MemberId,
generation_id => GenerationId,
topic_assignments => TopicAssignments
}
),
TestPid !
{kafka_assignment, node(), {Pid, MemberId, GenerationId, TopicAssignments}},
meck:passthrough([Pid, MemberId, GenerationId, TopicAssignments])
end
),
ok.
kill_group_subscriber_spy() ->
meck:unload(brod_group_subscriber_v2).
wait_for_cluster_rpc(Node) ->
%% need to wait until the config handler is ready after
%% restarting during the cluster join.
@ -1702,10 +1722,7 @@ t_dynamic_mqtt_topic(Config) ->
MQTTTopic = emqx_topic:join([KafkaTopic, '#']),
?check_trace(
begin
?assertMatch(
{ok, _},
create_bridge(Config)
),
?assertMatch({ok, _}, create_bridge_wait_for_balance(Config)),
wait_until_subscribers_are_ready(NPartitions, 40_000),
ping_until_healthy(Config, _Period = 1_500, _Timeout = 24_000),
{ok, C} = emqtt:start_link(),

View File

@ -474,7 +474,11 @@ t_failed_creation_then_fix(Config) ->
%% before throwing, it should cleanup the client process. we
%% retry because the supervisor might need some time to really
%% remove it from its tree.
?retry(50, 10, ?assertEqual([], supervisor:which_children(wolff_client_sup))),
?retry(
_Sleep0 = 50,
_Attempts0 = 10,
?assertEqual([], supervisor:which_children(wolff_producers_sup))
),
%% must succeed with correct config
{ok, #{config := _ValidConfigAtom1}} = emqx_bridge:create(
list_to_atom(Type), list_to_atom(Name), ValidConf
@ -570,7 +574,15 @@ t_nonexistent_topic(_Config) ->
erlang:list_to_atom(Type), erlang:list_to_atom(Name), Conf
),
% TODO: make sure the user facing APIs for Bridge V1 also get this error
{error, _} = emqx_bridge_v2:health_check(?BRIDGE_TYPE_V2, list_to_atom(Name)),
?assertMatch(
#{
status := disconnected,
error := {unhealthy_target, <<"Unknown topic or partition: undefined-test-topic">>}
},
emqx_bridge_v2:health_check(
?BRIDGE_TYPE_V2, list_to_atom(Name)
)
),
ok = emqx_bridge:remove(list_to_atom(Type), list_to_atom(Name)),
delete_all_bridges(),
ok.

View File

@ -6,6 +6,10 @@
-include_lib("eunit/include/eunit.hrl").
-export([atoms/0]).
%% ensure atoms exist
atoms() -> [myproducer, my_consumer].
%%===========================================================================
%% Test cases
%%===========================================================================
@ -14,7 +18,6 @@ kafka_producer_test() ->
Conf1 = parse(kafka_producer_old_hocon(_WithLocalTopic0 = false)),
Conf2 = parse(kafka_producer_old_hocon(_WithLocalTopic1 = true)),
Conf3 = parse(kafka_producer_new_hocon()),
?assertMatch(
#{
<<"bridges">> :=
@ -22,7 +25,7 @@ kafka_producer_test() ->
<<"kafka_producer">> :=
#{
<<"myproducer">> :=
#{<<"kafka">> := #{}}
#{<<"parameters">> := #{}}
}
}
},
@ -49,7 +52,7 @@ kafka_producer_test() ->
#{
<<"myproducer">> :=
#{
<<"kafka">> := #{},
<<"parameters">> := #{},
<<"local_topic">> := <<"mqtt/local">>
}
}
@ -65,7 +68,7 @@ kafka_producer_test() ->
#{
<<"myproducer">> :=
#{
<<"kafka">> := #{},
<<"parameters">> := #{},
<<"local_topic">> := <<"mqtt/local">>
}
}
@ -156,12 +159,14 @@ message_key_dispatch_validations_test() ->
<<"message">> := #{<<"key">> := <<>>}
}
},
emqx_utils_maps:deep_get([<<"bridges">>, <<"kafka">>, atom_to_binary(Name)], Conf)
emqx_utils_maps:deep_get(
[<<"bridges">>, <<"kafka">>, atom_to_binary(Name)], Conf
)
),
?assertThrow(
{_, [
#{
path := "bridges.kafka_producer.myproducer.kafka",
path := "bridges.kafka_producer.myproducer.parameters",
reason := "Message key cannot be empty when `key_dispatch` strategy is used"
}
]},
@ -170,7 +175,7 @@ message_key_dispatch_validations_test() ->
?assertThrow(
{_, [
#{
path := "bridges.kafka_producer.myproducer.kafka",
path := "bridges.kafka_producer.myproducer.parameters",
reason := "Message key cannot be empty when `key_dispatch` strategy is used"
}
]},
@ -181,8 +186,6 @@ message_key_dispatch_validations_test() ->
tcp_keepalive_validation_test_() ->
ProducerConf = parse(kafka_producer_new_hocon()),
ConsumerConf = parse(kafka_consumer_hocon()),
%% ensure atoms exist
_ = [my_producer, my_consumer],
test_keepalive_validation([<<"kafka">>, <<"myproducer">>], ProducerConf) ++
test_keepalive_validation([<<"kafka_consumer">>, <<"my_consumer">>], ConsumerConf).
@ -358,3 +361,10 @@ bridges.kafka_consumer.my_consumer {
}
}
""".
%% assert compatibility
bridge_schema_json_test() ->
JSON = iolist_to_binary(emqx_conf:bridge_schema_json()),
Map = emqx_utils_json:decode(JSON),
Path = [<<"components">>, <<"schemas">>, <<"bridge_kafka.post_producer">>, <<"properties">>],
?assertMatch(#{<<"kafka">> := _}, emqx_utils_maps:deep_get(Path, Map)).

View File

@ -119,7 +119,7 @@ t_health_check(_) ->
ConnectorConfig = connector_config(),
{ok, _} = emqx_connector:create(?TYPE, test_connector3, ConnectorConfig),
{ok, _} = emqx_bridge_v2:create(?TYPE, test_bridge_v2, BridgeV2Config),
connected = emqx_bridge_v2:health_check(?TYPE, test_bridge_v2),
#{status := connected} = emqx_bridge_v2:health_check(?TYPE, test_bridge_v2),
ok = emqx_bridge_v2:remove(?TYPE, test_bridge_v2),
%% Check behaviour when bridge does not exist
{error, bridge_not_found} = emqx_bridge_v2:health_check(?TYPE, test_bridge_v2),
@ -140,6 +140,33 @@ t_local_topic(_) ->
ok = emqx_connector:remove(?TYPE, test_connector),
ok.
t_unknown_topic(_Config) ->
ConnectorName = <<"test_connector">>,
BridgeName = <<"test_bridge">>,
BridgeV2Config0 = bridge_v2_config(ConnectorName),
BridgeV2Config = emqx_utils_maps:deep_put(
[<<"kafka">>, <<"topic">>],
BridgeV2Config0,
<<"nonexistent">>
),
ConnectorConfig = connector_config(),
{ok, _} = emqx_connector:create(?TYPE, ConnectorName, ConnectorConfig),
{ok, _} = emqx_bridge_v2:create(?TYPE, BridgeName, BridgeV2Config),
Payload = <<"will be dropped">>,
emqx:publish(emqx_message:make(<<"kafka_t/local">>, Payload)),
BridgeV2Id = emqx_bridge_v2:id(?TYPE, BridgeName),
?retry(
_Sleep0 = 50,
_Attempts0 = 100,
begin
?assertEqual(1, emqx_resource_metrics:matched_get(BridgeV2Id)),
?assertEqual(1, emqx_resource_metrics:dropped_get(BridgeV2Id)),
?assertEqual(1, emqx_resource_metrics:dropped_resource_stopped_get(BridgeV2Id)),
ok
end
),
ok.
check_send_message_with_bridge(BridgeName) ->
%% ######################################
%% Create Kafka message

View File

@ -188,8 +188,14 @@ hotconf_schema_json() ->
%% TODO: move this function to emqx_dashboard when we stop generating this JSON at build time.
bridge_schema_json() ->
SchemaInfo = #{title => <<"EMQX Data Bridge API Schema">>, version => <<"0.1.0">>},
gen_api_schema_json_iodata(emqx_bridge_api, SchemaInfo).
Version = <<"0.1.0">>,
SchemaInfo = #{title => <<"EMQX Data Bridge API Schema">>, version => Version},
put(emqx_bridge_schema_version, Version),
try
gen_api_schema_json_iodata(emqx_bridge_api, SchemaInfo)
after
erase(emqx_bridge_schema_version)
end.
%% TODO: remove it and also remove hocon_md.erl and friends.
%% markdown generation from schema is a failure and we are moving to an interactive

View File

@ -97,7 +97,7 @@ get_response_body_schema() ->
param_path_operation_cluster() ->
{operation,
mk(
enum([start, stop, restart]),
enum([start]),
#{
in => path,
required => true,
@ -109,7 +109,7 @@ param_path_operation_cluster() ->
param_path_operation_on_node() ->
{operation,
mk(
enum([start, stop, restart]),
enum([start]),
#{
in => path,
required => true,
@ -266,7 +266,7 @@ schema("/connectors/:id/:operation") ->
'operationId' => '/connectors/:id/:operation',
post => #{
tags => [<<"connectors">>],
summary => <<"Stop, start or restart connector">>,
summary => <<"Manually start a connector">>,
description => ?DESC("desc_api7"),
parameters => [
param_path_id(),
@ -288,7 +288,7 @@ schema("/nodes/:node/connectors/:id/:operation") ->
'operationId' => '/nodes/:node/connectors/:id/:operation',
post => #{
tags => [<<"connectors">>],
summary => <<"Stop, start or restart connector">>,
summary => <<"Manually start a connector on a given node">>,
description => ?DESC("desc_api8"),
parameters => [
param_path_node(),
@ -453,9 +453,30 @@ update_connector(ConnectorType, ConnectorName, Conf) ->
create_or_update_connector(ConnectorType, ConnectorName, Conf, 200).
create_or_update_connector(ConnectorType, ConnectorName, Conf, HttpStatusCode) ->
Check =
try
is_binary(ConnectorType) andalso emqx_resource:validate_type(ConnectorType),
ok = emqx_resource:validate_name(ConnectorName)
catch
throw:Error ->
?BAD_REQUEST(map_to_json(Error))
end,
case Check of
ok ->
do_create_or_update_connector(ConnectorType, ConnectorName, Conf, HttpStatusCode);
BadRequest ->
BadRequest
end.
do_create_or_update_connector(ConnectorType, ConnectorName, Conf, HttpStatusCode) ->
case emqx_connector:create(ConnectorType, ConnectorName, Conf) of
{ok, _} ->
lookup_from_all_nodes(ConnectorType, ConnectorName, HttpStatusCode);
{error, {PreOrPostConfigUpdate, _HandlerMod, Reason}} when
PreOrPostConfigUpdate =:= pre_config_update;
PreOrPostConfigUpdate =:= post_config_update
->
?BAD_REQUEST(map_to_json(redact(Reason)));
{error, Reason} when is_map(Reason) ->
?BAD_REQUEST(map_to_json(redact(Reason)))
end.
@ -531,12 +552,8 @@ is_enabled_connector(ConnectorType, ConnectorName) ->
throw(not_found)
end.
operation_func(all, restart) -> restart_connectors_to_all_nodes;
operation_func(all, start) -> start_connectors_to_all_nodes;
operation_func(all, stop) -> stop_connectors_to_all_nodes;
operation_func(_Node, restart) -> restart_connector_to_node;
operation_func(_Node, start) -> start_connector_to_node;
operation_func(_Node, stop) -> stop_connector_to_node.
operation_func(_Node, start) -> start_connector_to_node.
enable_func(true) -> enable;
enable_func(false) -> disable.

View File

@ -95,20 +95,14 @@ connector_id(ConnectorType, ConnectorName) ->
parse_connector_id(ConnectorId) ->
parse_connector_id(ConnectorId, #{atom_name => true}).
-spec parse_connector_id(list() | binary() | atom(), #{atom_name => boolean()}) ->
-spec parse_connector_id(binary() | atom(), #{atom_name => boolean()}) ->
{atom(), atom() | binary()}.
parse_connector_id(<<"connector:", ConnectorId/binary>>, Opts) ->
parse_connector_id(ConnectorId, Opts);
parse_connector_id(<<?TEST_ID_PREFIX, ConnectorId/binary>>, Opts) ->
parse_connector_id(ConnectorId, Opts);
parse_connector_id(ConnectorId, Opts) ->
case string:split(bin(ConnectorId), ":", all) of
[Type, Name] ->
{to_type_atom(Type), validate_name(Name, Opts)};
[_, Type, Name] ->
{to_type_atom(Type), validate_name(Name, Opts)};
_ ->
invalid_data(
<<"should be of pattern {type}:{name} or connector:{type}:{name}, but got ",
ConnectorId/binary>>
)
end.
emqx_resource:parse_resource_id(ConnectorId, Opts).
connector_hookpoint(ConnectorId) ->
<<"$connectors/", (bin(ConnectorId))/binary>>.
@ -118,45 +112,6 @@ connector_hookpoint_to_connector_id(?BRIDGE_HOOKPOINT(ConnectorId)) ->
connector_hookpoint_to_connector_id(_) ->
{error, bad_connector_hookpoint}.
validate_name(Name0, Opts) ->
Name = unicode:characters_to_list(Name0, utf8),
case is_list(Name) andalso Name =/= [] of
true ->
case lists:all(fun is_id_char/1, Name) of
true ->
case maps:get(atom_name, Opts, true) of
% NOTE
% Rule may be created before connector, thus not `list_to_existing_atom/1`,
% also it is infrequent user input anyway.
true -> list_to_atom(Name);
false -> Name0
end;
false ->
invalid_data(<<"bad name: ", Name0/binary>>)
end;
false ->
invalid_data(<<"only 0-9a-zA-Z_-. is allowed in name: ", Name0/binary>>)
end.
-spec invalid_data(binary()) -> no_return().
invalid_data(Reason) -> throw(#{kind => validation_error, reason => Reason}).
is_id_char(C) when C >= $0 andalso C =< $9 -> true;
is_id_char(C) when C >= $a andalso C =< $z -> true;
is_id_char(C) when C >= $A andalso C =< $Z -> true;
is_id_char($_) -> true;
is_id_char($-) -> true;
is_id_char($.) -> true;
is_id_char(_) -> false.
to_type_atom(Type) ->
try
erlang:binary_to_existing_atom(Type, utf8)
catch
_:_ ->
invalid_data(<<"unknown connector type: ", Type/binary>>)
end.
restart(Type, Name) ->
emqx_resource:restart(resource_id(Type, Name)).
@ -416,6 +371,13 @@ parse_url(Url) ->
invalid_data(<<"Missing scheme in URL: ", Url/binary>>)
end.
-spec invalid_data(binary()) -> no_return().
invalid_data(Msg) ->
throw(#{
kind => validation_error,
reason => Msg
}).
bin(Bin) when is_binary(Bin) -> Bin;
bin(Str) when is_list(Str) -> list_to_binary(Str);
bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8).

View File

@ -22,13 +22,9 @@
introduced_in/0,
list_connectors_on_nodes/1,
restart_connector_to_node/3,
start_connector_to_node/3,
stop_connector_to_node/3,
lookup_from_all_nodes/3,
restart_connectors_to_all_nodes/3,
start_connectors_to_all_nodes/3,
stop_connectors_to_all_nodes/3
start_connector_to_node/3,
start_connectors_to_all_nodes/3
]).
-include_lib("emqx/include/bpapi.hrl").
@ -45,13 +41,13 @@ list_connectors_on_nodes(Nodes) ->
-type key() :: atom() | binary() | [byte()].
-spec restart_connector_to_node(node(), key(), key()) ->
term().
restart_connector_to_node(Node, ConnectorType, ConnectorName) ->
rpc:call(
Node,
emqx_connector_resource,
restart,
-spec lookup_from_all_nodes([node()], key(), key()) ->
emqx_rpc:erpc_multicall().
lookup_from_all_nodes(Nodes, ConnectorType, ConnectorName) ->
erpc:multicall(
Nodes,
emqx_connector_api,
lookup_from_local_node,
[ConnectorType, ConnectorName],
?TIMEOUT
).
@ -67,28 +63,6 @@ start_connector_to_node(Node, ConnectorType, ConnectorName) ->
?TIMEOUT
).
-spec stop_connector_to_node(node(), key(), key()) ->
term().
stop_connector_to_node(Node, ConnectorType, ConnectorName) ->
rpc:call(
Node,
emqx_connector_resource,
stop,
[ConnectorType, ConnectorName],
?TIMEOUT
).
-spec restart_connectors_to_all_nodes([node()], key(), key()) ->
emqx_rpc:erpc_multicall().
restart_connectors_to_all_nodes(Nodes, ConnectorType, ConnectorName) ->
erpc:multicall(
Nodes,
emqx_connector_resource,
restart,
[ConnectorType, ConnectorName],
?TIMEOUT
).
-spec start_connectors_to_all_nodes([node()], key(), key()) ->
emqx_rpc:erpc_multicall().
start_connectors_to_all_nodes(Nodes, ConnectorType, ConnectorName) ->
@ -99,25 +73,3 @@ start_connectors_to_all_nodes(Nodes, ConnectorType, ConnectorName) ->
[ConnectorType, ConnectorName],
?TIMEOUT
).
-spec stop_connectors_to_all_nodes([node()], key(), key()) ->
emqx_rpc:erpc_multicall().
stop_connectors_to_all_nodes(Nodes, ConnectorType, ConnectorName) ->
erpc:multicall(
Nodes,
emqx_connector_resource,
stop,
[ConnectorType, ConnectorName],
?TIMEOUT
).
-spec lookup_from_all_nodes([node()], key(), key()) ->
emqx_rpc:erpc_multicall().
lookup_from_all_nodes(Nodes, ConnectorType, ConnectorName) ->
erpc:multicall(
Nodes,
emqx_connector_api,
lookup_from_local_node,
[ConnectorType, ConnectorName],
?TIMEOUT
).

View File

@ -43,7 +43,7 @@ connector_structs() ->
[
{kafka_producer,
mk(
hoconsc:map(name, ref(emqx_bridge_kafka, "config")),
hoconsc:map(name, ref(emqx_bridge_kafka, "config_connector")),
#{
desc => <<"Kafka Connector Config">>,
required => false

View File

@ -18,6 +18,7 @@
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("eunit/include/eunit.hrl").
-import(hoconsc, [mk/2, ref/2]).
@ -27,6 +28,8 @@
-export([get_response/0, put_request/0, post_request/0]).
-export([connector_type_to_bridge_types/1]).
-if(?EMQX_RELEASE_EDITION == ee).
enterprise_api_schemas(Method) ->
%% We *must* do this to ensure the module is really loaded, especially when we use
@ -59,7 +62,7 @@ enterprise_fields_connectors() -> [].
connector_type_to_bridge_types(kafka_producer) -> [kafka, kafka_producer];
connector_type_to_bridge_types(azure_event_hub_producer) -> [azure_event_hub_producer].
actions_config_name() -> <<"bridges_v2">>.
actions_config_name() -> <<"actions">>.
has_connector_field(BridgeConf, ConnectorFields) ->
lists:any(
@ -69,21 +72,31 @@ has_connector_field(BridgeConf, ConnectorFields) ->
ConnectorFields
).
bridge_configs_to_transform(_BridgeType, [] = _BridgeNameBridgeConfList, _ConnectorFields) ->
bridge_configs_to_transform(
_BridgeType, [] = _BridgeNameBridgeConfList, _ConnectorFields, _RawConfig
) ->
[];
bridge_configs_to_transform(BridgeType, [{BridgeName, BridgeConf} | Rest], ConnectorFields) ->
bridge_configs_to_transform(
BridgeType, [{BridgeName, BridgeConf} | Rest], ConnectorFields, RawConfig
) ->
case has_connector_field(BridgeConf, ConnectorFields) of
true ->
PreviousRawConfig =
emqx_utils_maps:deep_get(
[<<"actions">>, to_bin(BridgeType), to_bin(BridgeName)],
RawConfig,
undefined
),
[
{BridgeType, BridgeName, BridgeConf, ConnectorFields}
| bridge_configs_to_transform(BridgeType, Rest, ConnectorFields)
{BridgeType, BridgeName, BridgeConf, ConnectorFields, PreviousRawConfig}
| bridge_configs_to_transform(BridgeType, Rest, ConnectorFields, RawConfig)
];
false ->
bridge_configs_to_transform(BridgeType, Rest, ConnectorFields)
bridge_configs_to_transform(BridgeType, Rest, ConnectorFields, RawConfig)
end.
split_bridge_to_connector_and_action(
{ConnectorsMap, {BridgeType, BridgeName, BridgeConf, ConnectorFields}}
{ConnectorsMap, {BridgeType, BridgeName, BridgeConf, ConnectorFields, PreviousRawConfig}}
) ->
%% Get connector fields from bridge config
ConnectorMap = lists:foldl(
@ -120,8 +133,12 @@ split_bridge_to_connector_and_action(
BridgeConf,
ConnectorFields
),
%% Generate a connector name
ConnectorName = generate_connector_name(ConnectorsMap, BridgeName, 0),
%% Generate a connector name, if needed. Avoid doing so if there was a previous config.
ConnectorName =
case PreviousRawConfig of
#{<<"connector">> := ConnectorName0} -> ConnectorName0;
_ -> generate_connector_name(ConnectorsMap, BridgeName, 0)
end,
%% Add connector field to action map
ActionMap = maps:put(<<"connector">>, ConnectorName, ActionMap0),
{BridgeType, BridgeName, ActionMap, ConnectorName, ConnectorMap}.
@ -143,27 +160,24 @@ generate_connector_name(ConnectorsMap, BridgeName, Attempt) ->
end.
transform_old_style_bridges_to_connector_and_actions_of_type(
{ConnectorType, #{type := {map, name, {ref, ConnectorConfSchemaMod, ConnectorConfSchemaName}}}},
{ConnectorType, #{type := ?MAP(_Name, ?R_REF(ConnectorConfSchemaMod, ConnectorConfSchemaName))}},
RawConfig
) ->
ConnectorFields = ConnectorConfSchemaMod:fields(ConnectorConfSchemaName),
BridgeTypes = connector_type_to_bridge_types(ConnectorType),
BridgeTypes = ?MODULE:connector_type_to_bridge_types(ConnectorType),
BridgesConfMap = maps:get(<<"bridges">>, RawConfig, #{}),
ConnectorsConfMap = maps:get(<<"connectors">>, RawConfig, #{}),
BridgeConfigsToTransform1 =
lists:foldl(
fun(BridgeType, ToTranformSoFar) ->
BridgeConfigsToTransform =
lists:flatmap(
fun(BridgeType) ->
BridgeNameToBridgeMap = maps:get(to_bin(BridgeType), BridgesConfMap, #{}),
BridgeNameBridgeConfList = maps:to_list(BridgeNameToBridgeMap),
NewToTransform = bridge_configs_to_transform(
BridgeType, BridgeNameBridgeConfList, ConnectorFields
),
[NewToTransform, ToTranformSoFar]
bridge_configs_to_transform(
BridgeType, BridgeNameBridgeConfList, ConnectorFields, RawConfig
)
end,
[],
BridgeTypes
),
BridgeConfigsToTransform = lists:flatten(BridgeConfigsToTransform1),
ConnectorsWithTypeMap = maps:get(to_bin(ConnectorType), ConnectorsConfMap, #{}),
BridgeConfigsToTransformWithConnectorConf = lists:zip(
lists:duplicate(length(BridgeConfigsToTransform), ConnectorsWithTypeMap),
@ -187,7 +201,7 @@ transform_old_style_bridges_to_connector_and_actions_of_type(
[<<"bridges">>, to_bin(BridgeType), BridgeName],
RawConfigSoFar1
),
%% Add bridge_v2
%% Add action
RawConfigSoFar3 = emqx_utils_maps:deep_put(
[actions_config_name(), to_bin(maybe_rename(BridgeType)), BridgeName],
RawConfigSoFar2,
@ -200,7 +214,7 @@ transform_old_style_bridges_to_connector_and_actions_of_type(
).
transform_bridges_v1_to_connectors_and_bridges_v2(RawConfig) ->
ConnectorFields = fields(connectors),
ConnectorFields = ?MODULE:fields(connectors),
NewRawConf = lists:foldl(
fun transform_old_style_bridges_to_connector_and_actions_of_type/2,
RawConfig,
@ -292,3 +306,44 @@ to_bin(Bin) when is_binary(Bin) ->
Bin;
to_bin(Something) ->
Something.
-ifdef(TEST).
-include_lib("hocon/include/hocon_types.hrl").
schema_homogeneous_test() ->
case
lists:filtermap(
fun({_Name, Schema}) ->
is_bad_schema(Schema)
end,
fields(connectors)
)
of
[] ->
ok;
List ->
throw(List)
end.
is_bad_schema(#{type := ?MAP(_, ?R_REF(Module, TypeName))}) ->
Fields = Module:fields(TypeName),
ExpectedFieldNames = common_field_names(),
MissingFileds = lists:filter(
fun(Name) -> lists:keyfind(Name, 1, Fields) =:= false end, ExpectedFieldNames
),
case MissingFileds of
[] ->
false;
_ ->
{true, #{
schema_modle => Module,
type_name => TypeName,
missing_fields => MissingFileds
}}
end.
common_field_names() ->
[
enable, description
].
-endif.

View File

@ -22,7 +22,7 @@
-include_lib("common_test/include/ct.hrl").
-define(START_APPS, [emqx, emqx_conf, emqx_connector]).
-define(CONNECTOR, dummy_connector_impl).
-define(CONNECTOR, emqx_connector_dummy_impl).
all() ->
emqx_common_test_helpers:all(?MODULE).

View File

@ -172,8 +172,8 @@ mk_cluster(Name, Config, Opts) ->
Node2Apps = ?APPSPECS,
emqx_cth_cluster:start(
[
{emqx_bridge_api_SUITE_1, Opts#{role => core, apps => Node1Apps}},
{emqx_bridge_api_SUITE_2, Opts#{role => core, apps => Node2Apps}}
{emqx_connector_api_SUITE_1, Opts#{role => core, apps => Node1Apps}},
{emqx_connector_api_SUITE_2, Opts#{role => core, apps => Node2Apps}}
],
#{work_dir => filename:join(?config(priv_dir, Config), Name)}
).
@ -396,13 +396,13 @@ t_start_connector_unknown_node(Config) ->
Config
).
t_start_stop_connectors_node(Config) ->
do_start_stop_connectors(node, Config).
t_start_connector_node(Config) ->
do_start_connector(node, Config).
t_start_stop_connectors_cluster(Config) ->
do_start_stop_connectors(cluster, Config).
t_start_connector_cluster(Config) ->
do_start_connector(cluster, Config).
do_start_stop_connectors(TestType, Config) ->
do_start_connector(TestType, Config) ->
%% assert we there's no connectors at first
{ok, 200, []} = request_json(get, uri(["connectors"]), Config),
@ -424,6 +424,14 @@ do_start_stop_connectors(TestType, Config) ->
),
ConnectorID = emqx_connector_resource:connector_id(?CONNECTOR_TYPE, Name),
%% Starting a healthy connector shouldn't do any harm
{ok, 204, <<>>} = request(post, {operation, TestType, start, ConnectorID}, Config),
?assertMatch(
{ok, 200, #{<<"status">> := <<"connected">>}},
request_json(get, uri(["connectors", ConnectorID]), Config)
),
ExpectedStatus =
case ?config(group, Config) of
cluster when TestType == node ->
@ -433,7 +441,23 @@ do_start_stop_connectors(TestType, Config) ->
end,
%% stop it
{ok, 204, <<>>} = request(post, {operation, TestType, stop, ConnectorID}, Config),
case ?config(group, Config) of
cluster ->
case TestType of
node ->
Node = ?config(node, Config),
ok = rpc:call(
Node, emqx_connector_resource, stop, [?CONNECTOR_TYPE, Name], 500
);
cluster ->
Nodes = ?config(cluster_nodes, Config),
[{ok, ok}, {ok, ok}] = erpc:multicall(
Nodes, emqx_connector_resource, stop, [?CONNECTOR_TYPE, Name], 500
)
end;
_ ->
ok = emqx_connector_resource:stop(?CONNECTOR_TYPE, Name)
end,
?assertMatch(
{ok, 200, #{<<"status">> := ExpectedStatus}},
request_json(get, uri(["connectors", ConnectorID]), Config)
@ -444,27 +468,8 @@ do_start_stop_connectors(TestType, Config) ->
{ok, 200, #{<<"status">> := <<"connected">>}},
request_json(get, uri(["connectors", ConnectorID]), Config)
),
%% start a started connector
{ok, 204, <<>>} = request(post, {operation, TestType, start, ConnectorID}, Config),
?assertMatch(
{ok, 200, #{<<"status">> := <<"connected">>}},
request_json(get, uri(["connectors", ConnectorID]), Config)
),
%% restart an already started connector
{ok, 204, <<>>} = request(post, {operation, TestType, restart, ConnectorID}, Config),
?assertMatch(
{ok, 200, #{<<"status">> := <<"connected">>}},
request_json(get, uri(["connectors", ConnectorID]), Config)
),
%% stop it again
{ok, 204, <<>>} = request(post, {operation, TestType, stop, ConnectorID}, Config),
%% restart a stopped connector
{ok, 204, <<>>} = request(post, {operation, TestType, restart, ConnectorID}, Config),
?assertMatch(
{ok, 200, #{<<"status">> := <<"connected">>}},
request_json(get, uri(["connectors", ConnectorID]), Config)
),
%% test invalid op
{ok, 400, _} = request(post, {operation, TestType, invalidop, ConnectorID}, Config),
%% delete the connector
@ -506,43 +511,6 @@ do_start_stop_connectors(TestType, Config) ->
ok = gen_tcp:close(Sock),
ok.
t_start_stop_inconsistent_connector_node(Config) ->
start_stop_inconsistent_connector(node, Config).
t_start_stop_inconsistent_connector_cluster(Config) ->
start_stop_inconsistent_connector(cluster, Config).
start_stop_inconsistent_connector(Type, Config) ->
Node = ?config(node, Config),
erpc:call(Node, fun() ->
meck:new(emqx_connector_resource, [passthrough, no_link]),
meck:expect(
emqx_connector_resource,
stop,
fun
(_, <<"connector_not_found">>) -> {error, not_found};
(ConnectorType, Name) -> meck:passthrough([ConnectorType, Name])
end
)
end),
emqx_common_test_helpers:on_exit(fun() ->
erpc:call(Node, fun() ->
meck:unload([emqx_connector_resource])
end)
end),
{ok, 201, _Connector} = request(
post,
uri(["connectors"]),
?KAFKA_CONNECTOR(<<"connector_not_found">>),
Config
),
{ok, 503, _} = request(
post, {operation, Type, stop, <<"kafka_producer:connector_not_found">>}, Config
).
t_enable_disable_connectors(Config) ->
%% assert we there's no connectors at first
{ok, 200, []} = request_json(get, uri(["connectors"]), Config),

View File

@ -0,0 +1,31 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
%% this module is only intended to be mocked
-module(emqx_connector_dummy_impl).
-export([
    callback_mode/0,
    on_start/2,
    on_stop/2,
    on_add_channel/4,
    on_get_channel_status/3
]).
%% Placeholder resource-callback stubs. Every function crashes with
%% `unexpected' so that any callback that has not been replaced by a
%% mock expectation in a test fails loudly instead of silently
%% returning a value.
callback_mode() -> error(unexpected).
on_start(_, _) -> error(unexpected).
on_stop(_, _) -> error(unexpected).
on_add_channel(_, _, _, _) -> error(unexpected).
on_get_channel_status(_, _, _) -> error(unexpected).

View File

@ -45,7 +45,7 @@ paths() ->
%% This is a rather hidden API, so we don't need to add translations for the description.
schema("/schemas/:name") ->
Schemas = [hotconf, bridges, bridges_v2, connectors],
Schemas = [hotconf, bridges, actions, connectors],
#{
'operationId' => get_schema,
get => #{
@ -79,13 +79,14 @@ gen_schema(hotconf) ->
emqx_conf:hotconf_schema_json();
gen_schema(bridges) ->
emqx_conf:bridge_schema_json();
gen_schema(bridges_v2) ->
bridge_v2_schema_json();
gen_schema(actions) ->
actions_schema_json();
gen_schema(connectors) ->
connectors_schema_json().
bridge_v2_schema_json() ->
SchemaInfo = #{title => <<"EMQX Data Bridge V2 API Schema">>, version => <<"0.1.0">>},
actions_schema_json() ->
SchemaInfo = #{title => <<"EMQX Data Actions API Schema">>, version => <<"0.1.0">>},
%% Note: this will be moved to `emqx_actions' application in the future.
gen_api_schema_json_iodata(emqx_bridge_v2_api, SchemaInfo).
connectors_schema_json() ->

View File

@ -33,6 +33,21 @@
-define(LOCAL_PROHIBITED, [halt, q]).
-define(REMOTE_PROHIBITED, [{erlang, halt}, {c, q}, {init, stop}, {init, restart}, {init, reboot}]).
%% Evaluate `apply(Fn, Args)' at most once per process (per warning
%% fun): when the fun returns `true' (a warning was emitted), the fun
%% value itself is stored as a flag in the process dictionary and
%% subsequent expansions for the same fun become no-ops.
-define(WARN_ONCE(Fn, Args),
    case get(Fn) of
        true ->
            %% this process already warned for this Fn
            ok;
        _ ->
            case apply(Fn, Args) of
                true ->
                    %% warning fired; remember so we do not warn again
                    put(Fn, true),
                    ok;
                false ->
                    %% nothing to warn about; may fire later
                    ok
            end
    end
).
is_locked() ->
{ok, false} =/= application:get_env(?APP, ?IS_LOCKED).
@ -72,31 +87,31 @@ is_allowed(prohibited) -> false;
is_allowed(_) -> true.
limit_warning(MF, Args) ->
max_heap_size_warning(MF, Args),
max_args_warning(MF, Args).
?WARN_ONCE(fun max_heap_size_warning/2, [MF, Args]),
?WARN_ONCE(fun max_args_warning/2, [MF, Args]).
max_args_warning(MF, Args) ->
ArgsSize = erts_debug:flat_size(Args),
case ArgsSize < ?MAX_ARGS_SIZE of
case ArgsSize > ?MAX_ARGS_SIZE of
true ->
ok;
false ->
warning("[WARNING] current_args_size:~w, max_args_size:~w", [ArgsSize, ?MAX_ARGS_SIZE]),
?SLOG(warning, #{
msg => "execute_function_in_shell_max_args_size",
function => MF,
%%args => Args,
args_size => ArgsSize,
max_heap_size => ?MAX_ARGS_SIZE
})
max_heap_size => ?MAX_ARGS_SIZE,
pid => self()
}),
true;
false ->
false
end.
max_heap_size_warning(MF, Args) ->
{heap_size, HeapSize} = erlang:process_info(self(), heap_size),
case HeapSize < ?MAX_HEAP_SIZE of
case HeapSize > ?MAX_HEAP_SIZE of
true ->
ok;
false ->
warning("[WARNING] current_heap_size:~w, max_heap_size_warning:~w", [
HeapSize, ?MAX_HEAP_SIZE
]),
@ -105,8 +120,12 @@ max_heap_size_warning(MF, Args) ->
current_heap_size => HeapSize,
function => MF,
args => pp_args(Args),
max_heap_size => ?MAX_HEAP_SIZE
})
max_heap_size => ?MAX_HEAP_SIZE,
pid => self()
}),
true;
false ->
false
end.
log(_, {?MODULE, prompt_func}, [[{history, _}]]) ->

View File

@ -22,7 +22,7 @@
-type resource_spec() :: map().
-type resource_state() :: term().
-type resource_status() :: connected | disconnected | connecting | stopped.
-type channel_status() :: connected | connecting.
-type channel_status() :: connected | connecting | disconnected.
-type callback_mode() :: always_sync | async_if_possible.
-type query_mode() ::
simple_sync
@ -47,7 +47,7 @@
simple_query => boolean(),
reply_to => reply_fun(),
query_mode => query_mode(),
query_mode_cache_override => boolean()
connector_resource_id => resource_id()
}.
-type resource_data() :: #{
id := resource_id(),

View File

@ -139,6 +139,13 @@
-export([apply_reply_fun/2]).
%% common validations
-export([
parse_resource_id/2,
validate_type/1,
validate_name/1
]).
-export_type([
query_mode/0,
resource_id/0,
@ -200,6 +207,7 @@
-callback on_get_channel_status(resource_id(), channel_id(), resource_state()) ->
channel_status()
| {channel_status(), Reason :: term()}
| {error, term()}.
-callback query_mode(Config :: term()) -> query_mode().
@ -371,32 +379,32 @@ query(ResId, Request) ->
-spec query(resource_id(), Request :: term(), query_opts()) ->
Result :: term().
query(ResId, Request, Opts) ->
case get_query_mode_error(ResId, Opts) of
case emqx_resource_manager:get_query_mode_and_last_error(ResId, Opts) of
{error, _} = ErrorTuple ->
ErrorTuple;
{_, unhealthy_target} ->
{ok, {_, unhealthy_target}} ->
emqx_resource_metrics:matched_inc(ResId),
emqx_resource_metrics:dropped_resource_stopped_inc(ResId),
?RESOURCE_ERROR(unhealthy_target, "unhealthy target");
{_, {unhealthy_target, _Message}} ->
{ok, {_, {unhealthy_target, Message}}} ->
emqx_resource_metrics:matched_inc(ResId),
emqx_resource_metrics:dropped_resource_stopped_inc(ResId),
?RESOURCE_ERROR(unhealthy_target, "unhealthy target");
{simple_async, _} ->
?RESOURCE_ERROR(unhealthy_target, Message);
{ok, {simple_async, _}} ->
%% TODO(5.1.1): pass Resource instead of ResId to simple APIs
%% so the buffer worker does not need to lookup the cache again
emqx_resource_buffer_worker:simple_async_query(ResId, Request, Opts);
{simple_sync, _} ->
{ok, {simple_sync, _}} ->
%% TODO(5.1.1): pass Resource instead of ResId to simple APIs
%% so the buffer worker does not need to lookup the cache again
emqx_resource_buffer_worker:simple_sync_query(ResId, Request, Opts);
{simple_async_internal_buffer, _} ->
{ok, {simple_async_internal_buffer, _}} ->
%% This is for bridges/connectors that have internal buffering, such
%% as Kafka and Pulsar producers.
%% TODO(5.1.1): pass Resource instead of ResId to simple APIs
%% so the buffer worker does not need to lookup the cache again
emqx_resource_buffer_worker:simple_async_query(ResId, Request, Opts);
{simple_sync_internal_buffer, _} ->
{ok, {simple_sync_internal_buffer, _}} ->
%% This is for bridges/connectors that have internal buffering, such
%% as Kafka and Pulsar producers.
%% TODO(5.1.1): pass Resource instead of ResId to simple APIs
@ -404,30 +412,12 @@ query(ResId, Request, Opts) ->
emqx_resource_buffer_worker:simple_sync_internal_buffer_query(
ResId, Request, Opts
);
{sync, _} ->
{ok, {sync, _}} ->
emqx_resource_buffer_worker:sync_query(ResId, Request, Opts);
{async, _} ->
{ok, {async, _}} ->
emqx_resource_buffer_worker:async_query(ResId, Request, Opts)
end.
get_query_mode_error(ResId, Opts) ->
case maps:get(query_mode_cache_override, Opts, true) of
false ->
case Opts of
#{query_mode := QueryMode} ->
{QueryMode, ok};
_ ->
{async, unhealthy_target}
end;
true ->
case emqx_resource_manager:lookup_cached(ResId) of
{ok, _Group, #{query_mode := QM, error := Error}} ->
{QM, Error};
{error, not_found} ->
{error, not_found}
end
end.
-spec simple_sync_query(resource_id(), Request :: term()) -> Result :: term().
simple_sync_query(ResId, Request) ->
emqx_resource_buffer_worker:simple_sync_query(ResId, Request).
@ -457,7 +447,7 @@ health_check(ResId) ->
emqx_resource_manager:health_check(ResId).
-spec channel_health_check(resource_id(), channel_id()) ->
{ok, resource_status()} | {error, term()}.
#{status := channel_status(), error := term(), any() => any()}.
channel_health_check(ResId, ChannelId) ->
emqx_resource_manager:channel_health_check(ResId, ChannelId).
@ -534,6 +524,7 @@ call_health_check(ResId, Mod, ResourceState) ->
-spec call_channel_health_check(resource_id(), channel_id(), module(), resource_state()) ->
channel_status()
| {channel_status(), Reason :: term()}
| {error, term()}.
call_channel_health_check(ResId, ChannelId, Mod, ResourceState) ->
?SAFE_CALL(Mod:on_get_channel_status(ResId, ChannelId, ResourceState)).
@ -774,3 +765,79 @@ clean_allocated_resources(ResourceId, ResourceMod) ->
false ->
ok
end.
%% @doc Split a ":"-separated resource id into its type and name parts.
%% Type must be an existing atom (see to_type_atom/1).
%% Name is converted to an atom if the `atom_name` option is true.
%% Throws a validation error for any other shape.
-spec parse_resource_id(list() | binary(), #{atom_name => boolean()}) ->
    {atom(), atom() | binary()}.
parse_resource_id(Id0, Opts) ->
    %% Normalize once up front; the original redundantly re-applied
    %% bin/1 to the already-binary Id inside the split call.
    Id = bin(Id0),
    case string:split(Id, ":", all) of
        [Type, Name] ->
            {to_type_atom(Type), validate_name(Name, Opts)};
        _ ->
            invalid_data(
                <<"should be of pattern {type}:{name}, but got: ", Id/binary>>
            )
    end.
%% Convert a binary resource type to an already-existing atom.
%% An unknown type (no such atom exists) is reported as a
%% validation error rather than creating a new atom.
to_type_atom(Type) when is_binary(Type) ->
    try
        erlang:binary_to_existing_atom(Type, utf8)
    catch
        _:_ ->
            throw(#{
                kind => validation_error,
                reason => <<"unknown resource type: ", Type/binary>>
            })
    end.
%% @doc Validate if type is valid.
%% Throws a JSON-map error if invalid.
-spec validate_type(binary()) -> ok.
validate_type(Type) ->
    %% The conversion itself is the check; the resulting atom is discarded.
    _ = to_type_atom(Type),
    ok.
%% Coerce an atom, charlist, or binary into a binary.
bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8);
bin(Str) when is_list(Str) -> list_to_binary(Str);
bin(Bin) when is_binary(Bin) -> Bin.
%% @doc Validate if name is valid for bridge.
%% Throws a JSON-map error if invalid.
-spec validate_name(binary()) -> ok.
validate_name(Name) ->
    %% Validate only: atom_name => false keeps the name a binary so
    %% this check does not create atoms as a side effect.
    _ = validate_name(Name, #{atom_name => false}),
    ok.
%% Validate a resource name: non-empty, shorter than 255 bytes, and
%% restricted to [0-9a-zA-Z_-]. Returns the name as an atom when the
%% `atom_name' option is true (the default), otherwise the original
%% binary. Throws a validation error on any violation.
validate_name(<<>>, _Opts) ->
    invalid_data("name cannot be empty string");
validate_name(Name, _Opts) when size(Name) >= 255 ->
    invalid_data("name length must be less than 255");
validate_name(Name0, Opts) ->
    %% Decode to a codepoint list so each character can be checked.
    Name = unicode:characters_to_list(Name0, utf8),
    case lists:all(fun is_id_char/1, Name) of
        true ->
            case maps:get(atom_name, Opts, true) of
                % NOTE
                % Rule may be created before bridge, thus not `list_to_existing_atom/1`,
                % also it is infrequent user input anyway.
                true -> list_to_atom(Name);
                false -> Name0
            end;
        false ->
            invalid_data(
                <<"only 0-9a-zA-Z_- is allowed in resource name, got: ", Name0/binary>>
            )
    end.
%% Throw a JSON-serializable validation error with the given reason.
-spec invalid_data(binary()) -> no_return().
invalid_data(Reason) -> throw(#{kind => validation_error, reason => Reason}).
%% True for the characters permitted in a resource name:
%% ASCII digits, letters, underscore, and hyphen.
is_id_char(C) ->
    (C >= $0 andalso C =< $9) orelse
        (C >= $a andalso C =< $z) orelse
        (C >= $A andalso C =< $Z) orelse
        C =:= $_ orelse
        C =:= $-.

View File

@ -1087,7 +1087,7 @@ call_query(QM, Id, Index, Ref, Query, QueryOpts) ->
?RESOURCE_ERROR(not_found, "resource not found")
end.
%% bridge_v2:kafka_producer:myproducer1:connector:kafka_producer:mykakfaclient1
%% action:kafka_producer:myproducer1:connector:kafka_producer:mykakfaclient1
extract_connector_id(Id) when is_binary(Id) ->
case binary:split(Id, <<":">>, [global]) of
[
@ -1112,11 +1112,21 @@ is_channel_id(Id) ->
%% There is no need to query the conncector if the channel is not
%% installed as the query will fail anyway.
pre_query_channel_check({Id, _} = _Request, Channels) when
is_map_key(Id, Channels),
(map_get(Id, Channels) =:= connected orelse map_get(Id, Channels) =:= connecting)
is_map_key(Id, Channels)
->
ok;
ChannelStatus = maps:get(Id, Channels),
case emqx_resource_manager:channel_status_is_channel_added(ChannelStatus) of
true ->
ok;
false ->
maybe_throw_channel_not_installed(Id)
end;
pre_query_channel_check({Id, _} = _Request, _Channels) ->
maybe_throw_channel_not_installed(Id);
pre_query_channel_check(_Request, _Channels) ->
ok.
maybe_throw_channel_not_installed(Id) ->
%% Fail with a recoverable error if the channel is not installed
%% so that the operation can be retried. It is emqx_resource_manager's
%% responsibility to ensure that the channel installation is retried.
@ -1128,9 +1138,7 @@ pre_query_channel_check({Id, _} = _Request, _Channels) ->
);
false ->
ok
end;
pre_query_channel_check(_Request, _Channels) ->
ok.
end.
do_call_query(QM, Id, Index, Ref, Query, QueryOpts, #{query_mode := ResQM} = Resource) when
ResQM =:= simple_sync_internal_buffer; ResQM =:= simple_async_internal_buffer

View File

@ -44,7 +44,9 @@
list_group/1,
lookup_cached/1,
get_metrics/1,
reset_metrics/1
reset_metrics/1,
channel_status_is_channel_added/1,
get_query_mode_and_last_error/2
]).
-export([
@ -74,6 +76,7 @@
extra
}).
-type data() :: #data{}.
-type channel_status_map() :: #{status := channel_status(), error := term()}.
-define(NAME(ResId), {n, l, {?MODULE, ResId}}).
-define(REF(ResId), {via, gproc, ?NAME(ResId)}).
@ -306,7 +309,7 @@ health_check(ResId) ->
safe_call(ResId, health_check, ?T_OPERATION).
-spec channel_health_check(resource_id(), channel_id()) ->
{ok, resource_status()} | {error, term()}.
#{status := channel_status(), error := term(), any() => any()}.
channel_health_check(ResId, ChannelId) ->
%% Do normal health check first to trigger health checks for channels
%% and update the cached health status for the channels
@ -314,7 +317,10 @@ channel_health_check(ResId, ChannelId) ->
safe_call(ResId, {channel_health_check, ChannelId}, ?T_OPERATION).
add_channel(ResId, ChannelId, Config) ->
safe_call(ResId, {add_channel, ChannelId, Config}, ?T_OPERATION).
Result = safe_call(ResId, {add_channel, ChannelId, Config}, ?T_OPERATION),
%% Wait for health_check to finish
_ = health_check(ResId),
Result.
remove_channel(ResId, ChannelId) ->
safe_call(ResId, {remove_channel, ChannelId}, ?T_OPERATION).
@ -322,6 +328,46 @@ remove_channel(ResId, ChannelId) ->
get_channels(ResId) ->
safe_call(ResId, get_channels, ?T_OPERATION).
-spec get_query_mode_and_last_error(resource_id(), query_opts()) ->
{ok, {query_mode(), LastError}} | {error, not_found}
when
LastError ::
unhealthy_target
| {unhealthy_target, binary()}
| channel_status_map()
| term().
get_query_mode_and_last_error(RequestResId, Opts = #{connector_resource_id := ResId}) ->
do_get_query_mode_error(ResId, RequestResId, Opts);
get_query_mode_and_last_error(RequestResId, Opts) ->
do_get_query_mode_error(RequestResId, RequestResId, Opts).
do_get_query_mode_error(ResId, RequestResId, Opts) ->
case emqx_resource_manager:lookup_cached(ResId) of
{ok, _Group, ResourceData} ->
QM = get_query_mode(ResourceData, Opts),
Error = get_error(RequestResId, ResourceData),
{ok, {QM, Error}};
{error, not_found} ->
{error, not_found}
end.
get_query_mode(_ResourceData, #{query_mode := QM}) ->
QM;
get_query_mode(#{query_mode := QM}, _Opts) ->
QM.
get_error(ResId, #{added_channels := #{} = Channels} = ResourceData) when
is_map_key(ResId, Channels)
->
case maps:get(ResId, Channels) of
#{error := Error} ->
Error;
_ ->
maps:get(error, ResourceData, undefined)
end;
get_error(_ResId, #{error := Error}) ->
Error.
%% Server start/stop callbacks
%% @doc Function called from the supervisor to actually start the server
@ -420,6 +466,10 @@ handle_event(internal, start_resource, connecting, Data) ->
start_resource(Data, undefined);
handle_event(state_timeout, health_check, connecting, Data) ->
handle_connecting_health_check(Data);
handle_event(
{call, From}, {remove_channel, ChannelId}, connecting = _State, Data
) ->
handle_remove_channel(From, ChannelId, Data);
%% State: CONNECTED
%% The connected state is entered after a successful on_start/2 of the callback mod
%% and successful health_checks
@ -459,7 +509,7 @@ handle_event(
handle_event(
{call, From}, {remove_channel, ChannelId}, _State, Data
) ->
handle_not_connected_remove_channel(From, ChannelId, Data);
handle_not_connected_and_not_connecting_remove_channel(From, ChannelId, Data);
handle_event(
{call, From}, get_channels, _State, Data
) ->
@ -570,7 +620,7 @@ add_channels(Data) ->
Channels = Data#data.added_channels,
NewChannels = lists:foldl(
fun({ChannelID, _Conf}, Acc) ->
maps:put(ChannelID, {error, connecting}, Acc)
maps:put(ChannelID, channel_status(), Acc)
end,
Channels,
ChannelIDConfigTuples
@ -589,7 +639,11 @@ add_channels_in_list([{ChannelID, ChannelConfig} | Rest], Data) ->
AddedChannelsMap = Data#data.added_channels,
%% Set the channel status to connecting to indicate that
%% we have not yet performed the initial health_check
NewAddedChannelsMap = maps:put(ChannelID, connecting, AddedChannelsMap),
NewAddedChannelsMap = maps:put(
ChannelID,
channel_status_new_waiting_for_health_check(),
AddedChannelsMap
),
NewData = Data#data{
state = NewState,
added_channels = NewAddedChannelsMap
@ -603,7 +657,11 @@ add_channels_in_list([{ChannelID, ChannelConfig} | Rest], Data) ->
reason => Reason
}),
AddedChannelsMap = Data#data.added_channels,
NewAddedChannelsMap = maps:put(ChannelID, Error, AddedChannelsMap),
NewAddedChannelsMap = maps:put(
ChannelID,
channel_status(Error),
AddedChannelsMap
),
NewData = Data#data{
added_channels = NewAddedChannelsMap
},
@ -680,65 +738,44 @@ make_test_id() ->
RandId = iolist_to_binary(emqx_utils:gen_id(16)),
<<?TEST_ID_PREFIX, RandId/binary>>.
handle_add_channel(From, Data, ChannelId, ChannelConfig) ->
handle_add_channel(From, Data, ChannelId, Config) ->
Channels = Data#data.added_channels,
case maps:get(ChannelId, Channels, {error, not_added}) of
{error, _Reason} ->
case
channel_status_is_channel_added(
maps:get(
ChannelId,
Channels,
channel_status()
)
)
of
false ->
%% The channel is not installed in the connector state
%% We need to install it
handle_add_channel_need_insert(From, Data, ChannelId, Data, ChannelConfig);
_ ->
%% We insert it into the channels map and let the health check
%% take care of the rest
NewChannels = maps:put(ChannelId, channel_status_new_with_config(Config), Channels),
NewData = Data#data{added_channels = NewChannels},
{keep_state, update_state(NewData, Data), [
{reply, From, ok}
]};
true ->
%% The channel is already installed in the connector state
%% We don't need to install it again
{keep_state_and_data, [{reply, From, ok}]}
end.
handle_add_channel_need_insert(From, Data, ChannelId, Data, ChannelConfig) ->
NewData = add_channel_need_insert_update_data(Data, ChannelId, ChannelConfig),
%% Trigger a health check to raise alarm if channel is not healthy
{keep_state, NewData, [{reply, From, ok}, {state_timeout, 0, health_check}]}.
add_channel_need_insert_update_data(Data, ChannelId, ChannelConfig) ->
case
emqx_resource:call_add_channel(
Data#data.id, Data#data.mod, Data#data.state, ChannelId, ChannelConfig
)
of
{ok, NewState} ->
AddedChannelsMap = Data#data.added_channels,
%% Setting channel status to connecting to indicate that an health check
%% has not been performed yet
NewAddedChannelsMap = maps:put(ChannelId, connecting, AddedChannelsMap),
UpdatedData = Data#data{
state = NewState,
added_channels = NewAddedChannelsMap
},
update_state(UpdatedData, Data);
{error, _Reason} = Error ->
ChannelsMap = Data#data.added_channels,
NewChannelsMap = maps:put(ChannelId, Error, ChannelsMap),
UpdatedData = Data#data{
added_channels = NewChannelsMap
},
update_state(UpdatedData, Data)
end.
handle_not_connected_add_channel(From, ChannelId, State, Data) ->
%% When state is not connected the channel will be added to the channels
%% map but nothing else will happen.
Channels = Data#data.added_channels,
NewChannels = maps:put(ChannelId, {error, resource_not_operational}, Channels),
NewData1 = Data#data{added_channels = NewChannels},
%% Do channel health check to trigger alarm
NewData2 = channels_health_check(State, NewData1),
{keep_state, update_state(NewData2, Data), [{reply, From, ok}]}.
NewData = add_channel_status_if_not_exists(Data, ChannelId, State),
{keep_state, update_state(NewData, Data), [{reply, From, ok}]}.
handle_remove_channel(From, ChannelId, Data) ->
Channels = Data#data.added_channels,
%% Deactivate alarm
_ = maybe_clear_alarm(ChannelId),
case maps:get(ChannelId, Channels, {error, not_added}) of
{error, _} ->
case channel_status_is_channel_added(maps:get(ChannelId, Channels, channel_status())) of
false ->
%% The channel is already not installed in the connector state.
%% We still need to remove it from the added_channels map
AddedChannels = Data#data.added_channels,
@ -747,7 +784,7 @@ handle_remove_channel(From, ChannelId, Data) ->
added_channels = NewAddedChannels
},
{keep_state, NewData, [{reply, From, ok}]};
_ ->
true ->
%% The channel is installed in the connector state
handle_remove_channel_exists(From, ChannelId, Data)
end.
@ -777,9 +814,10 @@ handle_remove_channel_exists(From, ChannelId, Data) ->
{keep_state_and_data, [{reply, From, Error}]}
end.
handle_not_connected_remove_channel(From, ChannelId, Data) ->
%% When state is not connected the channel will be removed from the channels
%% map but nothing else will happen.
handle_not_connected_and_not_connecting_remove_channel(From, ChannelId, Data) ->
%% When state is not connected and not connecting the channel will be removed
%% from the channels map but nothing else will happen since the channel
%% is not addded/installed in the resource state.
Channels = Data#data.added_channels,
NewChannels = maps:remove(ChannelId, Channels),
NewData = Data#data{added_channels = NewChannels},
@ -796,7 +834,7 @@ handle_manually_health_check(From, Data) ->
).
handle_manually_channel_health_check(From, #data{state = undefined}, _ChannelId) ->
{keep_state_and_data, [{reply, From, {ok, disconnected}}]};
{keep_state_and_data, [{reply, From, channel_status({error, resource_disconnected})}]};
handle_manually_channel_health_check(
From,
#data{added_channels = Channels} = _Data,
@ -810,10 +848,11 @@ handle_manually_channel_health_check(
_Data,
_ChannelId
) ->
{keep_state_and_data, [{reply, From, {error, channel_not_found}}]}.
{keep_state_and_data, [{reply, From, channel_status({error, channel_not_found})}]}.
get_channel_status_channel_added(#data{id = ResId, mod = Mod, state = State}, ChannelId) ->
emqx_resource:call_channel_health_check(ResId, ChannelId, Mod, State).
RawStatus = emqx_resource:call_channel_health_check(ResId, ChannelId, Mod, State),
channel_status(RawStatus).
handle_connecting_health_check(Data) ->
with_health_check(
@ -833,9 +872,9 @@ handle_connected_health_check(Data) ->
with_health_check(
Data,
fun
(connected, UpdatedData) ->
{keep_state, channels_health_check(connected, UpdatedData),
health_check_actions(UpdatedData)};
(connected, UpdatedData0) ->
UpdatedData1 = channels_health_check(connected, UpdatedData0),
{keep_state, UpdatedData1, health_check_actions(UpdatedData1)};
(Status, UpdatedData) ->
?SLOG(warning, #{
msg => "health_check_failed",
@ -861,20 +900,59 @@ with_health_check(#data{error = PrevError} = Data, Func) ->
channels_health_check(connected = _ResourceStatus, Data0) ->
Channels = maps:to_list(Data0#data.added_channels),
%% All channels with an error status are considered not added
%% All channels with a stutus different from connected or connecting are
%% not added
ChannelsNotAdded = [
ChannelId
|| {ChannelId, Status} <- Channels,
not is_channel_added(Status)
not channel_status_is_channel_added(Status)
],
%% Attempt to add channels that are not added
ChannelsNotAddedWithConfigs = get_config_for_channels(Data0, ChannelsNotAdded),
ChannelsNotAddedWithConfigs =
get_config_for_channels(Data0, ChannelsNotAdded),
Data1 = add_channels_in_list(ChannelsNotAddedWithConfigs, Data0),
%% Now that we have done the adding, we can get the status of all channels
Data2 = channel_status_for_all_channels(Data1),
update_state(Data2, Data0);
channels_health_check(connecting, Data0) ->
%% Whenever the resource is connecting:
%% 1. Change the status of all added channels to connecting
%% 2. Raise alarms (TODO: if it is a probe we should not raise alarms)
Channels = Data0#data.added_channels,
ChannelsToChangeStatusFor = [
ChannelId
|| {ChannelId, Status} <- maps:to_list(Channels),
channel_status_is_channel_added(Status)
],
ChannelsWithNewStatuses =
[
{ChannelId, channel_status({connecting, resource_is_connecting})}
|| ChannelId <- ChannelsToChangeStatusFor
],
%% Update the channels map
NewChannels = lists:foldl(
fun({ChannelId, NewStatus}, Acc) ->
maps:update(ChannelId, NewStatus, Acc)
end,
Channels,
ChannelsWithNewStatuses
),
ChannelsWithNewAndPrevErrorStatuses =
[
{ChannelId, NewStatus, maps:get(ChannelId, Channels)}
|| {ChannelId, NewStatus} <- maps:to_list(NewChannels)
],
%% Raise alarms for all channels
lists:foreach(
fun({ChannelId, Status, PrevStatus}) ->
maybe_alarm(connecting, ChannelId, Status, PrevStatus)
end,
ChannelsWithNewAndPrevErrorStatuses
),
Data1 = Data0#data{added_channels = NewChannels},
update_state(Data1, Data0);
channels_health_check(ResourceStatus, Data0) ->
%% Whenever the resource is not connected:
%% Whenever the resource is not connected and not connecting:
%% 1. Remove all added channels
%% 2. Change the status to an error status
%% 3. Raise alarms
@ -882,13 +960,20 @@ channels_health_check(ResourceStatus, Data0) ->
ChannelsToRemove = [
ChannelId
|| {ChannelId, Status} <- maps:to_list(Channels),
is_channel_added(Status)
channel_status_is_channel_added(Status)
],
Data1 = remove_channels_in_list(ChannelsToRemove, Data0, true),
ChannelsWithNewAndOldStatuses =
[
{ChannelId, OldStatus,
{error, resource_not_connected_channel_error_msg(ResourceStatus, ChannelId, Data1)}}
channel_status(
{error,
resource_not_connected_channel_error_msg(
ResourceStatus,
ChannelId,
Data1
)}
)}
|| {ChannelId, OldStatus} <- maps:to_list(Data1#data.added_channels)
],
%% Raise alarms
@ -928,18 +1013,19 @@ channel_status_for_all_channels(Data) ->
AddedChannelsWithOldAndNewStatus = [
{ChannelId, OldStatus, get_channel_status_channel_added(Data, ChannelId)}
|| {ChannelId, OldStatus} <- Channels,
is_channel_added(OldStatus)
channel_status_is_channel_added(OldStatus)
],
%% Remove the added channels with a new error statuses
%% Remove the added channels with a a status different from connected or connecting
ChannelsToRemove = [
ChannelId
|| {ChannelId, _, {error, _}} <- AddedChannelsWithOldAndNewStatus
|| {ChannelId, _, NewStatus} <- AddedChannelsWithOldAndNewStatus,
not channel_status_is_channel_added(NewStatus)
],
Data1 = remove_channels_in_list(ChannelsToRemove, Data, true),
%% Raise/clear alarms
lists:foreach(
fun
({ID, _OldStatus, connected}) ->
({ID, _OldStatus, #{status := connected}}) ->
_ = maybe_clear_alarm(ID);
({ID, OldStatus, NewStatus}) ->
_ = maybe_alarm(NewStatus, ID, NewStatus, OldStatus)
@ -958,18 +1044,14 @@ channel_status_for_all_channels(Data) ->
),
Data1#data{added_channels = NewChannelsMap}.
is_channel_added({error, _}) ->
false;
is_channel_added(_) ->
true.
get_config_for_channels(Data0, ChannelsWithoutConfig) ->
ResId = Data0#data.id,
Mod = Data0#data.mod,
Channels = emqx_resource:call_get_channels(ResId, Mod),
ChannelIdToConfig = maps:from_list(Channels),
ChannelStatusMap = Data0#data.added_channels,
ChannelsWithConfig = [
{Id, maps:get(Id, ChannelIdToConfig, no_config)}
{Id, get_config_from_map_or_channel_status(Id, ChannelIdToConfig, ChannelStatusMap)}
|| Id <- ChannelsWithoutConfig
],
%% Filter out channels without config
@ -979,6 +1061,16 @@ get_config_for_channels(Data0, ChannelsWithoutConfig) ->
Conf =/= no_config
].
get_config_from_map_or_channel_status(ChannelId, ChannelIdToConfig, ChannelStatusMap) ->
ChannelStatus = maps:get(ChannelId, ChannelStatusMap, #{}),
case maps:get(config, ChannelStatus, undefined) of
undefined ->
%% Channel config
maps:get(ChannelId, ChannelIdToConfig, no_config);
Config ->
Config
end.
update_state(Data) ->
update_state(Data, undefined).
@ -1098,3 +1190,86 @@ safe_call(ResId, Message, Timeout) ->
exit:{timeout, _} ->
{error, timeout}
end.
%% Helper functions for channel status data
%% Default status map for a channel that has not been added yet.
%%
%% The status field can be one of:
%% - disconnected: the channel is not added to the resource
%%   (error may contain the reason)
%% - connecting: the channel has been added to the resource state but
%%   either the resource status is connecting or the
%%   on_channel_get_status callback has returned connecting
%% - connected: the channel is added to the resource, the resource is
%%   connected and the on_channel_get_status callback has returned
%%   connected. The error field should be undefined.
channel_status() ->
    #{status => disconnected, error => not_added_yet}.
%% Initial status for a channel added with add_channel/2: same fields as
%% channel_status/0 plus the channel config. Keeping the config here is
%% useful when probing, since in that case it is not stored anywhere else.
channel_status_new_with_config(Config) ->
    #{status => disconnected, error => not_added_yet, config => Config}.
%% Status for a channel that has been installed but whose first health
%% check has not completed yet: connecting, with a sentinel error.
channel_status_new_waiting_for_health_check() ->
    #{status => connecting, error => no_health_check_yet}.
%% Normalize a status term (as returned by connector callbacks) into the
%% canonical channel status map; see channel_status/0 for the field
%% semantics.
channel_status(connected) ->
    #{status => connected, error => undefined};
channel_status({connected, Error}) ->
    %% Probably not so useful but it is permitted to set an error even
    %% when the status is connected.
    #{status => connected, error => Error};
channel_status(connecting) ->
    #{status => connecting, error => <<"Not connected for unknown reason">>};
channel_status({connecting, Error}) ->
    #{status => connecting, error => Error};
channel_status({error, Reason}) ->
    %% An error means the channel is effectively not added.
    #{status => disconnected, error => Reason}.
%% A channel counts as added to the resource iff its status is connected
%% or connecting; any other status map (or non-map term) means it is not
%% installed in the resource state.
channel_status_is_channel_added(#{status := Status}) when
    Status =:= connected; Status =:= connecting
->
    true;
channel_status_is_channel_added(_Status) ->
    false.
%% Ensure ChannelId has an entry in Data's added_channels map. A missing
%% channel is recorded as disconnected (resource_not_operational) and an
%% alarm may be raised for it; an existing entry is left untouched.
add_channel_status_if_not_exists(Data, ChannelId, State) ->
    #data{added_channels = Channels} = Data,
    case Channels of
        #{ChannelId := _} ->
            %% Already tracked; nothing to do.
            Data;
        _ ->
            Status = channel_status({error, resource_not_operational}),
            _ = maybe_alarm(State, ChannelId, Status, no_prev),
            Data#data{added_channels = Channels#{ChannelId => Status}}
    end.

View File

@ -460,12 +460,11 @@ code_change(_OldVsn, State, _Extra) ->
with_parsed_rule(Params = #{id := RuleId, sql := Sql, actions := Actions}, CreatedAt, Fun) ->
case emqx_rule_sqlparser:parse(Sql) of
{ok, Select} ->
Rule = #{
Rule0 = #{
id => RuleId,
name => maps:get(name, Params, <<"">>),
created_at => CreatedAt,
updated_at => now_ms(),
enable => maps:get(enable, Params, true),
sql => Sql,
actions => parse_actions(Actions),
description => maps:get(description, Params, ""),
@ -478,6 +477,19 @@ with_parsed_rule(Params = #{id := RuleId, sql := Sql, actions := Actions}, Creat
conditions => emqx_rule_sqlparser:select_where(Select)
%% -- calculated fields end
},
InputEnable = maps:get(enable, Params, true),
case validate_bridge_existence_in_actions(Rule0) of
ok ->
ok;
{error, NonExistentBridgeIDs} ->
?SLOG(error, #{
msg => "action_references_nonexistent_bridges",
rule_id => RuleId,
nonexistent_bridge_ids => NonExistentBridgeIDs,
hint => "this rule will be disabled"
})
end,
Rule = Rule0#{enable => InputEnable},
ok = Fun(Rule),
{ok, Rule};
{error, Reason} ->
@ -593,3 +605,42 @@ extra_functions_module() ->
set_extra_functions_module(Mod) ->
persistent_term:put({?MODULE, extra_functions}, Mod),
ok.
%% Checks that every bridge referenced by the rule — both via the FROM
%% hookpoints and via the (already parsed) actions — exists. Returns ok,
%% or {error, #{nonexistent_bridge_ids => [...]}} listing the missing
%% ones; such a rule shouldn't be allowed to be enabled.
validate_bridge_existence_in_actions(#{actions := Actions, from := Froms} = _Rule) ->
    %% Bridge IDs referenced from "$bridges/..." hookpoints.
    FromIDs = [
        emqx_bridge_resource:parse_bridge_id(BridgeID, #{atom_name => false})
     || BridgeID <- get_referenced_hookpoints(Froms)
    ],
    %% Bridge IDs referenced from bridge/bridge_v2 actions; other action
    %% kinds (e.g. function actions) are ignored.
    ActionIDs = lists:filtermap(
        fun
            ({bridge_v2, Type, Name}) -> {true, {Type, Name}};
            ({bridge, Type, Name, _ResId}) -> {true, {Type, Name}};
            (_) -> false
        end,
        Actions
    ),
    case [ID || ID <- FromIDs ++ ActionIDs, not bridge_ref_exists(ID)] of
        [] -> ok;
        NonExistentBridgeIDs -> {error, #{nonexistent_bridge_ids => NonExistentBridgeIDs}}
    end.

%% true iff the bridge {Type, Name} can be looked up; any crash during
%% lookup is treated as the bridge not existing.
bridge_ref_exists({Type, Name}) ->
    try
        case emqx_bridge:lookup(Type, Name) of
            {ok, _} -> true;
            {error, _} -> false
        end
    catch
        _:_ -> false
    end.

View File

@ -522,7 +522,7 @@ format_action(Actions) ->
do_format_action({bridge, BridgeType, BridgeName, _ResId}) ->
emqx_bridge_resource:bridge_id(BridgeType, BridgeName);
do_format_action({bridge_v2, BridgeType, BridgeName}) ->
emqx_bridge_resource:bridge_id(BridgeType, BridgeName);
emqx_bridge_resource:bridge_id(emqx_bridge_lib:downgrade_type(BridgeType), BridgeName);
do_format_action(#{mod := Mod, func := Func, args := Args}) ->
#{
function => printable_function_name(Mod, Func),

View File

@ -253,6 +253,10 @@ init_per_testcase(t_events, Config) ->
),
?assertMatch(#{id := <<"rule:t_events">>}, Rule),
[{hook_points_rules, Rule} | Config];
init_per_testcase(t_get_basic_usage_info_1, Config) ->
meck:new(emqx_bridge, [passthrough, no_link, no_history]),
meck:expect(emqx_bridge, lookup, fun(_Type, _Name) -> {ok, #{mocked => true}} end),
Config;
init_per_testcase(_TestCase, Config) ->
Config.
@ -261,6 +265,10 @@ end_per_testcase(t_events, Config) ->
ok = delete_rule(?config(hook_points_rules, Config)),
emqx_common_test_helpers:call_janitor(),
ok;
end_per_testcase(t_get_basic_usage_info_1, _Config) ->
meck:unload(),
emqx_common_test_helpers:call_janitor(),
ok;
end_per_testcase(_TestCase, _Config) ->
emqx_common_test_helpers:call_janitor(),
ok.

View File

@ -310,6 +310,20 @@ t_rule_engine(_) ->
}),
{400, _} = emqx_rule_engine_api:'/rule_engine'(put, #{body => #{<<"something">> => <<"weird">>}}).
%% Verifies that action IDs in rule API responses keep the downgraded
%% (v1-style) bridge type: a rule created with a "kafka:name" action
%% must read back with the same ID from both endpoints.
t_downgrade_bridge_type(_) ->
    #{id := RuleId} = create_rule((?SIMPLE_RULE(<<>>))#{<<"actions">> => [<<"kafka:name">>]}),
    ?assertMatch(
        %% the list endpoint renders the action ID with the downgraded type
        {200, #{data := [#{actions := [<<"kafka:name">>]}]}},
        emqx_rule_engine_api:'/rules'(get, #{query_string => #{}})
    ),
    ?assertMatch(
        %% the single-rule endpoint renders it the same way
        {200, #{actions := [<<"kafka:name">>]}},
        emqx_rule_engine_api:'/rules/:id'(get, #{bindings => #{id => RuleId}})
    ),
    ok.
rules_fixture(N) ->
lists:map(
fun(Seq0) ->

View File

@ -781,8 +781,8 @@ setup_fake_rule_engine_data() ->
[
#{function => <<"erlang:hibernate">>, args => #{}},
#{function => console},
<<"webhook:my_webhook">>,
<<"webhook:my_webhook">>
<<"webhook:basic_usage_info_webhook">>,
<<"webhook:basic_usage_info_webhook_disabled">>
]
}
),
@ -793,8 +793,8 @@ setup_fake_rule_engine_data() ->
sql => <<"select 1 from topic">>,
actions =>
[
<<"mqtt:my_mqtt_bridge">>,
<<"webhook:my_webhook">>
<<"mqtt:basic_usage_info_mqtt">>,
<<"webhook:basic_usage_info_webhook">>
]
}
),
@ -802,7 +802,7 @@ setup_fake_rule_engine_data() ->
emqx_rule_engine:create_rule(
#{
id => <<"rule:t_get_basic_usage_info:3">>,
sql => <<"select 1 from \"$bridges/mqtt:mqtt_in\"">>,
sql => <<"select 1 from \"$bridges/mqtt:basic_usage_info_mqtt\"">>,
actions =>
[
#{function => console}

View File

@ -33,7 +33,8 @@
diff_maps/2,
best_effort_recursive_sum/3,
if_only_to_toggle_enable/2,
update_if_present/3
update_if_present/3,
put_if/4
]).
-export_type([config_key/0, config_key_path/0]).
@ -303,3 +304,8 @@ update_if_present(Key, Fun, Map) ->
_ ->
Map
end.
%% Conditionally insert K => V into the map Acc: when the condition is
%% true the entry is added (overwriting any existing value for K),
%% otherwise Acc is returned unchanged.
put_if(Acc, K, V, true) ->
    maps:put(K, V, Acc);
put_if(Acc, _K, _V, false) ->
    Acc.

View File

@ -0,0 +1,2 @@
Fix excessive warning message printing in the remote console shell.

View File

@ -0,0 +1,26 @@
Preview Feature: Support for Version 2 Bridge Design
- Introduction of Action with a new 'connector' concept
- In the original Bridge v1 design, each connector was exclusively tied to a single bridge.
This design prioritized error isolation and performance but posed challenges for users setting up multiple bridges to the same service.
For instance, setting up 10 bridges to a single Kafka cluster required the same configuration to be repeated for each bridge.
- The revamped Action design provides more flexibility and better scalability:
- Users have the option to either share a connector across multiple bridges or retain it exclusively for one bridge, as in v1.
    - For the majority of data bridges, sharing a connector among many bridges may lead to performance degradation,
      but it avoids overloading the external system with too many connections when the number of bridges is very high.
- In some cases, specific data bridges always utilize dedicated connections, even when the connector is shared.
Right now these are:
- Kafka Producer
- Azure Event Hub Producer
- Management of Connectors
- Connectors can now be managed separately, bringing in more flexibility.
- New API endpoints have been introduced under the `/connectors` path for connector management.
- Actions can be managed via the `/actions` endpoint.
- Limitations in e5.3.1
- Currently, only the Kafka and Azure Event Hub producer bridges have been upgraded to the action design.
- The action feature is accessible through config files and HTTP APIs. However, it's not yet available on the dashboard UI.

View File

@ -14,8 +14,8 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 5.3.1-alpha.2
version: 5.3.1-alpha.4
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 5.3.1-alpha.2
appVersion: 5.3.1-alpha.4

View File

@ -72,7 +72,7 @@ defmodule EMQXUmbrella.MixProject do
# in conflict by emqtt and hocon
{:getopt, "1.0.2", override: true},
{:snabbkaffe, github: "kafka4beam/snabbkaffe", tag: "1.0.8", override: true},
{:hocon, github: "emqx/hocon", tag: "0.39.16", override: true},
{:hocon, github: "emqx/hocon", tag: "0.39.19", override: true},
{:emqx_http_lib, github: "emqx/emqx_http_lib", tag: "0.5.3", override: true},
{:esasl, github: "emqx/esasl", tag: "0.2.0"},
{:jose, github: "potatosalad/erlang-jose", tag: "1.11.2"},

View File

@ -75,7 +75,7 @@
, {system_monitor, {git, "https://github.com/ieQu1/system_monitor", {tag, "3.0.3"}}}
, {getopt, "1.0.2"}
, {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.8"}}}
, {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.16"}}}
, {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.19"}}}
, {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.3"}}}
, {esasl, {git, "https://github.com/emqx/esasl", {tag, "0.2.0"}}}
, {jose, {git, "https://github.com/potatosalad/erlang-jose", {tag, "1.11.2"}}}

View File

@ -195,10 +195,10 @@ bridge_v2_type.label:
bridge_v2_type.desc:
"""The type of the bridge."""
bridge_v2.label:
"""Bridge v2 Config"""
bridge_v2.desc:
"""The configuration for a bridge v2."""
actions.label:
"""Action Config"""
actions.desc:
"""The configuration for an action."""
buffer_memory_overload_protection.desc:
"""Applicable when buffer mode is set to <code>memory</code>

View File

@ -283,13 +283,6 @@ config_enable.desc:
config_enable.label:
"""Enable or Disable"""
config_connector.desc:
"""Reference to connector"""
config_connector.label:
"""Connector"""
consumer_mqtt_payload.desc:
"""The template for transforming the incoming Kafka message. By default, it will use JSON format to serialize inputs from the Kafka message. Such fields are:
<code>headers</code>: an object containing string key-value pairs.
@ -316,10 +309,10 @@ kafka_consumer.label:
"""Kafka Consumer"""
desc_config.desc:
"""Configuration for a Kafka bridge."""
"""Configuration for a Kafka Producer Client."""
desc_config.label:
"""Kafka Bridge Configuration"""
"""Kafka Producer Client Configuration"""
consumer_value_encoding_mode.desc:
"""Defines how the value from the Kafka message is encoded before being forwarded via MQTT.

View File

@ -37,20 +37,19 @@ desc_api6.label:
"""Reset Bridge Metrics"""
desc_api7.desc:
"""Stop/restart bridges on all nodes in the cluster."""
"""Start bridge on all nodes in the cluster."""
desc_api7.label:
"""Cluster Bridge Operate"""
"""Cluster Bridge Operation"""
desc_api8.desc:
"""Stop/restart bridges on a specific node."""
"""Start bridge on a specific node."""
desc_api8.label:
"""Node Bridge Operate"""
"""Node Bridge Operation"""
desc_api9.desc:
"""Test creating a new bridge by given id.</br>
The id must be of format '{type}:{name}'."""
"""Test creating a new bridge."""
desc_api9.label:
"""Test Bridge Creation"""
@ -62,7 +61,7 @@ desc_bridge_metrics.label:
"""Get Bridge Metrics"""
desc_enable_bridge.desc:
"""Enable or Disable bridges on all nodes in the cluster."""
"""Enable or Disable bridge on all nodes in the cluster."""
desc_enable_bridge.label:
"""Cluster Bridge Enable"""
@ -79,6 +78,12 @@ desc_param_path_id.desc:
desc_param_path_id.label:
"""Bridge ID"""
desc_qs_also_delete_dep_actions.desc:
"""Whether to cascade delete dependent actions."""
desc_qs_also_delete_dep_actions.label:
"""Cascade delete dependent actions?"""
desc_param_path_node.desc:
"""The node name, e.g. 'emqx@127.0.0.1'."""
@ -86,13 +91,13 @@ desc_param_path_node.label:
"""The node name"""
desc_param_path_operation_cluster.desc:
"""Operations can be one of: 'start'."""
"""Operation can be one of: 'start'."""
desc_param_path_operation_cluster.label:
"""Cluster Operation"""
desc_param_path_operation_on_node.desc:
"""Operations can be one of: 'start'."""
"""Operation can be one of: 'start'."""
desc_param_path_operation_on_node.label:
"""Node Operation """

View File

@ -37,20 +37,19 @@ desc_api6.label:
"""Reset Connector Metrics"""
desc_api7.desc:
"""Stop/restart connectors on all nodes in the cluster."""
"""Start connector on all nodes in the cluster."""
desc_api7.label:
"""Cluster Connector Operate"""
desc_api8.desc:
"""Stop/restart connectors on a specific node."""
"""Start connector on a specific node."""
desc_api8.label:
"""Node Connector Operate"""
desc_api9.desc:
"""Test creating a new connector by given id.</br>
The id must be of format '{type}:{name}'."""
"""Test creating a new connector."""
desc_api9.label:
"""Test Connector Creation"""
@ -62,7 +61,7 @@ desc_connector_metrics.label:
"""Get Connector Metrics"""
desc_enable_connector.desc:
"""Enable or Disable connectors on all nodes in the cluster."""
"""Enable or Disable connector on all nodes in the cluster."""
desc_enable_connector.label:
"""Cluster Connector Enable"""
@ -86,13 +85,13 @@ desc_param_path_node.label:
"""The node name"""
desc_param_path_operation_cluster.desc:
"""Operations can be one of: 'start' or 'stop'."""
"""Operation can be one of: 'start'."""
desc_param_path_operation_cluster.label:
"""Cluster Operation"""
desc_param_path_operation_on_node.desc:
"""Operations can be one of: 'start' or 'start'."""
"""Operation can be one of: 'start'."""
desc_param_path_operation_on_node.label:
"""Node Operation """

View File

@ -1571,4 +1571,9 @@ the system topic <code>$SYS/sysmon/large_heap</code>."""
sysmon_vm_large_heap.label:
"""Enable Large Heap monitoring."""
description.label:
"""Description"""
description.desc:
"""Descriptive text."""
}

View File

@ -123,6 +123,9 @@ if [ -z "${PROFILE+x}" ]; then
apps/emqx_dashboard)
export PROFILE='emqx-enterprise'
;;
apps/emqx_rule_engine)
export PROFILE='emqx-enterprise'
;;
apps/*)
if [[ -f "${WHICH_APP}/BSL.txt" ]]; then
export PROFILE='emqx-enterprise'

View File

@ -97,6 +97,10 @@ matrix() {
entries+=("$(format_app_entry "$app" 1 emqx "$runner")")
entries+=("$(format_app_entry "$app" 1 emqx-enterprise "$runner")")
;;
apps/emqx_rule_engine)
entries+=("$(format_app_entry "$app" 1 emqx "$runner")")
entries+=("$(format_app_entry "$app" 1 emqx-enterprise "$runner")")
;;
apps/*)
if [[ -f "${app}/BSL.txt" ]]; then
profile='emqx-enterprise'